From 4872d7b4a9a3f1ce504057519e8af32b989a7916 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 15 Oct 2020 09:09:09 -0700 Subject: [PATCH 001/761] cleanup Intel RDT readme (#8268) * cleanup Intel RDT readme * Update intel_rdt.go --- plugins/inputs/intel_rdt/README.md | 93 ++++++++++++++------------- plugins/inputs/intel_rdt/intel_rdt.go | 2 +- 2 files changed, 51 insertions(+), 44 deletions(-) diff --git a/plugins/inputs/intel_rdt/README.md b/plugins/inputs/intel_rdt/README.md index 1a6e55f6a7fb9..8a0f0a1ea6e75 100644 --- a/plugins/inputs/intel_rdt/README.md +++ b/plugins/inputs/intel_rdt/README.md @@ -1,19 +1,26 @@ # Intel RDT Input Plugin -The intel_rdt plugin collects information provided by monitoring features of -Intel Resource Director Technology (Intel(R) RDT) like Cache Monitoring Technology (CMT), -Memory Bandwidth Monitoring (MBM), Cache Allocation Technology (CAT) and Code -and Data Prioritization (CDP) Technology provide the hardware framework to monitor -and control the utilization of shared resources, like last level cache, memory bandwidth. -These Technologies comprise Intel’s Resource Director Technology (RDT). -As multithreaded and multicore platform architectures emerge, -running workloads in single-threaded, multithreaded, or complex virtual machine environment, -the last level cache and memory bandwidth are key resources to manage. Intel introduces CMT, -MBM, CAT and CDP to manage these various workloads across shared resources. - -To gather Intel RDT metrics plugin uses _pqos_ cli tool which is a part of [Intel(R) RDT Software Package](https://github.com/intel/intel-cmt-cat). +The `intel_rdt` plugin collects information provided by monitoring features of +the Intel Resource Director Technology (Intel(R) RDT). Intel RDT provides the hardware framework to monitor +and control the utilization of shared resources (ex: last level cache, memory bandwidth). + +### About Intel RDT +Intel’s Resource Director Technology (RDT) framework consists of: +- Cache Monitoring Technology (CMT) +- Memory Bandwidth Monitoring (MBM) +- Cache Allocation Technology (CAT) +- Code and Data Prioritization (CDP) + +As multithreaded and multicore platform architectures emerge, the last level cache and +memory bandwidth are key resources to manage for running workloads in single-threaded, +multithreaded, or complex virtual machine environments. Intel introduces CMT, MBM, CAT +and CDP to manage these workloads across shared resources. + +### Prerequsities - PQoS Tool +To gather Intel RDT metrics, the `intel_rdt` plugin uses _pqos_ cli tool which is a +part of [Intel(R) RDT Software Package](https://github.com/intel/intel-cmt-cat). Before using this plugin please be sure _pqos_ is properly installed and configured regarding that the plugin run _pqos_ to work with `OS Interface` mode. This plugin supports _pqos_ version 4.0.0 and above. -Be aware pqos tool needs root privileges to work properly. +Note: pqos tool needs root privileges to work properly. Metrics will be constantly reported from the following `pqos` commands within the given interval: @@ -46,29 +53,29 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t ### Configuration ```toml # Read Intel RDT metrics -[[inputs.IntelRDT]] - ## Optionally set sampling interval to Nx100ms. - ## This value is propagated to pqos tool. Interval format is defined by pqos itself. - ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. - # sampling_interval = "10" +[[inputs.intel_rdt]] + ## Optionally set sampling interval to Nx100ms. + ## This value is propagated to pqos tool. Interval format is defined by pqos itself. + ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. + # sampling_interval = "10" - ## Optionally specify the path to pqos executable. - ## If not provided, auto discovery will be performed. - # pqos_path = "/usr/local/bin/pqos" + ## Optionally specify the path to pqos executable. + ## If not provided, auto discovery will be performed. + # pqos_path = "/usr/local/bin/pqos" - ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. - ## If not provided, default value is false. - # shortened_metrics = false - - ## Specify the list of groups of CPU core(s) to be provided as pqos input. - ## Mandatory if processes aren't set and forbidden if processes are specified. - ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] - # cores = ["0-3"] - - ## Specify the list of processes for which Metrics will be collected. - ## Mandatory if cores aren't set and forbidden if cores are specified. - ## e.g. ["qemu", "pmd"] - # processes = ["process"] + ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. + ## If not provided, default value is false. + # shortened_metrics = false + + ## Specify the list of groups of CPU core(s) to be provided as pqos input. + ## Mandatory if processes aren't set and forbidden if processes are specified. + ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] + # cores = ["0-3"] + + ## Specify the list of processes for which Metrics will be collected. + ## Mandatory if cores aren't set and forbidden if cores are specified. + ## e.g. ["qemu", "pmd"] + # processes = ["process"] ``` ### Exposed metrics @@ -78,20 +85,20 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t | MBR | Memory Bandwidth on Remote NUMA Node | Memory bandwidth utilization by the relevant CPU core/process on the remote NUMA memory channel | | MBT | Total Memory Bandwidth | Total memory bandwidth utilized by a CPU core/process on local and remote NUMA memory channels | | LLC | L3 Cache Occupancy | Total Last Level Cache occupancy by a CPU core/process | -| *LLC_Misses | L3 Cache Misses | Total Last Level Cache misses by a CPU core/process | -| *IPC | Instructions Per Cycle | Total instructions per cycle executed by a CPU core/process | +| LLC_Misses* | L3 Cache Misses | Total Last Level Cache misses by a CPU core/process | +| IPC* | Instructions Per Cycle | Total instructions per cycle executed by a CPU core/process | *optional ### Troubleshooting -Pointing to non-existing core will lead to throwing an error by _pqos_ and plugin will not work properly. -Be sure to check if provided core number exists within desired system. +Pointing to non-existing cores will lead to throwing an error by _pqos_ and the plugin will not work properly. +Be sure to check provided core number exists within desired system. -Be aware reading Intel RDT metrics by _pqos_ cannot be done simultaneously on the same resource. -So be sure to not use any other _pqos_ instance which is monitoring the same cores or PIDs within working system. -Also there is no possibility to monitor same cores or PIDs on different groups. +Be aware, reading Intel RDT metrics by _pqos_ cannot be done simultaneously on the same resource. +Do not use any other _pqos_ instance that is monitoring the same cores or PIDs within the working system. +It is not possible to monitor same cores or PIDs on different groups. -Pids association for the given process could be manually checked by `pidof` command. E.g: +PIDs associated for the given process could be manually checked by `pidof` command. E.g: ``` pidof PROCESS ``` @@ -105,4 +112,4 @@ where `PROCESS` is process name. > rdt_metric,cores=12\,19,host=r2-compute-20,name=MBL,process=top value=0 1598962030000000000 > rdt_metric,cores=12\,19,host=r2-compute-20,name=MBR,process=top value=0 1598962030000000000 > rdt_metric,cores=12\,19,host=r2-compute-20,name=MBT,process=top value=0 1598962030000000000 -``` \ No newline at end of file +``` diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index e61266c0a4f6b..bcbc1c72a9597 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -541,7 +541,7 @@ func makeRange(min, max int) []int { } func init() { - inputs.Add("IntelRDT", func() telegraf.Input { + inputs.Add("intel_rdt", func() telegraf.Input { rdt := IntelRDT{} pathPqos, _ := exec.LookPath("pqos") if len(pathPqos) > 0 { From 796b3b8d412ce50dce3550c23477d58eebbbce7f Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 15 Oct 2020 13:46:13 -0400 Subject: [PATCH 002/761] fix issue with loading processor config from execd (#8274) --- plugins/common/shim/config.go | 10 ++++++- plugins/common/shim/config_test.go | 29 +++++++++++++++++++++ plugins/common/shim/testdata/processor.conf | 2 ++ 3 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 plugins/common/shim/testdata/processor.conf diff --git a/plugins/common/shim/config.go b/plugins/common/shim/config.go index d5d1910964e7c..439ec90a16283 100644 --- a/plugins/common/shim/config.go +++ b/plugins/common/shim/config.go @@ -116,7 +116,11 @@ func createPluginsWithTomlConfig(md toml.MetaData, conf config) (loadedConfig, e plugin := creator() if len(primitives) > 0 { primitive := primitives[0] - if err := md.PrimitiveDecode(primitive, plugin); err != nil { + var p telegraf.PluginDescriber = plugin + if processor, ok := plugin.(unwrappable); ok { + p = processor.Unwrap() + } + if err := md.PrimitiveDecode(primitive, p); err != nil { return loadedConf, err } } @@ -169,3 +173,7 @@ func DefaultImportedPlugins() (config, error) { } return conf, nil } + +type unwrappable interface { + Unwrap() telegraf.Processor +} diff --git a/plugins/common/shim/config_test.go b/plugins/common/shim/config_test.go index be4ee4140feb5..97d2004200b44 100644 --- a/plugins/common/shim/config_test.go +++ b/plugins/common/shim/config_test.go @@ -8,6 +8,7 @@ import ( "github.com/influxdata/telegraf" tgConfig "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/processors" "github.com/stretchr/testify/require" ) @@ -55,6 +56,19 @@ func TestLoadingSpecialTypes(t *testing.T) { require.EqualValues(t, 3*1000*1000, inp.Size) } +func TestLoadingProcessorWithConfig(t *testing.T) { + proc := &testConfigProcessor{} + processors.Add("test_config_load", func() telegraf.Processor { + return proc + }) + + c := "./testdata/processor.conf" + _, err := LoadConfig(&c) + require.NoError(t, err) + + require.EqualValues(t, "yep", proc.Loaded) +} + type testDurationInput struct { Duration tgConfig.Duration `toml:"duration"` Size tgConfig.Size `toml:"size"` @@ -70,3 +84,18 @@ func (i *testDurationInput) Description() string { func (i *testDurationInput) Gather(acc telegraf.Accumulator) error { return nil } + +type testConfigProcessor struct { + Loaded string `toml:"loaded"` +} + +func (p *testConfigProcessor) SampleConfig() string { + return "" +} + +func (p *testConfigProcessor) Description() string { + return "" +} +func (p *testConfigProcessor) Apply(metrics ...telegraf.Metric) []telegraf.Metric { + return metrics +} diff --git a/plugins/common/shim/testdata/processor.conf b/plugins/common/shim/testdata/processor.conf new file mode 100644 index 0000000000000..d45cc659d75a2 --- /dev/null +++ b/plugins/common/shim/testdata/processor.conf @@ -0,0 +1,2 @@ +[[processors.test_config_load]] + loaded = "yep" \ No newline at end of file From 7c2c2c5d8b49277655c908cc55f7e33d7106cc5b Mon Sep 17 00:00:00 2001 From: piotrwest Date: Thu, 15 Oct 2020 12:51:17 -0500 Subject: [PATCH 003/761] Add the Timestream Output Plugin (#8239) Co-authored-by: Piotr Westfalewicz --- config/aws/credentials.go | 6 +- go.mod | 4 +- go.sum | 12 +- plugins/outputs/all/all.go | 1 + plugins/outputs/timestream/README.md | 152 ++++ plugins/outputs/timestream/timestream.go | 608 ++++++++++++++ .../timestream/timestream_internal_test.go | 92 +++ plugins/outputs/timestream/timestream_test.go | 742 ++++++++++++++++++ 8 files changed, 1609 insertions(+), 8 deletions(-) create mode 100644 plugins/outputs/timestream/README.md create mode 100644 plugins/outputs/timestream/timestream.go create mode 100644 plugins/outputs/timestream/timestream_internal_test.go create mode 100644 plugins/outputs/timestream/timestream_test.go diff --git a/config/aws/credentials.go b/config/aws/credentials.go index 1e4f91b132a3b..f9c98edbf0a4f 100644 --- a/config/aws/credentials.go +++ b/config/aws/credentials.go @@ -29,8 +29,10 @@ func (c *CredentialConfig) Credentials() client.ConfigProvider { func (c *CredentialConfig) rootCredentials() client.ConfigProvider { config := &aws.Config{ - Region: aws.String(c.Region), - Endpoint: &c.EndpointURL, + Region: aws.String(c.Region), + } + if c.EndpointURL != "" { + config.Endpoint = &c.EndpointURL } if c.AccessKey != "" || c.SecretKey != "" { config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token) diff --git a/go.mod b/go.mod index 2cae7859a7d66..474f8512b6280 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-metrics v0.3.0 // indirect - github.com/aws/aws-sdk-go v1.33.12 + github.com/aws/aws-sdk-go v1.34.34 github.com/benbjohnson/clock v1.0.3 github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 @@ -151,7 +151,7 @@ require ( gopkg.in/ldap.v3 v3.1.0 gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/olivere/elastic.v5 v5.0.70 - gopkg.in/yaml.v2 v2.2.5 + gopkg.in/yaml.v2 v2.2.8 gotest.tools v2.2.0+incompatible // indirect honnef.co/go/tools v0.0.1-2020.1.3 // indirect k8s.io/apimachinery v0.17.1 // indirect diff --git a/go.sum b/go.sum index 5973e475a3d35..a81c02d0ed8bb 100644 --- a/go.sum +++ b/go.sum @@ -114,8 +114,8 @@ github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1: github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= -github.com/aws/aws-sdk-go v1.33.12 h1:eydMoSwfrSTD9PWKUJOiDL7+/UwDW8AjInUGVE5Llh4= -github.com/aws/aws-sdk-go v1.33.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYKI= +github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= @@ -354,8 +354,10 @@ github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGk github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= -github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME= @@ -893,6 +895,8 @@ gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index a1ac7762156f5..f81aa9d71b072 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -37,6 +37,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/stackdriver" _ "github.com/influxdata/telegraf/plugins/outputs/sumologic" _ "github.com/influxdata/telegraf/plugins/outputs/syslog" + _ "github.com/influxdata/telegraf/plugins/outputs/timestream" _ "github.com/influxdata/telegraf/plugins/outputs/warp10" _ "github.com/influxdata/telegraf/plugins/outputs/wavefront" ) diff --git a/plugins/outputs/timestream/README.md b/plugins/outputs/timestream/README.md new file mode 100644 index 0000000000000..4c35df04e9c9b --- /dev/null +++ b/plugins/outputs/timestream/README.md @@ -0,0 +1,152 @@ +# Timestream Output Plugin + +The Timestream output plugin writes metrics to the [Amazon Timestream] service. + +### Configuration + +```toml +# Configuration for sending metrics to Amazon Timestream. +[[outputs.timestream]] + ## Amazon Region + region = "us-east-1" + + ## Amazon Credentials + ## Credentials are loaded in the following order: + ## 1) Assumed credentials via STS if role_arn is specified + ## 2) Explicit credentials from 'access_key' and 'secret_key' + ## 3) Shared profile from 'profile' + ## 4) Environment variables + ## 5) Shared credentials file + ## 6) EC2 Instance Profile + #access_key = "" + #secret_key = "" + #token = "" + #role_arn = "" + #profile = "" + #shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + + ## Timestream database where the metrics will be inserted. + ## The database must exist prior to starting Telegraf. + database_name = "yourDatabaseNameHere" + + ## Specifies if the plugin should describe the Timestream database upon starting + ## to validate if it has access necessary permissions, connection, etc., as a safety check. + ## If the describe operation fails, the plugin will not start + ## and therefore the Telegraf agent will not start. + describe_database_on_start = false + + ## The mapping mode specifies how Telegraf records are represented in Timestream. + ## Valid values are: single-table, multi-table. + ## For example, consider the following data in line protocol format: + ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200 + ## airquality,location=us-west no2=5,pm25=16 1465839830100400200 + ## where weather and airquality are the measurement names, location and season are tags, + ## and temperature, humidity, no2, pm25 are fields. + ## In multi-table mode: + ## - first line will be ingested to table named weather + ## - second line will be ingested to table named airquality + ## - the tags will be represented as dimensions + ## - first table (weather) will have two records: + ## one with measurement name equals to temperature, + ## another with measurement name equals to humidity + ## - second table (airquality) will have two records: + ## one with measurement name equals to no2, + ## another with measurement name equals to pm25 + ## - the Timestream tables from the example will look like this: + ## TABLE "weather": + ## time | location | season | measure_name | measure_value::bigint + ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82 + ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71 + ## TABLE "airquality": + ## time | location | measure_name | measure_value::bigint + ## 2016-06-13 17:43:50 | us-west | no2 | 5 + ## 2016-06-13 17:43:50 | us-west | pm25 | 16 + ## In single-table mode: + ## - the data will be ingested to a single table, which name will be valueOf(single_table_name) + ## - measurement name will stored in dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name) + ## - location and season will be represented as dimensions + ## - temperature, humidity, no2, pm25 will be represented as measurement name + ## - the Timestream table from the example will look like this: + ## Assuming: + ## - single_table_name = "my_readings" + ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace" + ## TABLE "my_readings": + ## time | location | season | namespace | measure_name | measure_value::bigint + ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82 + ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71 + ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5 + ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16 + ## In most cases, using multi-table mapping mode is recommended. + ## However, you can consider using single-table in situations when you have thousands of measurement names. + mapping_mode = "multi-table" + + ## Only valid and required for mapping_mode = "single-table" + ## Specifies the Timestream table where the metrics will be uploaded. + # single_table_name = "yourTableNameHere" + + ## Only valid and required for mapping_mode = "single-table" + ## Describes what will be the Timestream dimension name for the Telegraf + ## measurement name. + # single_table_dimension_name_for_telegraf_measurement_name = "namespace" + + ## Specifies if the plugin should create the table, if the table do not exist. + ## The plugin writes the data without prior checking if the table exists. + ## When the table does not exist, the error returned from Timestream will cause + ## the plugin to create the table, if this parameter is set to true. + create_table_if_not_exists = true + + ## Only valid and required if create_table_if_not_exists = true + ## Specifies the Timestream table magnetic store retention period in days. + ## Check Timestream documentation for more details. + create_table_magnetic_store_retention_period_in_days = 365 + + ## Only valid and required if create_table_if_not_exists = true + ## Specifies the Timestream table memory store retention period in hours. + ## Check Timestream documentation for more details. + create_table_memory_store_retention_period_in_hours = 24 + + ## Only valid and optional if create_table_if_not_exists = true + ## Specifies the Timestream table tags. + ## Check Timestream documentation for more details + # create_table_tags = { "foo" = "bar", "environment" = "dev"} +``` + +### Batching + +Timestream WriteInputRequest.CommonAttributes are used to efficiently write data to Timestream. + +### Multithreading + +Single thread is used to write the data to Timestream, following general plugin design pattern. + +### Errors + +In case of an attempt to write an unsupported by Timestream Telegraf Field type, the field is dropped and error is emitted to the logs. + +In case of receiving ThrottlingException or InternalServerException from Timestream, the errors are returned to Telegraf, in which case Telegraf will keep the metrics in buffer and retry writing those metrics on the next flush. + +In case of receiving ResourceNotFoundException: + - If `create_table_if_not_exists` configuration is set to `true`, the plugin will try to create appropriate table and write the records again, if the table creation was successful. + - If `create_table_if_not_exists` configuration is set to `false`, the records are dropped, and an error is emitted to the logs. + +In case of receiving any other AWS error from Timestream, the records are dropped, and an error is emitted to the logs, as retrying such requests isn't likely to succeed. + +### Logging + +Turn on debug flag in the Telegraf to turn on detailed logging (including records being written to Timestream). + +### Testing + +Execute unit tests with: + +``` +go test -v ./plugins/outputs/timestream/... +``` + +[Amazon Timestream]: https://aws.amazon.com/timestream/ \ No newline at end of file diff --git a/plugins/outputs/timestream/timestream.go b/plugins/outputs/timestream/timestream.go new file mode 100644 index 0000000000000..8af7c56656e9f --- /dev/null +++ b/plugins/outputs/timestream/timestream.go @@ -0,0 +1,608 @@ +package timestream + +import ( + "encoding/binary" + "fmt" + "hash/fnv" + "reflect" + "strconv" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/timestreamwrite" + internalaws "github.com/influxdata/telegraf/config/aws" +) + +type ( + Timestream struct { + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` + + MappingMode string `toml:"mapping_mode"` + DescribeDatabaseOnStart bool `toml:"describe_database_on_start"` + DatabaseName string `toml:"database_name"` + + SingleTableName string `toml:"single_table_name"` + SingleTableDimensionNameForTelegrafMeasurementName string `toml:"single_table_dimension_name_for_telegraf_measurement_name"` + + CreateTableIfNotExists bool `toml:"create_table_if_not_exists"` + CreateTableMagneticStoreRetentionPeriodInDays int64 `toml:"create_table_magnetic_store_retention_period_in_days"` + CreateTableMemoryStoreRetentionPeriodInHours int64 `toml:"create_table_memory_store_retention_period_in_hours"` + CreateTableTags map[string]string `toml:"create_table_tags"` + + Log telegraf.Logger + svc WriteClient + } + + WriteClient interface { + CreateTable(*timestreamwrite.CreateTableInput) (*timestreamwrite.CreateTableOutput, error) + WriteRecords(*timestreamwrite.WriteRecordsInput) (*timestreamwrite.WriteRecordsOutput, error) + DescribeDatabase(*timestreamwrite.DescribeDatabaseInput) (*timestreamwrite.DescribeDatabaseOutput, error) + } +) + +// Mapping modes specify how Telegraf model should be represented in Timestream model. +// See sample config for more details. +const ( + MappingModeSingleTable = "single-table" + MappingModeMultiTable = "multi-table" +) + +// MaxRecordsPerCall reflects Timestream limit of WriteRecords API call +const MaxRecordsPerCall = 100 + +var sampleConfig = ` + ## Amazon Region + region = "us-east-1" + + ## Amazon Credentials + ## Credentials are loaded in the following order: + ## 1) Assumed credentials via STS if role_arn is specified + ## 2) Explicit credentials from 'access_key' and 'secret_key' + ## 3) Shared profile from 'profile' + ## 4) Environment variables + ## 5) Shared credentials file + ## 6) EC2 Instance Profile + #access_key = "" + #secret_key = "" + #token = "" + #role_arn = "" + #profile = "" + #shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + + ## Timestream database where the metrics will be inserted. + ## The database must exist prior to starting Telegraf. + database_name = "yourDatabaseNameHere" + + ## Specifies if the plugin should describe the Timestream database upon starting + ## to validate if it has access necessary permissions, connection, etc., as a safety check. + ## If the describe operation fails, the plugin will not start + ## and therefore the Telegraf agent will not start. + describe_database_on_start = false + + ## The mapping mode specifies how Telegraf records are represented in Timestream. + ## Valid values are: single-table, multi-table. + ## For example, consider the following data in line protocol format: + ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200 + ## airquality,location=us-west no2=5,pm25=16 1465839830100400200 + ## where weather and airquality are the measurement names, location and season are tags, + ## and temperature, humidity, no2, pm25 are fields. + ## In multi-table mode: + ## - first line will be ingested to table named weather + ## - second line will be ingested to table named airquality + ## - the tags will be represented as dimensions + ## - first table (weather) will have two records: + ## one with measurement name equals to temperature, + ## another with measurement name equals to humidity + ## - second table (airquality) will have two records: + ## one with measurement name equals to no2, + ## another with measurement name equals to pm25 + ## - the Timestream tables from the example will look like this: + ## TABLE "weather": + ## time | location | season | measure_name | measure_value::bigint + ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82 + ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71 + ## TABLE "airquality": + ## time | location | measure_name | measure_value::bigint + ## 2016-06-13 17:43:50 | us-west | no2 | 5 + ## 2016-06-13 17:43:50 | us-west | pm25 | 16 + ## In single-table mode: + ## - the data will be ingested to a single table, which name will be valueOf(single_table_name) + ## - measurement name will stored in dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name) + ## - location and season will be represented as dimensions + ## - temperature, humidity, no2, pm25 will be represented as measurement name + ## - the Timestream table from the example will look like this: + ## Assuming: + ## - single_table_name = "my_readings" + ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace" + ## TABLE "my_readings": + ## time | location | season | namespace | measure_name | measure_value::bigint + ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82 + ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71 + ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5 + ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16 + ## In most cases, using multi-table mapping mode is recommended. + ## However, you can consider using single-table in situations when you have thousands of measurement names. + mapping_mode = "multi-table" + + ## Only valid and required for mapping_mode = "single-table" + ## Specifies the Timestream table where the metrics will be uploaded. + # single_table_name = "yourTableNameHere" + + ## Only valid and required for mapping_mode = "single-table" + ## Describes what will be the Timestream dimension name for the Telegraf + ## measurement name. + # single_table_dimension_name_for_telegraf_measurement_name = "namespace" + + ## Specifies if the plugin should create the table, if the table do not exist. + ## The plugin writes the data without prior checking if the table exists. + ## When the table does not exist, the error returned from Timestream will cause + ## the plugin to create the table, if this parameter is set to true. + create_table_if_not_exists = true + + ## Only valid and required if create_table_if_not_exists = true + ## Specifies the Timestream table magnetic store retention period in days. + ## Check Timestream documentation for more details. + create_table_magnetic_store_retention_period_in_days = 365 + + ## Only valid and required if create_table_if_not_exists = true + ## Specifies the Timestream table memory store retention period in hours. + ## Check Timestream documentation for more details. + create_table_memory_store_retention_period_in_hours = 24 + + ## Only valid and optional if create_table_if_not_exists = true + ## Specifies the Timestream table tags. + ## Check Timestream documentation for more details + # create_table_tags = { "foo" = "bar", "environment" = "dev"} +` + +// WriteFactory function provides a way to mock the client instantiation for testing purposes. +var WriteFactory = func(credentialConfig *internalaws.CredentialConfig) WriteClient { + configProvider := credentialConfig.Credentials() + return timestreamwrite.New(configProvider) +} + +func (t *Timestream) Connect() error { + if t.DatabaseName == "" { + return fmt.Errorf("DatabaseName key is required") + } + + if t.MappingMode == "" { + return fmt.Errorf("MappingMode key is required") + } + + if t.MappingMode != MappingModeSingleTable && t.MappingMode != MappingModeMultiTable { + return fmt.Errorf("correct MappingMode key values are: '%s', '%s'", + MappingModeSingleTable, MappingModeMultiTable) + } + + if t.MappingMode == MappingModeSingleTable { + if t.SingleTableName == "" { + return fmt.Errorf("in '%s' mapping mode, SingleTableName key is required", MappingModeSingleTable) + } + + if t.SingleTableDimensionNameForTelegrafMeasurementName == "" { + return fmt.Errorf("in '%s' mapping mode, SingleTableDimensionNameForTelegrafMeasurementName key is required", + MappingModeSingleTable) + } + } + + if t.MappingMode == MappingModeMultiTable { + if t.SingleTableName != "" { + return fmt.Errorf("in '%s' mapping mode, do not specify SingleTableName key", MappingModeMultiTable) + } + + if t.SingleTableDimensionNameForTelegrafMeasurementName != "" { + return fmt.Errorf("in '%s' mapping mode, do not specify SingleTableDimensionNameForTelegrafMeasurementName key", MappingModeMultiTable) + } + } + + if t.CreateTableIfNotExists { + if t.CreateTableMagneticStoreRetentionPeriodInDays < 1 { + return fmt.Errorf("if Telegraf should create tables, CreateTableMagneticStoreRetentionPeriodInDays key should have a value greater than 0") + } + + if t.CreateTableMemoryStoreRetentionPeriodInHours < 1 { + return fmt.Errorf("if Telegraf should create tables, CreateTableMemoryStoreRetentionPeriodInHours key should have a value greater than 0") + } + } + + t.Log.Infof("Constructing Timestream client for '%s' mode", t.MappingMode) + + credentialConfig := &internalaws.CredentialConfig{ + Region: t.Region, + AccessKey: t.AccessKey, + SecretKey: t.SecretKey, + RoleARN: t.RoleARN, + Profile: t.Profile, + Filename: t.Filename, + Token: t.Token, + EndpointURL: t.EndpointURL, + } + svc := WriteFactory(credentialConfig) + + if t.DescribeDatabaseOnStart { + t.Log.Infof("Describing database '%s' in region '%s'", t.DatabaseName, t.Region) + + describeDatabaseInput := ×treamwrite.DescribeDatabaseInput{ + DatabaseName: aws.String(t.DatabaseName), + } + describeDatabaseOutput, err := svc.DescribeDatabase(describeDatabaseInput) + if err != nil { + t.Log.Errorf("Couldn't describe database '%s'. Check error, fix permissions, connectivity, create database.", t.DatabaseName) + return err + } + t.Log.Infof("Describe database '%s' returned: '%s'.", t.DatabaseName, describeDatabaseOutput) + } + + t.svc = svc + return nil +} + +func (t *Timestream) Close() error { + return nil +} + +func (t *Timestream) SampleConfig() string { + return sampleConfig +} + +func (t *Timestream) Description() string { + return "Configuration for Amazon Timestream output." +} + +func init() { + outputs.Add("timestream", func() telegraf.Output { + return &Timestream{} + }) +} + +func (t *Timestream) Write(metrics []telegraf.Metric) error { + writeRecordsInputs := t.TransformMetrics(metrics) + for _, writeRecordsInput := range writeRecordsInputs { + if err := t.writeToTimestream(writeRecordsInput, true); err != nil { + return err + } + } + return nil +} + +func (t *Timestream) writeToTimestream(writeRecordsInput *timestreamwrite.WriteRecordsInput, resourceNotFoundRetry bool) error { + t.Log.Debugf("Writing to Timestream: '%v' with ResourceNotFoundRetry: '%t'", writeRecordsInput, resourceNotFoundRetry) + + _, err := t.svc.WriteRecords(writeRecordsInput) + if err != nil { + // Telegraf will retry ingesting the metrics if an error is returned from the plugin. + // Therefore, return error only for retryable exceptions: ThrottlingException and 5xx exceptions. + if e, ok := err.(awserr.Error); ok { + switch e.Code() { + case timestreamwrite.ErrCodeResourceNotFoundException: + if resourceNotFoundRetry { + t.Log.Warnf("Failed to write to Timestream database '%s' table '%s'. Error: '%s'", + t.DatabaseName, *writeRecordsInput.TableName, e) + return t.createTableAndRetry(writeRecordsInput) + } + t.logWriteToTimestreamError(err, writeRecordsInput.TableName) + case timestreamwrite.ErrCodeThrottlingException: + return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s", + t.DatabaseName, *writeRecordsInput.TableName, err) + case timestreamwrite.ErrCodeInternalServerException: + return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s", + t.DatabaseName, *writeRecordsInput.TableName, err) + default: + t.logWriteToTimestreamError(err, writeRecordsInput.TableName) + } + } else { + // Retry other, non-aws errors. + return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s", + t.DatabaseName, *writeRecordsInput.TableName, err) + } + } + return nil +} + +func (t *Timestream) logWriteToTimestreamError(err error, tableName *string) { + t.Log.Errorf("Failed to write to Timestream database '%s' table '%s'. Skipping metric! Error: '%s'", + t.DatabaseName, *tableName, err) +} + +func (t *Timestream) createTableAndRetry(writeRecordsInput *timestreamwrite.WriteRecordsInput) error { + if t.CreateTableIfNotExists { + t.Log.Infof("Trying to create table '%s' in database '%s', as 'CreateTableIfNotExists' config key is 'true'.", *writeRecordsInput.TableName, t.DatabaseName) + if err := t.createTable(writeRecordsInput.TableName); err != nil { + t.Log.Errorf("Failed to create table '%s' in database '%s': %s. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName, err) + } else { + t.Log.Infof("Table '%s' in database '%s' created. Retrying writing.", *writeRecordsInput.TableName, t.DatabaseName) + return t.writeToTimestream(writeRecordsInput, false) + } + } else { + t.Log.Errorf("Not trying to create table '%s' in database '%s', as 'CreateTableIfNotExists' config key is 'false'. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName) + } + return nil +} + +// createTable creates a Timestream table according to the configuration. +func (t *Timestream) createTable(tableName *string) error { + createTableInput := ×treamwrite.CreateTableInput{ + DatabaseName: aws.String(t.DatabaseName), + TableName: aws.String(*tableName), + RetentionProperties: ×treamwrite.RetentionProperties{ + MagneticStoreRetentionPeriodInDays: aws.Int64(t.CreateTableMagneticStoreRetentionPeriodInDays), + MemoryStoreRetentionPeriodInHours: aws.Int64(t.CreateTableMemoryStoreRetentionPeriodInHours), + }, + } + var tags []*timestreamwrite.Tag + for key, val := range t.CreateTableTags { + tags = append(tags, ×treamwrite.Tag{ + Key: aws.String(key), + Value: aws.String(val), + }) + } + createTableInput.SetTags(tags) + + _, err := t.svc.CreateTable(createTableInput) + if err != nil { + if e, ok := err.(awserr.Error); ok { + // if the table was created in the meantime, it's ok. + if e.Code() == timestreamwrite.ErrCodeConflictException { + return nil + } + } + return err + } + return nil +} + +// TransformMetrics transforms a collection of Telegraf Metrics into write requests to Timestream. +// Telegraf Metrics are grouped by Name, Tag Keys and Time to use Timestream CommonAttributes. +// Returns collection of write requests to be performed to Timestream. +func (t *Timestream) TransformMetrics(metrics []telegraf.Metric) []*timestreamwrite.WriteRecordsInput { + writeRequests := make(map[uint64]*timestreamwrite.WriteRecordsInput, len(metrics)) + for _, m := range metrics { + // build MeasureName, MeasureValue, MeasureValueType + records := t.buildWriteRecords(m) + if len(records) == 0 { + continue + } + id := hashFromMetricTimeNameTagKeys(m) + if curr, ok := writeRequests[id]; !ok { + // No current CommonAttributes/WriteRecordsInput found for current Telegraf Metric + dimensions := t.buildDimensions(m) + timeUnit, timeValue := getTimestreamTime(m.Time()) + newWriteRecord := ×treamwrite.WriteRecordsInput{ + DatabaseName: aws.String(t.DatabaseName), + Records: records, + CommonAttributes: ×treamwrite.Record{ + Dimensions: dimensions, + Time: aws.String(timeValue), + TimeUnit: aws.String(timeUnit), + }, + } + if t.MappingMode == MappingModeSingleTable { + newWriteRecord.SetTableName(t.SingleTableName) + } + if t.MappingMode == MappingModeMultiTable { + newWriteRecord.SetTableName(m.Name()) + } + + writeRequests[id] = newWriteRecord + } else { + curr.Records = append(curr.Records, records...) + } + } + + // Create result as array of WriteRecordsInput. Split requests over records count limit to smaller requests. + var result []*timestreamwrite.WriteRecordsInput + for _, writeRequest := range writeRequests { + if len(writeRequest.Records) > MaxRecordsPerCall { + for _, recordsPartition := range partitionRecords(MaxRecordsPerCall, writeRequest.Records) { + newWriteRecord := ×treamwrite.WriteRecordsInput{ + DatabaseName: writeRequest.DatabaseName, + TableName: writeRequest.TableName, + Records: recordsPartition, + CommonAttributes: writeRequest.CommonAttributes, + } + result = append(result, newWriteRecord) + } + } else { + result = append(result, writeRequest) + } + } + return result +} + +func hashFromMetricTimeNameTagKeys(m telegraf.Metric) uint64 { + h := fnv.New64a() + h.Write([]byte(m.Name())) + h.Write([]byte("\n")) + for _, tag := range m.TagList() { + if tag.Key == "" { + continue + } + + h.Write([]byte(tag.Key)) + h.Write([]byte("\n")) + h.Write([]byte(tag.Value)) + h.Write([]byte("\n")) + } + b := make([]byte, binary.MaxVarintLen64) + n := binary.PutUvarint(b, uint64(m.Time().UnixNano())) + h.Write(b[:n]) + h.Write([]byte("\n")) + return h.Sum64() +} + +func (t *Timestream) buildDimensions(point telegraf.Metric) []*timestreamwrite.Dimension { + var dimensions []*timestreamwrite.Dimension + for tagName, tagValue := range point.Tags() { + dimension := ×treamwrite.Dimension{ + Name: aws.String(tagName), + Value: aws.String(tagValue), + } + dimensions = append(dimensions, dimension) + } + if t.MappingMode == MappingModeSingleTable { + dimension := ×treamwrite.Dimension{ + Name: aws.String(t.SingleTableDimensionNameForTelegrafMeasurementName), + Value: aws.String(point.Name()), + } + dimensions = append(dimensions, dimension) + } + return dimensions +} + +// buildWriteRecords builds the Timestream write records from Metric Fields only. +// Tags and time are not included - common attributes are built separately. +// Records with unsupported Metric Field type are skipped. +// It returns an array of Timestream write records. +func (t *Timestream) buildWriteRecords(point telegraf.Metric) []*timestreamwrite.Record { + var records []*timestreamwrite.Record + for fieldName, fieldValue := range point.Fields() { + stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue) + if !ok { + t.Log.Errorf("Skipping field '%s'. The type '%s' is not supported in Timestream as MeasureValue. "+ + "Supported values are: [int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool]", + fieldName, reflect.TypeOf(fieldValue)) + continue + } + record := ×treamwrite.Record{ + MeasureName: aws.String(fieldName), + MeasureValueType: aws.String(stringFieldValueType), + MeasureValue: aws.String(stringFieldValue), + } + records = append(records, record) + } + return records +} + +// partitionRecords splits the Timestream records into smaller slices of a max size +// so that are under the limit for the Timestream API call. +// It returns the array of array of records. +func partitionRecords(size int, records []*timestreamwrite.Record) [][]*timestreamwrite.Record { + numberOfPartitions := len(records) / size + if len(records)%size != 0 { + numberOfPartitions++ + } + + partitions := make([][]*timestreamwrite.Record, numberOfPartitions) + + for i := 0; i < numberOfPartitions; i++ { + start := size * i + end := size * (i + 1) + if end > len(records) { + end = len(records) + } + + partitions[i] = records[start:end] + } + + return partitions +} + +// getTimestreamTime produces Timestream TimeUnit and TimeValue with minimum possible granularity +// while maintaining the same information. +func getTimestreamTime(time time.Time) (timeUnit string, timeValue string) { + const ( + TimeUnitS = "SECONDS" + TimeUnitMS = "MILLISECONDS" + TimeUnitUS = "MICROSECONDS" + TimeUnitNS = "NANOSECONDS" + ) + nanosTime := time.UnixNano() + if nanosTime%1e9 == 0 { + timeUnit = TimeUnitS + timeValue = strconv.FormatInt(nanosTime/1e9, 10) + } else if nanosTime%1e6 == 0 { + timeUnit = TimeUnitMS + timeValue = strconv.FormatInt(nanosTime/1e6, 10) + } else if nanosTime%1e3 == 0 { + timeUnit = TimeUnitUS + timeValue = strconv.FormatInt(nanosTime/1e3, 10) + } else { + timeUnit = TimeUnitNS + timeValue = strconv.FormatInt(nanosTime, 10) + } + return +} + +// convertValue converts single Field value from Telegraf Metric and produces +// value, valueType Timestream representation. +func convertValue(v interface{}) (value string, valueType string, ok bool) { + const ( + TypeBigInt = "BIGINT" + TypeDouble = "DOUBLE" + TypeBoolean = "BOOLEAN" + TypeVarchar = "VARCHAR" + ) + ok = true + + switch t := v.(type) { + case int: + valueType = TypeBigInt + value = strconv.FormatInt(int64(t), 10) + case int8: + valueType = TypeBigInt + value = strconv.FormatInt(int64(t), 10) + case int16: + valueType = TypeBigInt + value = strconv.FormatInt(int64(t), 10) + case int32: + valueType = TypeBigInt + value = strconv.FormatInt(int64(t), 10) + case int64: + valueType = TypeBigInt + value = strconv.FormatInt(t, 10) + case uint: + valueType = TypeBigInt + value = strconv.FormatUint(uint64(t), 10) + case uint8: + valueType = TypeBigInt + value = strconv.FormatUint(uint64(t), 10) + case uint16: + valueType = TypeBigInt + value = strconv.FormatUint(uint64(t), 10) + case uint32: + valueType = TypeBigInt + value = strconv.FormatUint(uint64(t), 10) + case uint64: + valueType = TypeBigInt + value = strconv.FormatUint(t, 10) + case float32: + valueType = TypeDouble + value = strconv.FormatFloat(float64(t), 'f', -1, 32) + case float64: + valueType = TypeDouble + value = strconv.FormatFloat(t, 'f', -1, 64) + case bool: + valueType = TypeBoolean + if t { + value = "true" + } else { + value = "false" + } + case string: + valueType = TypeVarchar + value = t + default: + // Skip unsupported type. + ok = false + return + } + return +} diff --git a/plugins/outputs/timestream/timestream_internal_test.go b/plugins/outputs/timestream/timestream_internal_test.go new file mode 100644 index 0000000000000..27b19487ac898 --- /dev/null +++ b/plugins/outputs/timestream/timestream_internal_test.go @@ -0,0 +1,92 @@ +package timestream + +import ( + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/timestreamwrite" + + "github.com/stretchr/testify/assert" +) + +func TestGetTimestreamTime(t *testing.T) { + assertions := assert.New(t) + + tWithNanos := time.Date(2020, time.November, 10, 23, 44, 20, 123, time.UTC) + tWithMicros := time.Date(2020, time.November, 10, 23, 44, 20, 123000, time.UTC) + tWithMillis := time.Date(2020, time.November, 10, 23, 44, 20, 123000000, time.UTC) + tOnlySeconds := time.Date(2020, time.November, 10, 23, 44, 20, 0, time.UTC) + + tUnitNanos, tValueNanos := getTimestreamTime(tWithNanos) + assertions.Equal("NANOSECONDS", tUnitNanos) + assertions.Equal("1605051860000000123", tValueNanos) + + tUnitMicros, tValueMicros := getTimestreamTime(tWithMicros) + assertions.Equal("MICROSECONDS", tUnitMicros) + assertions.Equal("1605051860000123", tValueMicros) + + tUnitMillis, tValueMillis := getTimestreamTime(tWithMillis) + assertions.Equal("MILLISECONDS", tUnitMillis) + assertions.Equal("1605051860123", tValueMillis) + + tUnitSeconds, tValueSeconds := getTimestreamTime(tOnlySeconds) + assertions.Equal("SECONDS", tUnitSeconds) + assertions.Equal("1605051860", tValueSeconds) +} + +func TestPartitionRecords(t *testing.T) { + + assertions := assert.New(t) + + testDatum := timestreamwrite.Record{ + MeasureName: aws.String("Foo"), + MeasureValueType: aws.String("DOUBLE"), + MeasureValue: aws.String("123"), + } + + var zeroDatum []*timestreamwrite.Record + oneDatum := []*timestreamwrite.Record{&testDatum} + twoDatum := []*timestreamwrite.Record{&testDatum, &testDatum} + threeDatum := []*timestreamwrite.Record{&testDatum, &testDatum, &testDatum} + + assertions.Equal([][]*timestreamwrite.Record{}, partitionRecords(2, zeroDatum)) + assertions.Equal([][]*timestreamwrite.Record{oneDatum}, partitionRecords(2, oneDatum)) + assertions.Equal([][]*timestreamwrite.Record{oneDatum}, partitionRecords(2, oneDatum)) + assertions.Equal([][]*timestreamwrite.Record{twoDatum}, partitionRecords(2, twoDatum)) + assertions.Equal([][]*timestreamwrite.Record{twoDatum, oneDatum}, partitionRecords(2, threeDatum)) +} + +func TestConvertValueSupported(t *testing.T) { + intInputValues := []interface{}{-1, int8(-2), int16(-3), int32(-4), int64(-5)} + intOutputValues := []string{"-1", "-2", "-3", "-4", "-5"} + intOutputValueTypes := []string{"BIGINT", "BIGINT", "BIGINT", "BIGINT", "BIGINT"} + testConvertValueSupportedCases(t, intInputValues, intOutputValues, intOutputValueTypes) + + uintInputValues := []interface{}{uint(1), uint8(2), uint16(3), uint32(4), uint64(5)} + uintOutputValues := []string{"1", "2", "3", "4", "5"} + uintOutputValueTypes := []string{"BIGINT", "BIGINT", "BIGINT", "BIGINT", "BIGINT"} + testConvertValueSupportedCases(t, uintInputValues, uintOutputValues, uintOutputValueTypes) + + otherInputValues := []interface{}{"foo", float32(22.123), 22.1234, true} + otherOutputValues := []string{"foo", "22.123", "22.1234", "true"} + otherOutputValueTypes := []string{"VARCHAR", "DOUBLE", "DOUBLE", "BOOLEAN"} + testConvertValueSupportedCases(t, otherInputValues, otherOutputValues, otherOutputValueTypes) +} + +func TestConvertValueUnsupported(t *testing.T) { + assertions := assert.New(t) + _, _, ok := convertValue(time.Date(2020, time.November, 10, 23, 44, 20, 0, time.UTC)) + assertions.False(ok, "Expected unsuccessful conversion") +} + +func testConvertValueSupportedCases(t *testing.T, + inputValues []interface{}, outputValues []string, outputValueTypes []string) { + assertions := assert.New(t) + for i, inputValue := range inputValues { + v, vt, ok := convertValue(inputValue) + assertions.Equal(true, ok, "Expected successful conversion") + assertions.Equal(outputValues[i], v, "Expected different string representation of converted value") + assertions.Equal(outputValueTypes[i], vt, "Expected different value type of converted value") + } +} diff --git a/plugins/outputs/timestream/timestream_test.go b/plugins/outputs/timestream/timestream_test.go new file mode 100644 index 0000000000000..58984c50b8ad2 --- /dev/null +++ b/plugins/outputs/timestream/timestream_test.go @@ -0,0 +1,742 @@ +package timestream_test + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws/awserr" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/timestreamwrite" + "github.com/influxdata/telegraf" + internalaws "github.com/influxdata/telegraf/config/aws" + ts "github.com/influxdata/telegraf/plugins/outputs/timestream" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" +) + +const tsDbName = "testDb" + +const testSingleTableName = "SingleTableName" +const testSingleTableDim = "namespace" + +var time1 = time.Date(2009, time.November, 10, 22, 0, 0, 0, time.UTC) + +const time1Epoch = "1257890400" + +var time2 = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + +const time2Epoch = "1257894000" + +const timeUnit = "SECONDS" + +const metricName1 = "metricName1" +const metricName2 = "metricName2" + +type mockTimestreamClient struct { +} + +func (m *mockTimestreamClient) CreateTable(*timestreamwrite.CreateTableInput) (*timestreamwrite.CreateTableOutput, error) { + return nil, nil +} +func (m *mockTimestreamClient) WriteRecords(*timestreamwrite.WriteRecordsInput) (*timestreamwrite.WriteRecordsOutput, error) { + return nil, nil +} +func (m *mockTimestreamClient) DescribeDatabase(*timestreamwrite.DescribeDatabaseInput) (*timestreamwrite.DescribeDatabaseOutput, error) { + return nil, fmt.Errorf("hello from DescribeDatabase") +} + +func TestConnectValidatesConfigParameters(t *testing.T) { + assertions := assert.New(t) + ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient { + return &mockTimestreamClient{} + } + + // checking base arguments + noDatabaseName := ts.Timestream{Log: testutil.Logger{}} + assertions.Contains(noDatabaseName.Connect().Error(), "DatabaseName") + + noMappingMode := ts.Timestream{ + DatabaseName: tsDbName, + Log: testutil.Logger{}, + } + assertions.Contains(noMappingMode.Connect().Error(), "MappingMode") + + incorrectMappingMode := ts.Timestream{ + DatabaseName: tsDbName, + MappingMode: "foo", + Log: testutil.Logger{}, + } + assertions.Contains(incorrectMappingMode.Connect().Error(), "single-table") + + // multi-table arguments + validMappingModeMultiTable := ts.Timestream{ + DatabaseName: tsDbName, + MappingMode: ts.MappingModeMultiTable, + Log: testutil.Logger{}, + } + assertions.Nil(validMappingModeMultiTable.Connect()) + + singleTableNameWithMultiTable := ts.Timestream{ + DatabaseName: tsDbName, + MappingMode: ts.MappingModeMultiTable, + SingleTableName: testSingleTableName, + Log: testutil.Logger{}, + } + assertions.Contains(singleTableNameWithMultiTable.Connect().Error(), "SingleTableName") + + singleTableDimensionWithMultiTable := ts.Timestream{ + DatabaseName: tsDbName, + MappingMode: ts.MappingModeMultiTable, + SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, + Log: testutil.Logger{}, + } + assertions.Contains(singleTableDimensionWithMultiTable.Connect().Error(), + "SingleTableDimensionNameForTelegrafMeasurementName") + + // single-table arguments + noTableNameMappingModeSingleTable := ts.Timestream{ + DatabaseName: tsDbName, + MappingMode: ts.MappingModeSingleTable, + Log: testutil.Logger{}, + } + assertions.Contains(noTableNameMappingModeSingleTable.Connect().Error(), "SingleTableName") + + noDimensionNameMappingModeSingleTable := ts.Timestream{ + DatabaseName: tsDbName, + MappingMode: ts.MappingModeSingleTable, + SingleTableName: testSingleTableName, + Log: testutil.Logger{}, + } + assertions.Contains(noDimensionNameMappingModeSingleTable.Connect().Error(), + "SingleTableDimensionNameForTelegrafMeasurementName") + + validConfigurationMappingModeSingleTable := ts.Timestream{ + DatabaseName: tsDbName, + MappingMode: ts.MappingModeSingleTable, + SingleTableName: testSingleTableName, + SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, + Log: testutil.Logger{}, + } + assertions.Nil(validConfigurationMappingModeSingleTable.Connect()) + + // create table arguments + createTableNoMagneticRetention := ts.Timestream{ + DatabaseName: tsDbName, + MappingMode: ts.MappingModeMultiTable, + CreateTableIfNotExists: true, + Log: testutil.Logger{}, + } + assertions.Contains(createTableNoMagneticRetention.Connect().Error(), + "CreateTableMagneticStoreRetentionPeriodInDays") + + createTableNoMemoryRetention := ts.Timestream{ + DatabaseName: tsDbName, + MappingMode: ts.MappingModeMultiTable, + CreateTableIfNotExists: true, + CreateTableMagneticStoreRetentionPeriodInDays: 3, + Log: testutil.Logger{}, + } + assertions.Contains(createTableNoMemoryRetention.Connect().Error(), + "CreateTableMemoryStoreRetentionPeriodInHours") + + createTableValid := ts.Timestream{ + DatabaseName: tsDbName, + MappingMode: ts.MappingModeMultiTable, + CreateTableIfNotExists: true, + CreateTableMagneticStoreRetentionPeriodInDays: 3, + CreateTableMemoryStoreRetentionPeriodInHours: 3, + Log: testutil.Logger{}, + } + assertions.Nil(createTableValid.Connect()) + + // describe table on start arguments + describeTableInvoked := ts.Timestream{ + DatabaseName: tsDbName, + MappingMode: ts.MappingModeMultiTable, + DescribeDatabaseOnStart: true, + Log: testutil.Logger{}, + } + assertions.Contains(describeTableInvoked.Connect().Error(), "hello from DescribeDatabase") +} + +type mockTimestreamErrorClient struct { + ErrorToReturnOnWriteRecords error +} + +func (m *mockTimestreamErrorClient) CreateTable(*timestreamwrite.CreateTableInput) (*timestreamwrite.CreateTableOutput, error) { + return nil, nil +} +func (m *mockTimestreamErrorClient) WriteRecords(*timestreamwrite.WriteRecordsInput) (*timestreamwrite.WriteRecordsOutput, error) { + return nil, m.ErrorToReturnOnWriteRecords +} +func (m *mockTimestreamErrorClient) DescribeDatabase(*timestreamwrite.DescribeDatabaseInput) (*timestreamwrite.DescribeDatabaseOutput, error) { + return nil, nil +} + +func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { + assertions := assert.New(t) + + ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient { + return &mockTimestreamErrorClient{ + awserr.New(timestreamwrite.ErrCodeThrottlingException, + "Throttling Test", nil), + } + } + plugin := ts.Timestream{ + MappingMode: ts.MappingModeMultiTable, + DatabaseName: tsDbName, + Log: testutil.Logger{}, + } + plugin.Connect() + input := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{"value": float64(1)}, + time1, + ) + + err := plugin.Write([]telegraf.Metric{input}) + + assertions.NotNil(err, "Expected an error to be returned to Telegraf, "+ + "so that the write will be retried by Telegraf later.") +} + +func TestRejectedRecordsErrorResultsInMetricsBeingSkipped(t *testing.T) { + assertions := assert.New(t) + + ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient { + return &mockTimestreamErrorClient{ + awserr.New(timestreamwrite.ErrCodeRejectedRecordsException, + "RejectedRecords Test", nil), + } + } + plugin := ts.Timestream{ + MappingMode: ts.MappingModeMultiTable, + DatabaseName: tsDbName, + Log: testutil.Logger{}, + } + plugin.Connect() + input := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{"value": float64(1)}, + time1, + ) + + err := plugin.Write([]telegraf.Metric{input}) + + assertions.Nil(err, "Expected to silently swallow the RejectedRecordsException, "+ + "as retrying this error doesn't make sense.") +} + +func TestTransformMetricsSkipEmptyMetric(t *testing.T) { + input1 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{}, //no fields here + time1, + ) + input2 := testutil.MustMetric( + metricName1, + map[string]string{"tag2": "value2"}, + map[string]interface{}{ + "value": float64(10), + }, + time1, + ) + input3 := testutil.MustMetric( + metricName1, + map[string]string{}, //record with no dimensions should appear in the results + map[string]interface{}{ + "value": float64(20), + }, + time1, + ) + + expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag2": "value2", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value": "10"}, + }) + expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{testSingleTableDim: metricName1}, + measureValues: map[string]string{"value": "20"}, + }) + comparisonTest(t, ts.MappingModeSingleTable, + []telegraf.Metric{input1, input2, input3}, + []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + + expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag2": "value2"}, + measureValues: map[string]string{"value": "10"}, + }) + expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{}, + measureValues: map[string]string{"value": "20"}, + }) + comparisonTest(t, ts.MappingModeMultiTable, + []telegraf.Metric{input1, input2, input3}, + []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) +} + +func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) { + const maxRecordsInWriteRecordsCall = 100 + + var inputs []telegraf.Metric + for i := 1; i <= maxRecordsInWriteRecordsCall+1; i++ { + fieldName := "value_supported" + strconv.Itoa(i) + inputs = append(inputs, testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + fieldName: float64(10), + }, + time1, + )) + } + + resultFields := make(map[string]string) + for i := 1; i <= maxRecordsInWriteRecordsCall; i++ { + fieldName := "value_supported" + strconv.Itoa(i) + resultFields[fieldName] = "10" + } + + expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: resultFields, + }) + expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported" + strconv.Itoa(maxRecordsInWriteRecordsCall+1): "10"}, + }) + comparisonTest(t, ts.MappingModeSingleTable, + inputs, + []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + + expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: resultFields, + }) + expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported" + strconv.Itoa(maxRecordsInWriteRecordsCall+1): "10"}, + }) + comparisonTest(t, ts.MappingModeMultiTable, + inputs, + []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) +} + +func TestTransformMetricsDifferentDimensionsSameTimestampsAreWrittenSeparate(t *testing.T) { + input1 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported1": float64(10), "value_supported2": float64(20), + }, + time1, + ) + input2 := testutil.MustMetric( + metricName1, + map[string]string{"tag2": "value2"}, + map[string]interface{}{ + "value_supported3": float64(30), + }, + time1, + ) + + expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }) + expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag2": "value2", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported3": "30"}, + }) + + comparisonTest(t, ts.MappingModeSingleTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + + expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }) + expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag2": "value2"}, + measureValues: map[string]string{"value_supported3": "30"}, + }) + + comparisonTest(t, ts.MappingModeMultiTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) +} + +func TestTransformMetricsSameDimensionsDifferentDimensionValuesAreWrittenSeparate(t *testing.T) { + input1 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported1": float64(10), + }, + time1, + ) + input2 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value2"}, + map[string]interface{}{ + "value_supported1": float64(20), + }, + time1, + ) + + expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10"}, + }) + expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value2", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "20"}, + }) + + comparisonTest(t, ts.MappingModeSingleTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + + expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported1": "10"}, + }) + expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value2"}, + measureValues: map[string]string{"value_supported1": "20"}, + }) + + comparisonTest(t, ts.MappingModeMultiTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) +} + +func TestTransformMetricsSameDimensionsDifferentTimestampsAreWrittenSeparate(t *testing.T) { + input1 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported1": float64(10), "value_supported2": float64(20), + }, + time1, + ) + input2 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported3": float64(30), + }, + time2, + ) + + expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }) + expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + t: time2Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported3": "30"}, + }) + + comparisonTest(t, ts.MappingModeSingleTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + + expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }) + expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + t: time2Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported3": "30"}, + }) + + comparisonTest(t, ts.MappingModeMultiTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) +} + +func TestTransformMetricsSameDimensionsSameTimestampsAreWrittenTogether(t *testing.T) { + input1 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported1": float64(10), "value_supported2": float64(20), + }, + time1, + ) + input2 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported3": float64(30), + }, + time1, + ) + + expectedResultSingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20", "value_supported3": "30"}, + }) + + comparisonTest(t, ts.MappingModeSingleTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) + + expectedResultMultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20", "value_supported3": "30"}, + }) + + comparisonTest(t, ts.MappingModeMultiTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResultMultiTable}) +} + +func TestTransformMetricsDifferentMetricsAreWrittenToDifferentTablesInMultiTableMapping(t *testing.T) { + input1 := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported1": float64(10), "value_supported2": float64(20), + }, + time1, + ) + input2 := testutil.MustMetric( + metricName2, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported3": float64(30), + }, + time1, + ) + + expectedResult1SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }) + expectedResult2SingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName2}, + measureValues: map[string]string{"value_supported3": "30"}, + }) + + comparisonTest(t, ts.MappingModeSingleTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) + + expectedResult1MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20"}, + }) + expectedResult2MultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName2, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported3": "30"}, + }) + + comparisonTest(t, ts.MappingModeMultiTable, + []telegraf.Metric{input1, input2}, + []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) +} + +func TestTransformMetricsUnsupportedFieldsAreSkipped(t *testing.T) { + metricWithUnsupportedField := testutil.MustMetric( + metricName1, + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "value_supported1": float64(10), "value_unsupported": time.Now(), + }, + time1, + ) + expectedResultSingleTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: testSingleTableName, + dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, + measureValues: map[string]string{"value_supported1": "10"}, + }) + + comparisonTest(t, ts.MappingModeSingleTable, + []telegraf.Metric{metricWithUnsupportedField}, + []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) + + expectedResultMultiTable := buildExpectedRecords(SimpleInput{ + t: time1Epoch, + tableName: metricName1, + dimensions: map[string]string{"tag1": "value1"}, + measureValues: map[string]string{"value_supported1": "10"}, + }) + + comparisonTest(t, ts.MappingModeMultiTable, + []telegraf.Metric{metricWithUnsupportedField}, + []*timestreamwrite.WriteRecordsInput{expectedResultMultiTable}) +} + +func comparisonTest(t *testing.T, + mappingMode string, + telegrafMetrics []telegraf.Metric, + timestreamRecords []*timestreamwrite.WriteRecordsInput) { + + var plugin ts.Timestream + switch mappingMode { + case ts.MappingModeSingleTable: + plugin = ts.Timestream{ + MappingMode: mappingMode, + DatabaseName: tsDbName, + + SingleTableName: testSingleTableName, + SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, + Log: testutil.Logger{}, + } + case ts.MappingModeMultiTable: + plugin = ts.Timestream{ + MappingMode: mappingMode, + DatabaseName: tsDbName, + Log: testutil.Logger{}, + } + } + assertions := assert.New(t) + + result := plugin.TransformMetrics(telegrafMetrics) + + assertions.Equal(len(timestreamRecords), len(result), "The number of transformed records was expected to be different") + for _, tsRecord := range timestreamRecords { + assertions.True(arrayContains(result, tsRecord), "Expected that the list of requests to Timestream: \n%s\n\n "+ + "will contain request: \n%s\n\nUsed MappingMode: %s", result, tsRecord, mappingMode) + } +} + +func arrayContains( + array []*timestreamwrite.WriteRecordsInput, + element *timestreamwrite.WriteRecordsInput) bool { + + sortWriteInputForComparison(*element) + + for _, a := range array { + sortWriteInputForComparison(*a) + + if reflect.DeepEqual(a, element) { + return true + } + } + return false +} + +func sortWriteInputForComparison(element timestreamwrite.WriteRecordsInput) { + // sort the records by MeasureName, as they are kept in an array, but the order of records doesn't matter + sort.Slice(element.Records, func(i, j int) bool { + return strings.Compare(*element.Records[i].MeasureName, *element.Records[j].MeasureName) < 0 + }) + // sort the dimensions in CommonAttributes + if element.CommonAttributes != nil { + sort.Slice(element.CommonAttributes.Dimensions, func(i, j int) bool { + return strings.Compare(*element.CommonAttributes.Dimensions[i].Name, + *element.CommonAttributes.Dimensions[j].Name) < 0 + }) + } + // sort the dimensions in Records + for _, r := range element.Records { + sort.Slice(r.Dimensions, func(i, j int) bool { + return strings.Compare(*r.Dimensions[i].Name, *r.Dimensions[j].Name) < 0 + }) + } +} + +type SimpleInput struct { + t string + tableName string + dimensions map[string]string + measureValues map[string]string +} + +func buildExpectedRecords(i SimpleInput) *timestreamwrite.WriteRecordsInput { + var tsDimensions []*timestreamwrite.Dimension + for k, v := range i.dimensions { + tsDimensions = append(tsDimensions, ×treamwrite.Dimension{ + Name: aws.String(k), + Value: aws.String(v), + }) + } + + var tsRecords []*timestreamwrite.Record + for k, v := range i.measureValues { + tsRecords = append(tsRecords, ×treamwrite.Record{ + MeasureName: aws.String(k), + MeasureValue: aws.String(v), + MeasureValueType: aws.String("DOUBLE"), + }) + } + + result := ×treamwrite.WriteRecordsInput{ + DatabaseName: aws.String(tsDbName), + TableName: aws.String(i.tableName), + Records: tsRecords, + CommonAttributes: ×treamwrite.Record{ + Dimensions: tsDimensions, + Time: aws.String(i.t), + TimeUnit: aws.String(timeUnit), + }, + } + + return result +} From c85fb585ad31cb49ee345905601f4cb33068bee6 Mon Sep 17 00:00:00 2001 From: Thomas Schuetz <38893055+thschue@users.noreply.github.com> Date: Fri, 16 Oct 2020 17:55:35 +0200 Subject: [PATCH 004/761] Dynatrace Output Plugin: Fixed behaviour when state map is cleared (#8251) --- plugins/outputs/dynatrace/README.md | 2 +- plugins/outputs/dynatrace/dynatrace.go | 24 +++++++++++++++++------- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/plugins/outputs/dynatrace/README.md b/plugins/outputs/dynatrace/README.md index 78a7ee4990067..ea4b42777752d 100644 --- a/plugins/outputs/dynatrace/README.md +++ b/plugins/outputs/dynatrace/README.md @@ -1,6 +1,6 @@ # Dynatrace Output Plugin -This plugin is sending telegraf metrics to [Dynatrace](www.dynatrace.com). It has two operational modes. +This plugin is sending telegraf metrics to [Dynatrace](https://www.dynatrace.com). It has two operational modes. Telegraf minimum version: Telegraf 1.16 Plugin minimum tested version: 1.16 diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 8c8fa984d82a8..596366ae8470f 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -195,17 +195,26 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { // write metric id,tags and value switch metric.Type() { case telegraf.Counter: + var delta float64 = 0 + + // Check if LastValue exists if lastvalue, ok := counts[metricID+tagb.String()]; ok { - // only send a counter if a lastvalue is found in the map - // if last value is found we can calc and send the delta value - if v, err := strconv.ParseFloat(lastvalue, 32); err == nil { - if v2, err := strconv.ParseFloat(value, 32); err == nil { - fmt.Fprintf(&buf, "%s%s count,delta=%f\n", metricID, tagb.String(), v2-v) - } + // Convert Strings to Floats + floatLastValue, err := strconv.ParseFloat(lastvalue, 32) + if err != nil { + d.Log.Debugf("Could not parse last value: %s", lastvalue) + } + floatCurrentValue, err := strconv.ParseFloat(value, 32) + if err != nil { + d.Log.Debugf("Could not parse current value: %s", value) + } + if floatCurrentValue > floatLastValue { + delta = floatCurrentValue - floatLastValue + fmt.Fprintf(&buf, "%s%s count,delta=%f\n", metricID, tagb.String(), delta) } } - // put the current value into the map as last value counts[metricID+tagb.String()] = value + default: fmt.Fprintf(&buf, "%s%s %v\n", metricID, tagb.String(), value) } @@ -214,6 +223,7 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { } sent++ // in typical interval of 10s, we will clean the counter state once in 24h which is 8640 iterations + if sent%8640 == 0 { counts = make(map[string]string) } From 78cf0b7ea69602b5825c8a33737c9d99fe971d32 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 16 Oct 2020 13:07:29 -0400 Subject: [PATCH 005/761] turn gzip on by default for InfluxDB v1 output (#8269) --- plugins/outputs/influxdb/README.md | 2 +- plugins/outputs/influxdb/influxdb.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index aefc03690a8da..cd1b36a723aeb 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -75,7 +75,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser ## HTTP Content-Encoding for write request body, can be set to "gzip" to ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" + # content_encoding = "gzip" ## When true, Telegraf will output unsigned integers as unsigned values, ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 68e8c93ac4aa5..49ca7d6435249 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -131,7 +131,7 @@ var sampleConfig = ` ## HTTP Content-Encoding for write request body, can be set to "gzip" to ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" + # content_encoding = "gzip" ## When true, Telegraf will output unsigned integers as unsigned values, ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned @@ -315,6 +315,7 @@ func init() { CreateUDPClientF: func(config *UDPConfig) (Client, error) { return NewUDPClient(*config) }, + ContentEncoding: "gzip", } }) } From 527a11a656390841de86cf80f2752648e35353a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Labesse=20K=C3=A9vin?= Date: Fri, 16 Oct 2020 19:40:17 +0200 Subject: [PATCH 006/761] http_response: match on status code (#8032) --- plugins/inputs/http_response/README.md | 24 ++- plugins/inputs/http_response/http_response.go | 41 ++++-- .../http_response/http_response_test.go | 139 ++++++++++++++++++ 3 files changed, 187 insertions(+), 17 deletions(-) diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 67d0dc067f691..4e01bc0bbdfaf 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -51,6 +51,12 @@ This input plugin checks HTTP/HTTPS connections. # response_string_match = "ok" # response_string_match = "\".*_status\".?:.?\"up\"" + ## Expected response status code. + ## The status code of the response is compared to this value. If they match, the field + ## "response_status_code_match" will be 1, otherwise it will be 0. If the + ## expected status code is 0, the check is disabled and the field won't be added. + # response_status_code = 0 + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -83,6 +89,7 @@ This input plugin checks HTTP/HTTPS connections. - response_time (float, seconds) - content_length (int, response body length) - response_string_match (int, 0 = mismatch / body read error, 1 = match) + - response_status_code_match (int, 0 = mismatch, 1 = match) - http_response_code (int, response status code) - result_type (string, deprecated in 1.6: use `result` tag and `result_code` field) - result_code (int, [see below](#result--result_code)) @@ -93,14 +100,15 @@ Upon finishing polling the target server, the plugin registers the result of the This tag is used to expose network and plugin errors. HTTP errors are considered a successful connection. -|Tag value |Corresponding field value|Description| ---------------------------|-------------------------|-----------| -|success | 0 |The HTTP request completed, even if the HTTP code represents an error| -|response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex. HTTP errors with content in their body (like 4xx, 5xx) will trigger this error| -|body_read_error | 2 |The option `response_string_match` was used, but the plugin wasn't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc) will trigger this error. Or the option `response_body_field` was used and the content of the response body was not a valid utf-8. Or the size of the body of the response exceeded the `response_body_max_size` | -|connection_failed | 3 |Catch all for any network error not specifically handled by the plugin| -|timeout | 4 |The plugin timed out while awaiting the HTTP connection to complete| -|dns_error | 5 |There was a DNS error while attempting to connect to the host| +|Tag value |Corresponding field value|Description| +-------------------------------|-------------------------|-----------| +|success | 0 |The HTTP request completed, even if the HTTP code represents an error| +|response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex. HTTP errors with content in their body (like 4xx, 5xx) will trigger this error| +|body_read_error | 2 |The option `response_string_match` was used, but the plugin wasn't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc) will trigger this error. Or the option `response_body_field` was used and the content of the response body was not a valid utf-8. Or the size of the body of the response exceeded the `response_body_max_size` | +|connection_failed | 3 |Catch all for any network error not specifically handled by the plugin| +|timeout | 4 |The plugin timed out while awaiting the HTTP connection to complete| +|dns_error | 5 |There was a DNS error while attempting to connect to the host| +|response_status_code_mismatch | 6 |The option `response_status_code_match` was used, and the status code of the response didn't match the value.| ### Example Output: diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 33888503b068f..bd3078e490c33 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -42,6 +42,7 @@ type HTTPResponse struct { ResponseBodyField string `toml:"response_body_field"` ResponseBodyMaxSize internal.Size `toml:"response_body_max_size"` ResponseStringMatch string + ResponseStatusCode int Interface string // HTTP Basic Auth Credentials Username string `toml:"username"` @@ -106,6 +107,12 @@ var sampleConfig = ` # response_string_match = "ok" # response_string_match = "\".*_status\".?:.?\"up\"" + ## Expected response status code. + ## The status code of the response is compared to this value. If they match, the field + ## "response_status_code_match" will be 1, otherwise it will be 0. If the + ## expected status code is 0, the check is disabled and the field won't be added. + # response_status_code = 0 + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -208,12 +215,13 @@ func localAddress(interfaceName string) (net.Addr, error) { func setResult(result_string string, fields map[string]interface{}, tags map[string]string) { result_codes := map[string]int{ - "success": 0, - "response_string_mismatch": 1, - "body_read_error": 2, - "connection_failed": 3, - "timeout": 4, - "dns_error": 5, + "success": 0, + "response_string_mismatch": 1, + "body_read_error": 2, + "connection_failed": 3, + "timeout": 4, + "dns_error": 5, + "response_status_code_mismatch": 6, } tags["result"] = result_string @@ -352,16 +360,31 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] } fields["content_length"] = len(bodyBytes) - // Check the response for a regex match. + var success = true + + // Check the response for a regex if h.ResponseStringMatch != "" { if h.compiledStringMatch.Match(bodyBytes) { - setResult("success", fields, tags) fields["response_string_match"] = 1 } else { + success = false setResult("response_string_mismatch", fields, tags) fields["response_string_match"] = 0 } - } else { + } + + // Check the response status code + if h.ResponseStatusCode > 0 { + if resp.StatusCode == h.ResponseStatusCode { + fields["response_status_code_match"] = 1 + } else { + success = false + setResult("response_status_code_mismatch", fields, tags) + fields["response_status_code_match"] = 0 + } + } + + if success { setResult("success", fields, tags) } diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 5a256e6e58d2a..adf4e7999aa94 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -126,6 +126,9 @@ func setUpTestMux() http.Handler { time.Sleep(time.Second * 2) return }) + mux.HandleFunc("/nocontent", func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNoContent) + }) return mux } @@ -1110,3 +1113,139 @@ func TestBasicAuth(t *testing.T) { absentFields := []string{"response_string_match"} checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) } + +func TestStatusCodeMatchFail(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + Address: ts.URL + "/nocontent", + ResponseStatusCode: http.StatusOK, + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + } + + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusNoContent, + "response_status_code_match": 0, + "result_type": "response_status_code_mismatch", + "result_code": 6, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": http.MethodGet, + "status_code": "204", + "result": "response_status_code_mismatch", + } + checkOutput(t, &acc, expectedFields, expectedTags, nil, nil) +} + +func TestStatusCodeMatch(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + Address: ts.URL + "/nocontent", + ResponseStatusCode: http.StatusNoContent, + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + } + + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusNoContent, + "response_status_code_match": 1, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": http.MethodGet, + "status_code": "204", + "result": "success", + } + checkOutput(t, &acc, expectedFields, expectedTags, nil, nil) +} + +func TestStatusCodeAndStringMatch(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + Address: ts.URL + "/good", + ResponseStatusCode: http.StatusOK, + ResponseStringMatch: "hit the good page", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + } + + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusOK, + "response_status_code_match": 1, + "response_string_match": 1, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": http.MethodGet, + "status_code": "200", + "result": "success", + } + checkOutput(t, &acc, expectedFields, expectedTags, nil, nil) +} + +func TestStatusCodeAndStringMatchFail(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + Address: ts.URL + "/nocontent", + ResponseStatusCode: http.StatusOK, + ResponseStringMatch: "hit the good page", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + } + + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusNoContent, + "response_status_code_match": 0, + "response_string_match": 0, + "result_type": "response_status_code_mismatch", + "result_code": 6, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": http.MethodGet, + "status_code": "204", + "result": "response_status_code_mismatch", + } + checkOutput(t, &acc, expectedFields, expectedTags, nil, nil) +} From 75e90c63df4d342776c72ff83ebaf67c8ebf0d22 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 16 Oct 2020 14:01:15 -0400 Subject: [PATCH 007/761] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a5b3dfc4e85cf..037e4006d0e50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,8 @@ - [#8189](https://github.com/influxdata/telegraf/pull/8189) `inputs.snmp_trap` If the community string is available, add it as a tag - [#8190](https://github.com/influxdata/telegraf/pull/8190) `inputs.tail` Semigroupoid multiline (#8167) - [#8196](https://github.com/influxdata/telegraf/pull/8196) `inputs.redis` add functionality to get values from redis commands + - [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code + - [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting #### Bugfixes @@ -64,6 +66,8 @@ - [#8210](https://github.com/influxdata/telegraf/pull/8210) update gopsutil: fix procstat performance regression - [#8162](https://github.com/influxdata/telegraf/pull/8162) Fix bool serialization when using carbon2 - [#8240](https://github.com/influxdata/telegraf/pull/8240) Fix bugs found by LGTM analysis platform + - [#8251](https://github.com/influxdata/telegraf/pull/8251) `outputs.dynatrace` Dynatrace Output Plugin: Fixed behaviour when state map is cleared + - [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd #### New Input Plugins @@ -79,6 +83,7 @@ - [dynatrace](/plugins/outputs/dynatrace/README.md) Dynatrace output plugin - Contributed by @thschue - [sumologic](/plugins/outputs/sumologic/README.md) Sumo Logic output plugin - Contributed by @pmalek-sumo + - [timestream](/plugins/outputs/timestream) Timestream Output Plugin - Contributed by @piotrwest #### New External Plugins @@ -87,6 +92,7 @@ - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - [youtube-telegraf-plugin](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather view and subscriber stats from your youtube videos - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. + - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. ## v1.15.3 [2020-09-11] From a422c8f93be3174c00f1d1be2e6dd52a6a202fe5 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 16 Oct 2020 14:18:20 -0400 Subject: [PATCH 008/761] update changelog --- CHANGELOG.md | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 037e4006d0e50..fd133ba0d95dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,17 +2,12 @@ #### Release Notes - - Many documentation updates - New [code examples](/plugins/processors/starlark/testdata) for the [Starlark processor](/plugins/processors/starlark/README.md) - - [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to go 1.15 - - [#7864](https://github.com/influxdata/telegraf/pull/7864) `processors.starlark` Add logic starlark example - - [#7865](https://github.com/influxdata/telegraf/pull/7865) `common.shim` shim logger improvements - [#7920](https://github.com/influxdata/telegraf/pull/7920) `inputs.rabbitmq` remove deprecated healthcheck - - [#7932](https://github.com/influxdata/telegraf/pull/7932) Support for AWS Cloudwatch Alarms #7931 - [#7953](https://github.com/influxdata/telegraf/pull/7953) Add details to connect to InfluxDB OSS 2 and Cloud 2 - - [#7980](https://github.com/influxdata/telegraf/pull/7980) `processors.starlark` add example input/outputs to starlark examples - [#8054](https://github.com/influxdata/telegraf/pull/8054) add guidelines run to external plugins with execd - [#8198](https://github.com/influxdata/telegraf/pull/8198) `inputs.influxdb_v2_listener` change default influxdb port from 9999 to 8086 to match OSS 2.0 release + - [starlark](https://github.com/influxdata/telegraf/tree/release-1.16/plugins/processors/starlark/testdata) `processors.starlark` add various code exampels for the Starlark processor #### Features @@ -37,6 +32,7 @@ - [#8189](https://github.com/influxdata/telegraf/pull/8189) `inputs.snmp_trap` If the community string is available, add it as a tag - [#8190](https://github.com/influxdata/telegraf/pull/8190) `inputs.tail` Semigroupoid multiline (#8167) - [#8196](https://github.com/influxdata/telegraf/pull/8196) `inputs.redis` add functionality to get values from redis commands + - [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to Go 1.15 - [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code - [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting @@ -46,6 +42,7 @@ - [#7818](https://github.com/influxdata/telegraf/pull/7818) `build` Fix darwin package build flags - [#7819](https://github.com/influxdata/telegraf/pull/7819) `inputs.tail` Close file to ensure it has been flushed - [#7853](https://github.com/influxdata/telegraf/pull/7853) Initialize aggregation processors + - [#7865](https://github.com/influxdata/telegraf/pull/7865) `common.shim` shim logger improvements - [#7867](https://github.com/influxdata/telegraf/pull/7867) `inputs.execd` fix issue with execd restart_delay being ignored - [#7872](https://github.com/influxdata/telegraf/pull/7872) `inputs.gnmi` Recv next message after send returns EOF - [#7877](https://github.com/influxdata/telegraf/pull/7877) Fix arch name in deb/rpm builds From 933925e8e3f1594d32f16d79799c66ef265d165c Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 16 Oct 2020 14:44:15 -0400 Subject: [PATCH 009/761] temporarily remove RAS plugin --- CHANGELOG.md | 1 - plugins/inputs/all/all.go | 1 - plugins/inputs/ras/README.md | 58 ------ plugins/inputs/ras/ras.go | 294 ------------------------------ plugins/inputs/ras/ras_test.go | 254 -------------------------- plugins/inputs/ras/ras_windows.go | 3 - 6 files changed, 611 deletions(-) delete mode 100644 plugins/inputs/ras/README.md delete mode 100644 plugins/inputs/ras/ras.go delete mode 100644 plugins/inputs/ras/ras_test.go delete mode 100644 plugins/inputs/ras/ras_windows.go diff --git a/CHANGELOG.md b/CHANGELOG.md index fd133ba0d95dd..74ab8ef402ead 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,7 +73,6 @@ - [nsd](/plugins/inputs/nsd/README.md) add nsd input plugin - Contributed by @gearnode - [opcua](/plugins/inputs/opcua/README.md) Add OPC UA input plugin - Contributed by InfluxData - [proxmox](/plugins/inputs/proxmox/README.md) Proxmox plugin - Contributed by @effitient - - [ras](/plugins/inputs/ras/README.md) New input plugin for RAS (Reliability, Availability and Serviceability) - Contributed by @p-zak - [win_eventlog](/plugins/inputs/win_eventlog/README.md) Windows eventlog input plugin - Contributed by @simnv #### New Output Plugins diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 1d1b8eb58b463..d25d329d4899a 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -141,7 +141,6 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/puppetagent" _ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq" _ "github.com/influxdata/telegraf/plugins/inputs/raindrops" - _ "github.com/influxdata/telegraf/plugins/inputs/ras" _ "github.com/influxdata/telegraf/plugins/inputs/redfish" _ "github.com/influxdata/telegraf/plugins/inputs/redis" _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" diff --git a/plugins/inputs/ras/README.md b/plugins/inputs/ras/README.md deleted file mode 100644 index 044118d1517ff..0000000000000 --- a/plugins/inputs/ras/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# RAS Daemon Input Plugin - -The `RAS` plugin gathers and counts errors provided by [RASDaemon](https://github.com/mchehab/rasdaemon). - -### Configuration - -```toml -[[inputs.ras]] - ## Optional path to RASDaemon sqlite3 database. - ## Default: /var/lib/rasdaemon/ras-mc_event.db - # db_path = "" -``` - -In addition `RASDaemon` runs, by default, with `--enable-sqlite3` flag. In case of problems with SQLite3 database please verify this is still a default option. - -### Metrics - -- ras - - tags: - - socket_id - - fields: - - memory_read_corrected_errors - - memory_read_uncorrectable_errors - - memory_write_corrected_errors - - memory_write_uncorrectable_errors - - cache_l0_l1_errors - - tlb_instruction_errors - - cache_l2_errors - - upi_errors - - processor_base_errors - - processor_bus_errors - - internal_timer_errors - - smm_handler_code_access_violation_errors - - internal_parity_errors - - frc_errors - - external_mce_errors - - microcode_rom_parity_errors - - unclassified_mce_errors - -Please note that `processor_base_errors` is aggregate counter measuring the following MCE events: -- internal_timer_errors -- smm_handler_code_access_violation_errors -- internal_parity_errors -- frc_errors -- external_mce_errors -- microcode_rom_parity_errors -- unclassified_mce_errors - -### Permissions - -This plugin requires access to SQLite3 database from `RASDaemon`. Please make sure that user has required permissions to this database. - -### Example Output - -``` -ras,host=ubuntu,socket_id=0 external_mce_base_errors=1i,frc_errors=1i,instruction_tlb_errors=5i,internal_parity_errors=1i,internal_timer_errors=1i,l0_and_l1_cache_errors=7i,memory_read_corrected_errors=25i,memory_read_uncorrectable_errors=0i,memory_write_corrected_errors=5i,memory_write_uncorrectable_errors=0i,microcode_rom_parity_errors=1i,processor_base_errors=7i,processor_bus_errors=1i,smm_handler_code_access_violation_errors=1i,unclassified_mce_base_errors=1i 1598867393000000000 -ras,host=ubuntu level_2_cache_errors=0i,upi_errors=0i 1598867393000000000 -``` diff --git a/plugins/inputs/ras/ras.go b/plugins/inputs/ras/ras.go deleted file mode 100644 index 036402eb87438..0000000000000 --- a/plugins/inputs/ras/ras.go +++ /dev/null @@ -1,294 +0,0 @@ -// +build !windows - -package ras - -import ( - "database/sql" - "strconv" - "strings" - "time" - - _ "github.com/mattn/go-sqlite3" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" -) - -type Ras struct { - DbPath string - latestTimestamp time.Time - cpuSocketCounters map[int]metricCounters - serverCounters metricCounters -} - -type machineCheckError struct { - Id int - Timestamp string - SocketId int - ErrorMsg string - MciStatusMsg string -} - -type metricCounters map[string]int64 - -const ( - mceQuery = ` - SELECT - id, timestamp, error_msg, mcistatus_msg, socketid - FROM mce_record - WHERE timestamp > ? - ` - defaultDbPath = "/var/lib/rasdaemon/ras-mc_event.db" - dateLayout = "2006-01-02 15:04:05 -0700" - memoryReadCorrected = "memory_read_corrected_errors" - memoryReadUncorrected = "memory_read_uncorrectable_errors" - memoryWriteCorrected = "memory_write_corrected_errors" - memoryWriteUncorrected = "memory_write_uncorrectable_errors" - instructionCache = "cache_l0_l1_errors" - instructionTLB = "tlb_instruction_errors" - levelTwoCache = "cache_l2_errors" - upi = "upi_errors" - processorBase = "processor_base_errors" - processorBus = "processor_bus_errors" - internalTimer = "internal_timer_errors" - smmHandlerCode = "smm_handler_code_access_violation_errors" - internalParity = "internal_parity_errors" - frc = "frc_errors" - externalMCEBase = "external_mce_errors" - microcodeROMParity = "microcode_rom_parity_errors" - unclassifiedMCEBase = "unclassified_mce_errors" -) - -func (r *Ras) SampleConfig() string { - return ` - ## Optional path to RASDaemon sqlite3 database. - ## Default: /var/lib/rasdaemon/ras-mc_event.db - # db_path = "" -` -} - -func (r *Ras) Description() string { - return "RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required)." -} - -func (r *Ras) Gather(acc telegraf.Accumulator) error { - db, err := connectToDB(r.DbPath) - if err != nil { - return err - } - defer db.Close() - - rows, err := db.Query(mceQuery, r.latestTimestamp) - if err != nil { - return err - } - defer rows.Close() - - for rows.Next() { - mcError, err := fetchMachineCheckError(rows) - if err != nil { - return err - } - tsErr := r.updateLatestTimestamp(mcError.Timestamp) - if tsErr != nil { - return err - } - r.updateCounters(mcError) - } - - addCpuSocketMetrics(acc, r.cpuSocketCounters) - addServerMetrics(acc, r.serverCounters) - - return nil -} - -func (r *Ras) updateLatestTimestamp(timestamp string) error { - ts, err := parseDate(timestamp) - if err != nil { - return err - } - if ts.After(r.latestTimestamp) { - r.latestTimestamp = ts - } - - return nil -} - -func (r *Ras) updateCounters(mcError *machineCheckError) { - if strings.Contains(mcError.ErrorMsg, "No Error") { - return - } - - r.initializeCpuMetricDataIfRequired(mcError.SocketId) - r.updateSocketCounters(mcError) - r.updateServerCounters(mcError) -} - -func newMetricCounters() *metricCounters { - return &metricCounters{ - memoryReadCorrected: 0, - memoryReadUncorrected: 0, - memoryWriteCorrected: 0, - memoryWriteUncorrected: 0, - instructionCache: 0, - instructionTLB: 0, - processorBase: 0, - processorBus: 0, - internalTimer: 0, - smmHandlerCode: 0, - internalParity: 0, - frc: 0, - externalMCEBase: 0, - microcodeROMParity: 0, - unclassifiedMCEBase: 0, - } -} - -func (r *Ras) updateServerCounters(mcError *machineCheckError) { - if strings.Contains(mcError.ErrorMsg, "CACHE Level-2") && strings.Contains(mcError.ErrorMsg, "Error") { - r.serverCounters[levelTwoCache] += 1 - } - - if strings.Contains(mcError.ErrorMsg, "UPI:") { - r.serverCounters[upi] += 1 - } -} - -func connectToDB(server string) (*sql.DB, error) { - return sql.Open("sqlite3", server) -} - -func (r *Ras) initializeCpuMetricDataIfRequired(socketId int) { - if _, ok := r.cpuSocketCounters[socketId]; !ok { - r.cpuSocketCounters[socketId] = *newMetricCounters() - } -} - -func (r *Ras) updateSocketCounters(mcError *machineCheckError) { - r.updateMemoryCounters(mcError) - r.updateProcessorBaseCounters(mcError) - - if strings.Contains(mcError.ErrorMsg, "Instruction TLB") && strings.Contains(mcError.ErrorMsg, "Error") { - r.cpuSocketCounters[mcError.SocketId][instructionTLB] += 1 - } - - if strings.Contains(mcError.ErrorMsg, "BUS") && strings.Contains(mcError.ErrorMsg, "Error") { - r.cpuSocketCounters[mcError.SocketId][processorBus] += 1 - } - - if (strings.Contains(mcError.ErrorMsg, "CACHE Level-0") || - strings.Contains(mcError.ErrorMsg, "CACHE Level-1")) && - strings.Contains(mcError.ErrorMsg, "Error") { - r.cpuSocketCounters[mcError.SocketId][instructionCache] += 1 - } -} - -func (r *Ras) updateProcessorBaseCounters(mcError *machineCheckError) { - if strings.Contains(mcError.ErrorMsg, "Internal Timer error") { - r.cpuSocketCounters[mcError.SocketId][internalTimer] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 - } - - if strings.Contains(mcError.ErrorMsg, "SMM Handler Code Access Violation") { - r.cpuSocketCounters[mcError.SocketId][smmHandlerCode] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 - } - - if strings.Contains(mcError.ErrorMsg, "Internal parity error") { - r.cpuSocketCounters[mcError.SocketId][internalParity] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 - } - - if strings.Contains(mcError.ErrorMsg, "FRC error") { - r.cpuSocketCounters[mcError.SocketId][frc] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 - } - - if strings.Contains(mcError.ErrorMsg, "External error") { - r.cpuSocketCounters[mcError.SocketId][externalMCEBase] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 - } - - if strings.Contains(mcError.ErrorMsg, "Microcode ROM parity error") { - r.cpuSocketCounters[mcError.SocketId][microcodeROMParity] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 - } - - if strings.Contains(mcError.ErrorMsg, "Unclassified") || strings.Contains(mcError.ErrorMsg, "Internal unclassified") { - r.cpuSocketCounters[mcError.SocketId][unclassifiedMCEBase] += 1 - r.cpuSocketCounters[mcError.SocketId][processorBase] += 1 - } -} - -func (r *Ras) updateMemoryCounters(mcError *machineCheckError) { - if strings.Contains(mcError.ErrorMsg, "Memory read error") { - if strings.Contains(mcError.MciStatusMsg, "Corrected_error") { - r.cpuSocketCounters[mcError.SocketId][memoryReadCorrected] += 1 - } else { - r.cpuSocketCounters[mcError.SocketId][memoryReadUncorrected] += 1 - } - } - if strings.Contains(mcError.ErrorMsg, "Memory write error") { - if strings.Contains(mcError.MciStatusMsg, "Corrected_error") { - r.cpuSocketCounters[mcError.SocketId][memoryWriteCorrected] += 1 - } else { - r.cpuSocketCounters[mcError.SocketId][memoryWriteUncorrected] += 1 - } - } -} - -func addCpuSocketMetrics(acc telegraf.Accumulator, cpuSocketCounters map[int]metricCounters) { - for socketId, data := range cpuSocketCounters { - tags := map[string]string{ - "socket_id": strconv.Itoa(socketId), - } - fields := make(map[string]interface{}) - - for errorName, count := range data { - fields[errorName] = count - } - - acc.AddCounter("ras", fields, tags) - } -} - -func addServerMetrics(acc telegraf.Accumulator, counters map[string]int64) { - fields := make(map[string]interface{}) - for errorName, count := range counters { - fields[errorName] = count - } - - acc.AddCounter("ras", fields, map[string]string{}) -} - -func fetchMachineCheckError(rows *sql.Rows) (*machineCheckError, error) { - mcError := &machineCheckError{} - err := rows.Scan(&mcError.Id, &mcError.Timestamp, &mcError.ErrorMsg, &mcError.MciStatusMsg, &mcError.SocketId) - - if err != nil { - return nil, err - } - - return mcError, nil -} - -func parseDate(date string) (time.Time, error) { - return time.Parse(dateLayout, date) -} - -func init() { - inputs.Add("ras", func() telegraf.Input { - defaultTimestamp, _ := parseDate("1970-01-01 00:00:01 -0700") - return &Ras{ - DbPath: defaultDbPath, - latestTimestamp: defaultTimestamp, - cpuSocketCounters: map[int]metricCounters{ - 0: *newMetricCounters(), - }, - serverCounters: map[string]int64{ - levelTwoCache: 0, - upi: 0, - }, - } - }) -} diff --git a/plugins/inputs/ras/ras_test.go b/plugins/inputs/ras/ras_test.go deleted file mode 100644 index 7b34074218b5c..0000000000000 --- a/plugins/inputs/ras/ras_test.go +++ /dev/null @@ -1,254 +0,0 @@ -// +build !windows - -package ras - -import ( - "fmt" - "testing" - - "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" -) - -func TestUpdateCounters(t *testing.T) { - ras := newRas() - for _, mce := range testData { - ras.updateCounters(&mce) - } - - assert.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain counters only for single socket") - - for metric, value := range ras.cpuSocketCounters[0] { - if metric == processorBase { - // processor_base_errors is sum of other seven errors: internal_timer_errors, smm_handler_code_access_violation_errors, - // internal_parity_errors, frc_errors, external_mce_errors, microcode_rom_parity_errors and unclassified_mce_errors - assert.Equal(t, int64(7), value, fmt.Sprintf("%s should have value of 7", processorBase)) - } else { - assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) - } - } - - for metric, value := range ras.serverCounters { - assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) - } -} - -func TestUpdateLatestTimestamp(t *testing.T) { - ras := newRas() - ts := "2020-08-01 15:13:27 +0200" - testData = append(testData, []machineCheckError{ - { - Timestamp: "2019-05-20 08:25:55 +0200", - SocketId: 0, - ErrorMsg: "", - MciStatusMsg: "", - }, - { - Timestamp: "2018-02-21 12:27:22 +0200", - SocketId: 0, - ErrorMsg: "", - MciStatusMsg: "", - }, - { - Timestamp: ts, - SocketId: 0, - ErrorMsg: "", - MciStatusMsg: "", - }, - }...) - for _, mce := range testData { - err := ras.updateLatestTimestamp(mce.Timestamp) - assert.NoError(t, err) - } - assert.Equal(t, ts, ras.latestTimestamp.Format(dateLayout)) -} - -func TestMultipleSockets(t *testing.T) { - ras := newRas() - cacheL2 := "Instruction CACHE Level-2 Generic Error" - overflow := "Error_overflow Corrected_error" - testData = []machineCheckError{ - { - Timestamp: "2019-05-20 08:25:55 +0200", - SocketId: 0, - ErrorMsg: cacheL2, - MciStatusMsg: overflow, - }, - { - Timestamp: "2018-02-21 12:27:22 +0200", - SocketId: 1, - ErrorMsg: cacheL2, - MciStatusMsg: overflow, - }, - { - Timestamp: "2020-03-21 14:17:28 +0200", - SocketId: 2, - ErrorMsg: cacheL2, - MciStatusMsg: overflow, - }, - { - Timestamp: "2020-03-21 17:24:18 +0200", - SocketId: 3, - ErrorMsg: cacheL2, - MciStatusMsg: overflow, - }, - } - for _, mce := range testData { - ras.updateCounters(&mce) - } - assert.Equal(t, 4, len(ras.cpuSocketCounters), "Should contain counters for four sockets") - - for _, metricData := range ras.cpuSocketCounters { - for metric, value := range metricData { - if metric == levelTwoCache { - assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", levelTwoCache)) - } else { - assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) - } - } - } -} - -func TestMissingDatabase(t *testing.T) { - var acc testutil.Accumulator - ras := newRas() - ras.DbPath = "/tmp/test.db" - err := ras.Gather(&acc) - assert.Error(t, err) -} - -func TestEmptyDatabase(t *testing.T) { - ras := newRas() - - assert.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain default counters for one socket") - assert.Equal(t, 2, len(ras.serverCounters), "Should contain default counters for server") - - for metric, value := range ras.cpuSocketCounters[0] { - assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) - } - - for metric, value := range ras.serverCounters { - assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) - } -} - -func newRas() *Ras { - defaultTimestamp, _ := parseDate("1970-01-01 00:00:01 -0700") - return &Ras{ - DbPath: defaultDbPath, - latestTimestamp: defaultTimestamp, - cpuSocketCounters: map[int]metricCounters{ - 0: *newMetricCounters(), - }, - serverCounters: map[string]int64{ - levelTwoCache: 0, - upi: 0, - }, - } -} - -var testData = []machineCheckError{ - { - Timestamp: "2020-05-20 07:34:53 +0200", - SocketId: 0, - ErrorMsg: "MEMORY CONTROLLER RD_CHANNEL0_ERR Transaction: Memory read error", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 07:35:11 +0200", - SocketId: 0, - ErrorMsg: "MEMORY CONTROLLER RD_CHANNEL0_ERR Transaction: Memory read error", - MciStatusMsg: "Uncorrected_error", - }, - { - Timestamp: "2020-05-20 07:37:50 +0200", - SocketId: 0, - ErrorMsg: "MEMORY CONTROLLER RD_CHANNEL2_ERR Transaction: Memory write error", - MciStatusMsg: "Uncorrected_error", - }, - { - Timestamp: "2020-05-20 08:14:51 +0200", - SocketId: 0, - ErrorMsg: "MEMORY CONTROLLER WR_CHANNEL2_ERR Transaction: Memory write error", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:15:31 +0200", - SocketId: 0, - ErrorMsg: "corrected filtering (some unreported errors in same region) Instruction CACHE Level-0 Read Error", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:16:32 +0200", - SocketId: 0, - ErrorMsg: "Instruction TLB Level-0 Error", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:16:56 +0200", - SocketId: 0, - ErrorMsg: "No Error", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:17:24 +0200", - SocketId: 0, - ErrorMsg: "Unclassified", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:17:41 +0200", - SocketId: 0, - ErrorMsg: "Microcode ROM parity error", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:17:48 +0200", - SocketId: 0, - ErrorMsg: "FRC error", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:18:18 +0200", - SocketId: 0, - ErrorMsg: "Internal parity error", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:18:34 +0200", - SocketId: 0, - ErrorMsg: "SMM Handler Code Access Violation", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:18:54 +0200", - SocketId: 0, - ErrorMsg: "Internal Timer error", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:21:23 +0200", - SocketId: 0, - ErrorMsg: "BUS Level-3 Generic Generic IO Request-did-not-timeout Error", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:23:23 +0200", - SocketId: 0, - ErrorMsg: "External error", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:25:31 +0200", - SocketId: 0, - ErrorMsg: "UPI: COR LL Rx detected CRC error - successful LLR without Phy Reinit", - MciStatusMsg: "Error_overflow Corrected_error", - }, - { - Timestamp: "2020-05-20 08:25:55 +0200", - SocketId: 0, - ErrorMsg: "Instruction CACHE Level-2 Generic Error", - MciStatusMsg: "Error_overflow Corrected_error", - }, -} diff --git a/plugins/inputs/ras/ras_windows.go b/plugins/inputs/ras/ras_windows.go deleted file mode 100644 index ac7dadd567381..0000000000000 --- a/plugins/inputs/ras/ras_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build windows - -package ras From a6a63fddabe789064175b50a3419b4949d714dcf Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 16 Oct 2020 15:06:49 -0400 Subject: [PATCH 010/761] go mod tidy --- go.mod | 1 - go.sum | 7 ------- 2 files changed, 8 deletions(-) diff --git a/go.mod b/go.mod index 474f8512b6280..6226fd81c7dd7 100644 --- a/go.mod +++ b/go.mod @@ -88,7 +88,6 @@ require ( github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 // indirect github.com/lib/pq v1.3.0 // indirect github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect - github.com/mattn/go-sqlite3 v1.14.0 github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe github.com/miekg/dns v1.0.14 diff --git a/go.sum b/go.sum index a81c02d0ed8bb..e9b596a7ca8aa 100644 --- a/go.sum +++ b/go.sum @@ -83,7 +83,6 @@ github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcV github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ= github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -104,7 +103,6 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1C github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= -github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= @@ -407,8 +405,6 @@ github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 h1:8/+Y8SKf0xCZ8cCTfnrMdY7HNzlEjPAt3bPjalNb6CA= github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA= -github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe h1:yMrL+YorbzaBpj/h3BbLMP+qeslPZYMbzcpHFBNy1Yk= @@ -649,7 +645,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -678,8 +673,6 @@ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= From bfc1a87e1dd5e8915e274cd825f1618640a211e0 Mon Sep 17 00:00:00 2001 From: reimda Date: Fri, 16 Oct 2020 13:08:07 -0600 Subject: [PATCH 011/761] Add glibc version check for linux builds (#8278) --- Makefile | 2 + scripts/check-dynamic-glibc-versions.sh | 77 +++++++++++++++++++++++++ 2 files changed, 79 insertions(+) create mode 100755 scripts/check-dynamic-glibc-versions.sh diff --git a/Makefile b/Makefile index 4dd2754ec0910..33c804794dadb 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,7 @@ next_version := 1.16.0 tag := $(shell git describe --exact-match --tags 2>git_describe_error.tmp; rm -f git_describe_error.tmp) branch := $(shell git rev-parse --abbrev-ref HEAD) commit := $(shell git rev-parse --short=8 HEAD) +glibc_version := 2.17 ifdef NIGHTLY version := $(next_version) @@ -191,6 +192,7 @@ install: $(buildbin) @if [ $(GOOS) != "windows" ]; then cp -fv etc/telegraf.conf $(DESTDIR)$(sysconfdir)/telegraf/telegraf.conf$(conf_suffix); fi @if [ $(GOOS) != "windows" ]; then cp -fv etc/logrotate.d/telegraf $(DESTDIR)$(sysconfdir)/logrotate.d; fi @if [ $(GOOS) = "windows" ]; then cp -fv etc/telegraf_windows.conf $(DESTDIR)/telegraf.conf; fi + @if [ $(GOOS) = "linux" ]; then scripts/check-dynamic-glibc-versions.sh $(buildbin) $(glibc_version); fi @if [ $(GOOS) = "linux" ]; then mkdir -pv $(DESTDIR)$(prefix)/lib/telegraf/scripts; fi @if [ $(GOOS) = "linux" ]; then cp -fv scripts/telegraf.service $(DESTDIR)$(prefix)/lib/telegraf/scripts; fi @if [ $(GOOS) = "linux" ]; then cp -fv scripts/init.sh $(DESTDIR)$(prefix)/lib/telegraf/scripts; fi diff --git a/scripts/check-dynamic-glibc-versions.sh b/scripts/check-dynamic-glibc-versions.sh new file mode 100755 index 0000000000000..a89dae107bc8d --- /dev/null +++ b/scripts/check-dynamic-glibc-versions.sh @@ -0,0 +1,77 @@ +#!/bin/bash +set -euo pipefail +IFS=$'\n\t' + +usage () { + echo "Check that no dynamic symbols provided by glibc are newer than a given version" + echo "Usage:" + echo " $0 program version" + echo "where program is the elf binary to check and version is a dotted version string like 2.3.4" + exit 1 +} + +#validate input and display help +[[ $# = 2 ]] || usage +prog=$1 +max=$2 + +#make sure dependencies are installed +have_deps=true +for i in objdump sort uniq sed; do + if ! command -v "$i" > /dev/null; then + echo "$i not in path" + have_deps=false + fi +done +if [[ $have_deps = false ]]; then + exit 1 +fi + +#compare dotted versions +#see https://stackoverflow.com/questions/4023830/how-to-compare-two-strings-in-dot-separated-version-format-in-bash +vercomp () { + if [[ $1 == $2 ]] + then + return 0 + fi + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +objdump -T "$prog" | # get the dynamic symbol table + sed -n "s/.* GLIBC_\([0-9.]\+\).*/\1/p" | # find the entries for glibc and grab the version + sort | uniq | # remove duplicates + while read v; do + set +e + vercomp "$v" "$max" # fail if any version is newer than our max + comp=$? + set -e + if [[ $comp -eq 1 ]]; then + echo "$v is newer than $max" + exit 1 + fi + done + +exit 0 From f7f0c7324b0a4497b3ef18157603fcb14117079a Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 16 Oct 2020 15:21:56 -0400 Subject: [PATCH 012/761] remove unused dependency from list --- docs/LICENSE_OF_DEPENDENCIES.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index d8a942e63e1ad..677c8046a16bb 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -94,7 +94,6 @@ following works: - github.com/kubernetes/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) - github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) -- github.com/mattn/go-sqlite3 [MIT License](https://github.com/mattn/go-sqlite3/blob/master/LICENSE) - github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) - github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md) - github.com/mdlayher/genetlink [MIT License](https://github.com/mdlayher/genetlink/blob/master/LICENSE.md) From 771f65eb853bddbf5f90a3aab6f885bf454500bf Mon Sep 17 00:00:00 2001 From: reimda Date: Fri, 16 Oct 2020 14:45:15 -0600 Subject: [PATCH 013/761] Skip statically linked program during glibc version check (#8280) --- scripts/check-dynamic-glibc-versions.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/scripts/check-dynamic-glibc-versions.sh b/scripts/check-dynamic-glibc-versions.sh index a89dae107bc8d..b00e3bf9b7d08 100755 --- a/scripts/check-dynamic-glibc-versions.sh +++ b/scripts/check-dynamic-glibc-versions.sh @@ -17,7 +17,7 @@ max=$2 #make sure dependencies are installed have_deps=true -for i in objdump sort uniq sed; do +for i in objdump grep sort uniq sed; do if ! command -v "$i" > /dev/null; then echo "$i not in path" have_deps=false @@ -60,6 +60,11 @@ vercomp () { return 0 } +if ! objdump -p "$prog" | grep -q NEEDED; then + echo "$prog doesn't have dynamic library dependencies" + exit 0 +fi + objdump -T "$prog" | # get the dynamic symbol table sed -n "s/.* GLIBC_\([0-9.]\+\).*/\1/p" | # find the entries for glibc and grab the version sort | uniq | # remove duplicates From 0ae6fe7c9cea1103a53f0e3078c4eb15c4bdce49 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 16 Oct 2020 17:03:36 -0400 Subject: [PATCH 014/761] fix flakey proc test (#8279) --- plugins/inputs/processes/processes_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go index 630ecd65e7666..ca74bd0f59442 100644 --- a/plugins/inputs/processes/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -15,10 +15,11 @@ import ( ) func TestProcesses(t *testing.T) { + tester := tester{} processes := &Processes{ Log: testutil.Logger{}, execPS: testExecPS("STAT\n Ss \n S \n Z \n R \n S< \n SNs \n Ss+ \n \n \n"), - readProcFile: readProcFile, + readProcFile: tester.testProcFile, } var acc testutil.Accumulator From 87fcea5e9bf5e41f6779640c820159b58cab6c27 Mon Sep 17 00:00:00 2001 From: a-bali Date: Mon, 19 Oct 2020 16:49:48 +0200 Subject: [PATCH 015/761] Add support for fields and protocol lookups in port_name. (#8157) --- plugins/processors/port_name/README.md | 16 ++- plugins/processors/port_name/port_name.go | 76 +++++++++++-- .../processors/port_name/port_name_test.go | 107 ++++++++++++++++-- 3 files changed, 181 insertions(+), 18 deletions(-) diff --git a/plugins/processors/port_name/README.md b/plugins/processors/port_name/README.md index ad4e52d6bc187..3629aff84e90a 100644 --- a/plugins/processors/port_name/README.md +++ b/plugins/processors/port_name/README.md @@ -1,8 +1,10 @@ # Port Name Lookup Processor Plugin -Use the `port_name` processor to convert a tag containing a well-known port number to the registered service name. +Use the `port_name` processor to convert a tag or field containing a well-known port number to the registered service name. -Tag can contain a number ("80") or number and protocol separated by slash ("443/tcp"). If protocol is not provided it defaults to tcp but can be changed with the default_protocol setting. +Tag or field can contain a number ("80") or number and protocol separated by slash ("443/tcp"). If protocol is not provided it defaults to tcp but can be changed with the default_protocol setting. An additional tag or field can be specified for the protocol. + +If the source was found in tag, the service name will be added as a tag. If the source was found in a field, the service name will also be a field. Telegraf minimum version: Telegraf 1.15.0 @@ -12,12 +14,20 @@ Telegraf minimum version: Telegraf 1.15.0 [[processors.port_name]] ## Name of tag holding the port number # tag = "port" + ## Or name of the field holding the port number + # field = "port" - ## Name of output tag where service name will be added + ## Name of output tag or field (depending on the source) where service name will be added # dest = "service" ## Default tcp or udp # default_protocol = "tcp" + + ## Tag containing the protocol (tcp or udp, case-insensitive) + # protocol_tag = "proto" + + ## Field containing the protocol (tcp or udp, case-insensitive) + # protocol_field = "proto" ``` ### Example diff --git a/plugins/processors/port_name/port_name.go b/plugins/processors/port_name/port_name.go index 50c893e60d6dc..7866952314f2a 100644 --- a/plugins/processors/port_name/port_name.go +++ b/plugins/processors/port_name/port_name.go @@ -15,12 +15,20 @@ var sampleConfig = ` [[processors.port_name]] ## Name of tag holding the port number # tag = "port" + ## Or name of the field holding the port number + # field = "port" - ## Name of output tag where service name will be added + ## Name of output tag or field (depending on the source) where service name will be added # dest = "service" ## Default tcp or udp # default_protocol = "tcp" + + ## Tag containing the protocol (tcp or udp, case-insensitive) + # protocol_tag = "proto" + + ## Field containing the protocol (tcp or udp, case-insensitive) + # protocol_field = "proto" ` type sMap map[string]map[int]string // "https" == services["tcp"][443] @@ -29,8 +37,11 @@ var services sMap type PortName struct { SourceTag string `toml:"tag"` - DestTag string `toml:"dest"` + SourceField string `toml:"field"` + Dest string `toml:"dest"` DefaultProtocol string `toml:"default_protocol"` + ProtocolTag string `toml:"protocol_tag"` + ProtocolField string `toml:"protocol_field"` Log telegraf.Logger `toml:"-"` } @@ -40,7 +51,7 @@ func (d *PortName) SampleConfig() string { } func (d *PortName) Description() string { - return "Given a tag of a TCP or UDP port number, add a tag of the service name looked up in the system services file" + return "Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file" } func readServicesFile() { @@ -97,11 +108,36 @@ func readServices(r io.Reader) sMap { func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { for _, m := range metrics { - portProto, ok := m.GetTag(d.SourceTag) - if !ok { - // Nonexistent tag + + var portProto string + var fromField bool + + if len(d.SourceTag) > 0 { + if tag, ok := m.GetTag(d.SourceTag); ok { + portProto = string([]byte(tag)) + } + } + if len(d.SourceField) > 0 { + if field, ok := m.GetField(d.SourceField); ok { + switch v := field.(type) { + default: + d.Log.Errorf("Unexpected type %t in source field; must be string or int", v) + continue + case int64: + portProto = strconv.FormatInt(v, 10) + case uint64: + portProto = strconv.FormatUint(v, 10) + case string: + portProto = v + } + fromField = true + } + } + + if len(portProto) == 0 { continue } + portProtoSlice := strings.SplitN(portProto, "/", 2) l := len(portProtoSlice) @@ -127,6 +163,23 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { if l > 1 && len(portProtoSlice[1]) > 0 { proto = portProtoSlice[1] } + if len(d.ProtocolTag) > 0 { + if tag, ok := m.GetTag(d.ProtocolTag); ok { + proto = tag + } + } + if len(d.ProtocolField) > 0 { + if field, ok := m.GetField(d.ProtocolField); ok { + switch v := field.(type) { + default: + d.Log.Errorf("Unexpected type %t in protocol field; must be string", v) + continue + case string: + proto = v + } + } + } + proto = strings.ToLower(proto) protoMap, ok := services[proto] @@ -151,7 +204,11 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { continue } - m.AddTag(d.DestTag, service) + if fromField { + m.AddField(d.Dest, service) + } else { + m.AddTag(d.Dest, service) + } } return metrics @@ -167,8 +224,11 @@ func init() { processors.Add("port_name", func() telegraf.Processor { return &PortName{ SourceTag: "port", - DestTag: "service", + SourceField: "port", + Dest: "service", DefaultProtocol: "tcp", + ProtocolTag: "proto", + ProtocolField: "proto", } }) } diff --git a/plugins/processors/port_name/port_name_test.go b/plugins/processors/port_name/port_name_test.go index b58f95a9eb75a..46839b2bea80b 100644 --- a/plugins/processors/port_name/port_name_test.go +++ b/plugins/processors/port_name/port_name_test.go @@ -28,12 +28,15 @@ func TestFakeServices(t *testing.T) { func TestTable(t *testing.T) { var tests = []struct { - name string - tag string - dest string - prot string - input []telegraf.Metric - expected []telegraf.Metric + name string + tag string + field string + dest string + prot string + protField string + protTag string + input []telegraf.Metric + expected []telegraf.Metric }{ { name: "ordinary tcp default", @@ -239,6 +242,93 @@ func TestTable(t *testing.T) { ), }, }, + { + name: "read from field instead of tag", + field: "foo", + dest: "bar", + prot: "tcp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{}, + map[string]interface{}{ + "foo": "80", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{}, + map[string]interface{}{ + "foo": "80", + "bar": "http", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "read proto from field", + field: "foo", + dest: "bar", + prot: "udp", + protField: "proto", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{}, + map[string]interface{}{ + "foo": "80", + "proto": "tcp", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{}, + map[string]interface{}{ + "foo": "80", + "bar": "http", + "proto": "tcp", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "read proto from tag", + tag: "foo", + dest: "bar", + prot: "udp", + protTag: "proto", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "foo": "80", + "proto": "tcp", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "foo": "80", + "bar": "http", + "proto": "tcp", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, } r := strings.NewReader(fakeServices) @@ -248,8 +338,11 @@ func TestTable(t *testing.T) { t.Run(tt.name, func(t *testing.T) { p := PortName{ SourceTag: tt.tag, - DestTag: tt.dest, + SourceField: tt.field, + Dest: tt.dest, DefaultProtocol: tt.prot, + ProtocolField: tt.protField, + ProtocolTag: tt.protTag, Log: testutil.Logger{}, } From e3aa6eb57723d33ca490f725d81576e8c708daf7 Mon Sep 17 00:00:00 2001 From: simnv Date: Mon, 19 Oct 2020 21:24:46 +0500 Subject: [PATCH 016/761] Fix Event timestamps (#8216) Closes #8204 --- plugins/inputs/win_eventlog/README.md | 8 +++++++- plugins/inputs/win_eventlog/win_eventlog.go | 18 ++++++++++++++++-- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/win_eventlog/README.md b/plugins/inputs/win_eventlog/README.md index e3c48656f79c9..97c5cdd79a8a6 100644 --- a/plugins/inputs/win_eventlog/README.md +++ b/plugins/inputs/win_eventlog/README.md @@ -72,6 +72,10 @@ Telegraf minimum version: Telegraf 1.16.0 ## Get only first line of Message field. For most events first line is usually more than enough only_first_line_of_message = true + ## Parse timestamp from TimeCreated.SystemTime event field. + ## Will default to current time of telegraf processing on parsing error or if set to false + timestamp_from_event = true + ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText") event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] @@ -79,7 +83,7 @@ Telegraf minimum version: Telegraf 1.16.0 event_fields = ["*"] ## Fields to exclude. Also applied to data fields. Globbing supported - exclude_fields = ["Binary", "Data_Address*"] + exclude_fields = ["TimeCreated", "Binary", "Data_Address*"] ## Skip those tags or fields if their value is empty or equals to zero. Globbing supported exclude_empty = ["*ActivityID", "UserID"] @@ -154,6 +158,8 @@ Fields `Level`, `Opcode` and `Task` are converted to text and saved as computed `Message` field is rendered from the event data, and can be several kilobytes of text with line breaks. For most events the first line of this text is more then enough, and additional info is more useful to be parsed as XML fields. So, for brevity, plugin takes only the first line. You can set `only_first_line_of_message` parameter to `false` to take full message text. +`TimeCreated` field is a string in RFC3339Nano format. By default Telegraf parses it as an event timestamp. If there is a field parse error or `timestamp_from_event` configration parameter is set to `false`, then event timestamp will be set to the exact time when Telegraf has parsed this event, so it will be rounded to the nearest minute. + ### Additional Fields The content of **Event Data** and **User Data** XML Nodes can be added as additional fields, and is added by default. You can disable that by setting `process_userdata` or `process_eventdata` parameters to `false`. diff --git a/plugins/inputs/win_eventlog/win_eventlog.go b/plugins/inputs/win_eventlog/win_eventlog.go index 376ef4169d902..8d0efe3119d97 100644 --- a/plugins/inputs/win_eventlog/win_eventlog.go +++ b/plugins/inputs/win_eventlog/win_eventlog.go @@ -13,6 +13,7 @@ import ( "reflect" "strings" "syscall" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -80,6 +81,10 @@ var sampleConfig = ` ## Get only first line of Message field. For most events first line is usually more than enough only_first_line_of_message = true + ## Parse timestamp from TimeCreated.SystemTime event field. + ## Will default to current time of telegraf processing on parsing error or if set to false + timestamp_from_event = true + ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText") event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] @@ -87,7 +92,7 @@ var sampleConfig = ` event_fields = ["*"] ## Fields to exclude. Also applied to data fields. Globbing supported - exclude_fields = ["Binary", "Data_Address*"] + exclude_fields = ["TimeCreated", "Binary", "Data_Address*"] ## Skip those tags or fields if their value is empty or equals to zero. Globbing supported exclude_empty = ["*ActivityID", "UserID"] @@ -102,6 +107,7 @@ type WinEventLog struct { ProcessEventData bool `toml:"process_eventdata"` Separator string `toml:"separator"` OnlyFirstLineOfMessage bool `toml:"only_first_line_of_message"` + TimeStampFromEvent bool `toml:"timestamp_from_event"` EventTags []string `toml:"event_tags"` EventFields []string `toml:"event_fields"` ExcludeFields []string `toml:"exclude_fields"` @@ -157,6 +163,7 @@ loop: tags := map[string]string{} fields := map[string]interface{}{} evt := reflect.ValueOf(&event).Elem() + timeStamp := time.Now() // Walk through all fields of Event struct to process System tags or fields for i := 0; i < evt.NumField(); i++ { fieldName := evt.Type().Field(i).Name @@ -181,6 +188,12 @@ loop: case "TimeCreated": fieldValue = event.TimeCreated.SystemTime fieldType = reflect.TypeOf(fieldValue).String() + if w.TimeStampFromEvent { + timeStamp, err = time.Parse(time.RFC3339Nano, fmt.Sprintf("%v", fieldValue)) + if err != nil { + w.Log.Warnf("Error parsing timestamp %q: %v", fieldValue, err) + } + } case "Correlation": if should, _ := w.shouldProcessField("ActivityID"); should { activityID := event.Correlation.ActivityID @@ -258,7 +271,7 @@ loop: } // Pass collected metrics - acc.AddFields("win_eventlog", fields, tags) + acc.AddFields("win_eventlog", fields, tags, timeStamp) } } @@ -510,6 +523,7 @@ func init() { ProcessEventData: true, Separator: "_", OnlyFirstLineOfMessage: true, + TimeStampFromEvent: true, EventTags: []string{"Source", "EventID", "Level", "LevelText", "Keywords", "Channel", "Computer"}, EventFields: []string{"*"}, ExcludeEmpty: []string{"Task", "Opcode", "*ActivityID", "UserID"}, From 1696cca283cdf2ae53f7b72683e33c1820954413 Mon Sep 17 00:00:00 2001 From: simnv Date: Tue, 20 Oct 2020 19:16:22 +0500 Subject: [PATCH 017/761] Fix using empty string as the namespace prefix in azure_monitor output plugin (#8282) * Fix using empty string as the namespace prefix Fixes #8256 * Test using empty string as the namespace prefix --- config/config_test.go | 21 +++++++++++++++++++ .../outputs/azure_monitor/azure_monitor.go | 7 ++----- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index 6c5e3662a3151..42aefff151761 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -12,6 +12,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" "github.com/influxdata/telegraf/plugins/inputs/memcached" "github.com/influxdata/telegraf/plugins/inputs/procstat" + "github.com/influxdata/telegraf/plugins/outputs/azure_monitor" httpOut "github.com/influxdata/telegraf/plugins/outputs/http" "github.com/influxdata/telegraf/plugins/parsers" "github.com/stretchr/testify/assert" @@ -257,3 +258,23 @@ func TestConfig_BadOrdering(t *testing.T) { require.Error(t, err, "bad ordering") assert.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: Error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) } + +func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { + // #8256 Cannot use empty string as the namespace prefix + c := NewConfig() + defaultPrefixConfig := `[[outputs.azure_monitor]]` + err := c.LoadConfigData([]byte(defaultPrefixConfig)) + assert.NoError(t, err) + azureMonitor, ok := c.Outputs[0].Output.(*azure_monitor.AzureMonitor) + assert.Equal(t, "Telegraf/", azureMonitor.NamespacePrefix) + assert.Equal(t, true, ok) + + c = NewConfig() + customPrefixConfig := `[[outputs.azure_monitor]] + namespace_prefix = ""` + err = c.LoadConfigData([]byte(customPrefixConfig)) + assert.NoError(t, err) + azureMonitor, ok = c.Outputs[0].Output.(*azure_monitor.AzureMonitor) + assert.Equal(t, "", azureMonitor.NamespacePrefix) + assert.Equal(t, true, ok) +} diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index f2b1db1dd6868..a90dac049d6eb 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -155,10 +155,6 @@ func (a *AzureMonitor) Connect() error { Timeout: a.Timeout.Duration, } - if a.NamespacePrefix == "" { - a.NamespacePrefix = defaultNamespacePrefix - } - var err error var region string var resourceID string @@ -646,7 +642,8 @@ func (a *AzureMonitor) Reset() { func init() { outputs.Add("azure_monitor", func() telegraf.Output { return &AzureMonitor{ - timeFunc: time.Now, + timeFunc: time.Now, + NamespacePrefix: defaultNamespacePrefix, } }) } From f61457f87ae14f58cef77e353d160768852014de Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Tue, 20 Oct 2020 16:17:17 +0200 Subject: [PATCH 018/761] sqlAzureMIResourceStats - added sorting (#8286) --- plugins/inputs/sqlserver/azuresqlqueries.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go index 04a76cc983cb2..06782ecbadd0f 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqlqueries.go @@ -709,7 +709,9 @@ SELECT TOP(1) ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] ,cast([avg_cpu_percent] as float) as [avg_cpu_percent] FROM - sys.server_resource_stats; + sys.server_resource_stats +ORDER BY + [end_time] DESC; ` const sqlAzureMIResourceGovernance string = ` From 5a5f6fbd475476f554d40eeeb4d77e9961ad6e09 Mon Sep 17 00:00:00 2001 From: reimda Date: Tue, 20 Oct 2020 09:42:37 -0600 Subject: [PATCH 019/761] Update sarama library to 1.27.1 (#8289) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 11 ++-- go.sum | 59 ++++++++++++------- .../kafka_consumer/kafka_consumer_test.go | 3 + 4 files changed, 46 insertions(+), 28 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 677c8046a16bb..d5321af4f95a6 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -170,6 +170,7 @@ following works: - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) - gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE) +- gopkg.in/yaml.v3 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v3/LICENSE) ## telegraf used and modified code from these projects - github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) diff --git a/go.mod b/go.mod index 6226fd81c7dd7..d8f2ea5ebf941 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee github.com/Microsoft/ApplicationInsights-Go v0.4.2 github.com/Microsoft/go-winio v0.4.9 // indirect - github.com/Shopify/sarama v1.24.1 + github.com/Shopify/sarama v1.27.1 github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect github.com/aerospike/aerospike-client-go v1.27.0 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 @@ -61,7 +61,7 @@ require ( github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/protobuf v1.3.5 - github.com/google/go-cmp v0.4.0 + github.com/google/go-cmp v0.5.2 github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.12 github.com/gorilla/mux v1.6.2 @@ -78,11 +78,9 @@ require ( github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect github.com/jackc/pgx v3.6.0+incompatible - github.com/jcmturner/gofork v1.0.0 // indirect github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.12.0 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/klauspost/compress v1.9.2 // indirect github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee github.com/kylelemons/godebug v1.1.0 // indirect github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 // indirect @@ -118,7 +116,7 @@ require ( github.com/sirupsen/logrus v1.4.2 github.com/soniah/gosnmp v1.25.0 github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 - github.com/stretchr/testify v1.5.1 + github.com/stretchr/testify v1.6.1 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect github.com/tidwall/gjson v1.6.0 @@ -133,7 +131,7 @@ require ( go.starlark.net v0.0.0-20200901195727-6e684ef5eeee golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/net v0.0.0-20200707034311-ab3426394381 + golang.org/x/net v0.0.0-20200904194848-62affa334b73 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 @@ -146,7 +144,6 @@ require ( google.golang.org/grpc v1.28.0 gopkg.in/fatih/pool.v2 v2.0.0 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 - gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect gopkg.in/ldap.v3 v3.1.0 gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/olivere/elastic.v5 v5.0.70 diff --git a/go.sum b/go.sum index e9b596a7ca8aa..d92b1044fcdba 100644 --- a/go.sum +++ b/go.sum @@ -86,8 +86,8 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.24.1 h1:svn9vfN3R1Hz21WR2Gj0VW9ehaDGkiOS+VqlIcZOkMI= -github.com/Shopify/sarama v1.24.1/go.mod h1:fGP8eQ6PugKEI0iUETYYtnP6d1pH/bdDMTel1X5ajsU= +github.com/Shopify/sarama v1.27.1 h1:iUlzHymqWsITyttu6KxazcAz8WEj5FqcwFK/oEi7rE8= +github.com/Shopify/sarama v1.27.1/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= @@ -151,6 +151,7 @@ github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 h1:F8nmbiuX+ github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8= github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -176,6 +177,8 @@ github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= @@ -194,8 +197,8 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg= -github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= +github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk= +github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -275,6 +278,8 @@ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= @@ -325,8 +330,8 @@ github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -349,7 +354,6 @@ github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGU github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q= github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -380,18 +384,21 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY= -github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg= +github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee h1:MB75LRhfeLER2RF7neSVpYuX/lL8aPi3yPtv5vdOJmk= github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -453,6 +460,8 @@ github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 h1:W8+lNIfAldCScGiikToSprbf3DCaMXk0VIM9l73BIpY= github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY= github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -481,8 +490,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= -github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -521,6 +530,8 @@ github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLk github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= @@ -550,8 +561,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg= @@ -595,7 +606,6 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -607,6 +617,8 @@ golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -673,8 +685,8 @@ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -701,7 +713,6 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -782,6 +793,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.zx2c4.com/wireguard v0.0.20200121 h1:vcswa5Q6f+sylDfjqyrVNNrjsFUUbPsgAQTBCAg/Qf8= golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc= @@ -852,6 +865,8 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= @@ -868,9 +883,8 @@ gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4= -gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE= @@ -890,6 +904,9 @@ gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 01146e180a8c8..5973fa82a6629 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -218,6 +218,9 @@ func (s *FakeConsumerGroupSession) Context() context.Context { return s.ctx } +func (s *FakeConsumerGroupSession) Commit() { +} + type FakeConsumerGroupClaim struct { messages chan *sarama.ConsumerMessage } From 01230889b48eb9310cd8ee22403fdf044c35e8ee Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 20 Oct 2020 11:45:30 -0400 Subject: [PATCH 020/761] fix issue with phpfpm url usage (#8292) --- plugins/inputs/phpfpm/phpfpm.go | 2 +- plugins/inputs/phpfpm/phpfpm_test.go | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index f191844a34d56..52907bb50749e 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -201,7 +201,7 @@ func (p *phpfpm) gatherHttp(addr string, acc telegraf.Accumulator) error { return fmt.Errorf("unable parse server address '%s': %v", addr, err) } - req, err := http.NewRequest("GET", fmt.Sprintf("%s://%s%s", u.Scheme, u.Host, u.Path), nil) + req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return fmt.Errorf("unable to create new request '%s': %v", addr, err) } diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index f3b72a8281b7e..7be2e6a27dbf8 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -25,12 +25,17 @@ func (s statServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { - sv := statServer{} - ts := httptest.NewServer(sv) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, "ok", r.URL.Query().Get("test")) + w.Header().Set("Content-Type", "text/plain") + w.Header().Set("Content-Length", fmt.Sprint(len(outputSample))) + fmt.Fprint(w, outputSample) + })) defer ts.Close() + url := ts.URL + "?test=ok" r := &phpfpm{ - Urls: []string{ts.URL}, + Urls: []string{url}, } err := r.Init() @@ -43,7 +48,7 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { tags := map[string]string{ "pool": "www", - "url": ts.URL, + "url": url, } fields := map[string]interface{}{ From 14a73055f4035d4b6e535a0f5ac971b65c23450c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 20 Oct 2020 23:59:05 +0200 Subject: [PATCH 021/761] New input plugin for RAS with fixed GLIBC issue (#8293) --- CHANGELOG.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 4 + go.mod | 1 + go.sum | 18 ++ plugins/inputs/all/all.go | 1 + plugins/inputs/ras/README.md | 60 ++++++ plugins/inputs/ras/ras.go | 323 +++++++++++++++++++++++++++++ plugins/inputs/ras/ras_notlinux.go | 3 + plugins/inputs/ras/ras_test.go | 254 +++++++++++++++++++++++ 9 files changed, 665 insertions(+) create mode 100644 plugins/inputs/ras/README.md create mode 100644 plugins/inputs/ras/ras.go create mode 100644 plugins/inputs/ras/ras_notlinux.go create mode 100644 plugins/inputs/ras/ras_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 74ab8ef402ead..fd133ba0d95dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,6 +73,7 @@ - [nsd](/plugins/inputs/nsd/README.md) add nsd input plugin - Contributed by @gearnode - [opcua](/plugins/inputs/opcua/README.md) Add OPC UA input plugin - Contributed by InfluxData - [proxmox](/plugins/inputs/proxmox/README.md) Proxmox plugin - Contributed by @effitient + - [ras](/plugins/inputs/ras/README.md) New input plugin for RAS (Reliability, Availability and Serviceability) - Contributed by @p-zak - [win_eventlog](/plugins/inputs/win_eventlog/README.md) Windows eventlog input plugin - Contributed by @simnv #### New Output Plugins diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index d5321af4f95a6..a6ade91a52087 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -94,6 +94,7 @@ following works: - github.com/kubernetes/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) - github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) +- github.com/mattn/go-isatty [MIT License](https://github.com/mattn/go-isatty/blob/master/LICENSE) - github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) - github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md) - github.com/mdlayher/genetlink [MIT License](https://github.com/mdlayher/genetlink/blob/master/LICENSE.md) @@ -171,6 +172,9 @@ following works: - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) - gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE) - gopkg.in/yaml.v3 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v3/LICENSE) +- modernc.org/libc [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/libc/-/blob/master/LICENSE) +- modernc.org/memory [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/memory/-/blob/master/LICENSE) +- modernc.org/sqlite [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/sqlite/-/blob/master/LICENSE) ## telegraf used and modified code from these projects - github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) diff --git a/go.mod b/go.mod index d8f2ea5ebf941..92de24ee19120 100644 --- a/go.mod +++ b/go.mod @@ -151,6 +151,7 @@ require ( gotest.tools v2.2.0+incompatible // indirect honnef.co/go/tools v0.0.1-2020.1.3 // indirect k8s.io/apimachinery v0.17.1 // indirect + modernc.org/sqlite v1.7.4 ) // replaced due to https://github.com/satori/go.uuid/issues/73 diff --git a/go.sum b/go.sum index d92b1044fcdba..1297eec30d08f 100644 --- a/go.sum +++ b/go.sum @@ -412,6 +412,8 @@ github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 h1:8/+Y8SKf0xCZ8cCTfnrMdY7HNzlEjPAt3bPjalNb6CA= github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe h1:yMrL+YorbzaBpj/h3BbLMP+qeslPZYMbzcpHFBNy1Yk= @@ -532,6 +534,8 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhD github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= @@ -730,6 +734,7 @@ golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= @@ -909,6 +914,7 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclp gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/netdb v0.0.0-20150201073656-a416d700ae39/go.mod h1:rbNo0ST5hSazCG4rGfpHrwnwvzP1QX62WbhzD+ghGzs= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -924,6 +930,18 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +modernc.org/httpfs v1.0.0 h1:LtuKNg6JMiaBKVQHKd6Phhvk+2GFp+pUcmDQgRjrds0= +modernc.org/httpfs v1.0.0/go.mod h1:BSkfoMUcahSijQD5J/Vu4UMOxzmEf5SNRwyXC4PJBEw= +modernc.org/libc v1.3.1 h1:ZAAaxQZtb94hXvlPMEQybXBLLxEtJlQtVfvLkKOPZ5w= +modernc.org/libc v1.3.1/go.mod h1:f8sp9GAfEyGYh3lsRIKtBh/XwACdFvGznxm6GJmQvXk= +modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE= +modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.0.1 h1:bhVo78NAdgvRD4N+b2hGnAwL5RP2+QyiEJDsX3jpeDA= +modernc.org/memory v1.0.1/go.mod h1:NSjvC08+g3MLOpcAxQbdctcThAEX4YlJ20WWHYEhvRg= +modernc.org/sqlite v1.7.4 h1:pJVbc3NLKENbO1PJ3/uH+kDeuJiTShqc8eZarwANJgU= +modernc.org/sqlite v1.7.4/go.mod h1:xse4RHCm8Fzw0COf5SJqAyiDrVeDwAQthAS1V/woNIA= +modernc.org/tcl v1.4.1 h1:8ERwg+o+EFtrXmXDOVuGGmo+EkEh8Bkokb/ybI3kXPQ= +modernc.org/tcl v1.4.1/go.mod h1:8YCvzidU9SIwkz7RZwlCWK61mhV8X9UwfkRDRp7y5e0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index d25d329d4899a..1d1b8eb58b463 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -141,6 +141,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/puppetagent" _ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq" _ "github.com/influxdata/telegraf/plugins/inputs/raindrops" + _ "github.com/influxdata/telegraf/plugins/inputs/ras" _ "github.com/influxdata/telegraf/plugins/inputs/redfish" _ "github.com/influxdata/telegraf/plugins/inputs/redis" _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" diff --git a/plugins/inputs/ras/README.md b/plugins/inputs/ras/README.md new file mode 100644 index 0000000000000..8d89cf385de65 --- /dev/null +++ b/plugins/inputs/ras/README.md @@ -0,0 +1,60 @@ +# RAS Daemon Input Plugin + +This plugin is only available on Linux. + +The `RAS` plugin gathers and counts errors provided by [RASDaemon](https://github.com/mchehab/rasdaemon). + +### Configuration + +```toml +[[inputs.ras]] + ## Optional path to RASDaemon sqlite3 database. + ## Default: /var/lib/rasdaemon/ras-mc_event.db + # db_path = "" +``` + +In addition `RASDaemon` runs, by default, with `--enable-sqlite3` flag. In case of problems with SQLite3 database please verify this is still a default option. + +### Metrics + +- ras + - tags: + - socket_id + - fields: + - memory_read_corrected_errors + - memory_read_uncorrectable_errors + - memory_write_corrected_errors + - memory_write_uncorrectable_errors + - cache_l0_l1_errors + - tlb_instruction_errors + - cache_l2_errors + - upi_errors + - processor_base_errors + - processor_bus_errors + - internal_timer_errors + - smm_handler_code_access_violation_errors + - internal_parity_errors + - frc_errors + - external_mce_errors + - microcode_rom_parity_errors + - unclassified_mce_errors + +Please note that `processor_base_errors` is aggregate counter measuring the following MCE events: +- internal_timer_errors +- smm_handler_code_access_violation_errors +- internal_parity_errors +- frc_errors +- external_mce_errors +- microcode_rom_parity_errors +- unclassified_mce_errors + +### Permissions + +This plugin requires access to SQLite3 database from `RASDaemon`. Please make sure that user has required permissions to this database. + +### Example Output + +``` +ras,host=ubuntu,socket_id=0 external_mce_base_errors=1i,frc_errors=1i,instruction_tlb_errors=5i,internal_parity_errors=1i,internal_timer_errors=1i,l0_and_l1_cache_errors=7i,memory_read_corrected_errors=25i,memory_read_uncorrectable_errors=0i,memory_write_corrected_errors=5i,memory_write_uncorrectable_errors=0i,microcode_rom_parity_errors=1i,processor_base_errors=7i,processor_bus_errors=1i,smm_handler_code_access_violation_errors=1i,unclassified_mce_base_errors=1i 1598867393000000000 +ras,host=ubuntu level_2_cache_errors=0i,upi_errors=0i 1598867393000000000 +``` diff --git a/plugins/inputs/ras/ras.go b/plugins/inputs/ras/ras.go new file mode 100644 index 0000000000000..ae7da02a6904a --- /dev/null +++ b/plugins/inputs/ras/ras.go @@ -0,0 +1,323 @@ +// +build linux,!mips,!mipsle,!s390x + +package ras + +import ( + "database/sql" + "fmt" + "os" + "strconv" + "strings" + "time" + + _ "modernc.org/sqlite" //to register SQLite driver + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Ras plugin gathers and counts errors provided by RASDaemon +type Ras struct { + DBPath string `toml:"db_path"` + + latestTimestamp time.Time `toml:"-"` + cpuSocketCounters map[int]metricCounters `toml:"-"` + serverCounters metricCounters `toml:"-"` +} + +type machineCheckError struct { + ID int + Timestamp string + SocketID int + ErrorMsg string + MciStatusMsg string +} + +type metricCounters map[string]int64 + +const ( + mceQuery = ` + SELECT + id, timestamp, error_msg, mcistatus_msg, socketid + FROM mce_record + WHERE timestamp > ? + ` + defaultDbPath = "/var/lib/rasdaemon/ras-mc_event.db" + dateLayout = "2006-01-02 15:04:05 -0700" + memoryReadCorrected = "memory_read_corrected_errors" + memoryReadUncorrected = "memory_read_uncorrectable_errors" + memoryWriteCorrected = "memory_write_corrected_errors" + memoryWriteUncorrected = "memory_write_uncorrectable_errors" + instructionCache = "cache_l0_l1_errors" + instructionTLB = "tlb_instruction_errors" + levelTwoCache = "cache_l2_errors" + upi = "upi_errors" + processorBase = "processor_base_errors" + processorBus = "processor_bus_errors" + internalTimer = "internal_timer_errors" + smmHandlerCode = "smm_handler_code_access_violation_errors" + internalParity = "internal_parity_errors" + frc = "frc_errors" + externalMCEBase = "external_mce_errors" + microcodeROMParity = "microcode_rom_parity_errors" + unclassifiedMCEBase = "unclassified_mce_errors" +) + +// SampleConfig returns sample configuration for this plugin. +func (r *Ras) SampleConfig() string { + return ` + ## Optional path to RASDaemon sqlite3 database. + ## Default: /var/lib/rasdaemon/ras-mc_event.db + # db_path = "" +` +} + +// Description returns the plugin description. +func (r *Ras) Description() string { + return "RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required)." +} + +// Gather reads the stats provided by RASDaemon and writes it to the Accumulator. +func (r *Ras) Gather(acc telegraf.Accumulator) error { + err := validateDbPath(r.DBPath) + if err != nil { + return err + } + + db, err := connectToDB(r.DBPath) + if err != nil { + return err + } + defer db.Close() + + rows, err := db.Query(mceQuery, r.latestTimestamp) + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + mcError, err := fetchMachineCheckError(rows) + if err != nil { + return err + } + tsErr := r.updateLatestTimestamp(mcError.Timestamp) + if tsErr != nil { + return err + } + r.updateCounters(mcError) + } + + addCPUSocketMetrics(acc, r.cpuSocketCounters) + addServerMetrics(acc, r.serverCounters) + + return nil +} + +func (r *Ras) updateLatestTimestamp(timestamp string) error { + ts, err := parseDate(timestamp) + if err != nil { + return err + } + if ts.After(r.latestTimestamp) { + r.latestTimestamp = ts + } + + return nil +} + +func (r *Ras) updateCounters(mcError *machineCheckError) { + if strings.Contains(mcError.ErrorMsg, "No Error") { + return + } + + r.initializeCPUMetricDataIfRequired(mcError.SocketID) + r.updateSocketCounters(mcError) + r.updateServerCounters(mcError) +} + +func newMetricCounters() *metricCounters { + return &metricCounters{ + memoryReadCorrected: 0, + memoryReadUncorrected: 0, + memoryWriteCorrected: 0, + memoryWriteUncorrected: 0, + instructionCache: 0, + instructionTLB: 0, + processorBase: 0, + processorBus: 0, + internalTimer: 0, + smmHandlerCode: 0, + internalParity: 0, + frc: 0, + externalMCEBase: 0, + microcodeROMParity: 0, + unclassifiedMCEBase: 0, + } +} + +func (r *Ras) updateServerCounters(mcError *machineCheckError) { + if strings.Contains(mcError.ErrorMsg, "CACHE Level-2") && strings.Contains(mcError.ErrorMsg, "Error") { + r.serverCounters[levelTwoCache]++ + } + + if strings.Contains(mcError.ErrorMsg, "UPI:") { + r.serverCounters[upi]++ + } +} + +func validateDbPath(dbPath string) error { + pathInfo, err := os.Stat(dbPath) + if os.IsNotExist(err) { + return fmt.Errorf("provided db_path does not exist: [%s]", dbPath) + } + + if err != nil { + return fmt.Errorf("cannot get system information for db_path file: [%s] - %v", dbPath, err) + } + + if mode := pathInfo.Mode(); !mode.IsRegular() { + return fmt.Errorf("provided db_path does not point to a regular file: [%s]", dbPath) + } + + return nil +} + +func connectToDB(dbPath string) (*sql.DB, error) { + return sql.Open("sqlite", dbPath) +} + +func (r *Ras) initializeCPUMetricDataIfRequired(socketID int) { + if _, ok := r.cpuSocketCounters[socketID]; !ok { + r.cpuSocketCounters[socketID] = *newMetricCounters() + } +} + +func (r *Ras) updateSocketCounters(mcError *machineCheckError) { + r.updateMemoryCounters(mcError) + r.updateProcessorBaseCounters(mcError) + + if strings.Contains(mcError.ErrorMsg, "Instruction TLB") && strings.Contains(mcError.ErrorMsg, "Error") { + r.cpuSocketCounters[mcError.SocketID][instructionTLB]++ + } + + if strings.Contains(mcError.ErrorMsg, "BUS") && strings.Contains(mcError.ErrorMsg, "Error") { + r.cpuSocketCounters[mcError.SocketID][processorBus]++ + } + + if (strings.Contains(mcError.ErrorMsg, "CACHE Level-0") || + strings.Contains(mcError.ErrorMsg, "CACHE Level-1")) && + strings.Contains(mcError.ErrorMsg, "Error") { + r.cpuSocketCounters[mcError.SocketID][instructionCache]++ + } +} + +func (r *Ras) updateProcessorBaseCounters(mcError *machineCheckError) { + if strings.Contains(mcError.ErrorMsg, "Internal Timer error") { + r.cpuSocketCounters[mcError.SocketID][internalTimer]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ + } + + if strings.Contains(mcError.ErrorMsg, "SMM Handler Code Access Violation") { + r.cpuSocketCounters[mcError.SocketID][smmHandlerCode]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ + } + + if strings.Contains(mcError.ErrorMsg, "Internal parity error") { + r.cpuSocketCounters[mcError.SocketID][internalParity]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ + } + + if strings.Contains(mcError.ErrorMsg, "FRC error") { + r.cpuSocketCounters[mcError.SocketID][frc]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ + } + + if strings.Contains(mcError.ErrorMsg, "External error") { + r.cpuSocketCounters[mcError.SocketID][externalMCEBase]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ + } + + if strings.Contains(mcError.ErrorMsg, "Microcode ROM parity error") { + r.cpuSocketCounters[mcError.SocketID][microcodeROMParity]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ + } + + if strings.Contains(mcError.ErrorMsg, "Unclassified") || strings.Contains(mcError.ErrorMsg, "Internal unclassified") { + r.cpuSocketCounters[mcError.SocketID][unclassifiedMCEBase]++ + r.cpuSocketCounters[mcError.SocketID][processorBase]++ + } +} + +func (r *Ras) updateMemoryCounters(mcError *machineCheckError) { + if strings.Contains(mcError.ErrorMsg, "Memory read error") { + if strings.Contains(mcError.MciStatusMsg, "Corrected_error") { + r.cpuSocketCounters[mcError.SocketID][memoryReadCorrected]++ + } else { + r.cpuSocketCounters[mcError.SocketID][memoryReadUncorrected]++ + } + } + if strings.Contains(mcError.ErrorMsg, "Memory write error") { + if strings.Contains(mcError.MciStatusMsg, "Corrected_error") { + r.cpuSocketCounters[mcError.SocketID][memoryWriteCorrected]++ + } else { + r.cpuSocketCounters[mcError.SocketID][memoryWriteUncorrected]++ + } + } +} + +func addCPUSocketMetrics(acc telegraf.Accumulator, cpuSocketCounters map[int]metricCounters) { + for socketID, data := range cpuSocketCounters { + tags := map[string]string{ + "socket_id": strconv.Itoa(socketID), + } + fields := make(map[string]interface{}) + + for errorName, count := range data { + fields[errorName] = count + } + + acc.AddCounter("ras", fields, tags) + } +} + +func addServerMetrics(acc telegraf.Accumulator, counters map[string]int64) { + fields := make(map[string]interface{}) + for errorName, count := range counters { + fields[errorName] = count + } + + acc.AddCounter("ras", fields, map[string]string{}) +} + +func fetchMachineCheckError(rows *sql.Rows) (*machineCheckError, error) { + mcError := &machineCheckError{} + err := rows.Scan(&mcError.ID, &mcError.Timestamp, &mcError.ErrorMsg, &mcError.MciStatusMsg, &mcError.SocketID) + + if err != nil { + return nil, err + } + + return mcError, nil +} + +func parseDate(date string) (time.Time, error) { + return time.Parse(dateLayout, date) +} + +func init() { + inputs.Add("ras", func() telegraf.Input { + defaultTimestamp, _ := parseDate("1970-01-01 00:00:01 -0700") + return &Ras{ + DBPath: defaultDbPath, + latestTimestamp: defaultTimestamp, + cpuSocketCounters: map[int]metricCounters{ + 0: *newMetricCounters(), + }, + serverCounters: map[string]int64{ + levelTwoCache: 0, + upi: 0, + }, + } + }) +} diff --git a/plugins/inputs/ras/ras_notlinux.go b/plugins/inputs/ras/ras_notlinux.go new file mode 100644 index 0000000000000..b529a4b644038 --- /dev/null +++ b/plugins/inputs/ras/ras_notlinux.go @@ -0,0 +1,3 @@ +// +build !linux mips mipsle s390x + +package ras diff --git a/plugins/inputs/ras/ras_test.go b/plugins/inputs/ras/ras_test.go new file mode 100644 index 0000000000000..1c14c6d8f76ce --- /dev/null +++ b/plugins/inputs/ras/ras_test.go @@ -0,0 +1,254 @@ +// +build linux,!mips,!mipsle,!s390x + +package ras + +import ( + "fmt" + "testing" + + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" +) + +func TestUpdateCounters(t *testing.T) { + ras := newRas() + for _, mce := range testData { + ras.updateCounters(&mce) + } + + assert.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain counters only for single socket") + + for metric, value := range ras.cpuSocketCounters[0] { + if metric == processorBase { + // processor_base_errors is sum of other seven errors: internal_timer_errors, smm_handler_code_access_violation_errors, + // internal_parity_errors, frc_errors, external_mce_errors, microcode_rom_parity_errors and unclassified_mce_errors + assert.Equal(t, int64(7), value, fmt.Sprintf("%s should have value of 7", processorBase)) + } else { + assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) + } + } + + for metric, value := range ras.serverCounters { + assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) + } +} + +func TestUpdateLatestTimestamp(t *testing.T) { + ras := newRas() + ts := "2020-08-01 15:13:27 +0200" + testData = append(testData, []machineCheckError{ + { + Timestamp: "2019-05-20 08:25:55 +0200", + SocketID: 0, + ErrorMsg: "", + MciStatusMsg: "", + }, + { + Timestamp: "2018-02-21 12:27:22 +0200", + SocketID: 0, + ErrorMsg: "", + MciStatusMsg: "", + }, + { + Timestamp: ts, + SocketID: 0, + ErrorMsg: "", + MciStatusMsg: "", + }, + }...) + for _, mce := range testData { + err := ras.updateLatestTimestamp(mce.Timestamp) + assert.NoError(t, err) + } + assert.Equal(t, ts, ras.latestTimestamp.Format(dateLayout)) +} + +func TestMultipleSockets(t *testing.T) { + ras := newRas() + cacheL2 := "Instruction CACHE Level-2 Generic Error" + overflow := "Error_overflow Corrected_error" + testData = []machineCheckError{ + { + Timestamp: "2019-05-20 08:25:55 +0200", + SocketID: 0, + ErrorMsg: cacheL2, + MciStatusMsg: overflow, + }, + { + Timestamp: "2018-02-21 12:27:22 +0200", + SocketID: 1, + ErrorMsg: cacheL2, + MciStatusMsg: overflow, + }, + { + Timestamp: "2020-03-21 14:17:28 +0200", + SocketID: 2, + ErrorMsg: cacheL2, + MciStatusMsg: overflow, + }, + { + Timestamp: "2020-03-21 17:24:18 +0200", + SocketID: 3, + ErrorMsg: cacheL2, + MciStatusMsg: overflow, + }, + } + for _, mce := range testData { + ras.updateCounters(&mce) + } + assert.Equal(t, 4, len(ras.cpuSocketCounters), "Should contain counters for four sockets") + + for _, metricData := range ras.cpuSocketCounters { + for metric, value := range metricData { + if metric == levelTwoCache { + assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", levelTwoCache)) + } else { + assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) + } + } + } +} + +func TestMissingDatabase(t *testing.T) { + var acc testutil.Accumulator + ras := newRas() + ras.DBPath = "/tmp/test.db" + err := ras.Gather(&acc) + assert.Error(t, err) +} + +func TestEmptyDatabase(t *testing.T) { + ras := newRas() + + assert.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain default counters for one socket") + assert.Equal(t, 2, len(ras.serverCounters), "Should contain default counters for server") + + for metric, value := range ras.cpuSocketCounters[0] { + assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) + } + + for metric, value := range ras.serverCounters { + assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) + } +} + +func newRas() *Ras { + defaultTimestamp, _ := parseDate("1970-01-01 00:00:01 -0700") + return &Ras{ + DBPath: defaultDbPath, + latestTimestamp: defaultTimestamp, + cpuSocketCounters: map[int]metricCounters{ + 0: *newMetricCounters(), + }, + serverCounters: map[string]int64{ + levelTwoCache: 0, + upi: 0, + }, + } +} + +var testData = []machineCheckError{ + { + Timestamp: "2020-05-20 07:34:53 +0200", + SocketID: 0, + ErrorMsg: "MEMORY CONTROLLER RD_CHANNEL0_ERR Transaction: Memory read error", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 07:35:11 +0200", + SocketID: 0, + ErrorMsg: "MEMORY CONTROLLER RD_CHANNEL0_ERR Transaction: Memory read error", + MciStatusMsg: "Uncorrected_error", + }, + { + Timestamp: "2020-05-20 07:37:50 +0200", + SocketID: 0, + ErrorMsg: "MEMORY CONTROLLER RD_CHANNEL2_ERR Transaction: Memory write error", + MciStatusMsg: "Uncorrected_error", + }, + { + Timestamp: "2020-05-20 08:14:51 +0200", + SocketID: 0, + ErrorMsg: "MEMORY CONTROLLER WR_CHANNEL2_ERR Transaction: Memory write error", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:15:31 +0200", + SocketID: 0, + ErrorMsg: "corrected filtering (some unreported errors in same region) Instruction CACHE Level-0 Read Error", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:16:32 +0200", + SocketID: 0, + ErrorMsg: "Instruction TLB Level-0 Error", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:16:56 +0200", + SocketID: 0, + ErrorMsg: "No Error", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:17:24 +0200", + SocketID: 0, + ErrorMsg: "Unclassified", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:17:41 +0200", + SocketID: 0, + ErrorMsg: "Microcode ROM parity error", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:17:48 +0200", + SocketID: 0, + ErrorMsg: "FRC error", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:18:18 +0200", + SocketID: 0, + ErrorMsg: "Internal parity error", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:18:34 +0200", + SocketID: 0, + ErrorMsg: "SMM Handler Code Access Violation", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:18:54 +0200", + SocketID: 0, + ErrorMsg: "Internal Timer error", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:21:23 +0200", + SocketID: 0, + ErrorMsg: "BUS Level-3 Generic Generic IO Request-did-not-timeout Error", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:23:23 +0200", + SocketID: 0, + ErrorMsg: "External error", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:25:31 +0200", + SocketID: 0, + ErrorMsg: "UPI: COR LL Rx detected CRC error - successful LLR without Phy Reinit", + MciStatusMsg: "Error_overflow Corrected_error", + }, + { + Timestamp: "2020-05-20 08:25:55 +0200", + SocketID: 0, + ErrorMsg: "Instruction CACHE Level-2 Generic Error", + MciStatusMsg: "Error_overflow Corrected_error", + }, +} From 9c2979dcedda7e007332d1f9afa4701295373796 Mon Sep 17 00:00:00 2001 From: tlusser-inv <39266023+tlusser-inv@users.noreply.github.com> Date: Wed, 21 Oct 2020 16:50:29 +0200 Subject: [PATCH 022/761] Fix wrong memory measurements of containers and vms (#8290) --- plugins/inputs/proxmox/README.md | 2 ++ plugins/inputs/proxmox/proxmox.go | 48 +++++++++++++++++++++----- plugins/inputs/proxmox/proxmox_test.go | 10 ++++-- plugins/inputs/proxmox/structs.go | 7 +++- 4 files changed, 55 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/proxmox/README.md b/plugins/inputs/proxmox/README.md index ac81633a3f461..24e39ade24ea3 100644 --- a/plugins/inputs/proxmox/README.md +++ b/plugins/inputs/proxmox/README.md @@ -11,6 +11,8 @@ Telegraf minimum version: Telegraf 1.16.0 ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. base_url = "https://localhost:8006/api2/json" api_token = "USER@REALM!TOKENID=UUID" + ## Optional node name config + # node_name = "localhost" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index 13dcb4a95f304..7c14356849d6b 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -2,6 +2,7 @@ package proxmox import ( "encoding/json" + "errors" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "io/ioutil" @@ -48,11 +49,10 @@ func (px *Proxmox) Gather(acc telegraf.Accumulator) error { } func (px *Proxmox) Init() error { - hostname, err := os.Hostname() - if err != nil { - return err + + if px.NodeName == "" { + return errors.New("node_name must be configured") } - px.hostname = hostname tlsCfg, err := px.ClientConfig.TLSConfig() if err != nil { @@ -73,11 +73,15 @@ func init() { requestFunction: performRequest, } + // Set hostname as default node name for backwards compatibility + hostname, _ := os.Hostname() + px.NodeName = hostname + inputs.Add("proxmox", func() telegraf.Input { return &px }) } func getNodeSearchDomain(px *Proxmox) error { - apiUrl := "/nodes/" + px.hostname + "/dns" + apiUrl := "/nodes/" + px.NodeName + "/dns" jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil) if err != nil { return err @@ -88,6 +92,10 @@ func getNodeSearchDomain(px *Proxmox) error { if err != nil { return err } + + if nodeDns.Data.Searchdomain == "" { + return errors.New("node_name not found") + } px.nodeSearchDomain = nodeDns.Data.Searchdomain return nil @@ -137,7 +145,12 @@ func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { return } tags := getTags(px, vmStat.Name, vmConfig, rt) - fields, err := getFields(vmStat) + currentVMStatus, err := getCurrentVMStatus(px, rt, vmStat.ID) + if err != nil { + px.Log.Error("Error getting VM curent VM status: %v", err) + return + } + fields, err := getFields(currentVMStatus) if err != nil { px.Log.Error("Error getting VM measurements: %v", err) return @@ -146,8 +159,25 @@ func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { } } +func getCurrentVMStatus(px *Proxmox, rt ResourceType, id string) (VmStat, error) { + apiUrl := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + id + "/status/current" + + jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil) + if err != nil { + return VmStat{}, err + } + + var currentVmStatus VmCurrentStats + err = json.Unmarshal(jsonData, ¤tVmStatus) + if err != nil { + return VmStat{}, err + } + + return currentVmStatus.Data, nil +} + func getVmStats(px *Proxmox, rt ResourceType) (VmStats, error) { - apiUrl := "/nodes/" + px.hostname + "/" + string(rt) + apiUrl := "/nodes/" + px.NodeName + "/" + string(rt) jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil) if err != nil { return VmStats{}, err @@ -163,7 +193,7 @@ func getVmStats(px *Proxmox, rt ResourceType) (VmStats, error) { } func getVmConfig(px *Proxmox, vmId string, rt ResourceType) (VmConfig, error) { - apiUrl := "/nodes/" + px.hostname + "/" + string(rt) + "/" + vmId + "/config" + apiUrl := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + vmId + "/config" jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil) if err != nil { return VmConfig{}, err @@ -245,7 +275,7 @@ func getTags(px *Proxmox, name string, vmConfig VmConfig, rt ResourceType) map[s fqdn := hostname + "." + domain return map[string]string{ - "node_fqdn": px.hostname + "." + px.nodeSearchDomain, + "node_fqdn": px.NodeName + "." + px.nodeSearchDomain, "vm_name": name, "vm_fqdn": fqdn, "vm_type": string(rt), diff --git a/plugins/inputs/proxmox/proxmox_test.go b/plugins/inputs/proxmox/proxmox_test.go index 274ebdf69ff28..524a105e7b1ab 100644 --- a/plugins/inputs/proxmox/proxmox_test.go +++ b/plugins/inputs/proxmox/proxmox_test.go @@ -14,6 +14,8 @@ var qemuTestData = `{"data":[{"name":"qemu1","status":"running","maxdisk":107374 var qemuConfigTestData = `{"data":{"hostname":"qemu1","searchdomain":"test.example.com"}}` var lxcTestData = `{"data":[{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}]}` var lxcConfigTestData = `{"data":{"hostname":"container1","searchdomain":"test.example.com"}}` +var lxcCurrentStatusTestData = `{"data":{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}}` +var qemuCurrentStatusTestData = `{"data":{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}}` func performTestRequest(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) { var bytedata = []byte("") @@ -28,6 +30,10 @@ func performTestRequest(px *Proxmox, apiUrl string, method string, data url.Valu bytedata = []byte(lxcTestData) } else if strings.HasSuffix(apiUrl, "111/config") { bytedata = []byte(lxcConfigTestData) + } else if strings.HasSuffix(apiUrl, "111/status/current") { + bytedata = []byte(lxcCurrentStatusTestData) + } else if strings.HasSuffix(apiUrl, "113/status/current") { + bytedata = []byte(qemuCurrentStatusTestData) } return bytedata, nil @@ -36,12 +42,12 @@ func performTestRequest(px *Proxmox, apiUrl string, method string, data url.Valu func setUp(t *testing.T) *Proxmox { px := &Proxmox{ requestFunction: performTestRequest, + NodeName: "testnode", } require.NoError(t, px.Init()) - // Override hostname and logger for test - px.hostname = "testnode" + // Override logger for test px.Log = testutil.Logger{} return px } diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go index eef5dffff1f28..461e71d767d6a 100644 --- a/plugins/inputs/proxmox/structs.go +++ b/plugins/inputs/proxmox/structs.go @@ -13,9 +13,10 @@ type Proxmox struct { BaseURL string `toml:"base_url"` APIToken string `toml:"api_token"` ResponseTimeout internal.Duration `toml:"response_timeout"` + NodeName string `toml:"node_name"` + tls.ClientConfig - hostname string httpClient *http.Client nodeSearchDomain string @@ -34,6 +35,10 @@ type VmStats struct { Data []VmStat `json:"data"` } +type VmCurrentStats struct { + Data VmStat `json:"data"` +} + type VmStat struct { ID string `json:"vmid"` Name string `json:"name"` From 885d0affa603809be2e9ea9ca4ada90299badbd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patryk=20Ma=C5=82ek?= <69143962+pmalek-sumo@users.noreply.github.com> Date: Wed, 21 Oct 2020 20:43:24 +0200 Subject: [PATCH 023/761] Sumo Logic output plugin: fix unparsable config.Size from sample config (#8243) --- etc/telegraf.conf | 2 +- plugins/outputs/sumologic/README.md | 2 +- plugins/outputs/sumologic/sumologic.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index a07e922c3aeed..f67ddfbf19dcd 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1334,7 +1334,7 @@ # ## Bear in mind that in some serializer a metric even though serialized to multiple # ## lines cannot be split any further so setting this very low might not work # ## as expected. -# # max_request_body_size = 1_000_000 +# # max_request_body_size = 1000000 # # ## Additional, Sumo specific options. # ## Full list can be found here: diff --git a/plugins/outputs/sumologic/README.md b/plugins/outputs/sumologic/README.md index 78f0eb3370a80..20fb757999a80 100644 --- a/plugins/outputs/sumologic/README.md +++ b/plugins/outputs/sumologic/README.md @@ -45,7 +45,7 @@ by Sumologic HTTP Source: ## Bear in mind that in some serializer a metric even though serialized to multiple ## lines cannot be split any further so setting this very low might not work ## as expected. - # max_request_body_size = 1_000_000 + # max_request_body_size = 1000000 ## Additional, Sumo specific options. ## Full list can be found here: diff --git a/plugins/outputs/sumologic/sumologic.go b/plugins/outputs/sumologic/sumologic.go index 3c3f4a649705d..fd9fe908ba099 100644 --- a/plugins/outputs/sumologic/sumologic.go +++ b/plugins/outputs/sumologic/sumologic.go @@ -50,7 +50,7 @@ const ( ## Bear in mind that in some serializer a metric even though serialized to multiple ## lines cannot be split any further so setting this very low might not work ## as expected. - # max_request_body_size = 1_000_000 + # max_request_body_size = 1000000 ## Additional, Sumo specific options. ## Full list can be found here: From dd9078d5d9d732107ab1af9a506516f2ba84edcc Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 20 Oct 2020 15:10:03 -0400 Subject: [PATCH 024/761] Update changelog (cherry picked from commit 6cdf4020fad03cc660516300b3854c56ec95c430) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fd133ba0d95dd..2acd1e1d8566b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -91,6 +91,13 @@ - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. +## v1.15.4 [2020-10-20] + +#### Bugfixes + + - [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd + - [#8176](https://github.com/influxdata/telegraf/pull/8176) `agent` fix panic on streaming processers using logging + ## v1.15.3 [2020-09-11] #### Release Notes From 81dd120876f72121c339d160403bced724d7b49a Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 21 Oct 2020 16:10:35 -0600 Subject: [PATCH 025/761] Update next_version to 1.17.0 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 33c804794dadb..bb7eb8f33c3e2 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -next_version := 1.16.0 +next_version := 1.17.0 tag := $(shell git describe --exact-match --tags 2>git_describe_error.tmp; rm -f git_describe_error.tmp) branch := $(shell git rev-parse --abbrev-ref HEAD) commit := $(shell git rev-parse --short=8 HEAD) From e158255d9b905c845fc5acd9c2642d9e2953a8af Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 21 Oct 2020 23:12:18 -0400 Subject: [PATCH 026/761] Get the build version from a static file --- Makefile | 4 ++-- build_version.txt | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 build_version.txt diff --git a/Makefile b/Makefile index bb7eb8f33c3e2..eebd15c30bffc 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -next_version := 1.17.0 +next_version := $(shell cat build_version.txt) tag := $(shell git describe --exact-match --tags 2>git_describe_error.tmp; rm -f git_describe_error.tmp) branch := $(shell git rev-parse --abbrev-ref HEAD) commit := $(shell git rev-parse --short=8 HEAD) @@ -41,7 +41,7 @@ GOOS ?= $(shell go env GOOS) GOARCH ?= $(shell go env GOARCH) HOSTGO := env -u GOOS -u GOARCH -u GOARM -- go -LDFLAGS := $(LDFLAGS) -X main.commit=$(commit) -X main.branch=$(branch) +LDFLAGS := $(LDFLAGS) -X main.commit=$(commit) -X main.branch=$(branch) -X main.goos=$(GOOS) -X main.goarch=$(GOARCH) ifneq ($(tag),) LDFLAGS += -X main.version=$(version) endif diff --git a/build_version.txt b/build_version.txt new file mode 100644 index 0000000000000..092afa15df4df --- /dev/null +++ b/build_version.txt @@ -0,0 +1 @@ +1.17.0 From 9b23a04b69c1ce2ba230e13c0af6656dbec71fec Mon Sep 17 00:00:00 2001 From: Ido Halevi <32218210+idohalevi@users.noreply.github.com> Date: Thu, 22 Oct 2020 18:53:08 +0300 Subject: [PATCH 027/761] A new Logz.io output plugin (#8202) --- README.md | 1 + plugins/outputs/all/all.go | 1 + plugins/outputs/logzio/README.md | 43 +++++++ plugins/outputs/logzio/logzio.go | 175 ++++++++++++++++++++++++++ plugins/outputs/logzio/logzio_test.go | 94 ++++++++++++++ 5 files changed, 314 insertions(+) create mode 100644 plugins/outputs/logzio/README.md create mode 100644 plugins/outputs/logzio/logzio.go create mode 100644 plugins/outputs/logzio/logzio_test.go diff --git a/README.md b/README.md index 168db50fd6a24..acaf9c1818cba 100644 --- a/README.md +++ b/README.md @@ -430,6 +430,7 @@ For documentation on the latest development code see the [documentation index][d * [instrumental](./plugins/outputs/instrumental) * [kafka](./plugins/outputs/kafka) * [librato](./plugins/outputs/librato) +* [logz.io](./plugins/outputs/logzio) * [mqtt](./plugins/outputs/mqtt) * [nats](./plugins/outputs/nats) * [newrelic](./plugins/outputs/newrelic) diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index f81aa9d71b072..9d89976dd6cca 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -25,6 +25,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/kafka" _ "github.com/influxdata/telegraf/plugins/outputs/kinesis" _ "github.com/influxdata/telegraf/plugins/outputs/librato" + _ "github.com/influxdata/telegraf/plugins/outputs/logzio" _ "github.com/influxdata/telegraf/plugins/outputs/mqtt" _ "github.com/influxdata/telegraf/plugins/outputs/nats" _ "github.com/influxdata/telegraf/plugins/outputs/newrelic" diff --git a/plugins/outputs/logzio/README.md b/plugins/outputs/logzio/README.md new file mode 100644 index 0000000000000..5cf61233e3274 --- /dev/null +++ b/plugins/outputs/logzio/README.md @@ -0,0 +1,43 @@ +# Logz.io Output Plugin + +This plugin sends metrics to Logz.io over HTTPs. + +### Configuration: + +```toml +# A plugin that can send metrics over HTTPs to Logz.io +[[outputs.logzio]] + ## Set to true if Logz.io sender checks the disk space before adding metrics to the disk queue. + # check_disk_space = true + + ## The percent of used file system space at which the sender will stop queueing. + ## When we will reach that percentage, the file system in which the queue is stored will drop + ## all new logs until the percentage of used space drops below that threshold. + # disk_threshold = 98 + + ## How often Logz.io sender should drain the queue. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + # drain_duration = "3s" + + ## Where Logz.io sender should store the queue + ## queue_dir = Sprintf("%s%s%s%s%d", os.TempDir(), string(os.PathSeparator), + ## "logzio-buffer", string(os.PathSeparator), time.Now().UnixNano()) + + ## Logz.io account token + token = "your Logz.io token" # required + + ## Use your listener URL for your Logz.io account region. + # url = "https://listener.logz.io:8071" +``` + +### Required parameters: + +* `token`: Your Logz.io token, which can be found under "settings" in your account. + +### Optional parameters: + +* `check_disk_space`: Set to true if Logz.io sender checks the disk space before adding metrics to the disk queue. +* `disk_threshold`: If the queue_dir space crosses this threshold (in % of disk usage), the plugin will start dropping logs. +* `drain_duration`: Time to sleep between sending attempts. +* `queue_dir`: Metrics disk path. All the unsent metrics are saved to the disk in this location. +* `url`: Logz.io listener URL. \ No newline at end of file diff --git a/plugins/outputs/logzio/logzio.go b/plugins/outputs/logzio/logzio.go new file mode 100644 index 0000000000000..e46e9bf821320 --- /dev/null +++ b/plugins/outputs/logzio/logzio.go @@ -0,0 +1,175 @@ +package logzio + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + + "net/http" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/outputs" +) + +const ( + defaultLogzioURL = "https://listener.logz.io:8071" + + logzioDescription = "Send aggregate metrics to Logz.io" + logzioType = "telegraf" +) + +var sampleConfig = ` + ## Connection timeout, defaults to "5s" if not set. + timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Logz.io account token + token = "your logz.io token" # required + + ## Use your listener URL for your Logz.io account region. + # url = "https://listener.logz.io:8071" +` + +type Logzio struct { + Log telegraf.Logger `toml:"-"` + Timeout internal.Duration `toml:"timeout"` + Token string `toml:"token"` + URL string `toml:"url"` + + tls.ClientConfig + client *http.Client +} + +type TimeSeries struct { + Series []*Metric +} + +type Metric struct { + Metric map[string]interface{} `json:"metrics"` + Dimensions map[string]string `json:"dimensions"` + Time time.Time `json:"@timestamp"` + Type string `json:"type"` +} + +// Connect to the Output +func (l *Logzio) Connect() error { + l.Log.Debug("Connecting to logz.io output...") + + if l.Token == "" || l.Token == "your logz.io token" { + return fmt.Errorf("token is required") + } + + tlsCfg, err := l.ClientConfig.TLSConfig() + if err != nil { + return err + } + + l.client = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: tlsCfg, + }, + Timeout: l.Timeout.Duration, + } + + return nil +} + +// Close any connections to the Output +func (l *Logzio) Close() error { + l.Log.Debug("Closing logz.io output") + return nil +} + +// Description returns a one-sentence description on the Output +func (l *Logzio) Description() string { + return logzioDescription +} + +// SampleConfig returns the default configuration of the Output +func (l *Logzio) SampleConfig() string { + return sampleConfig +} + +// Write takes in group of points to be written to the Output +func (l *Logzio) Write(metrics []telegraf.Metric) error { + if len(metrics) == 0 { + return nil + } + + var buff bytes.Buffer + gz := gzip.NewWriter(&buff) + for _, metric := range metrics { + m := l.parseMetric(metric) + + serialized, err := json.Marshal(m) + if err != nil { + return fmt.Errorf("unable to marshal metric, %s\n", err.Error()) + } + + _, err = gz.Write(append(serialized, '\n')) + if err != nil { + return fmt.Errorf("unable to write gzip meric, %s\n", err.Error()) + } + } + + err := gz.Close() + if err != nil { + return fmt.Errorf("unable to close gzip, %s\n", err.Error()) + } + + return l.send(buff.Bytes()) +} + +func (l *Logzio) send(metrics []byte) error { + req, err := http.NewRequest("POST", l.authUrl(), bytes.NewBuffer(metrics)) + if err != nil { + return fmt.Errorf("unable to create http.Request, %s\n", err.Error()) + } + req.Header.Add("Content-Type", "application/json") + req.Header.Set("Content-Encoding", "gzip") + + resp, err := l.client.Do(req) + if err != nil { + return fmt.Errorf("error POSTing metrics, %s\n", err.Error()) + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode > 209 { + return fmt.Errorf("received bad status code, %d\n", resp.StatusCode) + } + + return nil +} + +func (l *Logzio) authUrl() string { + return fmt.Sprintf("%s/?token=%s", l.URL, l.Token) +} + +func (l *Logzio) parseMetric(metric telegraf.Metric) *Metric { + return &Metric{ + Metric: map[string]interface{}{ + metric.Name(): metric.Fields(), + }, + Dimensions: metric.Tags(), + Time: metric.Time(), + Type: logzioType, + } +} + +func init() { + outputs.Add("logzio", func() telegraf.Output { + return &Logzio{ + URL: defaultLogzioURL, + Timeout: internal.Duration{Duration: time.Second * 5}, + } + }) +} diff --git a/plugins/outputs/logzio/logzio_test.go b/plugins/outputs/logzio/logzio_test.go new file mode 100644 index 0000000000000..074192e06f0e2 --- /dev/null +++ b/plugins/outputs/logzio/logzio_test.go @@ -0,0 +1,94 @@ +package logzio + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "io" + "net/http" + "net/http/httptest" + "testing" +) + +const ( + testToken = "123456789" + testURL = "https://logzio.com" +) + +func TestConnetWithoutToken(t *testing.T) { + l := &Logzio{ + URL: testURL, + Log: testutil.Logger{}, + } + err := l.Connect() + require.Error(t, err) +} + +func TestParseMetric(t *testing.T) { + l := &Logzio{} + for _, tm := range testutil.MockMetrics() { + lm := l.parseMetric(tm) + require.Equal(t, tm.Fields(), lm.Metric[tm.Name()]) + require.Equal(t, logzioType, lm.Type) + require.Equal(t, tm.Tags(), lm.Dimensions) + require.Equal(t, tm.Time(), lm.Time) + } +} + +func TestBadStatusCode(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer ts.Close() + + l := &Logzio{ + Token: testToken, + URL: ts.URL, + Log: testutil.Logger{}, + } + + err := l.Connect() + require.NoError(t, err) + + err = l.Write(testutil.MockMetrics()) + require.Error(t, err) +} + +func TestWrite(t *testing.T) { + tm := testutil.TestMetric(float64(3.14), "test1") + var body bytes.Buffer + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gz, err := gzip.NewReader(r.Body) + require.NoError(t, err) + + _, err = io.Copy(&body, gz) + require.NoError(t, err) + + var lm Metric + err = json.Unmarshal(body.Bytes(), &lm) + require.NoError(t, err) + + require.Equal(t, tm.Fields(), lm.Metric[tm.Name()]) + require.Equal(t, logzioType, lm.Type) + require.Equal(t, tm.Tags(), lm.Dimensions) + require.Equal(t, tm.Time(), lm.Time) + + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + l := &Logzio{ + Token: testToken, + URL: ts.URL, + Log: testutil.Logger{}, + } + + err := l.Connect() + require.NoError(t, err) + + err = l.Write([]telegraf.Metric{tm}) + require.NoError(t, err) +} From 69230017b0e89216d002e3923a38af9c8a9e713d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Fri, 23 Oct 2020 23:11:32 +0200 Subject: [PATCH 028/761] RAS plugin - fix for too many open files handlers (#8306) --- plugins/inputs/ras/ras.go | 27 ++++++++++++++++++++++----- plugins/inputs/ras/ras_test.go | 2 +- 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/ras/ras.go b/plugins/inputs/ras/ras.go index ae7da02a6904a..630e712d8a941 100644 --- a/plugins/inputs/ras/ras.go +++ b/plugins/inputs/ras/ras.go @@ -20,6 +20,9 @@ import ( type Ras struct { DBPath string `toml:"db_path"` + Log telegraf.Logger `toml:"-"` + db *sql.DB `toml:"-"` + latestTimestamp time.Time `toml:"-"` cpuSocketCounters map[int]metricCounters `toml:"-"` serverCounters metricCounters `toml:"-"` @@ -77,20 +80,34 @@ func (r *Ras) Description() string { return "RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required)." } -// Gather reads the stats provided by RASDaemon and writes it to the Accumulator. -func (r *Ras) Gather(acc telegraf.Accumulator) error { +// Start initializes connection to DB, metrics are gathered in Gather +func (r *Ras) Start(telegraf.Accumulator) error { err := validateDbPath(r.DBPath) if err != nil { return err } - db, err := connectToDB(r.DBPath) + r.db, err = connectToDB(r.DBPath) if err != nil { return err } - defer db.Close() - rows, err := db.Query(mceQuery, r.latestTimestamp) + return nil +} + +// Stop closes any existing DB connection +func (r *Ras) Stop() { + if r.db != nil { + err := r.db.Close() + if err != nil { + r.Log.Errorf("Error appeared during closing DB (%s): %v", r.DBPath, err) + } + } +} + +// Gather reads the stats provided by RASDaemon and writes it to the Accumulator. +func (r *Ras) Gather(acc telegraf.Accumulator) error { + rows, err := r.db.Query(mceQuery, r.latestTimestamp) if err != nil { return err } diff --git a/plugins/inputs/ras/ras_test.go b/plugins/inputs/ras/ras_test.go index 1c14c6d8f76ce..900eb8fb86832 100644 --- a/plugins/inputs/ras/ras_test.go +++ b/plugins/inputs/ras/ras_test.go @@ -114,7 +114,7 @@ func TestMissingDatabase(t *testing.T) { var acc testutil.Accumulator ras := newRas() ras.DBPath = "/tmp/test.db" - err := ras.Gather(&acc) + err := ras.Start(&acc) assert.Error(t, err) } From 0ec97a2bdd5bad18c0f53c11d064ba9882965873 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 26 Oct 2020 11:06:39 -0400 Subject: [PATCH 029/761] fix issue with PDH_CALC_NEGATIVE_DENOMINATOR error (#8308) --- .../win_perf_counters/win_perf_counters.go | 70 +++++++++++-------- 1 file changed, 40 insertions(+), 30 deletions(-) diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index bd130a3fd79e9..73cc7dc7311d4 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -386,46 +386,36 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error { // collect if m.UseWildcardsExpansion { value, err := m.query.GetFormattedCounterValueDouble(metric.counterHandle) - if err == nil { - addCounterMeasurement(metric, metric.instance, value, collectFields) - } else { + if err != nil { //ignore invalid data as some counters from process instances returns this sometimes if !isKnownCounterDataError(err) { return fmt.Errorf("error while getting value for counter %s: %v", metric.counterPath, err) } + m.Log.Warnf("error while getting value for counter %q, will skip metric: %v", metric.counterPath, err) + continue } + addCounterMeasurement(metric, metric.instance, value, collectFields) } else { counterValues, err := m.query.GetFormattedCounterArrayDouble(metric.counterHandle) - if err == nil { - for _, cValue := range counterValues { - var add bool - if metric.includeTotal { - // If IncludeTotal is set, include all. - add = true - } else if metric.instance == "*" && !strings.Contains(cValue.InstanceName, "_Total") { - // Catch if set to * and that it is not a '*_Total*' instance. - add = true - } else if metric.instance == cValue.InstanceName { - // Catch if we set it to total or some form of it - add = true - } else if strings.Contains(metric.instance, "#") && strings.HasPrefix(metric.instance, cValue.InstanceName) { - // If you are using a multiple instance identifier such as "w3wp#1" - // phd.dll returns only the first 2 characters of the identifier. - add = true - cValue.InstanceName = metric.instance - } else if metric.instance == "------" { - add = true - } - - if add { - addCounterMeasurement(metric, cValue.InstanceName, cValue.Value, collectFields) - } - } - } else { - //ignore invalid data as some counters from process instances returns this sometimes + if err != nil { + //ignore invalid data as some counters from process instances returns this sometimes if !isKnownCounterDataError(err) { return fmt.Errorf("error while getting value for counter %s: %v", metric.counterPath, err) } + m.Log.Warnf("error while getting value for counter %q, will skip metric: %v", metric.counterPath, err) + continue + } + for _, cValue := range counterValues { + + if strings.Contains(metric.instance, "#") && strings.HasPrefix(metric.instance, cValue.InstanceName) { + // If you are using a multiple instance identifier such as "w3wp#1" + // phd.dll returns only the first 2 characters of the identifier. + cValue.InstanceName = metric.instance + } + + if shouldIncludeMetric(metric, cValue) { + addCounterMeasurement(metric, cValue.InstanceName, cValue.Value, collectFields) + } } } } @@ -443,6 +433,25 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error { return nil } +func shouldIncludeMetric(metric *counter, cValue CounterValue) bool { + if metric.includeTotal { + // If IncludeTotal is set, include all. + return true + } + if metric.instance == "*" && !strings.Contains(cValue.InstanceName, "_Total") { + // Catch if set to * and that it is not a '*_Total*' instance. + return true + } + if metric.instance == cValue.InstanceName { + // Catch if we set it to total or some form of it + return true + } + if metric.instance == "------" { + return true + } + return false +} + func addCounterMeasurement(metric *counter, instanceName string, value float64, collectFields map[instanceGrouping]map[string]interface{}) { measurement := sanitizedChars.Replace(metric.measurement) if measurement == "" { @@ -457,6 +466,7 @@ func addCounterMeasurement(metric *counter, instanceName string, value float64, func isKnownCounterDataError(err error) bool { if pdhErr, ok := err.(*PdhError); ok && (pdhErr.ErrorCode == PDH_INVALID_DATA || + pdhErr.ErrorCode == PDH_CALC_NEGATIVE_DENOMINATOR || pdhErr.ErrorCode == PDH_CALC_NEGATIVE_VALUE || pdhErr.ErrorCode == PDH_CSTATUS_INVALID_DATA || pdhErr.ErrorCode == PDH_NO_DATA) { From 0e310208360a2b444c64eea0f4adf6cf6f81ba52 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 26 Oct 2020 11:35:10 -0400 Subject: [PATCH 030/761] processes: fix issue with stat no such file/dir (#8309) --- plugins/inputs/processes/processes_notwindows.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/plugins/inputs/processes/processes_notwindows.go b/plugins/inputs/processes/processes_notwindows.go index 61092aad96998..9faec83afa7d0 100644 --- a/plugins/inputs/processes/processes_notwindows.go +++ b/plugins/inputs/processes/processes_notwindows.go @@ -129,7 +129,6 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error { // get process states from /proc/(pid)/stat files func (p *Processes) gatherFromProc(fields map[string]interface{}) error { filenames, err := filepath.Glob(linux_sysctl_fs.GetHostProc() + "/[0-9]*/stat") - if err != nil { return err } @@ -192,10 +191,6 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { } func readProcFile(filename string) ([]byte, error) { - _, err := os.Stat(filename) - if err != nil { - return nil, err - } data, err := ioutil.ReadFile(filename) if err != nil { if os.IsNotExist(err) { From 14e81f479dee6e74c8d2499713740b447845060b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Mon, 26 Oct 2020 22:33:26 +0100 Subject: [PATCH 031/761] Disable RAS input plugin on specific Linux architectures: mips64, mips64le, ppc64le, riscv64 (#8317) --- plugins/inputs/ras/README.md | 2 +- plugins/inputs/ras/ras.go | 3 ++- plugins/inputs/ras/ras_notlinux.go | 2 +- plugins/inputs/ras/ras_test.go | 3 ++- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/ras/README.md b/plugins/inputs/ras/README.md index 8d89cf385de65..9c1cda75bff10 100644 --- a/plugins/inputs/ras/README.md +++ b/plugins/inputs/ras/README.md @@ -1,6 +1,6 @@ # RAS Daemon Input Plugin -This plugin is only available on Linux. +This plugin is only available on Linux (only for `386`, `amd64`, `arm` and `arm64` architectures). The `RAS` plugin gathers and counts errors provided by [RASDaemon](https://github.com/mchehab/rasdaemon). diff --git a/plugins/inputs/ras/ras.go b/plugins/inputs/ras/ras.go index 630e712d8a941..a8599c4a78d0f 100644 --- a/plugins/inputs/ras/ras.go +++ b/plugins/inputs/ras/ras.go @@ -1,4 +1,5 @@ -// +build linux,!mips,!mipsle,!s390x +// +build linux +// +build 386 amd64 arm arm64 package ras diff --git a/plugins/inputs/ras/ras_notlinux.go b/plugins/inputs/ras/ras_notlinux.go index b529a4b644038..74f0aaf9fc59f 100644 --- a/plugins/inputs/ras/ras_notlinux.go +++ b/plugins/inputs/ras/ras_notlinux.go @@ -1,3 +1,3 @@ -// +build !linux mips mipsle s390x +// +build !linux linux,!386,!amd64,!arm,!arm64 package ras diff --git a/plugins/inputs/ras/ras_test.go b/plugins/inputs/ras/ras_test.go index 900eb8fb86832..b8b70d55d66d7 100644 --- a/plugins/inputs/ras/ras_test.go +++ b/plugins/inputs/ras/ras_test.go @@ -1,4 +1,5 @@ -// +build linux,!mips,!mipsle,!s390x +// +build linux +// +build 386 amd64 arm arm64 package ras From 0621b4bcbed866fc3f3c0741690e60ccc0614b82 Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Tue, 27 Oct 2020 16:06:39 +0100 Subject: [PATCH 032/761] re-added changes to sqlserverqueries.go (#8323) From f14a50b51144f4bdb6f604c39326ea1d0e88e41d Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Tue, 27 Oct 2020 19:36:21 +0100 Subject: [PATCH 033/761] SQL Server - server_properties added sql_version_desc (#8324) --- plugins/inputs/sqlserver/azuresqlqueries.go | 1 + plugins/inputs/sqlserver/sqlserverqueries.go | 1 + 2 files changed, 2 insertions(+) diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go index 06782ecbadd0f..3ea95b956988c 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqlqueries.go @@ -677,6 +677,7 @@ SELECT TOP 1 ,cast(([reserved_storage_mb] - [storage_space_used_mb]) as bigint) AS [available_storage_mb] ,(SELECT DATEDIFF(MINUTE,[sqlserver_start_time],GETDATE()) from sys.dm_os_sys_info) as [uptime] ,SERVERPROPERTY('ProductVersion') AS [sql_version] + ,LEFT(@@VERSION,CHARINDEX(' - ',@@VERSION)) AS [sql_version_desc] ,[db_online] ,[db_restoring] ,[db_recovering] diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index d413986037c02..4953193233caa 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -216,6 +216,7 @@ SELECT ,CAST(SERVERPROPERTY(''EngineEdition'') AS int) AS [engine_edition] ,DATEDIFF(MINUTE,si.[sqlserver_start_time],GETDATE()) AS [uptime] ,SERVERPROPERTY(''ProductVersion'') AS [sql_version] + ,LEFT(@@VERSION,CHARINDEX(' - ',@@VERSION)) AS [sql_version_desc] ,dbs.[db_online] ,dbs.[db_restoring] ,dbs.[db_recovering] From 1313f2314ffa66c4d46bd7336736876f8e0b3d60 Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Tue, 27 Oct 2020 20:19:49 +0100 Subject: [PATCH 034/761] SQL Server - PerformanceCounters - removed synthetic counters (#8325) --- plugins/inputs/sqlserver/sqlserverqueries.go | 221 ++++++++----------- 1 file changed, 92 insertions(+), 129 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index 4953193233caa..f3d3aa3ca34c9 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -293,8 +293,6 @@ END DECLARE @SqlStatement AS nvarchar(max) ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int) - ,@Columns AS nvarchar(MAX) = '' - ,@PivotColumns AS nvarchar(MAX) = '' DECLARE @PCounters TABLE ( @@ -306,7 +304,7 @@ DECLARE @PCounters TABLE PRIMARY KEY([object_name], [counter_name], [instance_name]) ); -SET @SqlStatement = N' +WITH PerfCounters AS ( SELECT DISTINCT RTRIM(spi.[object_name]) [object_name] ,RTRIM(spi.[counter_name]) [counter_name] @@ -316,138 +314,103 @@ SELECT DISTINCT FROM sys.dm_os_performance_counters AS spi WHERE counter_name IN ( - ''SQL Compilations/sec'' - ,''SQL Re-Compilations/sec'' - ,''User Connections'' - ,''Batch Requests/sec'' - ,''Logouts/sec'' - ,''Logins/sec'' - ,''Processes blocked'' - ,''Latch Waits/sec'' - ,''Full Scans/sec'' - ,''Index Searches/sec'' - ,''Page Splits/sec'' - ,''Page lookups/sec'' - ,''Page reads/sec'' - ,''Page writes/sec'' - ,''Readahead pages/sec'' - ,''Lazy writes/sec'' - ,''Checkpoint pages/sec'' - ,''Page life expectancy'' - ,''Log File(s) Size (KB)'' - ,''Log File(s) Used Size (KB)'' - ,''Data File(s) Size (KB)'' - ,''Transactions/sec'' - ,''Write Transactions/sec'' - ,''Active Temp Tables'' - ,''Temp Tables Creation Rate'' - ,''Temp Tables For Destruction'' - ,''Free Space in tempdb (KB)'' - ,''Version Store Size (KB)'' - ,''Memory Grants Pending'' - ,''Memory Grants Outstanding'' - ,''Free list stalls/sec'' - ,''Buffer cache hit ratio'' - ,''Buffer cache hit ratio base'' - ,''Backup/Restore Throughput/sec'' - ,''Total Server Memory (KB)'' - ,''Target Server Memory (KB)'' - ,''Log Flushes/sec'' - ,''Log Flush Wait Time'' - ,''Memory broker clerk size'' - ,''Log Bytes Flushed/sec'' - ,''Bytes Sent to Replica/sec'' - ,''Log Send Queue'' - ,''Bytes Sent to Transport/sec'' - ,''Sends to Replica/sec'' - ,''Bytes Sent to Transport/sec'' - ,''Sends to Transport/sec'' - ,''Bytes Received from Replica/sec'' - ,''Receives from Replica/sec'' - ,''Flow Control Time (ms/sec)'' - ,''Flow Control/sec'' - ,''Resent Messages/sec'' - ,''Redone Bytes/sec'' - ,''XTP Memory Used (KB)'' - ,''Transaction Delay'' - ,''Log Bytes Received/sec'' - ,''Log Apply Pending Queue'' - ,''Redone Bytes/sec'' - ,''Recovery Queue'' - ,''Log Apply Ready Queue'' - ,''CPU usage %'' - ,''CPU usage % base'' - ,''Queued requests'' - ,''Requests completed/sec'' - ,''Blocked tasks'' - ,''Active memory grant amount (KB)'' - ,''Disk Read Bytes/sec'' - ,''Disk Read IO Throttled/sec'' - ,''Disk Read IO/sec'' - ,''Disk Write Bytes/sec'' - ,''Disk Write IO Throttled/sec'' - ,''Disk Write IO/sec'' - ,''Used memory (KB)'' - ,''Forwarded Records/sec'' - ,''Background Writer pages/sec'' - ,''Percent Log Used'' - ,''Log Send Queue KB'' - ,''Redo Queue KB'' - ,''Mirrored Write Transactions/sec'' - ,''Group Commit Time'' - ,''Group Commits/Sec'' + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Temp Tables' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' ) OR ( - spi.[object_name] LIKE ''%User Settable%'' - OR spi.[object_name] LIKE ''%SQL Errors%'' - OR spi.[object_name] LIKE ''%Batch Resp Statistics%'' + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' ) OR ( - spi.[instance_name] IN (''_Total'') + spi.[instance_name] IN ('_Total') AND spi.[counter_name] IN ( - ''Lock Timeouts/sec'' - ,''Lock Timeouts (timeout > 0)/sec'' - ,''Number of Deadlocks/sec'' - ,''Lock Waits/sec'' - ,''Latch Waits/sec'' + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' ) ) -' - -INSERT INTO @PCounters EXEC(@SqlStatement) - -IF @MajorMinorVersion >= 1300 BEGIN - SET @Columns += N' - ,rgwg.[total_cpu_usage_preemptive_ms] AS [Preemptive CPU Usage (time)]' - SET @PivotColumns += N',[Preemptive CPU Usage (time)]' -END - -SET @SqlStatement = N' -SELECT - ''SQLServer:Workload Group Stats'' AS [object] - ,[counter] - ,[instance] - ,CAST(vs.[value] AS bigint) AS [value] - ,1 -FROM -( - SELECT - rgwg.[name] AS [instance] - ,rgwg.[total_request_count] AS [Request Count] - ,rgwg.[total_queued_request_count] AS [Queued Request Count] - ,rgwg.[total_cpu_limit_violation_count] AS [CPU Limit Violation Count] - ,rgwg.[total_cpu_usage_ms] AS [CPU Usage (time)] - ,rgwg.[total_lock_wait_count] AS [Lock Wait Count] - ,rgwg.[total_lock_wait_time_ms] AS [Lock Wait Time] - ,rgwg.[total_reduced_memgrant_count] AS [Reduced Memory Grant Count]' - + @Columns + N' - FROM sys.dm_resource_governor_workload_groups AS rgwg - INNER JOIN sys.dm_resource_governor_resource_pools AS rgrp /*No fields from this table. remove?*/ - ON rgwg.[pool_id] = rgrp.[pool_id] -) AS rg -UNPIVOT ( - [value] FOR [counter] IN ( [Request Count], [Queued Request Count], [CPU Limit Violation Count], [CPU Usage (time)], [Lock Wait Count], [Lock Wait Time], [Reduced Memory Grant Count] ' + @PivotColumns + N') -) AS vs' +) -INSERT INTO @PCounters EXEC(@SqlStatement) +INSERT INTO @PCounters SELECT * FROM PerfCounters; SELECT 'sqlserver_performance' AS [measurement] From e83a1656351058571a7235471b9cdf46326c010a Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 28 Oct 2020 12:16:59 -0400 Subject: [PATCH 035/761] kafka sasl-mechanism auth support for SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI (#8318) --- docs/LICENSE_OF_DEPENDENCIES.md | 2 + go.mod | 1 + go.sum | 2 + plugins/common/kafka/sasl.go | 84 ++++++++++++ plugins/common/kafka/scram_client.go | 36 ++++++ plugins/inputs/kafka_consumer/README.md | 17 +++ .../inputs/kafka_consumer/kafka_consumer.go | 34 +++-- plugins/outputs/kafka/README.md | 17 +++ plugins/outputs/kafka/kafka.go | 120 +++++++++--------- 9 files changed, 243 insertions(+), 70 deletions(-) create mode 100644 plugins/common/kafka/scram_client.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index a6ade91a52087..66dc38b43eb08 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -141,6 +141,8 @@ following works: - github.com/wavefronthq/wavefront-sdk-go [Apache License 2.0](https://github.com/wavefrontHQ/wavefront-sdk-go/blob/master/LICENSE) - github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE) - github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) +- github.com/xdg/scram [Apache License 2.0](https://github.com/xdg-go/scram/blob/master/LICENSE) +- github.com/xdg/stringprep [Apache License 2.0](https://github.com/xdg-go/stringprep/blob/master/LICENSE) - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) - go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) - go.starlark.net [BSD 3-Clause "New" or "Revised" License](https://github.com/google/starlark-go/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 92de24ee19120..ecc7f1d9f0bd8 100644 --- a/go.mod +++ b/go.mod @@ -127,6 +127,7 @@ require ( github.com/wavefronthq/wavefront-sdk-go v0.9.2 github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect + github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect go.starlark.net v0.0.0-20200901195727-6e684ef5eeee golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect diff --git a/go.sum b/go.sum index 1297eec30d08f..37d106f3f4e02 100644 --- a/go.sum +++ b/go.sum @@ -592,7 +592,9 @@ github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOF github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= diff --git a/plugins/common/kafka/sasl.go b/plugins/common/kafka/sasl.go index cd3358b3833ec..e565aea5813ce 100644 --- a/plugins/common/kafka/sasl.go +++ b/plugins/common/kafka/sasl.go @@ -6,6 +6,78 @@ import ( "github.com/Shopify/sarama" ) +type SASLAuth struct { + SASLUsername string `toml:"sasl_username"` + SASLPassword string `toml:"sasl_password"` + SASLMechanism string `toml:"sasl_mechanism"` + SASLVersion *int `toml:"sasl_version"` + + // GSSAPI config + SASLGSSAPIServiceName string `toml:"sasl_gssapi_service_name"` + SASLGSSAPIAuthType string `toml:"sasl_gssapi_auth_type"` + SASLGSSAPIDisablePAFXFAST bool `toml:"sasl_gssapi_disable_pafxfast"` + SASLGSSAPIKerberosConfigPath string `toml:"sasl_gssapi_kerberos_config_path"` + SASLGSSAPIKeyTabPath string `toml:"sasl_gssapi_key_tab_path"` + SASLGSSAPIRealm string `toml:"sasl_gssapi_realm"` + + // OAUTHBEARER config. experimental. undoubtedly this is not good enough. + SASLAccessToken string `toml:"sasl_access_token"` +} + +// SetSASLConfig configures SASL for kafka (sarama) +func (k *SASLAuth) SetSASLConfig(config *sarama.Config) error { + config.Net.SASL.User = k.SASLUsername + config.Net.SASL.Password = k.SASLPassword + + if k.SASLMechanism != "" { + config.Net.SASL.Mechanism = sarama.SASLMechanism(k.SASLMechanism) + switch config.Net.SASL.Mechanism { + case sarama.SASLTypeSCRAMSHA256: + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &XDGSCRAMClient{HashGeneratorFcn: SHA256} + } + case sarama.SASLTypeSCRAMSHA512: + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &XDGSCRAMClient{HashGeneratorFcn: SHA512} + } + case sarama.SASLTypeOAuth: + config.Net.SASL.TokenProvider = k // use self as token provider. + case sarama.SASLTypeGSSAPI: + config.Net.SASL.GSSAPI.ServiceName = k.SASLGSSAPIServiceName + config.Net.SASL.GSSAPI.AuthType = gssapiAuthType(k.SASLGSSAPIAuthType) + config.Net.SASL.GSSAPI.Username = k.SASLUsername + config.Net.SASL.GSSAPI.Password = k.SASLPassword + config.Net.SASL.GSSAPI.DisablePAFXFAST = k.SASLGSSAPIDisablePAFXFAST + config.Net.SASL.GSSAPI.KerberosConfigPath = k.SASLGSSAPIKerberosConfigPath + config.Net.SASL.GSSAPI.KeyTabPath = k.SASLGSSAPIKeyTabPath + config.Net.SASL.GSSAPI.Realm = k.SASLGSSAPIRealm + + case sarama.SASLTypePlaintext: + // nothing. + default: + } + } + + if k.SASLUsername != "" || k.SASLMechanism != "" { + config.Net.SASL.Enable = true + + version, err := SASLVersion(config.Version, k.SASLVersion) + if err != nil { + return err + } + config.Net.SASL.Version = version + } + return nil +} + +// Token does nothing smart, it just grabs a hard-coded token from config. +func (k *SASLAuth) Token() (*sarama.AccessToken, error) { + return &sarama.AccessToken{ + Token: k.SASLAccessToken, + Extensions: map[string]string{}, + }, nil +} + func SASLVersion(kafkaVersion sarama.KafkaVersion, saslVersion *int) (int16, error) { if saslVersion == nil { if kafkaVersion.IsAtLeast(sarama.V1_0_0_0) { @@ -23,3 +95,15 @@ func SASLVersion(kafkaVersion sarama.KafkaVersion, saslVersion *int) (int16, err return 0, errors.New("invalid SASL version") } } + +func gssapiAuthType(authType string) int { + switch authType { + case "KRB5_USER_AUTH": + return sarama.KRB5_USER_AUTH + case "KRB5_KEYTAB_AUTH": + return sarama.KRB5_KEYTAB_AUTH + default: + return 0 + } + +} diff --git a/plugins/common/kafka/scram_client.go b/plugins/common/kafka/scram_client.go new file mode 100644 index 0000000000000..f6aa9d6c4e285 --- /dev/null +++ b/plugins/common/kafka/scram_client.go @@ -0,0 +1,36 @@ +package kafka + +import ( + "crypto/sha256" + "crypto/sha512" + "hash" + + "github.com/xdg/scram" +) + +var SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() } +var SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() } + +type XDGSCRAMClient struct { + *scram.Client + *scram.ClientConversation + scram.HashGeneratorFcn +} + +func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { + x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + if err != nil { + return err + } + x.ClientConversation = x.Client.NewConversation() + return nil +} + +func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { + response, err = x.ClientConversation.Step(challenge) + return +} + +func (x *XDGSCRAMClient) Done() bool { + return x.ClientConversation.Done() +} diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index dec39cc32871b..3535f8fce5b5a 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -39,6 +39,23 @@ and use the old zookeeper connection method. # sasl_username = "kafka" # sasl_password = "secret" + ## Optional SASL: + ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + ## (defaults to PLAIN) + # sasl_mechanism = "" + + ## used if sasl_mechanism is GSSAPI (experimental) + # sasl_gssapi_service_name = "" + # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH + # sasl_gssapi_auth_type = "KRB5_USER_AUTH" + # sasl_gssapi_kerberos_config_path = "/" + # sasl_gssapi_realm = "realm" + # sasl_gssapi_key_tab_path = "" + # sasl_gssapi_disable_pafxfast = false + + ## used if sasl_mechanism is OAUTHBEARER (experimental) + # sasl_access_token = "" + ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 0fd7d3693d48c..a0b4b41cf6167 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -48,6 +48,23 @@ const sampleConfig = ` # sasl_username = "kafka" # sasl_password = "secret" + ## Optional SASL: + ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + ## (defaults to PLAIN) + # sasl_mechanism = "" + + ## used if sasl_mechanism is GSSAPI (experimental) + # sasl_gssapi_service_name = "" + # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH + # sasl_gssapi_auth_type = "KRB5_USER_AUTH" + # sasl_gssapi_kerberos_config_path = "/" + # sasl_gssapi_realm = "realm" + # sasl_gssapi_key_tab_path = "" + # sasl_gssapi_disable_pafxfast = false + + ## used if sasl_mechanism is OAUTHBEARER (experimental) + # sasl_access_token = "" + ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 @@ -102,9 +119,8 @@ type KafkaConsumer struct { Topics []string `toml:"topics"` TopicTag string `toml:"topic_tag"` Version string `toml:"version"` - SASLPassword string `toml:"sasl_password"` - SASLUsername string `toml:"sasl_username"` - SASLVersion *int `toml:"sasl_version"` + + kafka.SASLAuth EnableTLS *bool `toml:"enable_tls"` tls.ClientConfig @@ -191,16 +207,8 @@ func (k *KafkaConsumer) Init() error { } } - if k.SASLUsername != "" && k.SASLPassword != "" { - config.Net.SASL.User = k.SASLUsername - config.Net.SASL.Password = k.SASLPassword - config.Net.SASL.Enable = true - - version, err := kafka.SASLVersion(config.Version, k.SASLVersion) - if err != nil { - return err - } - config.Net.SASL.Version = version + if err := k.SetSASLConfig(config); err != nil { + return err } if k.ClientID != "" { diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index d1cc9f0cbb18b..8c16ee0541f61 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -111,6 +111,23 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm # sasl_username = "kafka" # sasl_password = "secret" + ## Optional SASL: + ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + ## (defaults to PLAIN) + # sasl_mechanism = "" + + ## used if sasl_mechanism is GSSAPI (experimental) + # sasl_gssapi_service_name = "" + # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH + # sasl_gssapi_auth_type = "KRB5_USER_AUTH" + # sasl_gssapi_kerberos_config_path = "/" + # sasl_gssapi_realm = "realm" + # sasl_gssapi_key_tab_path = "" + # sasl_gssapi_disable_pafxfast = false + + ## used if sasl_mechanism is OAUTHBEARER (experimental) + # sasl_access_token = "" + ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 26a0c5bdb9a65..5fdfae48d221b 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -24,53 +24,50 @@ var ValidTopicSuffixMethods = []string{ var zeroTime = time.Unix(0, 0) -type ( - Kafka struct { - Brokers []string `toml:"brokers"` - Topic string `toml:"topic"` - TopicTag string `toml:"topic_tag"` - ExcludeTopicTag bool `toml:"exclude_topic_tag"` - ClientID string `toml:"client_id"` - TopicSuffix TopicSuffix `toml:"topic_suffix"` - RoutingTag string `toml:"routing_tag"` - RoutingKey string `toml:"routing_key"` - CompressionCodec int `toml:"compression_codec"` - RequiredAcks int `toml:"required_acks"` - MaxRetry int `toml:"max_retry"` - MaxMessageBytes int `toml:"max_message_bytes"` - - Version string `toml:"version"` - - // Legacy TLS config options - // TLS client certificate - Certificate string - // TLS client key - Key string - // TLS certificate authority - CA string - - EnableTLS *bool `toml:"enable_tls"` - tlsint.ClientConfig - - SASLUsername string `toml:"sasl_username"` - SASLPassword string `toml:"sasl_password"` - SASLVersion *int `toml:"sasl_version"` - - Log telegraf.Logger `toml:"-"` - - tlsConfig tls.Config - - producerFunc func(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) - producer sarama.SyncProducer - - serializer serializers.Serializer - } - TopicSuffix struct { - Method string `toml:"method"` - Keys []string `toml:"keys"` - Separator string `toml:"separator"` - } -) +type Kafka struct { + Brokers []string `toml:"brokers"` + Topic string `toml:"topic"` + TopicTag string `toml:"topic_tag"` + ExcludeTopicTag bool `toml:"exclude_topic_tag"` + ClientID string `toml:"client_id"` + TopicSuffix TopicSuffix `toml:"topic_suffix"` + RoutingTag string `toml:"routing_tag"` + RoutingKey string `toml:"routing_key"` + CompressionCodec int `toml:"compression_codec"` + RequiredAcks int `toml:"required_acks"` + MaxRetry int `toml:"max_retry"` + MaxMessageBytes int `toml:"max_message_bytes"` + + Version string `toml:"version"` + + // Legacy TLS config options + // TLS client certificate + Certificate string + // TLS client key + Key string + // TLS certificate authority + CA string + + EnableTLS *bool `toml:"enable_tls"` + tlsint.ClientConfig + + kafka.SASLAuth + + Log telegraf.Logger `toml:"-"` + + tlsConfig tls.Config + + producerFunc func(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) + producer sarama.SyncProducer + + serializer serializers.Serializer +} + +type TopicSuffix struct { + Method string `toml:"method"` + Keys []string `toml:"keys"` + Separator string `toml:"separator"` +} // DebugLogger logs messages from sarama at the debug level. type DebugLogger struct { @@ -205,6 +202,23 @@ var sampleConfig = ` # sasl_username = "kafka" # sasl_password = "secret" + ## Optional SASL: + ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + ## (defaults to PLAIN) + # sasl_mechanism = "" + + ## used if sasl_mechanism is GSSAPI (experimental) + # sasl_gssapi_service_name = "" + # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH + # sasl_gssapi_auth_type = "KRB5_USER_AUTH" + # sasl_gssapi_kerberos_config_path = "/" + # sasl_gssapi_realm = "realm" + # sasl_gssapi_key_tab_path = "" + # sasl_gssapi_disable_pafxfast = false + + ## used if sasl_mechanism is OAUTHBEARER (experimental) + # sasl_access_token = "" + ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 @@ -321,16 +335,8 @@ func (k *Kafka) Connect() error { } } - if k.SASLUsername != "" && k.SASLPassword != "" { - config.Net.SASL.User = k.SASLUsername - config.Net.SASL.Password = k.SASLPassword - config.Net.SASL.Enable = true - - version, err := kafka.SASLVersion(config.Version, k.SASLVersion) - if err != nil { - return err - } - config.Net.SASL.Version = version + if err := k.SetSASLConfig(config); err != nil { + return err } producer, err := k.producerFunc(k.Brokers, config) From c5e04325aba1de80e23fef5f1184cf291c7fb86d Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Wed, 28 Oct 2020 19:00:37 +0100 Subject: [PATCH 036/761] SQL Server Azure PerfCounters Fix (#8331) --- plugins/inputs/sqlserver/azuresqlqueries.go | 362 ++++++++++---------- 1 file changed, 182 insertions(+), 180 deletions(-) diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go index 3ea95b956988c..fa4eb197723b1 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqlqueries.go @@ -432,100 +432,101 @@ WITH PerfCounters AS ( END WHERE counter_name IN ( - ''SQL Compilations/sec'' - ,''SQL Re-Compilations/sec'' - ,''User Connections'' - ,''Batch Requests/sec'' - ,''Logouts/sec'' - ,''Logins/sec'' - ,''Processes blocked'' - ,''Latch Waits/sec'' - ,''Full Scans/sec'' - ,''Index Searches/sec'' - ,''Page Splits/sec'' - ,''Page lookups/sec'' - ,''Page reads/sec'' - ,''Page writes/sec'' - ,''Readahead pages/sec'' - ,''Lazy writes/sec'' - ,''Checkpoint pages/sec'' - ,''Page life expectancy'' - ,''Log File(s) Size (KB)'' - ,''Log File(s) Used Size (KB)'' - ,''Data File(s) Size (KB)'' - ,''Transactions/sec'' - ,''Write Transactions/sec'' - ,''Active Temp Tables'' - ,''Temp Tables Creation Rate'' - ,''Temp Tables For Destruction'' - ,''Free Space in tempdb (KB)'' - ,''Version Store Size (KB)'' - ,''Memory Grants Pending'' - ,''Memory Grants Outstanding'' - ,''Free list stalls/sec'' - ,''Buffer cache hit ratio'' - ,''Buffer cache hit ratio base'' - ,''Backup/Restore Throughput/sec'' - ,''Total Server Memory (KB)'' - ,''Target Server Memory (KB)'' - ,''Log Flushes/sec'' - ,''Log Flush Wait Time'' - ,''Memory broker clerk size'' - ,''Log Bytes Flushed/sec'' - ,''Bytes Sent to Replica/sec'' - ,''Log Send Queue'' - ,''Bytes Sent to Transport/sec'' - ,''Sends to Replica/sec'' - ,''Bytes Sent to Transport/sec'' - ,''Sends to Transport/sec'' - ,''Bytes Received from Replica/sec'' - ,''Receives from Replica/sec'' - ,''Flow Control Time (ms/sec)'' - ,''Flow Control/sec'' - ,''Resent Messages/sec'' - ,''Redone Bytes/sec'' - ,''XTP Memory Used (KB)'' - ,''Transaction Delay'' - ,''Log Bytes Received/sec'' - ,''Log Apply Pending Queue'' - ,''Redone Bytes/sec'' - ,''Recovery Queue'' - ,''Log Apply Ready Queue'' - ,''CPU usage %'' - ,''CPU usage % base'' - ,''Queued requests'' - ,''Requests completed/sec'' - ,''Blocked tasks'' - ,''Active memory grant amount (KB)'' - ,''Disk Read Bytes/sec'' - ,''Disk Read IO Throttled/sec'' - ,''Disk Read IO/sec'' - ,''Disk Write Bytes/sec'' - ,''Disk Write IO Throttled/sec'' - ,''Disk Write IO/sec'' - ,''Used memory (KB)'' - ,''Forwarded Records/sec'' - ,''Background Writer pages/sec'' - ,''Percent Log Used'' - ,''Log Send Queue KB'' - ,''Redo Queue KB'' - ,''Mirrored Write Transactions/sec'' - ,''Group Commit Time'' - ,''Group Commits/Sec'' + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Temp Tables' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' ) OR ( - spi.[object_name] LIKE ''%User Settable%'' - OR spi.[object_name] LIKE ''%SQL Errors%'' - OR spi.[object_name] LIKE ''%Batch Resp Statistics%'' + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' ) OR ( - spi.[instance_name] IN (''_Total'') + spi.[instance_name] IN ('_Total') AND spi.[counter_name] IN ( - ''Lock Timeouts/sec'' - ,''Lock Timeouts (timeout > 0)/sec'' - ,''Number of Deadlocks/sec'' - ,''Lock Waits/sec'' - ,''Latch Waits/sec'' + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' ) ) +) INSERT INTO @PCounters select * from PerfCounters @@ -539,7 +540,7 @@ SELECT WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.[instance_name],'') END AS [instance] - ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value], + ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] from @PCounters pc LEFT OUTER JOIN @PCounters AS pc1 @@ -954,100 +955,101 @@ WITH PerfCounters AS ( END WHERE counter_name IN ( - ''SQL Compilations/sec'' - ,''SQL Re-Compilations/sec'' - ,''User Connections'' - ,''Batch Requests/sec'' - ,''Logouts/sec'' - ,''Logins/sec'' - ,''Processes blocked'' - ,''Latch Waits/sec'' - ,''Full Scans/sec'' - ,''Index Searches/sec'' - ,''Page Splits/sec'' - ,''Page lookups/sec'' - ,''Page reads/sec'' - ,''Page writes/sec'' - ,''Readahead pages/sec'' - ,''Lazy writes/sec'' - ,''Checkpoint pages/sec'' - ,''Page life expectancy'' - ,''Log File(s) Size (KB)'' - ,''Log File(s) Used Size (KB)'' - ,''Data File(s) Size (KB)'' - ,''Transactions/sec'' - ,''Write Transactions/sec'' - ,''Active Temp Tables'' - ,''Temp Tables Creation Rate'' - ,''Temp Tables For Destruction'' - ,''Free Space in tempdb (KB)'' - ,''Version Store Size (KB)'' - ,''Memory Grants Pending'' - ,''Memory Grants Outstanding'' - ,''Free list stalls/sec'' - ,''Buffer cache hit ratio'' - ,''Buffer cache hit ratio base'' - ,''Backup/Restore Throughput/sec'' - ,''Total Server Memory (KB)'' - ,''Target Server Memory (KB)'' - ,''Log Flushes/sec'' - ,''Log Flush Wait Time'' - ,''Memory broker clerk size'' - ,''Log Bytes Flushed/sec'' - ,''Bytes Sent to Replica/sec'' - ,''Log Send Queue'' - ,''Bytes Sent to Transport/sec'' - ,''Sends to Replica/sec'' - ,''Bytes Sent to Transport/sec'' - ,''Sends to Transport/sec'' - ,''Bytes Received from Replica/sec'' - ,''Receives from Replica/sec'' - ,''Flow Control Time (ms/sec)'' - ,''Flow Control/sec'' - ,''Resent Messages/sec'' - ,''Redone Bytes/sec'' - ,''XTP Memory Used (KB)'' - ,''Transaction Delay'' - ,''Log Bytes Received/sec'' - ,''Log Apply Pending Queue'' - ,''Redone Bytes/sec'' - ,''Recovery Queue'' - ,''Log Apply Ready Queue'' - ,''CPU usage %'' - ,''CPU usage % base'' - ,''Queued requests'' - ,''Requests completed/sec'' - ,''Blocked tasks'' - ,''Active memory grant amount (KB)'' - ,''Disk Read Bytes/sec'' - ,''Disk Read IO Throttled/sec'' - ,''Disk Read IO/sec'' - ,''Disk Write Bytes/sec'' - ,''Disk Write IO Throttled/sec'' - ,''Disk Write IO/sec'' - ,''Used memory (KB)'' - ,''Forwarded Records/sec'' - ,''Background Writer pages/sec'' - ,''Percent Log Used'' - ,''Log Send Queue KB'' - ,''Redo Queue KB'' - ,''Mirrored Write Transactions/sec'' - ,''Group Commit Time'' - ,''Group Commits/Sec'' + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Temp Tables' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' ) OR ( - spi.[object_name] LIKE ''%User Settable%'' - OR spi.[object_name] LIKE ''%SQL Errors%'' - OR spi.[object_name] LIKE ''%Batch Resp Statistics%'' + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' ) OR ( - spi.[instance_name] IN (''_Total'') + spi.[instance_name] IN ('_Total') AND spi.[counter_name] IN ( - ''Lock Timeouts/sec'' - ,''Lock Timeouts (timeout > 0)/sec'' - ,''Number of Deadlocks/sec'' - ,''Lock Waits/sec'' - ,''Latch Waits/sec'' + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' ) ) +) INSERT INTO @PCounters select * from PerfCounters @@ -1060,7 +1062,7 @@ SELECT WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.[instance_name],'') END AS [instance] - ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value], + ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] from @PCounters pc LEFT OUTER JOIN @PCounters AS pc1 From 5f5e87b596b772e7a3c8cfe2340e23587fc2d4ab Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 28 Oct 2020 18:35:44 -0400 Subject: [PATCH 037/761] Update changelog (cherry picked from commit 2fcd7e56185d3c97020f3a661d38071b5a9ff1f2) --- CHANGELOG.md | 18 ++++++++++++++++++ build_version.txt | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2acd1e1d8566b..bb13db8412f22 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,22 @@ ## v1.16.0 [unreleased] +## v1.16.1 [2020-10-28] + +#### Release Notes + + - [#8318](https://github.com/influxdata/telegraf/pull/8318) `common.kafka` kafka sasl-mechanism auth support for SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + +#### Bugfixes + + - [#8331](https://github.com/influxdata/telegraf/pull/8331) `inputs.sqlserver` SQL Server Azure PerfCounters Fix + - [#8325](https://github.com/influxdata/telegraf/pull/8325) `inputs.sqlserver` SQL Server - PerformanceCounters - removed synthetic counters + - [#8324](https://github.com/influxdata/telegraf/pull/8324) `inputs.sqlserver` SQL Server - server_properties added sql_version_desc + - [#8317](https://github.com/influxdata/telegraf/pull/8317) `inputs.ras` Disable RAS input plugin on specific Linux architectures: mips64, mips64le, ppc64le, riscv64 + - [#8309](https://github.com/influxdata/telegraf/pull/8309) `inputs.processes` processes: fix issue with stat no such file/dir + - [#8308](https://github.com/influxdata/telegraf/pull/8308) `inputs.win_perf_counters` fix issue with PDH_CALC_NEGATIVE_DENOMINATOR error + - [#8306](https://github.com/influxdata/telegraf/pull/8306) `inputs.ras` RAS plugin - fix for too many open files handlers + + +## v1.16.0 [2020-10-21] #### Release Notes diff --git a/build_version.txt b/build_version.txt index 092afa15df4df..41c11ffb730cf 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.17.0 +1.16.1 From a49e37a2a608abdd8654d803ef4d3c61327714e4 Mon Sep 17 00:00:00 2001 From: Thomas Schuetz <38893055+thschue@users.noreply.github.com> Date: Thu, 29 Oct 2020 16:04:11 +0100 Subject: [PATCH 038/761] #8328 Fixed a bug with the state map in Dynatrace Plugin (#8329) --- plugins/outputs/dynatrace/dynatrace.go | 32 ++++++++++++++------------ 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 596366ae8470f..522512f7f6c42 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -28,16 +28,15 @@ var ( maxMetricKeyLen = 250 ) -var counts map[string]string -var sent = 0 - // Dynatrace Configuration for the Dynatrace output plugin type Dynatrace struct { - URL string `toml:"url"` - APIToken string `toml:"api_token"` - Prefix string `toml:"prefix"` - Log telegraf.Logger `toml:"-"` - Timeout internal.Duration `toml:"timeout"` + URL string `toml:"url"` + APIToken string `toml:"api_token"` + Prefix string `toml:"prefix"` + Log telegraf.Logger `toml:"-"` + Timeout internal.Duration `toml:"timeout"` + State map[string]string + SendCounter int tls.ClientConfig @@ -117,6 +116,8 @@ func (d *Dynatrace) normalize(s string, max int) (string, error) { normalizedString = normalizedString[:len(normalizedString)-1] } + normalizedString = strings.ReplaceAll(normalizedString, "..", "_") + if len(normalizedString) == 0 { return "", fmt.Errorf("error normalizing the string: %s", s) } @@ -198,7 +199,7 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { var delta float64 = 0 // Check if LastValue exists - if lastvalue, ok := counts[metricID+tagb.String()]; ok { + if lastvalue, ok := d.State[metricID+tagb.String()]; ok { // Convert Strings to Floats floatLastValue, err := strconv.ParseFloat(lastvalue, 32) if err != nil { @@ -213,7 +214,7 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { fmt.Fprintf(&buf, "%s%s count,delta=%f\n", metricID, tagb.String(), delta) } } - counts[metricID+tagb.String()] = value + d.State[metricID+tagb.String()] = value default: fmt.Fprintf(&buf, "%s%s %v\n", metricID, tagb.String(), value) @@ -221,11 +222,11 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { } } } - sent++ + d.SendCounter++ // in typical interval of 10s, we will clean the counter state once in 24h which is 8640 iterations - if sent%8640 == 0 { - counts = make(map[string]string) + if d.SendCounter%8640 == 0 { + d.State = make(map[string]string) } return d.send(buf.Bytes()) } @@ -269,7 +270,7 @@ func (d *Dynatrace) send(msg []byte) error { } func (d *Dynatrace) Init() error { - counts = make(map[string]string) + d.State = make(map[string]string) if len(d.URL) == 0 { d.Log.Infof("Dynatrace URL is empty, defaulting to OneAgent metrics interface") d.URL = oneAgentMetricsUrl @@ -297,7 +298,8 @@ func (d *Dynatrace) Init() error { func init() { outputs.Add("dynatrace", func() telegraf.Output { return &Dynatrace{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: internal.Duration{Duration: time.Second * 5}, + SendCounter: 0, } }) } From 2f2e3d750c022761c25bc44c442bce0909c0c276 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 30 Oct 2020 10:06:47 -0400 Subject: [PATCH 039/761] fix changelog --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bb13db8412f22..23c7d2d063743 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,3 @@ -## v1.16.0 [unreleased] ## v1.16.1 [2020-10-28] #### Release Notes From 9be445c9580d8f8c0bd293800350d445837d8814 Mon Sep 17 00:00:00 2001 From: Konstantin Kulikov Date: Mon, 2 Nov 2020 18:17:54 +0300 Subject: [PATCH 040/761] fix plugins/input/ras test (#8350) On some systems /tmp/test.db happens to exist which makes this test fail. --- plugins/inputs/ras/ras_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/ras/ras_test.go b/plugins/inputs/ras/ras_test.go index b8b70d55d66d7..a90258bb4423b 100644 --- a/plugins/inputs/ras/ras_test.go +++ b/plugins/inputs/ras/ras_test.go @@ -114,7 +114,7 @@ func TestMultipleSockets(t *testing.T) { func TestMissingDatabase(t *testing.T) { var acc testutil.Accumulator ras := newRas() - ras.DBPath = "/tmp/test.db" + ras.DBPath = "/nonexistent/ras.db" err := ras.Start(&acc) assert.Error(t, err) } From 73b2f988c4475ae130f1d9436954c2abd3358545 Mon Sep 17 00:00:00 2001 From: reimda Date: Mon, 2 Nov 2020 09:07:37 -0700 Subject: [PATCH 041/761] Update jwt-go module to address CVE-2020-26160 (#8337) --- go.mod | 2 +- go.sum | 2 ++ plugins/inputs/dcos/client.go | 4 ++-- plugins/inputs/dcos/client_test.go | 2 +- plugins/inputs/dcos/dcos.go | 2 +- scripts/check-deps.sh | 6 ++++++ 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index ecc7f1d9f0bd8..81464d821d18e 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 - github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.0 github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible // indirect github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible diff --git a/go.sum b/go.sum index 37d106f3f4e02..866e6d15d95a1 100644 --- a/go.sum +++ b/go.sum @@ -162,6 +162,8 @@ github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible h1:357nGVUC8gSpeSc2Axup8HfrfTLLUfWfCsCUhiQSKIg= diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index c7561ee359d5a..8f171638a5844 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -10,7 +10,7 @@ import ( "net/url" "time" - jwt "github.com/dgrijalva/jwt-go" + jwt "github.com/dgrijalva/jwt-go/v4" ) const ( @@ -330,7 +330,7 @@ func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) { UID: sa.AccountID, StandardClaims: jwt.StandardClaims{ // How long we have to login with this token - ExpiresAt: time.Now().Add(5 * time.Minute).Unix(), + ExpiresAt: jwt.At(time.Now().Add(5 * time.Minute)), }, }) return token.SignedString(sa.PrivateKey) diff --git a/plugins/inputs/dcos/client_test.go b/plugins/inputs/dcos/client_test.go index 7d154a43e09f1..0b7772dccb994 100644 --- a/plugins/inputs/dcos/client_test.go +++ b/plugins/inputs/dcos/client_test.go @@ -8,7 +8,7 @@ import ( "net/url" "testing" - jwt "github.com/dgrijalva/jwt-go" + jwt "github.com/dgrijalva/jwt-go/v4" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index d74c792494378..1cdd40f1112fc 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -9,7 +9,7 @@ import ( "sync" "time" - jwt "github.com/dgrijalva/jwt-go" + jwt "github.com/dgrijalva/jwt-go/v4" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" diff --git a/scripts/check-deps.sh b/scripts/check-deps.sh index b76d47d579004..c52c553f5d1ba 100755 --- a/scripts/check-deps.sh +++ b/scripts/check-deps.sh @@ -51,6 +51,12 @@ for dep in $(LC_ALL=C sort -u "${tmpdir}/golist"); do echo "${dep}" >> "${tmpdir}/HEAD" done +# If there are two versions of a library that have the same base (like +# github.com/foo/bar github.com/foo/bar/v3) there will be a duplicate +# in the list. Remove duplicates again. +mv "${tmpdir}/HEAD" "${tmpdir}/HEAD-dup" +uniq "${tmpdir}/HEAD-dup" > "${tmpdir}/HEAD" + grep '^-' docs/LICENSE_OF_DEPENDENCIES.md | grep -v github.com/DataDog/datadog-agent | cut -f 2 -d' ' > "${tmpdir}/LICENSE_OF_DEPENDENCIES.md" diff -U0 "${tmpdir}/LICENSE_OF_DEPENDENCIES.md" "${tmpdir}/HEAD" || { From 68a4f18e3d52dbb657c3b042d89e885d04c9200c Mon Sep 17 00:00:00 2001 From: Roman Dodin Date: Mon, 2 Nov 2020 17:11:28 +0100 Subject: [PATCH 042/761] added support for bytes encoding (#7938) --- plugins/inputs/gnmi/README.md | 2 +- plugins/inputs/gnmi/gnmi.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/gnmi/README.md b/plugins/inputs/gnmi/README.md index 7387e566dd21b..aa940f76d4e14 100644 --- a/plugins/inputs/gnmi/README.md +++ b/plugins/inputs/gnmi/README.md @@ -17,7 +17,7 @@ It has been optimized to support gNMI telemetry as produced by Cisco IOS XR (64- username = "cisco" password = "cisco" - ## gNMI encoding requested (one of: "proto", "json", "json_ietf") + ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") # encoding = "proto" ## redial in case of failures after diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 3c5826ba40033..09332cc29f532 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -179,7 +179,7 @@ func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { return nil, err } - if c.Encoding != "proto" && c.Encoding != "json" && c.Encoding != "json_ietf" { + if c.Encoding != "proto" && c.Encoding != "json" && c.Encoding != "json_ietf" && c.Encoding != "bytes" { return nil, fmt.Errorf("unsupported encoding %s", c.Encoding) } @@ -486,7 +486,7 @@ const sampleConfig = ` username = "cisco" password = "cisco" - ## gNMI encoding requested (one of: "proto", "json", "json_ietf") + ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") # encoding = "proto" ## redial in case of failures after From 38796f035bb1fc4b106e1032eb4fc459f1386012 Mon Sep 17 00:00:00 2001 From: peter-volkov Date: Mon, 2 Nov 2020 19:11:47 +0300 Subject: [PATCH 043/761] #8295 Initial Yandex.Cloud monitoring (#8296) --- README.md | 1 + plugins/outputs/all/all.go | 1 + .../outputs/yandex_cloud_monitoring/README.md | 26 ++ .../yandex_cloud_monitoring.go | 259 ++++++++++++++++++ .../yandex_cloud_monitoring_test.go | 96 +++++++ 5 files changed, 383 insertions(+) create mode 100644 plugins/outputs/yandex_cloud_monitoring/README.md create mode 100644 plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go create mode 100644 plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go diff --git a/README.md b/README.md index acaf9c1818cba..ca969132d5eb2 100644 --- a/README.md +++ b/README.md @@ -447,3 +447,4 @@ For documentation on the latest development code see the [documentation index][d * [warp10](./plugins/outputs/warp10) * [wavefront](./plugins/outputs/wavefront) * [sumologic](./plugins/outputs/sumologic) +* [yandex_cloud_monitoring](./plugins/outputs/yandex_cloud_monitoring) diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 9d89976dd6cca..a5f8438670093 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -41,4 +41,5 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/timestream" _ "github.com/influxdata/telegraf/plugins/outputs/warp10" _ "github.com/influxdata/telegraf/plugins/outputs/wavefront" + _ "github.com/influxdata/telegraf/plugins/outputs/yandex_cloud_monitoring" ) diff --git a/plugins/outputs/yandex_cloud_monitoring/README.md b/plugins/outputs/yandex_cloud_monitoring/README.md new file mode 100644 index 0000000000000..3bace22b4adb2 --- /dev/null +++ b/plugins/outputs/yandex_cloud_monitoring/README.md @@ -0,0 +1,26 @@ +# Yandex Cloud Monitoring + +This plugin will send custom metrics to Yandex Cloud Monitoring. +https://cloud.yandex.com/services/monitoring + +### Configuration: + +```toml +[[outputs.yandex_cloud_monitoring]] + ## Timeout for HTTP writes. + # timeout = "20s" + + ## Yandex.Cloud monitoring API endpoint. Normally should not be changed + # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" + + ## All user metrics should be sent with "custom" service specified. Normally should not be changed + # service = "custom" +``` + +### Authentication + +This plugin currently support only YC.Compute metadata based authentication. + +When plugin is working inside a YC.Compute instance it will take IAM token and Folder ID from instance metadata. + +Other authentication methods will be added later. diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go new file mode 100644 index 0000000000000..36fd4ab0bef9f --- /dev/null +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go @@ -0,0 +1,259 @@ +package yandex_cloud_monitoring + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/selfstat" +) + +// YandexCloudMonitoring allows publishing of metrics to the Yandex Cloud Monitoring custom metrics +// service +type YandexCloudMonitoring struct { + Timeout internal.Duration `toml:"timeout"` + EndpointUrl string `toml:"endpoint_url"` + Service string `toml:"service"` + + Log telegraf.Logger + + MetadataTokenURL string + MetadataFolderURL string + FolderID string + IAMToken string + IamTokenExpirationTime time.Time + + client *http.Client + + timeFunc func() time.Time + + MetricOutsideWindow selfstat.Stat +} + +type yandexCloudMonitoringMessage struct { + TS string `json:"ts,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Metrics []yandexCloudMonitoringMetric `json:"metrics"` +} + +type yandexCloudMonitoringMetric struct { + Name string `json:"name"` + Labels map[string]string `json:"labels"` + MetricType string `json:"type,omitempty"` // DGAUGE|IGAUGE|COUNTER|RATE. Default: DGAUGE + TS string `json:"ts,omitempty"` + Value float64 `json:"value"` +} + +type MetadataIamToken struct { + AccessToken string `json:"access_token"` + ExpiresIn int64 `json:"expires_in"` + TokenType string `json:"token_type"` +} + +const ( + defaultRequestTimeout = time.Second * 20 + defaultEndpointUrl = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" + defaultMetadataTokenUrl = "http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token" + defaultMetadataFolderUrl = "http://169.254.169.254/computeMetadata/v1/instance/attributes/folder-id" +) + +var sampleConfig = ` + ## Timeout for HTTP writes. + # timeout = "20s" + + ## Yandex.Cloud monitoring API endpoint. Normally should not be changed + # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" + + ## All user metrics should be sent with "custom" service specified. Normally should not be changed + # service = "custom" +` + +// Description provides a description of the plugin +func (a *YandexCloudMonitoring) Description() string { + return "Send aggregated metrics to Yandex.Cloud Monitoring" +} + +// SampleConfig provides a sample configuration for the plugin +func (a *YandexCloudMonitoring) SampleConfig() string { + return sampleConfig +} + +// Connect initializes the plugin and validates connectivity +func (a *YandexCloudMonitoring) Connect() error { + if a.Timeout.Duration <= 0 { + a.Timeout.Duration = defaultRequestTimeout + } + if a.EndpointUrl == "" { + a.EndpointUrl = defaultEndpointUrl + } + if a.Service == "" { + a.Service = "custom" + } + if a.MetadataTokenURL == "" { + a.MetadataTokenURL = defaultMetadataTokenUrl + } + if a.MetadataFolderURL == "" { + a.MetadataFolderURL = defaultMetadataFolderUrl + } + + a.client = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, + Timeout: a.Timeout.Duration, + } + + var err error + a.FolderID, err = a.getFolderIDFromMetadata() + if err != nil { + return err + } + + a.Log.Infof("Writing to Yandex.Cloud Monitoring URL: %s", a.EndpointUrl) + + tags := map[string]string{} + a.MetricOutsideWindow = selfstat.Register("yandex_cloud_monitoring", "metric_outside_window", tags) + + return nil +} + +// Close shuts down an any active connections +func (a *YandexCloudMonitoring) Close() error { + a.client = nil + return nil +} + +// Write writes metrics to the remote endpoint +func (a *YandexCloudMonitoring) Write(metrics []telegraf.Metric) error { + var yandexCloudMonitoringMetrics []yandexCloudMonitoringMetric + for _, m := range metrics { + for _, field := range m.FieldList() { + yandexCloudMonitoringMetrics = append( + yandexCloudMonitoringMetrics, + yandexCloudMonitoringMetric{ + Name: field.Key, + Labels: m.Tags(), + TS: fmt.Sprint(m.Time().Format(time.RFC3339)), + Value: field.Value.(float64), + }, + ) + } + } + + var body []byte + jsonBytes, err := json.Marshal( + yandexCloudMonitoringMessage{ + Metrics: yandexCloudMonitoringMetrics, + }, + ) + + if err != nil { + return err + } + body = append(body, jsonBytes...) + body = append(jsonBytes, '\n') + return a.send(body) +} + +func getResponseFromMetadata(c *http.Client, metadataUrl string) ([]byte, error) { + req, err := http.NewRequest("GET", metadataUrl, nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + req.Header.Set("Metadata-Flavor", "Google") + resp, err := c.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if resp.StatusCode >= 300 || resp.StatusCode < 200 { + return nil, fmt.Errorf("unable to fetch instance metadata: [%s] %d", + metadataUrl, resp.StatusCode) + } + return body, nil +} + +func (a *YandexCloudMonitoring) getFolderIDFromMetadata() (string, error) { + a.Log.Infof("getting folder ID in %s", a.MetadataFolderURL) + body, err := getResponseFromMetadata(a.client, a.MetadataFolderURL) + if err != nil { + return "", err + } + folderID := string(body) + if folderID == "" { + return "", fmt.Errorf("unable to fetch folder id from URL %s: %v", a.MetadataFolderURL, err) + } + return folderID, nil +} + +func (a *YandexCloudMonitoring) getIAMTokenFromMetadata() (string, int, error) { + a.Log.Debugf("getting new IAM token in %s", a.MetadataTokenURL) + body, err := getResponseFromMetadata(a.client, a.MetadataTokenURL) + if err != nil { + return "", 0, err + } + var metadata MetadataIamToken + if err := json.Unmarshal(body, &metadata); err != nil { + return "", 0, err + } + if metadata.AccessToken == "" || metadata.ExpiresIn == 0 { + return "", 0, fmt.Errorf("unable to fetch authentication credentials %s: %v", a.MetadataTokenURL, err) + } + return metadata.AccessToken, int(metadata.ExpiresIn), nil +} + +func (a *YandexCloudMonitoring) send(body []byte) error { + req, err := http.NewRequest("POST", a.EndpointUrl, bytes.NewBuffer(body)) + if err != nil { + return err + } + q := req.URL.Query() + q.Add("folderId", a.FolderID) + q.Add("service", a.Service) + req.URL.RawQuery = q.Encode() + + req.Header.Set("Content-Type", "application/json") + isTokenExpired := !a.IamTokenExpirationTime.After(time.Now()) + if a.IAMToken == "" || isTokenExpired { + token, expiresIn, err := a.getIAMTokenFromMetadata() + if err != nil { + return err + } + a.IamTokenExpirationTime = time.Now().Add(time.Duration(expiresIn) * time.Second) + a.IAMToken = token + } + req.Header.Set("Authorization", "Bearer "+a.IAMToken) + + a.Log.Debugf("sending metrics to %s", req.URL.String()) + resp, err := a.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = ioutil.ReadAll(resp.Body) + if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 { + return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status) + } + + return nil +} + +func init() { + outputs.Add("yandex_cloud_monitoring", func() telegraf.Output { + return &YandexCloudMonitoring{ + timeFunc: time.Now, + } + }) +} diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go new file mode 100644 index 0000000000000..edd2960bf0cff --- /dev/null +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go @@ -0,0 +1,96 @@ +package yandex_cloud_monitoring + +import ( + "encoding/json" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" +) + +func TestWrite(t *testing.T) { + readBody := func(r *http.Request) (yandexCloudMonitoringMessage, error) { + decoder := json.NewDecoder(r.Body) + var message yandexCloudMonitoringMessage + err := decoder.Decode(&message) + require.NoError(t, err) + return message, nil + } + + testMetadataHttpServer := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasSuffix(r.URL.Path, "/token") { + token := MetadataIamToken{ + AccessToken: "token1", + ExpiresIn: 123, + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + err := json.NewEncoder(w).Encode(token) + require.NoError(t, err) + } else if strings.HasSuffix(r.URL.Path, "/folder") { + _, err := io.WriteString(w, "folder1") + require.NoError(t, err) + } + w.WriteHeader(http.StatusOK) + }), + ) + defer testMetadataHttpServer.Close() + metadataTokenUrl := "http://" + testMetadataHttpServer.Listener.Addr().String() + "/token" + metadataFolderUrl := "http://" + testMetadataHttpServer.Listener.Addr().String() + "/folder" + + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + url := "http://" + ts.Listener.Addr().String() + "/metrics" + + tests := []struct { + name string + plugin *YandexCloudMonitoring + metrics []telegraf.Metric + handler func(t *testing.T, w http.ResponseWriter, r *http.Request) + }{ + { + name: "metric is converted to json value", + plugin: &YandexCloudMonitoring{}, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cluster", + map[string]string{}, + map[string]interface{}{ + "cpu": 42.0, + }, + time.Unix(0, 0), + ), + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + message, err := readBody(r) + require.NoError(t, err) + require.Len(t, message.Metrics, 1) + require.Equal(t, "cpu", message.Metrics[0].Name) + require.Equal(t, 42.0, message.Metrics[0].Value) + w.WriteHeader(http.StatusOK) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tt.handler(t, w, r) + }) + tt.plugin.Log = testutil.Logger{} + tt.plugin.EndpointUrl = url + tt.plugin.MetadataTokenURL = metadataTokenUrl + tt.plugin.MetadataFolderURL = metadataFolderUrl + err := tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write(tt.metrics) + + require.NoError(t, err) + }) + } +} From 748af7f5d17efdd21e8b9fa42e5d4beb3055f03b Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Mon, 2 Nov 2020 23:12:48 -0500 Subject: [PATCH 044/761] [output.wavefront] Introduced "immediate_flush" flag (#8165) --- plugins/outputs/wavefront/README.md | 6 ++++++ plugins/outputs/wavefront/wavefront.go | 20 ++++++++++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/wavefront/README.md b/plugins/outputs/wavefront/README.md index 2daca328cd577..8439295bbe029 100644 --- a/plugins/outputs/wavefront/README.md +++ b/plugins/outputs/wavefront/README.md @@ -49,6 +49,12 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. #truncate_tags = false + + ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics + ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending + ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in + ## Telegraf. + #immediate_flush = true ``` diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 523549fb127e2..6ba82ce5ce5db 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -25,6 +25,7 @@ type Wavefront struct { UseRegex bool UseStrict bool TruncateTags bool + ImmediateFlush bool SourceOverride []string StringToNumber map[string][]map[string]float64 @@ -101,6 +102,12 @@ var sampleConfig = ` ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. #truncate_tags = false + ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics + ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending + ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in + ## Telegraf. + #immediate_flush = true + ## Define a mapping, namespaced by metric prefix, from string values to numeric values ## deprecated in 1.9; use the enum processor plugin #[[outputs.wavefront.string_to_number.elasticsearch]] @@ -123,12 +130,16 @@ func (w *Wavefront) Connect() error { w.Log.Warn("The string_to_number option is deprecated; please use the enum processor instead") } + flushSeconds := 5 + if w.ImmediateFlush { + flushSeconds = 86400 // Set a very long flush interval if we're flushing directly + } if w.Url != "" { w.Log.Debug("connecting over http/https using Url: %s", w.Url) sender, err := wavefront.NewDirectSender(&wavefront.DirectConfiguration{ Server: w.Url, Token: w.Token, - FlushIntervalSeconds: 5, + FlushIntervalSeconds: flushSeconds, }) if err != nil { return fmt.Errorf("Wavefront: Could not create Wavefront Sender for Url: %s", w.Url) @@ -139,7 +150,7 @@ func (w *Wavefront) Connect() error { sender, err := wavefront.NewProxySender(&wavefront.ProxyConfiguration{ Host: w.Host, MetricsPort: w.Port, - FlushIntervalSeconds: 5, + FlushIntervalSeconds: flushSeconds, }) if err != nil { return fmt.Errorf("Wavefront: Could not create Wavefront Sender for Host: %q and Port: %d", w.Host, w.Port) @@ -166,6 +177,10 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { } } } + if w.ImmediateFlush { + w.Log.Debugf("Flushing batch of %d points", len(metrics)) + return w.sender.Flush() + } return nil } @@ -336,6 +351,7 @@ func init() { ConvertPaths: true, ConvertBool: true, TruncateTags: false, + ImmediateFlush: true, } }) } From 1939e58b68662a884e14e82b468259c1b123e35c Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Tue, 3 Nov 2020 17:04:24 +0100 Subject: [PATCH 045/761] On-prem fix for #8324 (#8356) --- plugins/inputs/sqlserver/sqlserverqueries.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index f3d3aa3ca34c9..3fdbd5d34ab69 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -216,7 +216,7 @@ SELECT ,CAST(SERVERPROPERTY(''EngineEdition'') AS int) AS [engine_edition] ,DATEDIFF(MINUTE,si.[sqlserver_start_time],GETDATE()) AS [uptime] ,SERVERPROPERTY(''ProductVersion'') AS [sql_version] - ,LEFT(@@VERSION,CHARINDEX(' - ',@@VERSION)) AS [sql_version_desc] + ,LEFT(@@VERSION,CHARINDEX('' - '',@@VERSION)) AS [sql_version_desc] ,dbs.[db_online] ,dbs.[db_restoring] ,dbs.[db_recovering] From 89919631c5abb555ed2d70ed9ab6947b478b1f6b Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Tue, 3 Nov 2020 13:09:37 -0500 Subject: [PATCH 046/761] Prydin issue 8169 (#8357) --- plugins/inputs/vsphere/endpoint.go | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 6d77cb69dddca..49c875d93268b 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -37,6 +37,8 @@ const maxSampleConst = 10 // Absolute maximum number of samples regardless of pe const maxMetadataSamples = 100 // Number of resources to sample for metric metadata +const maxRealtimeMetrics = 50000 // Absolute maximum metrics per realtime query + const hwMarkTTL = time.Duration(4 * time.Hour) type queryChunk []types.PerfQuerySpec @@ -322,13 +324,13 @@ func (e *Endpoint) reloadMetricNameMap(ctx context.Context) error { return err } - mn, err := client.CounterInfoByName(ctx) + mn, err := client.CounterInfoByKey(ctx) if err != nil { return err } e.metricNameLookup = make(map[int32]string) - for name, m := range mn { - e.metricNameLookup[m.Key] = name + for key, m := range mn { + e.metricNameLookup[key] = m.Name() } return nil } @@ -889,6 +891,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim } pqs := make(queryChunk, 0, e.Parent.MaxQueryObjects) + numQs := 0 for _, object := range res.objects { timeBuckets := make(map[int64]*types.PerfQuerySpec, 0) @@ -924,9 +927,9 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim // Add this metric to the bucket bucket.MetricId = append(bucket.MetricId, metric) - // Bucket filled to capacity? (Only applies to non real time) + // Bucket filled to capacity? // OR if we're past the absolute maximum limit - if (!res.realTime && len(bucket.MetricId) >= maxMetrics) || len(bucket.MetricId) > 100000 { + if (!res.realTime && len(bucket.MetricId) >= maxMetrics) || len(bucket.MetricId) > maxRealtimeMetrics { e.log.Debugf("Submitting partial query: %d metrics (%d remaining) of type %s for %s. Total objects %d", len(bucket.MetricId), len(res.metrics)-metricIdx, res.name, e.URL.Host, len(res.objects)) @@ -943,16 +946,18 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim // Handle data in time bucket and submit job if we've reached the maximum number of object. for _, bucket := range timeBuckets { pqs = append(pqs, *bucket) - if (!res.realTime && len(pqs) > e.Parent.MaxQueryObjects) || len(pqs) > 100000 { - e.log.Debugf("Submitting final bucket job for %s: %d metrics", res.name, len(bucket.MetricId)) + numQs += len(bucket.MetricId) + if (!res.realTime && numQs > e.Parent.MaxQueryObjects) || numQs > maxRealtimeMetrics { + e.log.Debugf("Submitting final bucket job for %s: %d metrics", res.name, numQs) submitChunkJob(ctx, te, job, pqs) pqs = make(queryChunk, 0, e.Parent.MaxQueryObjects) + numQs = 0 } } } // Submit any jobs left in the queue if len(pqs) > 0 { - e.log.Debugf("Submitting job for %s: %d objects", res.name, len(pqs)) + e.log.Debugf("Submitting job for %s: %d objects, %d metrics", res.name, len(pqs), numQs) submitChunkJob(ctx, te, job, pqs) } From f8daed059757b42bda7b173328327f8925d08cc0 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 3 Nov 2020 15:34:23 -0500 Subject: [PATCH 047/761] misc tests --- plugins/inputs/mysql/mysql_test.go | 2 ++ plugins/inputs/tcp_listener/tcp_listener_test.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index be9c338bf7b0e..4d8d5ff6e2a38 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -190,6 +190,8 @@ func TestParseValue(t *testing.T) { {sql.RawBytes("YES"), 1, true}, {sql.RawBytes("No"), 0, true}, {sql.RawBytes("Yes"), 1, true}, + {sql.RawBytes("-794"), int64(-794), true}, + {sql.RawBytes("18446744073709552333"), float64(18446744073709552000), true}, {sql.RawBytes(""), nil, false}, } for _, cases := range testCases { diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index bb83f0465bd77..61fa890cd9b82 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -274,7 +274,7 @@ func TestRunParserInvalidMsg(t *testing.T) { scnr := bufio.NewScanner(buf) for scnr.Scan() { - if strings.Contains(scnr.Text(), fmt.Sprintf(malformedwarn, 1)) { + if strings.Contains(scnr.Text(), "tcp_listener has received 1 malformed packets thus far.") { break } } From 998ae54d36085c7f3f98c880e1a6194d96b3f752 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 3 Nov 2020 16:40:57 -0500 Subject: [PATCH 048/761] shorten config (#7583) --- config/config.go | 1374 +++++++++++------------------------------ config/config_test.go | 9 +- 2 files changed, 360 insertions(+), 1023 deletions(-) diff --git a/config/config.go b/config/config.go index 4fd65139e2ab9..1071ffb45a87b 100644 --- a/config/config.go +++ b/config/config.go @@ -2,15 +2,14 @@ package config import ( "bytes" - "errors" "fmt" "io/ioutil" "log" - "math" "net/http" "net/url" "os" "path/filepath" + "reflect" "regexp" "runtime" "sort" @@ -56,6 +55,10 @@ var ( // will be logging to, as well as all the plugins that the user has // specified type Config struct { + toml *toml.Config + errs []error // config load errors. + UnusedFields map[string]bool + Tags map[string]string InputFilters []string OutputFilters []string @@ -69,8 +72,13 @@ type Config struct { AggProcessors models.RunningProcessors } +// NewConfig creates a new struct to hold the Telegraf config. +// For historical reasons, It holds the actual instances of the running plugins +// once the configuration is parsed. func NewConfig() *Config { c := &Config{ + UnusedFields: map[string]bool{}, + // Agent defaults: Agent: &AgentConfig{ Interval: internal.Duration{Duration: 10 * time.Second}, @@ -88,9 +96,18 @@ func NewConfig() *Config { InputFilters: make([]string, 0), OutputFilters: make([]string, 0), } + + tomlCfg := &toml.Config{ + NormFieldName: toml.DefaultConfig.NormFieldName, + FieldToKey: toml.DefaultConfig.FieldToKey, + MissingField: c.missingTomlField, + } + c.toml = tomlCfg + return c } +// AgentConfig defines configuration that will be used by the Telegraf agent type AgentConfig struct { // Interval at which to gather information Interval internal.Duration @@ -623,7 +640,7 @@ func PrintInputConfig(name string) error { if creator, ok := inputs.Inputs[name]; ok { printConfig(name, creator(), "inputs", false) } else { - return errors.New(fmt.Sprintf("Input %s not found", name)) + return fmt.Errorf("Input %s not found", name) } return nil } @@ -633,11 +650,12 @@ func PrintOutputConfig(name string) error { if creator, ok := outputs.Outputs[name]; ok { printConfig(name, creator(), "outputs", false) } else { - return errors.New(fmt.Sprintf("Output %s not found", name)) + return fmt.Errorf("Output %s not found", name) } return nil } +// LoadDirectory loads all toml config files found in the specified path, recursively. func (c *Config) LoadDirectory(path string) error { walkfn := func(thispath string, info os.FileInfo, _ error) error { if info == nil { @@ -727,8 +745,8 @@ func (c *Config) LoadConfigData(data []byte) error { if !ok { return fmt.Errorf("invalid configuration, bad table name %q", tableName) } - if err = toml.UnmarshalTable(subTable, c.Tags); err != nil { - return fmt.Errorf("error parsing table name %q: %w", tableName, err) + if err = c.toml.UnmarshalTable(subTable, c.Tags); err != nil { + return fmt.Errorf("error parsing table name %q: %s", tableName, err) } } } @@ -739,8 +757,8 @@ func (c *Config) LoadConfigData(data []byte) error { if !ok { return fmt.Errorf("invalid configuration, error parsing agent table") } - if err = toml.UnmarshalTable(subTable, c.Agent); err != nil { - return fmt.Errorf("error parsing agent table: %w", err) + if err = c.toml.UnmarshalTable(subTable, c.Agent); err != nil { + return fmt.Errorf("error parsing [agent]: %w", err) } } @@ -757,6 +775,10 @@ func (c *Config) LoadConfigData(data []byte) error { c.Tags["host"] = c.Agent.Hostname } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("line %d: configuration specified the fields %q, but they weren't used", tbl.Line, keys(c.UnusedFields)) + } + // Parse all the rest of the plugins: for name, val := range tbl.Fields { subTable, ok := val.(*ast.Table) @@ -772,18 +794,21 @@ func (c *Config) LoadConfigData(data []byte) error { // legacy [outputs.influxdb] support case *ast.Table: if err = c.addOutput(pluginName, pluginSubTable); err != nil { - return fmt.Errorf("Error parsing %s, %s", pluginName, err) + return fmt.Errorf("error parsing %s, %w", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addOutput(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s array, %s", pluginName, err) + return fmt.Errorf("error parsing %s array, %w", pluginName, err) } } default: - return fmt.Errorf("Unsupported config format: %s", + return fmt.Errorf("unsupported config format: %s", pluginName) } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) + } } case "inputs", "plugins": for pluginName, pluginVal := range subTable.Fields { @@ -791,18 +816,21 @@ func (c *Config) LoadConfigData(data []byte) error { // legacy [inputs.cpu] support case *ast.Table: if err = c.addInput(pluginName, pluginSubTable); err != nil { - return fmt.Errorf("Error parsing %s, %s", pluginName, err) + return fmt.Errorf("error parsing %s, %w", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addInput(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", pluginName, err) + return fmt.Errorf("error parsing %s, %w", pluginName, err) } } default: return fmt.Errorf("Unsupported config format: %s", pluginName) } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) + } } case "processors": for pluginName, pluginVal := range subTable.Fields { @@ -810,13 +838,16 @@ func (c *Config) LoadConfigData(data []byte) error { case []*ast.Table: for _, t := range pluginSubTable { if err = c.addProcessor(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", pluginName, err) + return fmt.Errorf("error parsing %s, %w", pluginName, err) } } default: return fmt.Errorf("Unsupported config format: %s", pluginName) } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) + } } case "aggregators": for pluginName, pluginVal := range subTable.Fields { @@ -831,6 +862,9 @@ func (c *Config) LoadConfigData(data []byte) error { return fmt.Errorf("Unsupported config format: %s", pluginName) } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) + } } // Assume it's an input input for legacy config file support if no other // identifiers are present @@ -912,19 +946,19 @@ func parseConfig(contents []byte) (*ast.Table, error) { continue } - var env_var []byte + var envVar []byte if parameter[1] != nil { - env_var = parameter[1] + envVar = parameter[1] } else if parameter[2] != nil { - env_var = parameter[2] + envVar = parameter[2] } else { continue } - env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$")) + envVal, ok := os.LookupEnv(strings.TrimPrefix(string(envVar), "$")) if ok { - env_val = escapeEnv(env_val) - contents = bytes.Replace(contents, parameter[0], []byte(env_val), 1) + envVal = escapeEnv(envVal) + contents = bytes.Replace(contents, parameter[0], []byte(envVal), 1) } } @@ -938,12 +972,12 @@ func (c *Config) addAggregator(name string, table *ast.Table) error { } aggregator := creator() - conf, err := buildAggregator(name, table) + conf, err := c.buildAggregator(name, table) if err != nil { return err } - if err := toml.UnmarshalTable(table, aggregator); err != nil { + if err := c.toml.UnmarshalTable(table, aggregator); err != nil { return err } @@ -957,7 +991,7 @@ func (c *Config) addProcessor(name string, table *ast.Table) error { return fmt.Errorf("Undefined but requested processor: %s", name) } - processorConfig, err := buildProcessor(name, table) + processorConfig, err := c.buildProcessor(name, table) if err != nil { return err } @@ -987,11 +1021,11 @@ func (c *Config) newRunningProcessor( processor := creator() if p, ok := processor.(unwrappable); ok { - if err := toml.UnmarshalTable(table, p.Unwrap()); err != nil { + if err := c.toml.UnmarshalTable(table, p.Unwrap()); err != nil { return nil, err } } else { - if err := toml.UnmarshalTable(table, processor); err != nil { + if err := c.toml.UnmarshalTable(table, processor); err != nil { return nil, err } } @@ -1014,19 +1048,19 @@ func (c *Config) addOutput(name string, table *ast.Table) error { // arbitrary types of output, so build the serializer and set it. switch t := output.(type) { case serializers.SerializerOutput: - serializer, err := buildSerializer(name, table) + serializer, err := c.buildSerializer(name, table) if err != nil { return err } t.SetSerializer(serializer) } - outputConfig, err := buildOutput(name, table) + outputConfig, err := c.buildOutput(name, table) if err != nil { return err } - if err := toml.UnmarshalTable(table, output); err != nil { + if err := c.toml.UnmarshalTable(table, output); err != nil { return err } @@ -1054,7 +1088,7 @@ func (c *Config) addInput(name string, table *ast.Table) error { // If the input has a SetParser function, then this means it can accept // arbitrary types of input, so build the parser and set it. if t, ok := input.(parsers.ParserInput); ok { - parser, err := buildParser(name, table) + parser, err := c.buildParser(name, table) if err != nil { return err } @@ -1062,7 +1096,7 @@ func (c *Config) addInput(name string, table *ast.Table) error { } if t, ok := input.(parsers.ParserFuncInput); ok { - config, err := getParserConfig(name, table) + config, err := c.getParserConfig(name, table) if err != nil { return err } @@ -1071,12 +1105,12 @@ func (c *Config) addInput(name string, table *ast.Table) error { }) } - pluginConfig, err := buildInput(name, table) + pluginConfig, err := c.buildInput(name, table) if err != nil { return err } - if err := toml.UnmarshalTable(table, input); err != nil { + if err := c.toml.UnmarshalTable(table, input); err != nil { return err } @@ -1089,7 +1123,7 @@ func (c *Config) addInput(name string, table *ast.Table) error { // buildAggregator parses Aggregator specific items from the ast.Table, // builds the filter and returns a // models.AggregatorConfig to be inserted into models.RunningAggregator -func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) { +func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) { conf := &models.AggregatorConfig{ Name: name, Delay: time.Millisecond * 100, @@ -1097,79 +1131,30 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err Grace: time.Second * 0, } - if err := getConfigDuration(tbl, "period", &conf.Period); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "delay", &conf.Delay); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "grace", &conf.Grace); err != nil { - return nil, err - } - - if node, ok := tbl.Fields["drop_original"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - conf.DropOriginal, err = strconv.ParseBool(b.Value) - if err != nil { - return nil, fmt.Errorf("error parsing boolean value for %s: %s", name, err) - } - } - } - } - - if node, ok := tbl.Fields["name_prefix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.MeasurementPrefix = str.Value - } - } - } - - if node, ok := tbl.Fields["name_suffix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.MeasurementSuffix = str.Value - } - } - } - - if node, ok := tbl.Fields["name_override"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.NameOverride = str.Value - } - } - } - - if node, ok := tbl.Fields["alias"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.Alias = str.Value - } - } - } + c.getFieldDuration(tbl, "period", &conf.Period) + c.getFieldDuration(tbl, "delay", &conf.Delay) + c.getFieldDuration(tbl, "grace", &conf.Grace) + c.getFieldBool(tbl, "drop_original", &conf.DropOriginal) + c.getFieldString(tbl, "name_prefix", &conf.MeasurementPrefix) + c.getFieldString(tbl, "name_suffix", &conf.MeasurementSuffix) + c.getFieldString(tbl, "name_override", &conf.NameOverride) + c.getFieldString(tbl, "alias", &conf.Alias) conf.Tags = make(map[string]string) if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { - if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil { + if err := c.toml.UnmarshalTable(subtbl, conf.Tags); err != nil { return nil, fmt.Errorf("could not parse tags for input %s", name) } } } - delete(tbl.Fields, "drop_original") - delete(tbl.Fields, "name_prefix") - delete(tbl.Fields, "name_suffix") - delete(tbl.Fields, "name_override") - delete(tbl.Fields, "alias") - delete(tbl.Fields, "tags") + if c.hasErrs() { + return nil, c.firstErr() + } + var err error - conf.Filter, err = buildFilter(tbl) + conf.Filter, err = c.buildFilter(tbl) if err != nil { return conf, err } @@ -1179,33 +1164,18 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err // buildProcessor parses Processor specific items from the ast.Table, // builds the filter and returns a // models.ProcessorConfig to be inserted into models.RunningProcessor -func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) { +func (c *Config) buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) { conf := &models.ProcessorConfig{Name: name} - if node, ok := tbl.Fields["order"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Integer); ok { - var err error - conf.Order, err = strconv.ParseInt(b.Value, 10, 64) - if err != nil { - return nil, fmt.Errorf("error parsing int value for %s: %s", name, err) - } - } - } - } + c.getFieldInt64(tbl, "order", &conf.Order) + c.getFieldString(tbl, "alias", &conf.Alias) - if node, ok := tbl.Fields["alias"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.Alias = str.Value - } - } + if c.hasErrs() { + return nil, c.firstErr() } - delete(tbl.Fields, "alias") - delete(tbl.Fields, "order") var err error - conf.Filter, err = buildFilter(tbl) + conf.Filter, err = c.buildFilter(tbl) if err != nil { return conf, err } @@ -1216,205 +1186,63 @@ func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error // (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to // be inserted into the models.OutputConfig/models.InputConfig // to be used for glob filtering on tags and measurements -func buildFilter(tbl *ast.Table) (models.Filter, error) { +func (c *Config) buildFilter(tbl *ast.Table) (models.Filter, error) { f := models.Filter{} - if node, ok := tbl.Fields["namepass"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.NamePass = append(f.NamePass, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["namedrop"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.NameDrop = append(f.NameDrop, str.Value) - } - } - } - } - } + c.getFieldStringSlice(tbl, "namepass", &f.NamePass) + c.getFieldStringSlice(tbl, "namedrop", &f.NameDrop) - fields := []string{"pass", "fieldpass"} - for _, field := range fields { - if node, ok := tbl.Fields[field]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.FieldPass = append(f.FieldPass, str.Value) - } - } - } - } - } - } + c.getFieldStringSlice(tbl, "pass", &f.FieldPass) + c.getFieldStringSlice(tbl, "fieldpass", &f.FieldPass) - fields = []string{"drop", "fielddrop"} - for _, field := range fields { - if node, ok := tbl.Fields[field]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.FieldDrop = append(f.FieldDrop, str.Value) - } - } - } - } - } - } + c.getFieldStringSlice(tbl, "drop", &f.FieldDrop) + c.getFieldStringSlice(tbl, "fielddrop", &f.FieldDrop) - if node, ok := tbl.Fields["tagpass"]; ok { - if subtbl, ok := node.(*ast.Table); ok { - for name, val := range subtbl.Fields { - if kv, ok := val.(*ast.KeyValue); ok { - tagfilter := &models.TagFilter{Name: name} - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - tagfilter.Filter = append(tagfilter.Filter, str.Value) - } - } - } - f.TagPass = append(f.TagPass, *tagfilter) - } - } - } - } + c.getFieldTagFilter(tbl, "tagpass", &f.TagPass) + c.getFieldTagFilter(tbl, "tagdrop", &f.TagDrop) - if node, ok := tbl.Fields["tagdrop"]; ok { - if subtbl, ok := node.(*ast.Table); ok { - for name, val := range subtbl.Fields { - if kv, ok := val.(*ast.KeyValue); ok { - tagfilter := &models.TagFilter{Name: name} - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - tagfilter.Filter = append(tagfilter.Filter, str.Value) - } - } - } - f.TagDrop = append(f.TagDrop, *tagfilter) - } - } - } - } + c.getFieldStringSlice(tbl, "tagexclude", &f.TagExclude) + c.getFieldStringSlice(tbl, "taginclude", &f.TagInclude) - if node, ok := tbl.Fields["tagexclude"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.TagExclude = append(f.TagExclude, str.Value) - } - } - } - } + if c.hasErrs() { + return f, c.firstErr() } - if node, ok := tbl.Fields["taginclude"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.TagInclude = append(f.TagInclude, str.Value) - } - } - } - } - } if err := f.Compile(); err != nil { return f, err } - delete(tbl.Fields, "namedrop") - delete(tbl.Fields, "namepass") - delete(tbl.Fields, "fielddrop") - delete(tbl.Fields, "fieldpass") - delete(tbl.Fields, "drop") - delete(tbl.Fields, "pass") - delete(tbl.Fields, "tagdrop") - delete(tbl.Fields, "tagpass") - delete(tbl.Fields, "tagexclude") - delete(tbl.Fields, "taginclude") return f, nil } // buildInput parses input specific items from the ast.Table, // builds the filter and returns a // models.InputConfig to be inserted into models.RunningInput -func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { +func (c *Config) buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { cp := &models.InputConfig{Name: name} - - if err := getConfigDuration(tbl, "interval", &cp.Interval); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "precision", &cp.Precision); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "collection_jitter", &cp.CollectionJitter); err != nil { - return nil, err - } - - if node, ok := tbl.Fields["name_prefix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - cp.MeasurementPrefix = str.Value - } - } - } - - if node, ok := tbl.Fields["name_suffix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - cp.MeasurementSuffix = str.Value - } - } - } - - if node, ok := tbl.Fields["name_override"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - cp.NameOverride = str.Value - } - } - } - - if node, ok := tbl.Fields["alias"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - cp.Alias = str.Value - } - } - } + c.getFieldDuration(tbl, "interval", &cp.Interval) + c.getFieldDuration(tbl, "precision", &cp.Precision) + c.getFieldDuration(tbl, "collection_jitter", &cp.CollectionJitter) + c.getFieldString(tbl, "name_prefix", &cp.MeasurementPrefix) + c.getFieldString(tbl, "name_suffix", &cp.MeasurementSuffix) + c.getFieldString(tbl, "name_override", &cp.NameOverride) + c.getFieldString(tbl, "alias", &cp.Alias) cp.Tags = make(map[string]string) if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { - if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil { - return nil, fmt.Errorf("could not parse tags for input %s\n", name) + if err := c.toml.UnmarshalTable(subtbl, cp.Tags); err != nil { + return nil, fmt.Errorf("could not parse tags for input %s", name) } } } - delete(tbl.Fields, "name_prefix") - delete(tbl.Fields, "name_suffix") - delete(tbl.Fields, "name_override") - delete(tbl.Fields, "alias") - delete(tbl.Fields, "tags") + if c.hasErrs() { + return nil, c.firstErr() + } + var err error - cp.Filter, err = buildFilter(tbl) + cp.Filter, err = c.buildFilter(tbl) if err != nil { return cp, err } @@ -1424,796 +1252,322 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { // buildParser grabs the necessary entries from the ast.Table for creating // a parsers.Parser object, and creates it, which can then be added onto // an Input object. -func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { - config, err := getParserConfig(name, tbl) +func (c *Config) buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { + config, err := c.getParserConfig(name, tbl) if err != nil { return nil, err } return parsers.NewParser(config) } -func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { - c := &parsers.Config{ +func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { + pc := &parsers.Config{ JSONStrict: true, } - if node, ok := tbl.Fields["data_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DataFormat = str.Value - } - } - } + c.getFieldString(tbl, "data_format", &pc.DataFormat) // Legacy support, exec plugin originally parsed JSON by default. - if name == "exec" && c.DataFormat == "" { - c.DataFormat = "json" - } else if c.DataFormat == "" { - c.DataFormat = "influx" + if name == "exec" && pc.DataFormat == "" { + pc.DataFormat = "json" + } else if pc.DataFormat == "" { + pc.DataFormat = "influx" + } + + c.getFieldString(tbl, "separator", &pc.Separator) + + c.getFieldStringSlice(tbl, "templates", &pc.Templates) + c.getFieldStringSlice(tbl, "tag_keys", &pc.TagKeys) + c.getFieldStringSlice(tbl, "json_string_fields", &pc.JSONStringFields) + c.getFieldString(tbl, "json_name_key", &pc.JSONNameKey) + c.getFieldString(tbl, "json_query", &pc.JSONQuery) + c.getFieldString(tbl, "json_time_key", &pc.JSONTimeKey) + c.getFieldString(tbl, "json_time_format", &pc.JSONTimeFormat) + c.getFieldString(tbl, "json_timezone", &pc.JSONTimezone) + c.getFieldBool(tbl, "json_strict", &pc.JSONStrict) + c.getFieldString(tbl, "data_type", &pc.DataType) + c.getFieldString(tbl, "collectd_auth_file", &pc.CollectdAuthFile) + c.getFieldString(tbl, "collectd_security_level", &pc.CollectdSecurityLevel) + c.getFieldString(tbl, "collectd_parse_multivalue", &pc.CollectdSplit) + + c.getFieldStringSlice(tbl, "collectd_typesdb", &pc.CollectdTypesDB) + + c.getFieldString(tbl, "dropwizard_metric_registry_path", &pc.DropwizardMetricRegistryPath) + c.getFieldString(tbl, "dropwizard_time_path", &pc.DropwizardTimePath) + c.getFieldString(tbl, "dropwizard_time_format", &pc.DropwizardTimeFormat) + c.getFieldString(tbl, "dropwizard_tags_path", &pc.DropwizardTagsPath) + c.getFieldStringMap(tbl, "dropwizard_tag_paths", &pc.DropwizardTagPathsMap) + + //for grok data_format + c.getFieldStringSlice(tbl, "grok_named_patterns", &pc.GrokNamedPatterns) + c.getFieldStringSlice(tbl, "grok_patterns", &pc.GrokPatterns) + c.getFieldString(tbl, "grok_custom_patterns", &pc.GrokCustomPatterns) + c.getFieldStringSlice(tbl, "grok_custom_pattern_files", &pc.GrokCustomPatternFiles) + c.getFieldString(tbl, "grok_timezone", &pc.GrokTimezone) + c.getFieldString(tbl, "grok_unique_timestamp", &pc.GrokUniqueTimestamp) + + //for csv parser + c.getFieldStringSlice(tbl, "csv_column_names", &pc.CSVColumnNames) + c.getFieldStringSlice(tbl, "csv_column_types", &pc.CSVColumnTypes) + c.getFieldStringSlice(tbl, "csv_tag_columns", &pc.CSVTagColumns) + c.getFieldString(tbl, "csv_timezone", &pc.CSVTimezone) + c.getFieldString(tbl, "csv_delimiter", &pc.CSVDelimiter) + c.getFieldString(tbl, "csv_comment", &pc.CSVComment) + c.getFieldString(tbl, "csv_measurement_column", &pc.CSVMeasurementColumn) + c.getFieldString(tbl, "csv_timestamp_column", &pc.CSVTimestampColumn) + c.getFieldString(tbl, "csv_timestamp_format", &pc.CSVTimestampFormat) + c.getFieldInt(tbl, "csv_header_row_count", &pc.CSVHeaderRowCount) + c.getFieldInt(tbl, "csv_skip_rows", &pc.CSVSkipRows) + c.getFieldInt(tbl, "csv_skip_columns", &pc.CSVSkipColumns) + c.getFieldBool(tbl, "csv_trim_space", &pc.CSVTrimSpace) + + c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys) + + pc.MetricName = name + + if c.hasErrs() { + return nil, c.firstErr() + } + + return pc, nil +} + +// buildSerializer grabs the necessary entries from the ast.Table for creating +// a serializers.Serializer object, and creates it, which can then be added onto +// an Output object. +func (c *Config) buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) { + sc := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)} + + c.getFieldString(tbl, "data_format", &sc.DataFormat) + + if sc.DataFormat == "" { + sc.DataFormat = "influx" + } + + c.getFieldString(tbl, "prefix", &sc.Prefix) + c.getFieldString(tbl, "template", &sc.Template) + c.getFieldStringSlice(tbl, "templates", &sc.Templates) + c.getFieldString(tbl, "carbon2_format", &sc.Carbon2Format) + c.getFieldInt(tbl, "influx_max_line_bytes", &sc.InfluxMaxLineBytes) + + c.getFieldBool(tbl, "influx_sort_fields", &sc.InfluxSortFields) + c.getFieldBool(tbl, "influx_uint_support", &sc.InfluxUintSupport) + c.getFieldBool(tbl, "graphite_tag_support", &sc.GraphiteTagSupport) + c.getFieldString(tbl, "graphite_separator", &sc.GraphiteSeparator) + + c.getFieldDuration(tbl, "json_timestamp_units", &sc.TimestampUnits) + + c.getFieldBool(tbl, "splunkmetric_hec_routing", &sc.HecRouting) + c.getFieldBool(tbl, "splunkmetric_multimetric", &sc.SplunkmetricMultiMetric) + + c.getFieldStringSlice(tbl, "wavefront_source_override", &sc.WavefrontSourceOverride) + c.getFieldBool(tbl, "wavefront_use_strict", &sc.WavefrontUseStrict) + + c.getFieldBool(tbl, "prometheus_export_timestamp", &sc.PrometheusExportTimestamp) + c.getFieldBool(tbl, "prometheus_sort_metrics", &sc.PrometheusSortMetrics) + c.getFieldBool(tbl, "prometheus_string_as_label", &sc.PrometheusStringAsLabel) + + if c.hasErrs() { + return nil, c.firstErr() + } + + return serializers.NewSerializer(sc) +} + +// buildOutput parses output specific items from the ast.Table, +// builds the filter and returns an +// models.OutputConfig to be inserted into models.RunningInput +// Note: error exists in the return for future calls that might require error +func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { + filter, err := c.buildFilter(tbl) + if err != nil { + return nil, err + } + oc := &models.OutputConfig{ + Name: name, + Filter: filter, + } + + // TODO: support FieldPass/FieldDrop on outputs + + c.getFieldDuration(tbl, "flush_interval", &oc.FlushInterval) + c.getFieldDuration(tbl, "flush_jitter", oc.FlushJitter) + + c.getFieldInt(tbl, "metric_buffer_limit", &oc.MetricBufferLimit) + c.getFieldInt(tbl, "metric_batch_size", &oc.MetricBatchSize) + c.getFieldString(tbl, "alias", &oc.Alias) + c.getFieldString(tbl, "name_override", &oc.NameOverride) + c.getFieldString(tbl, "name_suffix", &oc.NameSuffix) + c.getFieldString(tbl, "name_prefix", &oc.NamePrefix) + + if c.hasErrs() { + return nil, c.firstErr() + } + + return oc, nil +} + +func (c *Config) missingTomlField(typ reflect.Type, key string) error { + switch key { + case "interval", "tagpass", "tagdrop", "namepass", "namedrop", "name_suffix", + "fieldpass", "fielddrop", "pass", "drop", "taginclude", "tagexclude", "data_format": + // ignore fields that are common to all plugins. + default: + c.UnusedFields[key] = true } + return nil +} - if node, ok := tbl.Fields["separator"]; ok { +func (c *Config) getFieldString(tbl *ast.Table, fieldName string, target *string) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { - c.Separator = str.Value + *target = str.Value } } } +} - if node, ok := tbl.Fields["templates"]; ok { +func (c *Config) getFieldDuration(tbl *ast.Table, fieldName string, target interface{}) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.Templates = append(c.Templates, str.Value) - } + if str, ok := kv.Value.(*ast.String); ok { + d, err := time.ParseDuration(str.Value) + if err != nil { + c.addError(tbl, fmt.Errorf("error parsing duration: %w", err)) + return } + targetVal := reflect.ValueOf(target).Elem() + targetVal.Set(reflect.ValueOf(d)) } } } +} - if node, ok := tbl.Fields["tag_keys"]; ok { +func (c *Config) getFieldBool(tbl *ast.Table, fieldName string, target *bool) { + var err error + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.TagKeys = append(c.TagKeys, str.Value) - } + switch t := kv.Value.(type) { + case *ast.Boolean: + *target, err = t.Boolean() + if err != nil { + c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value)) + return } + case *ast.String: + *target, err = strconv.ParseBool(t.Value) + if err != nil { + c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value)) + return + } + default: + c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value.Source())) + return } } } +} - if node, ok := tbl.Fields["json_string_fields"]; ok { +func (c *Config) getFieldInt(tbl *ast.Table, fieldName string, target *int) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.JSONStringFields = append(c.JSONStringFields, str.Value) - } + if iAst, ok := kv.Value.(*ast.Integer); ok { + i, err := iAst.Int() + if err != nil { + c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value)) + return } + *target = int(i) } } } +} - if node, ok := tbl.Fields["json_name_key"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONNameKey = str.Value - } - } - } - - if node, ok := tbl.Fields["json_query"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONQuery = str.Value - } - } - } - - if node, ok := tbl.Fields["json_time_key"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONTimeKey = str.Value - } - } - } - - if node, ok := tbl.Fields["json_time_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONTimeFormat = str.Value - } - } - } - - if node, ok := tbl.Fields["json_timezone"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONTimezone = str.Value - } - } - } - - if node, ok := tbl.Fields["json_strict"]; ok { +func (c *Config) getFieldInt64(tbl *ast.Table, fieldName string, target *int64) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.JSONStrict, err = b.Boolean() + if iAst, ok := kv.Value.(*ast.Integer); ok { + i, err := iAst.Int() if err != nil { - return nil, err + c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value)) + return } + *target = i } } } +} - if node, ok := tbl.Fields["data_type"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DataType = str.Value - } - } - } - - if node, ok := tbl.Fields["collectd_auth_file"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CollectdAuthFile = str.Value - } - } - } - - if node, ok := tbl.Fields["collectd_security_level"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CollectdSecurityLevel = str.Value - } - } - } - - if node, ok := tbl.Fields["collectd_parse_multivalue"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CollectdSplit = str.Value - } - } - } - - if node, ok := tbl.Fields["collectd_typesdb"]; ok { +func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string, target *[]string) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { if ary, ok := kv.Value.(*ast.Array); ok { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { - c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value) + *target = append(*target, str.Value) } } } } } - - if node, ok := tbl.Fields["dropwizard_metric_registry_path"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardMetricRegistryPath = str.Value - } - } - } - if node, ok := tbl.Fields["dropwizard_time_path"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardTimePath = str.Value - } - } - } - if node, ok := tbl.Fields["dropwizard_time_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardTimeFormat = str.Value - } - } - } - if node, ok := tbl.Fields["dropwizard_tags_path"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardTagsPath = str.Value - } - } - } - c.DropwizardTagPathsMap = make(map[string]string) - if node, ok := tbl.Fields["dropwizard_tag_paths"]; ok { +} +func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string, target *[]models.TagFilter) { + if node, ok := tbl.Fields[fieldName]; ok { if subtbl, ok := node.(*ast.Table); ok { for name, val := range subtbl.Fields { if kv, ok := val.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardTagPathsMap[name] = str.Value - } - } - } - } - } - - //for grok data_format - if node, ok := tbl.Fields["grok_named_patterns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.GrokNamedPatterns = append(c.GrokNamedPatterns, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["grok_patterns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.GrokPatterns = append(c.GrokPatterns, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["grok_custom_patterns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.GrokCustomPatterns = str.Value - } - } - } - - if node, ok := tbl.Fields["grok_custom_pattern_files"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.GrokCustomPatternFiles = append(c.GrokCustomPatternFiles, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["grok_timezone"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.GrokTimezone = str.Value - } - } - } - - if node, ok := tbl.Fields["grok_unique_timestamp"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.GrokUniqueTimestamp = str.Value - } - } - } - - //for csv parser - if node, ok := tbl.Fields["csv_column_names"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.CSVColumnNames = append(c.CSVColumnNames, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["csv_column_types"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.CSVColumnTypes = append(c.CSVColumnTypes, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["csv_tag_columns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.CSVTagColumns = append(c.CSVTagColumns, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["csv_delimiter"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVDelimiter = str.Value - } - } - } - - if node, ok := tbl.Fields["csv_comment"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVComment = str.Value - } - } - } - - if node, ok := tbl.Fields["csv_measurement_column"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVMeasurementColumn = str.Value - } - } - } - - if node, ok := tbl.Fields["csv_timestamp_column"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVTimestampColumn = str.Value - } - } - } - - if node, ok := tbl.Fields["csv_timestamp_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVTimestampFormat = str.Value - } - } - } - - if node, ok := tbl.Fields["csv_timezone"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVTimezone = str.Value - } - } - } - - if node, ok := tbl.Fields["csv_header_row_count"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - c.CSVHeaderRowCount = int(v) - } - } - } - - if node, ok := tbl.Fields["csv_skip_rows"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - c.CSVSkipRows = int(v) - } - } - } - - if node, ok := tbl.Fields["csv_skip_columns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - c.CSVSkipColumns = int(v) - } - } - } - - if node, ok := tbl.Fields["csv_trim_space"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.Boolean); ok { - //for config with no quotes - val, err := strconv.ParseBool(str.Value) - c.CSVTrimSpace = val - if err != nil { - return nil, fmt.Errorf("E! parsing to bool: %v", err) - } - } - } - } - - if node, ok := tbl.Fields["form_urlencoded_tag_keys"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.FormUrlencodedTagKeys = append(c.FormUrlencodedTagKeys, str.Value) + tagfilter := models.TagFilter{Name: name} + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + tagfilter.Filter = append(tagfilter.Filter, str.Value) + } + } } + *target = append(*target, tagfilter) } } } } - - c.MetricName = name - - delete(tbl.Fields, "data_format") - delete(tbl.Fields, "separator") - delete(tbl.Fields, "templates") - delete(tbl.Fields, "tag_keys") - delete(tbl.Fields, "json_name_key") - delete(tbl.Fields, "json_query") - delete(tbl.Fields, "json_string_fields") - delete(tbl.Fields, "json_time_format") - delete(tbl.Fields, "json_time_key") - delete(tbl.Fields, "json_timezone") - delete(tbl.Fields, "json_strict") - delete(tbl.Fields, "data_type") - delete(tbl.Fields, "collectd_auth_file") - delete(tbl.Fields, "collectd_security_level") - delete(tbl.Fields, "collectd_typesdb") - delete(tbl.Fields, "collectd_parse_multivalue") - delete(tbl.Fields, "dropwizard_metric_registry_path") - delete(tbl.Fields, "dropwizard_time_path") - delete(tbl.Fields, "dropwizard_time_format") - delete(tbl.Fields, "dropwizard_tags_path") - delete(tbl.Fields, "dropwizard_tag_paths") - delete(tbl.Fields, "grok_named_patterns") - delete(tbl.Fields, "grok_patterns") - delete(tbl.Fields, "grok_custom_patterns") - delete(tbl.Fields, "grok_custom_pattern_files") - delete(tbl.Fields, "grok_timezone") - delete(tbl.Fields, "grok_unique_timestamp") - delete(tbl.Fields, "csv_column_names") - delete(tbl.Fields, "csv_column_types") - delete(tbl.Fields, "csv_comment") - delete(tbl.Fields, "csv_delimiter") - delete(tbl.Fields, "csv_field_columns") - delete(tbl.Fields, "csv_header_row_count") - delete(tbl.Fields, "csv_measurement_column") - delete(tbl.Fields, "csv_skip_columns") - delete(tbl.Fields, "csv_skip_rows") - delete(tbl.Fields, "csv_tag_columns") - delete(tbl.Fields, "csv_timestamp_column") - delete(tbl.Fields, "csv_timestamp_format") - delete(tbl.Fields, "csv_timezone") - delete(tbl.Fields, "csv_trim_space") - delete(tbl.Fields, "form_urlencoded_tag_keys") - - return c, nil } -// buildSerializer grabs the necessary entries from the ast.Table for creating -// a serializers.Serializer object, and creates it, which can then be added onto -// an Output object. -func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) { - c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)} - - if node, ok := tbl.Fields["data_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DataFormat = str.Value - } - } - } - - if c.DataFormat == "" { - c.DataFormat = "influx" - } - - if node, ok := tbl.Fields["prefix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.Prefix = str.Value - } - } - } - - if node, ok := tbl.Fields["template"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.Template = str.Value - } - } - } - - if node, ok := tbl.Fields["templates"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.Templates = append(c.Templates, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["carbon2_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.Carbon2Format = str.Value - } - } - } - - if node, ok := tbl.Fields["influx_max_line_bytes"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - c.InfluxMaxLineBytes = int(v) - } - } - } - - if node, ok := tbl.Fields["influx_sort_fields"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.InfluxSortFields, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["influx_uint_support"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.InfluxUintSupport, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["graphite_tag_support"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.GraphiteTagSupport, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["graphite_separator"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.GraphiteSeparator = str.Value - } - } - } - - if node, ok := tbl.Fields["json_timestamp_units"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - timestampVal, err := time.ParseDuration(str.Value) - if err != nil { - return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err) - } - // now that we have a duration, truncate it to the nearest - // power of ten (just in case) - nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds()))) - new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent))) - c.TimestampUnits = time.Duration(new_nanoseconds) - } - } - } - - if node, ok := tbl.Fields["splunkmetric_hec_routing"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.HecRouting, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["splunkmetric_multimetric"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.SplunkmetricMultiMetric, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["wavefront_source_override"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.WavefrontSourceOverride = append(c.WavefrontSourceOverride, str.Value) +func (c *Config) getFieldStringMap(tbl *ast.Table, fieldName string, target *map[string]string) { + *target = map[string]string{} + if node, ok := tbl.Fields[fieldName]; ok { + if subtbl, ok := node.(*ast.Table); ok { + for name, val := range subtbl.Fields { + if kv, ok := val.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + (*target)[name] = str.Value } } } } } - - if node, ok := tbl.Fields["wavefront_use_strict"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.WavefrontUseStrict, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["prometheus_export_timestamp"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.PrometheusExportTimestamp, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["prometheus_sort_metrics"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.PrometheusSortMetrics, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["prometheus_string_as_label"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.PrometheusStringAsLabel, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - delete(tbl.Fields, "carbon2_format") - delete(tbl.Fields, "influx_max_line_bytes") - delete(tbl.Fields, "influx_sort_fields") - delete(tbl.Fields, "influx_uint_support") - delete(tbl.Fields, "graphite_tag_support") - delete(tbl.Fields, "graphite_separator") - delete(tbl.Fields, "data_format") - delete(tbl.Fields, "prefix") - delete(tbl.Fields, "template") - delete(tbl.Fields, "templates") - delete(tbl.Fields, "json_timestamp_units") - delete(tbl.Fields, "splunkmetric_hec_routing") - delete(tbl.Fields, "splunkmetric_multimetric") - delete(tbl.Fields, "wavefront_source_override") - delete(tbl.Fields, "wavefront_use_strict") - delete(tbl.Fields, "prometheus_export_timestamp") - delete(tbl.Fields, "prometheus_sort_metrics") - delete(tbl.Fields, "prometheus_string_as_label") - return serializers.NewSerializer(c) } -// buildOutput parses output specific items from the ast.Table, -// builds the filter and returns an -// models.OutputConfig to be inserted into models.RunningInput -// Note: error exists in the return for future calls that might require error -func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { - filter, err := buildFilter(tbl) - if err != nil { - return nil, err - } - oc := &models.OutputConfig{ - Name: name, - Filter: filter, - } - - // TODO - // Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass - if len(oc.Filter.FieldDrop) > 0 { - oc.Filter.NameDrop = oc.Filter.FieldDrop - } - if len(oc.Filter.FieldPass) > 0 { - oc.Filter.NamePass = oc.Filter.FieldPass - } - - if err := getConfigDuration(tbl, "flush_interval", &oc.FlushInterval); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "flush_jitter", &oc.FlushJitter); err != nil { - return nil, err - } - - if node, ok := tbl.Fields["metric_buffer_limit"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - oc.MetricBufferLimit = int(v) - } - } - } - - if node, ok := tbl.Fields["metric_batch_size"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - oc.MetricBatchSize = int(v) - } - } - } - - if node, ok := tbl.Fields["alias"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - oc.Alias = str.Value - } - } - } - - if node, ok := tbl.Fields["name_override"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - oc.NameOverride = str.Value - } - } +func keys(m map[string]bool) []string { + result := []string{} + for k := range m { + result = append(result, k) } + return result +} - if node, ok := tbl.Fields["name_suffix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - oc.NameSuffix = str.Value - } - } - } +func (c *Config) hasErrs() bool { + return len(c.errs) > 0 +} - if node, ok := tbl.Fields["name_prefix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - oc.NamePrefix = str.Value - } - } +func (c *Config) firstErr() error { + if len(c.errs) == 0 { + return nil } + return c.errs[0] +} - delete(tbl.Fields, "metric_buffer_limit") - delete(tbl.Fields, "metric_batch_size") - delete(tbl.Fields, "alias") - delete(tbl.Fields, "name_override") - delete(tbl.Fields, "name_suffix") - delete(tbl.Fields, "name_prefix") - - return oc, nil +func (c *Config) addError(tbl *ast.Table, err error) { + c.errs = append(c.errs, fmt.Errorf("line %d:%d: %w", tbl.Line, tbl.Position, err)) } // unwrappable lets you retrieve the original telegraf.Processor from the @@ -2222,19 +1576,3 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { type unwrappable interface { Unwrap() telegraf.Processor } - -func getConfigDuration(tbl *ast.Table, key string, target *time.Duration) error { - if node, ok := tbl.Fields[key]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - d, err := time.ParseDuration(str.Value) - if err != nil { - return err - } - delete(tbl.Fields, key) - *target = d - } - } - } - return nil -} diff --git a/config/config_test.go b/config/config_test.go index 42aefff151761..5543c60e7da70 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -208,20 +208,19 @@ func TestConfig_FieldNotDefined(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/invalid_field.toml") require.Error(t, err, "invalid field name") - assert.Equal(t, "Error loading config file ./testdata/invalid_field.toml: Error parsing http_listener_v2, line 2: field corresponding to `not_a_field' is not defined in http_listener_v2.HTTPListenerV2", err.Error()) - + assert.Equal(t, "Error loading config file ./testdata/invalid_field.toml: plugin inputs.http_listener_v2: line 1: configuration specified the fields [\"not_a_field\"], but they weren't used", err.Error()) } func TestConfig_WrongFieldType(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/wrong_field_type.toml") require.Error(t, err, "invalid field type") - assert.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Port) cannot unmarshal TOML string into int", err.Error()) + assert.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Port) cannot unmarshal TOML string into int", err.Error()) c = NewConfig() err = c.LoadConfig("./testdata/wrong_field_type2.toml") require.Error(t, err, "invalid field type2") - assert.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Methods) cannot unmarshal TOML string into []string", err.Error()) + assert.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Methods) cannot unmarshal TOML string into []string", err.Error()) } func TestConfig_InlineTables(t *testing.T) { @@ -256,7 +255,7 @@ func TestConfig_BadOrdering(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/non_slice_slice.toml") require.Error(t, err, "bad ordering") - assert.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: Error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) + assert.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) } func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { From 5af6861ebbc934107c43a1da0076afeb1c918cc2 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 4 Nov 2020 18:23:52 -0500 Subject: [PATCH 049/761] fixes config issue #8362 (#8364) --- config/config.go | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/config/config.go b/config/config.go index 1071ffb45a87b..4b2f39955f78c 100644 --- a/config/config.go +++ b/config/config.go @@ -1409,8 +1409,26 @@ func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, func (c *Config) missingTomlField(typ reflect.Type, key string) error { switch key { - case "interval", "tagpass", "tagdrop", "namepass", "namedrop", "name_suffix", - "fieldpass", "fielddrop", "pass", "drop", "taginclude", "tagexclude", "data_format": + case "alias", "carbon2_format", "collectd_auth_file", "collectd_parse_multivalue", + "collectd_security_level", "collectd_typesdb", "collection_jitter", "csv_column_names", + "csv_column_types", "csv_comment", "csv_delimiter", "csv_header_row_count", + "csv_measurement_column", "csv_skip_columns", "csv_skip_rows", "csv_tag_columns", + "csv_timestamp_column", "csv_timestamp_format", "csv_timezone", "csv_trim_space", + "data_format", "data_type", "delay", "drop", "drop_original", "dropwizard_metric_registry_path", + "dropwizard_tag_paths", "dropwizard_tags_path", "dropwizard_time_format", "dropwizard_time_path", + "fielddrop", "fieldpass", "flush_interval", "flush_jitter", "form_urlencoded_tag_keys", + "grace", "graphite_separator", "graphite_tag_support", "grok_custom_pattern_files", + "grok_custom_patterns", "grok_named_patterns", "grok_patterns", "grok_timezone", + "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields", "influx_uint_support", + "interval", "json_name_key", "json_query", "json_strict", "json_string_fields", + "json_time_format", "json_time_key", "json_timestamp_units", "json_timezone", + "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", + "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", + "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", + "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", + "tagdrop", "tagexclude", "taginclude", "tagpass", "template", "templates", + "wavefront_source_override", "wavefront_use_strict": + // ignore fields that are common to all plugins. default: c.UnusedFields[key] = true From d369003912e6568aeb43c8d56207e407550afa01 Mon Sep 17 00:00:00 2001 From: smizach <55546955+smizach@users.noreply.github.com> Date: Fri, 6 Nov 2020 10:15:27 -0500 Subject: [PATCH 050/761] Add OData-Version header to requests (#8288) Closes influxdata#8093 --- plugins/inputs/redfish/redfish.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go index 54d1d15b8c097..ca500ab6c819a 100644 --- a/plugins/inputs/redfish/redfish.go +++ b/plugins/inputs/redfish/redfish.go @@ -181,6 +181,7 @@ func (r *Redfish) getData(url string, payload interface{}) error { req.SetBasicAuth(r.Username, r.Password) req.Header.Set("Accept", "application/json") req.Header.Set("Content-Type", "application/json") + req.Header.Set("OData-Version", "4.0") resp, err := r.client.Do(req) if err != nil { return err From 3523652e30390c6059b5d02e99ea280751868987 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 12 Nov 2020 10:49:03 -0500 Subject: [PATCH 051/761] fix to start Telegraf from Linux systemd.service --- scripts/telegraf.service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/telegraf.service b/scripts/telegraf.service index ff9860d5c4e2d..ab29f7f666785 100644 --- a/scripts/telegraf.service +++ b/scripts/telegraf.service @@ -4,7 +4,7 @@ Documentation=https://github.com/influxdata/telegraf After=network.target [Service] -EnvironmentFile=-/etc/default/telegraf +EnvironmentFile=/etc/default/telegraf User=telegraf ExecStart=/usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d $TELEGRAF_OPTS ExecReload=/bin/kill -HUP $MAINPID From ff0a8c2d87b67e453be45d9ed4a04d71ea130545 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Thu, 12 Nov 2020 17:04:52 +0100 Subject: [PATCH 052/761] Fix SMART plugin to recognize all devices from config (#8374) --- plugins/inputs/smart/smart.go | 71 ++++++++++-------------------- plugins/inputs/smart/smart_test.go | 13 ++---- 2 files changed, 27 insertions(+), 57 deletions(-) diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index adc23f0921e26..121edb0acf71b 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -12,14 +12,13 @@ import ( "sync" "syscall" "time" - "unicode" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -const IntelVID = "0x8086" +const intelVID = "0x8086" var ( // Device Model: APPLE SSD SM256E @@ -55,7 +54,7 @@ var ( // vid : 0x8086 // sn : CFGT53260XSP8011P - nvmeIdCtrlExpressionPattern = regexp.MustCompile(`^([\w\s]+):([\s\w]+)`) + nvmeIDCtrlExpressionPattern = regexp.MustCompile(`^([\w\s]+):([\s\w]+)`) deviceFieldIds = map[string]string{ "1": "read_error_rate", @@ -267,13 +266,7 @@ var ( } ) -type NVMeDevice struct { - name string - vendorID string - model string - serialNumber string -} - +// Smart plugin reads metrics from storage devices supporting S.M.A.R.T. type Smart struct { Path string `toml:"path"` //deprecated - to keep backward compatibility PathSmartctl string `toml:"path_smartctl"` @@ -288,6 +281,13 @@ type Smart struct { Log telegraf.Logger `toml:"-"` } +type nvmeDevice struct { + name string + vendorID string + model string + serialNumber string +} + var sampleConfig = ` ## Optionally specify the path to the smartctl executable # path_smartctl = "/usr/bin/smartctl" @@ -330,20 +330,23 @@ var sampleConfig = ` # timeout = "30s" ` -func NewSmart() *Smart { +func newSmart() *Smart { return &Smart{ Timeout: internal.Duration{Duration: time.Second * 30}, } } +// SampleConfig returns sample configuration for this plugin. func (m *Smart) SampleConfig() string { return sampleConfig } +// Description returns the plugin description. func (m *Smart) Description() string { return "Read metrics from storage devices supporting S.M.A.R.T." } +// Init performs one time setup of the plugin and returns an error if the configuration is invalid. func (m *Smart) Init() error { //if deprecated `path` (to smartctl binary) is provided in config and `path_smartctl` override does not exist if len(m.Path) > 0 && len(m.PathSmartctl) == 0 { @@ -377,6 +380,7 @@ func (m *Smart) Init() error { return nil } +// Gather takes in an accumulator and adds the metrics that the SMART tools gather. func (m *Smart) Gather(acc telegraf.Accumulator) error { var err error var scannedNVMeDevices []string @@ -387,8 +391,6 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error { isVendorExtension := len(m.EnableExtensions) != 0 if len(m.Devices) != 0 { - devicesFromConfig = excludeWrongDeviceNames(devicesFromConfig) - m.getAttributes(acc, devicesFromConfig) // if nvme-cli is present, vendor specific attributes can be gathered @@ -418,31 +420,6 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error { return nil } -// validate and exclude not correct config device names to avoid unwanted behaviours -func excludeWrongDeviceNames(devices []string) []string { - validSigns := map[string]struct{}{ - " ": {}, - "/": {}, - "\\": {}, - "-": {}, - ",": {}, - } - var wrongDevices []string - - for _, device := range devices { - for _, char := range device { - if unicode.IsLetter(char) || unicode.IsNumber(char) { - continue - } - if _, exist := validSigns[string(char)]; exist { - continue - } - wrongDevices = append(wrongDevices, device) - } - } - return difference(devices, wrongDevices) -} - func (m *Smart) scanAllDevices(ignoreExcludes bool) ([]string, []string, error) { // this will return all devices (including NVMe devices) for smartctl version >= 7.0 // for older versions this will return non NVMe devices @@ -540,11 +517,11 @@ func (m *Smart) getVendorNVMeAttributes(acc telegraf.Accumulator, devices []stri for _, device := range NVMeDevices { if contains(m.EnableExtensions, "auto-on") { switch device.vendorID { - case IntelVID: + case intelVID: wg.Add(1) go gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) } - } else if contains(m.EnableExtensions, "Intel") && device.vendorID == IntelVID { + } else if contains(m.EnableExtensions, "Intel") && device.vendorID == intelVID { wg.Add(1) go gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) } @@ -552,8 +529,8 @@ func (m *Smart) getVendorNVMeAttributes(acc telegraf.Accumulator, devices []stri wg.Wait() } -func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme string, timeout internal.Duration, useSudo bool) []NVMeDevice { - var NVMeDevices []NVMeDevice +func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme string, timeout internal.Duration, useSudo bool) []nvmeDevice { + var NVMeDevices []nvmeDevice for _, device := range devices { vid, sn, mn, err := gatherNVMeDeviceInfo(nvme, device, timeout, useSudo) @@ -561,7 +538,7 @@ func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme acc.AddError(fmt.Errorf("cannot find device info for %s device", device)) continue } - newDevice := NVMeDevice{ + newDevice := nvmeDevice{ name: device, vendorID: vid, model: mn, @@ -593,7 +570,7 @@ func findNVMeDeviceInfo(output string) (string, string, string, error) { for scanner.Scan() { line := scanner.Text() - if matches := nvmeIdCtrlExpressionPattern.FindStringSubmatch(line); len(matches) > 2 { + if matches := nvmeIDCtrlExpressionPattern.FindStringSubmatch(line); len(matches) > 2 { matches[1] = strings.TrimSpace(matches[1]) matches[2] = strings.TrimSpace(matches[2]) if matches[1] == "vid" { @@ -612,7 +589,7 @@ func findNVMeDeviceInfo(output string) (string, string, string, error) { return vid, sn, mn, nil } -func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo bool, nvme string, device NVMeDevice, wg *sync.WaitGroup) { +func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo bool, nvme string, device nvmeDevice, wg *sync.WaitGroup) { defer wg.Done() args := []string{"intel", "smart-log-add"} @@ -966,7 +943,7 @@ func parseTemperature(fields, deviceFields map[string]interface{}, str string) e return nil } -func parseTemperatureSensor(fields, deviceFields map[string]interface{}, str string) error { +func parseTemperatureSensor(fields, _ map[string]interface{}, str string) error { var temp int64 if _, err := fmt.Sscanf(str, "%d C", &temp); err != nil { return err @@ -993,7 +970,7 @@ func init() { _ = os.Setenv("LC_NUMERIC", "en_US.UTF-8") inputs.Add("smart", func() telegraf.Input { - m := NewSmart() + m := newSmart() m.Nocheck = "standby" return m }) diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index 00d8cf0725ea7..e82307d391565 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -14,7 +14,7 @@ import ( ) func TestGatherAttributes(t *testing.T) { - s := NewSmart() + s := newSmart() s.Attributes = true assert.Equal(t, time.Second*30, s.Timeout.Duration) @@ -78,7 +78,7 @@ func TestGatherAttributes(t *testing.T) { } func TestGatherNoAttributes(t *testing.T) { - s := NewSmart() + s := newSmart() s.Attributes = false assert.Equal(t, time.Second*30, s.Timeout.Duration) @@ -244,7 +244,7 @@ func TestGatherIntelNvme(t *testing.T) { var ( acc = &testutil.Accumulator{} wg = &sync.WaitGroup{} - device = NVMeDevice{ + device = nvmeDevice{ name: "nvme0", model: mockModel, serialNumber: mockSerial, @@ -275,13 +275,6 @@ func Test_checkForNVMeDevices(t *testing.T) { assert.Equal(t, expectedNVMeDevices, resultNVMeDevices) } -func Test_excludeWrongDeviceNames(t *testing.T) { - devices := []string{"/dev/sda", "/dev/nvme -d nvme", "/dev/sda1 -d megaraid,1", "/dev/sda ; ./suspicious_script.sh"} - validDevices := []string{"/dev/sda", "/dev/nvme -d nvme", "/dev/sda1 -d megaraid,1"} - result := excludeWrongDeviceNames(devices) - assert.Equal(t, validDevices, result) -} - func Test_contains(t *testing.T) { devices := []string{"/dev/sda", "/dev/nvme1"} device := "/dev/nvme1" From 954387e325cf67c3bfeccc41a666c7f24230c45b Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 12 Nov 2020 11:12:45 -0500 Subject: [PATCH 053/761] Revert "fix to start Telegraf from Linux systemd.service" This reverts commit 3523652e30390c6059b5d02e99ea280751868987. --- scripts/telegraf.service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/telegraf.service b/scripts/telegraf.service index ab29f7f666785..ff9860d5c4e2d 100644 --- a/scripts/telegraf.service +++ b/scripts/telegraf.service @@ -4,7 +4,7 @@ Documentation=https://github.com/influxdata/telegraf After=network.target [Service] -EnvironmentFile=/etc/default/telegraf +EnvironmentFile=-/etc/default/telegraf User=telegraf ExecStart=/usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d $TELEGRAF_OPTS ExecReload=/bin/kill -HUP $MAINPID From dabea48a907a5f59f8ab3ae487b9300665ab38cb Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Thu, 12 Nov 2020 09:20:50 -0800 Subject: [PATCH 054/761] Fix minor typos in readmes (#8370) --- plugins/inputs/redis/README.md | 2 +- plugins/inputs/sqlserver/README.md | 8 ++++---- plugins/inputs/win_perf_counters/README.md | 2 +- plugins/parsers/collectd/README.md | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index c8f343b262aca..4327a28bb98ee 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -63,7 +63,7 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a - maxmemory_policy(string) - mem_fragmentation_ratio(float, number) - **Persistance** + **Persistence** - loading(int,flag) - rdb_changes_since_last_save(int, number) - rdb_bgsave_in_progress(int, flag) diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 27c6da1cd7571..be5b98aa8d2b6 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -176,7 +176,7 @@ The new (version 2) metrics provide: - *Memory*: PLE, Page reads/sec, Page writes/sec, + more - *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more - *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more -- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevent properties such as Tier, #Vcores, Memory etc. +- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevant properties such as Tier, #Vcores, Memory etc. - *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. - *Schedulers* - This captures `sys.dm_os_schedulers`. - *SqlRequests* - This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and @@ -205,7 +205,7 @@ These are metrics for Azure SQL Database (single database) and are very similar - AzureSQLDBMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`. = AzureSQLDBResourceGovernance: Relevant properties indicatign resource limits from `sys.dm_user_db_resource_governance` - AzureSQLDBPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale. -- AzureSQLDBServerProperties: Relevant Azure SQL relevent properties from such as Tier, #Vcores, Memory etc, storage, etc. +- AzureSQLDBServerProperties: Relevant Azure SQL relevant properties from such as Tier, #Vcores, Memory etc, storage, etc. - AzureSQLDBWaitstats: Wait time in ms from `sys.dm_db_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected only as of the end of the a statement. and for a specific database only. - *AzureSQLOsWaitstats*: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide - *AzureSQLDBRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests` @@ -218,7 +218,7 @@ These are metrics for Azure SQL Managed instance, are very similar to version 2 - AzureSQLMIMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`. - AzureSQLMIResourceGovernance: Relevant properties indicatign resource limits from `sys.dm_instance_resource_governance` - AzureSQLMIPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale. -- AzureSQLMIServerProperties: Relevant Azure SQL relevent properties such as Tier, #Vcores, Memory etc, storage, etc. +- AzureSQLMIServerProperties: Relevant Azure SQL relevant properties such as Tier, #Vcores, Memory etc, storage, etc. - AzureSQLMIOsWaitstats: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide - AzureSQLMIRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests` - AzureSQLMISchedulers - This captures `sys.dm_os_schedulers` snapshots. @@ -233,7 +233,7 @@ These are metrics for Azure SQL Managed instance, are very similar to version 2 - *Memory*: PLE, Page reads/sec, Page writes/sec, + more - *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more - *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more -- SQLServerProperties: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevent properties such as Tier, #Vcores, Memory etc. +- SQLServerProperties: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevant properties such as Tier, #Vcores, Memory etc. - SQLServerWaitStatsCategorized: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. - SQLServerSchedulers - This captures `sys.dm_os_schedulers`. - SQLServerRequests - This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and diff --git a/plugins/inputs/win_perf_counters/README.md b/plugins/inputs/win_perf_counters/README.md index 1bb4bcb34a7ff..de45386a764a1 100644 --- a/plugins/inputs/win_perf_counters/README.md +++ b/plugins/inputs/win_perf_counters/README.md @@ -65,7 +65,7 @@ Example: _Deprecated. Necessary features on Windows Vista and newer are checked dynamically_ -Bool, if set to `true`, the plugin will use the localized PerfCounter interface that has been present since before Vista for backwards compatability. +Bool, if set to `true`, the plugin will use the localized PerfCounter interface that has been present since before Vista for backwards compatibility. It is recommended NOT to use this on OSes starting with Vista and newer because it requires more configuration to use this than the newer interface present since Vista. diff --git a/plugins/parsers/collectd/README.md b/plugins/parsers/collectd/README.md index cc7daa4f6af42..8dbc052be145d 100644 --- a/plugins/parsers/collectd/README.md +++ b/plugins/parsers/collectd/README.md @@ -39,7 +39,7 @@ You can also change the path to the typesdb or add additional typesdb using ## Multi-value plugins can be handled two ways. ## "split" will parse and store the multi-value plugin data into separate measurements ## "join" will parse and store the multi-value plugin as a single multi-value measurement. - ## "split" is the default behavior for backward compatability with previous versions of influxdb. + ## "split" is the default behavior for backward compatibility with previous versions of influxdb. collectd_parse_multivalue = "split" ``` From e0938382b1e0f67901fb7e91a5e6b1acc243023b Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 12 Nov 2020 09:21:47 -0800 Subject: [PATCH 055/761] fix links in external plugins readme (#8307) * fix awsalarms link in readme * fix links --- docs/EXTERNAL_PLUGINS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/EXTERNAL_PLUGINS.md b/docs/EXTERNAL_PLUGINS.md index aa3b5058aa8b4..abef068f50f48 100644 --- a/docs/EXTERNAL_PLUGINS.md +++ b/docs/EXTERNAL_PLUGINS.md @@ -56,7 +56,7 @@ This is a guide to help you set up your plugin to use it with `execd` block to look for this plugin. 1. Add usage and development instructions in the homepage of your repository for running your plugin with its respective `execd` plugin. Please refer to - [openvpn](/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](/vipinvkmenon/awsalarms#installation) + [openvpn](https://github.com/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](https://github.com/vipinvkmenon/awsalarms#installation) for examples. Include the following steps: 1. How to download the release package for your platform or how to clone the binary for your external plugin 1. The commands to unpack or build your binary From 97fb465c2dd9d6ddf413182dad0137a661a470d3 Mon Sep 17 00:00:00 2001 From: Kevin R Date: Thu, 12 Nov 2020 12:51:44 -0800 Subject: [PATCH 056/761] systemd_units: add --plain to command invocation (#7990) (#7991) --- plugins/inputs/systemd_units/README.md | 2 +- plugins/inputs/systemd_units/systemd_units_linux.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/systemd_units/README.md b/plugins/inputs/systemd_units/README.md index fc8306dee2da9..7fe09e224c564 100644 --- a/plugins/inputs/systemd_units/README.md +++ b/plugins/inputs/systemd_units/README.md @@ -1,7 +1,7 @@ # systemd Units Input Plugin The systemd_units plugin gathers systemd unit status on Linux. It relies on -`systemctl list-units --all --type=service` to collect data on service status. +`systemctl list-units --all --plain --type=service` to collect data on service status. The results are tagged with the unit name and provide enumerated fields for loaded, active and running fields, indicating the unit health. diff --git a/plugins/inputs/systemd_units/systemd_units_linux.go b/plugins/inputs/systemd_units/systemd_units_linux.go index 64caf03d007f3..8df21efa6de13 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux.go +++ b/plugins/inputs/systemd_units/systemd_units_linux.go @@ -198,13 +198,13 @@ func setSystemctl(Timeout internal.Duration, UnitType string) (*bytes.Buffer, er return nil, err } - cmd := exec.Command(systemctlPath, "list-units", "--all", fmt.Sprintf("--type=%s", UnitType), "--no-legend") + cmd := exec.Command(systemctlPath, "list-units", "--all", "--plain", fmt.Sprintf("--type=%s", UnitType), "--no-legend") var out bytes.Buffer cmd.Stdout = &out err = internal.RunTimeout(cmd, Timeout.Duration) if err != nil { - return &out, fmt.Errorf("error running systemctl list-units --all --type=%s --no-legend: %s", UnitType, err) + return &out, fmt.Errorf("error running systemctl list-units --all --plain --type=%s --no-legend: %s", UnitType, err) } return &out, nil From fb463bcc179857aa3dfd00288d1e9c2b4ef51f86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Krzysztof=20D=C4=85browski?= Date: Fri, 13 Nov 2020 00:12:29 +0100 Subject: [PATCH 057/761] proxmox: ignore QEMU templates and iron out a few bugs (#8326) --- plugins/inputs/proxmox/README.md | 4 +-- plugins/inputs/proxmox/proxmox.go | 42 +++++++++++++++----------- plugins/inputs/proxmox/proxmox_test.go | 7 +++-- plugins/inputs/proxmox/structs.go | 6 ++-- 4 files changed, 35 insertions(+), 24 deletions(-) diff --git a/plugins/inputs/proxmox/README.md b/plugins/inputs/proxmox/README.md index 24e39ade24ea3..db9f57e974d2d 100644 --- a/plugins/inputs/proxmox/README.md +++ b/plugins/inputs/proxmox/README.md @@ -11,8 +11,8 @@ Telegraf minimum version: Telegraf 1.16.0 ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. base_url = "https://localhost:8006/api2/json" api_token = "USER@REALM!TOKENID=UUID" - ## Optional node name config - # node_name = "localhost" + ## Node name, defaults to OS hostname + # node_name = "" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index 7c14356849d6b..810c45c58c454 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -3,19 +3,22 @@ package proxmox import ( "encoding/json" "errors" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" "io/ioutil" "net/http" "net/url" "os" "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" ) var sampleConfig = ` ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. base_url = "https://localhost:8006/api2/json" api_token = "USER@REALM!TOKENID=UUID" + ## Node name, defaults to OS hostname + # node_name = "" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" @@ -49,9 +52,10 @@ func (px *Proxmox) Gather(acc telegraf.Accumulator) error { } func (px *Proxmox) Init() error { - + // Set hostname as default node name for backwards compatibility if px.NodeName == "" { - return errors.New("node_name must be configured") + hostname, _ := os.Hostname() + px.NodeName = hostname } tlsCfg, err := px.ClientConfig.TLSConfig() @@ -69,15 +73,11 @@ func (px *Proxmox) Init() error { } func init() { - px := Proxmox{ - requestFunction: performRequest, - } - - // Set hostname as default node name for backwards compatibility - hostname, _ := os.Hostname() - px.NodeName = hostname - - inputs.Add("proxmox", func() telegraf.Input { return &px }) + inputs.Add("proxmox", func() telegraf.Input { + return &Proxmox{ + requestFunction: performRequest, + } + }) } func getNodeSearchDomain(px *Proxmox) error { @@ -94,7 +94,7 @@ func getNodeSearchDomain(px *Proxmox) error { } if nodeDns.Data.Searchdomain == "" { - return errors.New("node_name not found") + return errors.New("search domain is not set") } px.nodeSearchDomain = nodeDns.Data.Searchdomain @@ -141,20 +141,28 @@ func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { for _, vmStat := range vmStats.Data { vmConfig, err := getVmConfig(px, vmStat.ID, rt) if err != nil { - px.Log.Error("Error getting VM config: %v", err) + px.Log.Errorf("Error getting VM config: %v", err) return } + + if vmConfig.Data.Template == 1 { + px.Log.Debugf("Ignoring template VM %s (%s)", vmStat.ID, vmStat.Name) + continue + } + tags := getTags(px, vmStat.Name, vmConfig, rt) currentVMStatus, err := getCurrentVMStatus(px, rt, vmStat.ID) if err != nil { - px.Log.Error("Error getting VM curent VM status: %v", err) + px.Log.Errorf("Error getting VM curent VM status: %v", err) return } + fields, err := getFields(currentVMStatus) if err != nil { - px.Log.Error("Error getting VM measurements: %v", err) + px.Log.Errorf("Error getting VM measurements: %v", err) return } + acc.AddFields("proxmox", fields, tags) } } diff --git a/plugins/inputs/proxmox/proxmox_test.go b/plugins/inputs/proxmox/proxmox_test.go index 524a105e7b1ab..226705329761c 100644 --- a/plugins/inputs/proxmox/proxmox_test.go +++ b/plugins/inputs/proxmox/proxmox_test.go @@ -1,12 +1,13 @@ package proxmox import ( - "github.com/bmizerany/assert" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" "net/url" "strings" "testing" + + "github.com/bmizerany/assert" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var nodeSearchDomainTestData = `{"data":{"search":"test.example.com","dns1":"1.0.0.1"}}` diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go index 461e71d767d6a..b137603ea79a9 100644 --- a/plugins/inputs/proxmox/structs.go +++ b/plugins/inputs/proxmox/structs.go @@ -2,11 +2,12 @@ package proxmox import ( "encoding/json" + "net/http" + "net/url" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" - "net/http" - "net/url" ) type Proxmox struct { @@ -57,6 +58,7 @@ type VmConfig struct { Data struct { Searchdomain string `json:"searchdomain"` Hostname string `json:"hostname"` + Template int `json:"template"` } `json:"data"` } From 049daf7892a8fdd9e9c1bb2b4e427897d0745678 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 13 Nov 2020 16:36:08 +0100 Subject: [PATCH 058/761] Fix parsing of multiple files with different headers (#6318). (#8400) --- plugins/parsers/csv/parser.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 76d8306ea6e46..87e40327390d7 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -32,6 +32,8 @@ type Config struct { Timezone string `toml:"csv_timezone"` TrimSpace bool `toml:"csv_trim_space"` + gotColumnNames bool + TimeFunc func() time.Time DefaultTags map[string]string } @@ -64,6 +66,8 @@ func NewParser(c *Config) (*Parser, error) { return nil, fmt.Errorf("csv_column_names field count doesn't match with csv_column_types") } + c.gotColumnNames = len(c.ColumnNames) > 0 + if c.TimeFunc == nil { c.TimeFunc = time.Now } @@ -102,10 +106,13 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { return nil, err } } - // if there is a header and nothing in DataColumns + // if there is a header and we did not get DataColumns // set DataColumns to names extracted from the header - headerNames := make([]string, 0) - if len(p.ColumnNames) == 0 { + // we always reread the header to avoid side effects + // in cases where multiple files with different + // headers are read + if !p.gotColumnNames { + headerNames := make([]string, 0) for i := 0; i < p.HeaderRowCount; i++ { header, err := csvReader.Read() if err != nil { From 8f0070b86549240805344ad9165759cb405112f0 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 13 Nov 2020 11:44:22 -0500 Subject: [PATCH 059/761] Update changelog (cherry picked from commit 52f8cc468c0f3c212391aa025a9c8c2c9017590a) --- CHANGELOG.md | 19 +++++++++++++++++++ build_version.txt | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 23c7d2d063743..805fd53fa88a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,22 @@ +## v1.16.2 [2020-11-13] + +#### Bugfixes + + - [#8400](https://github.com/influxdata/telegraf/pull/8400) `parsers.csv` Fix parsing of multiple files with different headers (#6318). + - [#8326](https://github.com/influxdata/telegraf/pull/8326) `inputs.proxmox` proxmox: ignore QEMU templates and iron out a few bugs + - [#7991](https://github.com/influxdata/telegraf/pull/7991) `inputs.systemd_units` systemd_units: add --plain to command invocation (#7990) + - [#8307](https://github.com/influxdata/telegraf/pull/8307) fix links in external plugins readme + - [#8370](https://github.com/influxdata/telegraf/pull/8370) `inputs.redis` Fix minor typos in readmes + - [#8374](https://github.com/influxdata/telegraf/pull/8374) `inputs.smart` Fix SMART plugin to recognize all devices from config + - [#8288](https://github.com/influxdata/telegraf/pull/8288) `inputs.redfish` Add OData-Version header to requests + - [#8357](https://github.com/influxdata/telegraf/pull/8357) `inputs.vsphere` Prydin issue 8169 + - [#8356](https://github.com/influxdata/telegraf/pull/8356) `inputs.sqlserver` On-prem fix for #8324 + - [#8165](https://github.com/influxdata/telegraf/pull/8165) `outputs.wavefront` [output.wavefront] Introduced "immediate_flush" flag + - [#7938](https://github.com/influxdata/telegraf/pull/7938) `inputs.gnmi` added support for bytes encoding + - [#8337](https://github.com/influxdata/telegraf/pull/8337) `inputs.dcos` Update jwt-go module to address CVE-2020-26160 + - [#8350](https://github.com/influxdata/telegraf/pull/8350) `inputs.ras` fix plugins/input/ras test + - [#8329](https://github.com/influxdata/telegraf/pull/8329) `outputs.dynatrace` #8328 Fixed a bug with the state map in Dynatrace Plugin + ## v1.16.1 [2020-10-28] #### Release Notes diff --git a/build_version.txt b/build_version.txt index 41c11ffb730cf..4a02d2c3170bd 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.16.1 +1.16.2 From ca041063d9e34abdbfe88fa72732189fd4a9c2c9 Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Fri, 13 Nov 2020 20:26:07 +0100 Subject: [PATCH 060/761] Allow to catch errors that occur in the apply function (#8401) --- plugins/processors/starlark/README.md | 21 ++++++++++ plugins/processors/starlark/builtins.go | 13 +++++++ plugins/processors/starlark/starlark.go | 1 + plugins/processors/starlark/starlark_test.go | 40 ++++++++++++++++++++ 4 files changed, 75 insertions(+) diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index 1b541c33857ed..96da69e499fe9 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -154,6 +154,27 @@ def apply(metric): Telegraf freezes the global scope, which prevents it from being modified. Attempting to modify the global scope will fail with an error. +**How to manage errors that occur in the apply function?** + +In case you need to call some code that may return an error, you can delegate the call +to the built-in function `catch` which takes as argument a `Callable` and returns the error +that occured if any, `None` otherwise. + +So for example: + +```python +load("json.star", "json") + +def apply(metric): + error = catch(lambda: failing(metric)) + if error != None: + # Some code to execute in case of an error + metric.fields["error"] = error + return metric + +def failing(metric): + json.decode("non-json-content") +``` ### Examples diff --git a/plugins/processors/starlark/builtins.go b/plugins/processors/starlark/builtins.go index 4eda39b7d8d12..c2a30a0dc9379 100644 --- a/plugins/processors/starlark/builtins.go +++ b/plugins/processors/starlark/builtins.go @@ -34,6 +34,19 @@ func deepcopy(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, return &Metric{metric: dup}, nil } +// catch(f) evaluates f() and returns its evaluation error message +// if it failed or None if it succeeded. +func catch(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { + var fn starlark.Callable + if err := starlark.UnpackArgs("catch", args, kwargs, "fn", &fn); err != nil { + return nil, err + } + if _, err := starlark.Call(thread, fn, nil, nil); err != nil { + return starlark.String(err.Error()), nil + } + return starlark.None, nil +} + type builtinMethod func(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) func builtinAttr(recv starlark.Value, name string, methods map[string]builtinMethod) (starlark.Value, error) { diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go index cf791b3f155e3..a39b341f235c9 100644 --- a/plugins/processors/starlark/starlark.go +++ b/plugins/processors/starlark/starlark.go @@ -58,6 +58,7 @@ func (s *Starlark) Init() error { builtins := starlark.StringDict{} builtins["Metric"] = starlark.NewBuiltin("Metric", newMetric) builtins["deepcopy"] = starlark.NewBuiltin("deepcopy", deepcopy) + builtins["catch"] = starlark.NewBuiltin("catch", catch) program, err := s.sourceProgram(builtins) if err != nil { diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index ce0b1803c959c..f83767210c3c1 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -2378,6 +2378,46 @@ def apply(metric): ), }, }, + { + name: "support errors", + source: ` +load("json.star", "json") + +def apply(metric): + msg = catch(lambda: process(metric)) + if msg != None: + metric.fields["error"] = msg + metric.fields["value"] = "default" + return metric + +def process(metric): + metric.fields["field1"] = "value1" + metric.tags["tags1"] = "value2" + # Throw an error + json.decode(metric.fields.get('value')) + # Should never be called + metric.fields["msg"] = "value4" +`, + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{"value": "non-json-content", "msg": "value3"}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{"tags1": "value2"}, + map[string]interface{}{ + "value": "default", + "field1": "value1", + "msg": "value3", + "error": "json.decode: at offset 0, unexpected character 'n'", + }, + time.Unix(0, 0), + ), + }, + }, } for _, tt := range tests { From 18460e182502a8222a500503fb679864bf796f80 Mon Sep 17 00:00:00 2001 From: Yuxuan 'fishy' Wang Date: Fri, 13 Nov 2020 14:08:05 -0800 Subject: [PATCH 061/761] Wavefront output should distinguish between retryable and non-retryable errors (#8404) --- plugins/outputs/wavefront/wavefront.go | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 6ba82ce5ce5db..ee0a8a5b0b193 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -173,7 +173,10 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { for _, point := range w.buildMetrics(m) { err := w.sender.SendMetric(point.Metric, point.Value, point.Timestamp, point.Source, point.Tags) if err != nil { - return fmt.Errorf("Wavefront sending error: %s", err.Error()) + if isRetryable(err) { + return fmt.Errorf("Wavefront sending error: %v", err) + } + w.Log.Errorf("non-retryable error during Wavefront.Write: %v", err) } } } @@ -355,3 +358,21 @@ func init() { } }) } + +// TODO: Currently there's no canonical way to exhaust all +// retryable/non-retryable errors from wavefront, so this implementation just +// handles known non-retryable errors in a case-by-case basis and assumes all +// other errors are retryable. +// A support ticket has been filed against wavefront to provide a canonical way +// to distinguish between retryable and non-retryable errors (link is not +// public). +func isRetryable(err error) bool { + if err != nil { + // "empty metric name" errors are non-retryable as retry will just keep + // getting the same error again and again. + if strings.Contains(err.Error(), "empty metric name") { + return false + } + } + return true +} From 2c346ed08b256a0b2c173aca79ce8ae69a55a9cd Mon Sep 17 00:00:00 2001 From: Aaron Griffin <63020788+agriffin208@users.noreply.github.com> Date: Mon, 16 Nov 2020 07:54:58 -0700 Subject: [PATCH 062/761] #8405 add non-retryable debug logging (#8406) Add debug level logging for metric data that is not retryable. --- plugins/outputs/wavefront/wavefront.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index ee0a8a5b0b193..ef5a6418fe868 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -177,6 +177,7 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { return fmt.Errorf("Wavefront sending error: %v", err) } w.Log.Errorf("non-retryable error during Wavefront.Write: %v", err) + w.Log.Debugf("Non-retryable metric data: Name: %v, Value: %v, Timestamp: %v, Source: %v, PointTags: %v ", point.Metric, point.Value, point.Timestamp, point.Source, point.Tags) } } } From dc782805da823a9c40a31c91f8d3bfc7a87a8a8b Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Mon, 16 Nov 2020 09:15:34 -0800 Subject: [PATCH 063/761] add kinesis output to external plugins list (#8315) --- EXTERNAL_PLUGINS.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index 1aea58dac3070..bdbd244ca3ec3 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -13,3 +13,6 @@ Pull requests welcome. - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. + +## Outputs +- [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. From 0c15569174a84c84d3e89dfe18ee3b42eca99d15 Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Mon, 16 Nov 2020 21:22:40 +0100 Subject: [PATCH 064/761] Support logging in starlark (#8408) --- plugins/processors/starlark/README.md | 2 + plugins/processors/starlark/logging.go | 47 +++++++++++++++++++ plugins/processors/starlark/starlark.go | 10 +++- plugins/processors/starlark/starlark_test.go | 25 ++++++++++ .../processors/starlark/testdata/logging.star | 19 ++++++++ 5 files changed, 101 insertions(+), 2 deletions(-) create mode 100644 plugins/processors/starlark/logging.go create mode 100644 plugins/processors/starlark/testdata/logging.star diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index 96da69e499fe9..f674b7fdc67e7 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -96,6 +96,7 @@ While Starlark is similar to Python, there are important differences to note: The ability to load external scripts other than your own is pretty limited. The following libraries are available for loading: * json: `load("json.star", "json")` provides the following functions: `json.encode()`, `json.decode()`, `json.indent()`. See [json.star](/plugins/processors/starlark/testdata/json.star) for an example. +* log: `load("logging.star", "log")` provides the following functions: `log.debug()`, `log.info()`, `log.warn()`, `log.error()`. See [logging.star](/plugins/processors/starlark/testdata/logging.star) for an example. If you would like to see support for something else here, please open an issue. @@ -185,6 +186,7 @@ def failing(metric): - [rename](/plugins/processors/starlark/testdata/rename.star) - Rename tags or fields using a name mapping. - [scale](/plugins/processors/starlark/testdata/scale.star) - Multiply any field by a number - [value filter](/plugins/processors/starlark/testdata/value_filter.star) - remove a metric based on a field value. +- [logging](/plugins/processors/starlark/testdata/logging.star) - Log messages with the logger of Telegraf [All examples](/plugins/processors/starlark/testdata) are in the testdata folder. diff --git a/plugins/processors/starlark/logging.go b/plugins/processors/starlark/logging.go new file mode 100644 index 0000000000000..35ba65d1db80f --- /dev/null +++ b/plugins/processors/starlark/logging.go @@ -0,0 +1,47 @@ +package starlark + +import ( + "errors" + "fmt" + + "github.com/influxdata/telegraf" + "go.starlark.net/starlark" + "go.starlark.net/starlarkstruct" +) + +// Builds a module that defines all the supported logging functions which will log using the provided logger +func LogModule(logger telegraf.Logger) *starlarkstruct.Module { + var logFunc = func(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { + return log(t, b, args, kwargs, logger) + } + return &starlarkstruct.Module{ + Name: "log", + Members: starlark.StringDict{ + "debug": starlark.NewBuiltin("log.debug", logFunc), + "info": starlark.NewBuiltin("log.info", logFunc), + "warn": starlark.NewBuiltin("log.warn", logFunc), + "error": starlark.NewBuiltin("log.error", logFunc), + }, + } +} + +// Logs the provided message according to the level chosen +func log(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple, logger telegraf.Logger) (starlark.Value, error) { + var msg starlark.String + if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &msg); err != nil { + return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) + } + switch b.Name() { + case "log.debug": + logger.Debug(string(msg)) + case "log.info": + logger.Info(string(msg)) + case "log.warn": + logger.Warn(string(msg)) + case "log.error": + logger.Error(string(msg)) + default: + return nil, errors.New("method " + b.Name() + " is unknown") + } + return starlark.None, nil +} diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go index a39b341f235c9..4835f06dee5a4 100644 --- a/plugins/processors/starlark/starlark.go +++ b/plugins/processors/starlark/starlark.go @@ -52,7 +52,9 @@ func (s *Starlark) Init() error { s.thread = &starlark.Thread{ Print: func(_ *starlark.Thread, msg string) { s.Log.Debug(msg) }, - Load: loadFunc, + Load: func(thread *starlark.Thread, module string) (starlark.StringDict, error) { + return loadFunc(thread, module, s.Log) + }, } builtins := starlark.StringDict{} @@ -217,12 +219,16 @@ func init() { }) } -func loadFunc(thread *starlark.Thread, module string) (starlark.StringDict, error) { +func loadFunc(thread *starlark.Thread, module string, logger telegraf.Logger) (starlark.StringDict, error) { switch module { case "json.star": return starlark.StringDict{ "json": starlarkjson.Module, }, nil + case "logging.star": + return starlark.StringDict{ + "log": LogModule(logger), + }, nil default: return nil, errors.New("module " + module + " is not available") } diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index f83767210c3c1..aad2575caa1bf 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -2535,6 +2535,31 @@ func TestScript(t *testing.T) { ), }, }, + { + name: "logging", + plugin: &Starlark{ + Script: "testdata/logging.star", + Log: testutil.Logger{}, + }, + input: []telegraf.Metric{ + testutil.MustMetric("log", + map[string]string{}, + map[string]interface{}{ + "debug": "a debug message", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("log", + map[string]string{}, + map[string]interface{}{ + "debug": "a debug message", + }, + time.Unix(0, 0), + ), + }, + }, } for _, tt := range tests { diff --git a/plugins/processors/starlark/testdata/logging.star b/plugins/processors/starlark/testdata/logging.star new file mode 100644 index 0000000000000..8be85eb968cf1 --- /dev/null +++ b/plugins/processors/starlark/testdata/logging.star @@ -0,0 +1,19 @@ +# Example of the way to log a message with all the supported levels +# using the logger of Telegraf. +# +# Example Input: +# log debug="a debug message" 1465839830100400201 +# +# Example Output: +# log debug="a debug message" 1465839830100400201 + +load("logging.star", "log") +# loads log.debug(), log.info(), log.warn(), log.error() + +def apply(metric): + log.debug("debug: {}".format(metric.fields["debug"])) + log.info("an info message") + log.warn("a warning message") + log.error("an error message") + return metric + \ No newline at end of file From 6b3f65308812f320f396893d0b6beb04cb5f67cf Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 16 Nov 2020 16:57:50 -0500 Subject: [PATCH 065/761] fix config issue with tags (#8419) --- config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/config.go b/config/config.go index 4b2f39955f78c..f86692835afb4 100644 --- a/config/config.go +++ b/config/config.go @@ -1426,7 +1426,7 @@ func (c *Config) missingTomlField(typ reflect.Type, key string) error { "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", - "tagdrop", "tagexclude", "taginclude", "tagpass", "template", "templates", + "tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates", "wavefront_source_override", "wavefront_use_strict": // ignore fields that are common to all plugins. From ee861fdeed3314787af959f332420be2a8c2e174 Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Tue, 17 Nov 2020 22:13:25 +0100 Subject: [PATCH 066/761] Show how to return several metrics with the Starlark processor (#8423) --- plugins/processors/starlark/README.md | 2 + plugins/processors/starlark/starlark_test.go | 64 +++++++++++++++++++ .../starlark/testdata/multiple_metrics.star | 26 ++++++++ .../testdata/multiple_metrics_with_json.star | 26 ++++++++ 4 files changed, 118 insertions(+) create mode 100644 plugins/processors/starlark/testdata/multiple_metrics.star create mode 100644 plugins/processors/starlark/testdata/multiple_metrics_with_json.star diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index f674b7fdc67e7..091564dccf527 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -187,6 +187,8 @@ def failing(metric): - [scale](/plugins/processors/starlark/testdata/scale.star) - Multiply any field by a number - [value filter](/plugins/processors/starlark/testdata/value_filter.star) - remove a metric based on a field value. - [logging](/plugins/processors/starlark/testdata/logging.star) - Log messages with the logger of Telegraf +- [multiple metrics](/plugins/processors/starlark/testdata/multiple_metrics.star) - Return multiple metrics by using [a list](https://docs.bazel.build/versions/master/skylark/lib/list.html) of metrics. +- [multiple metrics from json array](/plugins/processors/starlark/testdata/multiple_metrics_with_json.star) - Builds a new metric from each element of a json array then returns all the created metrics. [All examples](/plugins/processors/starlark/testdata) are in the testdata folder. diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index aad2575caa1bf..e17793237fa97 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -2560,6 +2560,70 @@ func TestScript(t *testing.T) { ), }, }, + { + name: "multiple_metrics", + plugin: &Starlark{ + Script: "testdata/multiple_metrics.star", + Log: testutil.Logger{}, + }, + input: []telegraf.Metric{ + testutil.MustMetric("mm", + map[string]string{}, + map[string]interface{}{ + "value": "a", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("mm2", + map[string]string{}, + map[string]interface{}{ + "value": "b", + }, + time.Unix(0, 0), + ), + testutil.MustMetric("mm1", + map[string]string{}, + map[string]interface{}{ + "value": "a", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "multiple_metrics_with_json", + plugin: &Starlark{ + Script: "testdata/multiple_metrics_with_json.star", + Log: testutil.Logger{}, + }, + input: []telegraf.Metric{ + testutil.MustMetric("json", + map[string]string{}, + map[string]interface{}{ + "value": "[{\"label\": \"hello\"}, {\"label\": \"world\"}]", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("json", + map[string]string{}, + map[string]interface{}{ + "value": "hello", + }, + time.Unix(0, 0), + ), + testutil.MustMetric("json", + map[string]string{}, + map[string]interface{}{ + "value": "world", + }, + time.Unix(0, 0), + ), + }, + }, } for _, tt := range tests { diff --git a/plugins/processors/starlark/testdata/multiple_metrics.star b/plugins/processors/starlark/testdata/multiple_metrics.star new file mode 100644 index 0000000000000..3d2e3d85f9e57 --- /dev/null +++ b/plugins/processors/starlark/testdata/multiple_metrics.star @@ -0,0 +1,26 @@ +# Example showing how to create several metrics using the Starlark processor. +# +# Example Input: +# mm value="a" 1465839830100400201 +# +# Example Output: +# mm2 value="b" 1465839830100400201 +# mm1 value="a" 1465839830100400201 + +def apply(metric): + # Initialize a list of metrics + metrics = [] + # Create a new metric whose name is "mm2" + metric2 = Metric("mm2") + # Set the field "value" to b + metric2.fields["value"] = "b" + # Reset the time (only needed for testing purpose) + metric2.time = 0 + # Add metric2 to the list of metrics + metrics.append(metric2) + # Rename the original metric to "mm1" + metric.name = "mm1" + # Add metric to the list of metrics + metrics.append(metric) + # Return the created list of metrics + return metrics diff --git a/plugins/processors/starlark/testdata/multiple_metrics_with_json.star b/plugins/processors/starlark/testdata/multiple_metrics_with_json.star new file mode 100644 index 0000000000000..78f318e62c7ac --- /dev/null +++ b/plugins/processors/starlark/testdata/multiple_metrics_with_json.star @@ -0,0 +1,26 @@ +# Example showing how to create several metrics from a json array. +# +# Example Input: +# json value="[{\"label\": \"hello\"}, {\"label\": \"world\"}]" +# +# Example Output: +# json value="hello" 1465839830100400201 +# json value="world" 1465839830100400201 + +# loads json.encode(), json.decode(), json.indent() +load("json.star", "json") + +def apply(metric): + # Initialize a list of metrics + metrics = [] + # Loop over the json array stored into the field + for obj in json.decode(metric.fields['value']): + # Create a new metric whose name is "json" + current_metric = Metric("json") + # Set the field "value" to the label extracted from the current json object + current_metric.fields["value"] = obj["label"] + # Reset the time (only needed for testing purpose) + current_metric.time = 0 + # Add metric to the list of metrics + metrics.append(current_metric) + return metrics From 8ad288bad470b591f49a9bd188e59e15a51ed681 Mon Sep 17 00:00:00 2001 From: Alexey Kuzyashin <33540273+Kuzyashin@users.noreply.github.com> Date: Thu, 19 Nov 2020 19:52:47 +0300 Subject: [PATCH 067/761] Add DriverVersion and CUDA Version to output (#8436) --- plugins/inputs/nvidia_smi/README.md | 2 ++ plugins/inputs/nvidia_smi/nvidia_smi.go | 10 +++++++++- plugins/inputs/nvidia_smi/nvidia_smi_test.go | 4 ++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index bbe90e005c6d6..f147137f36b77 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -52,6 +52,8 @@ You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program - `clocks_current_sm` (integer, MHz) - `clocks_current_memory` (integer, MHz) - `clocks_current_video` (integer, MHz) + - `driver_version` (string) + - `cuda_version` (string) ### Sample Query diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index 688c3d4bb7680..f1ebfa38babb9 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -109,6 +109,8 @@ func (s *SMI) genTagsFields() []metric { setTagIfUsed(tags, "uuid", gpu.UUID) setTagIfUsed(tags, "compute_mode", gpu.ComputeMode) + setIfUsed("str", fields, "driver_version", s.DriverVersion) + setIfUsed("str", fields, "cuda_version", s.CUDAVersion) setIfUsed("int", fields, "fan_speed", gpu.FanSpeed) setIfUsed("int", fields, "memory_total", gpu.Memory.Total) setIfUsed("int", fields, "memory_used", gpu.Memory.Used) @@ -169,12 +171,18 @@ func setIfUsed(t string, m map[string]interface{}, k, v string) { m[k] = i } } + case "str": + if val != "" { + m[k] = val + } } } // SMI defines the structure for the output of _nvidia-smi -q -x_. type SMI struct { - GPU GPU `xml:"gpu"` + GPU GPU `xml:"gpu"` + DriverVersion string `xml:"driver_version"` + CUDAVersion string `xml:"cuda_version"` } // GPU defines the structure of the GPU portion of the smi output. diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index 3c191e609ade4..ea5887ae10a5d 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -69,6 +69,8 @@ func TestGatherValidXML(t *testing.T) { "clocks_current_memory": 405, "clocks_current_sm": 300, "clocks_current_video": 540, + "cuda_version": "10.1", + "driver_version": "418.43", "encoder_stats_average_fps": 0, "encoder_stats_average_latency": 0, "encoder_stats_session_count": 0, @@ -109,6 +111,8 @@ func TestGatherValidXML(t *testing.T) { "clocks_current_memory": 405, "clocks_current_sm": 139, "clocks_current_video": 544, + "cuda_version": "10.1", + "driver_version": "418.43", "encoder_stats_average_fps": 0, "encoder_stats_average_latency": 0, "encoder_stats_session_count": 0, From 247230c5c94be7c678e2213055c4b90f454991f5 Mon Sep 17 00:00:00 2001 From: Stephanie Engel <22456349+stephanie-engel@users.noreply.github.com> Date: Fri, 20 Nov 2020 09:52:07 -0600 Subject: [PATCH 068/761] keep field name as is for csv timestamp column (#8440) --- plugins/parsers/csv/parser.go | 6 ++++++ plugins/parsers/csv/parser_test.go | 20 ++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 87e40327390d7..1c3d511ef43eb 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -204,6 +204,12 @@ outer: } } + // If the field name is the timestamp column, then keep field name as is. + if fieldName == p.TimestampColumn { + recordFields[fieldName] = value + continue + } + // Try explicit conversion only when column types is defined. if len(p.ColumnTypes) > 0 { // Throw error if current column count exceeds defined types. diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index c0f489365eb75..31fd4b02a0966 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -85,6 +85,26 @@ func TestTimestamp(t *testing.T) { require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906000000000)) } +func TestTimestampYYYYMMDDHHmm(t *testing.T) { + p, err := NewParser( + &Config{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimestampFormat: "200601021504", + TimeFunc: DefaultTime, + }, + ) + testCSV := `line1,line2,line3 +200905231605,70,test_name +200907111605,80,test_name2` + metrics, err := p.Parse([]byte(testCSV)) + + require.NoError(t, err) + require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094700000000000)) + require.Equal(t, metrics[1].Time().UnixNano(), int64(1247328300000000000)) +} func TestTimestampError(t *testing.T) { p, err := NewParser( &Config{ From bbd4e80409f0b1c7e81512a26bf55efb30f52403 Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Fri, 20 Nov 2020 17:28:56 +0100 Subject: [PATCH 069/761] Show how to return a custom error with the Starlark processor (#8439) --- plugins/processors/starlark/README.md | 1 + plugins/processors/starlark/starlark_test.go | 47 ++++++++++++++++++- .../processors/starlark/testdata/fail.star | 13 +++++ 3 files changed, 59 insertions(+), 2 deletions(-) create mode 100644 plugins/processors/starlark/testdata/fail.star diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index 091564dccf527..1194845ea1ad8 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -189,6 +189,7 @@ def failing(metric): - [logging](/plugins/processors/starlark/testdata/logging.star) - Log messages with the logger of Telegraf - [multiple metrics](/plugins/processors/starlark/testdata/multiple_metrics.star) - Return multiple metrics by using [a list](https://docs.bazel.build/versions/master/skylark/lib/list.html) of metrics. - [multiple metrics from json array](/plugins/processors/starlark/testdata/multiple_metrics_with_json.star) - Builds a new metric from each element of a json array then returns all the created metrics. +- [custom error](/plugins/processors/starlark/testdata/fail.star) - Return a custom error with [fail](https://docs.bazel.build/versions/master/skylark/lib/globals.html#fail). [All examples](/plugins/processors/starlark/testdata) are in the testdata folder. diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index e17793237fa97..8328c0bb0bb8b 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -2624,6 +2624,24 @@ func TestScript(t *testing.T) { ), }, }, + { + name: "fail", + plugin: &Starlark{ + Script: "testdata/fail.star", + Log: testutil.Logger{}, + }, + input: []telegraf.Metric{ + testutil.MustMetric("fail", + map[string]string{}, + map[string]interface{}{ + "value": 1, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{}, + expectedErrorStr: "fail: The field value should be greater than 1", + }, } for _, tt := range tests { @@ -2937,7 +2955,11 @@ func TestAllScriptTestData(t *testing.T) { require.NoError(t, err) lines := strings.Split(string(b), "\n") inputMetrics := parseMetricsFrom(t, lines, "Example Input:") - outputMetrics := parseMetricsFrom(t, lines, "Example Output:") + expectedErrorStr := parseErrorMessage(t, lines, "Example Output Error:") + outputMetrics := []telegraf.Metric{} + if expectedErrorStr == "" { + outputMetrics = parseMetricsFrom(t, lines, "Example Output:") + } plugin := &Starlark{ Script: fn, Log: testutil.Logger{}, @@ -2951,7 +2973,11 @@ func TestAllScriptTestData(t *testing.T) { for _, m := range inputMetrics { err = plugin.Add(m, acc) - require.NoError(t, err) + if expectedErrorStr != "" { + require.EqualError(t, err, expectedErrorStr) + } else { + require.NoError(t, err) + } } err = plugin.Stop() @@ -2992,3 +3018,20 @@ func parseMetricsFrom(t *testing.T, lines []string, header string) (metrics []te } return metrics } + +// parses error message out of line protocol following a header +func parseErrorMessage(t *testing.T, lines []string, header string) string { + require.NotZero(t, len(lines), "Expected some lines to parse from .star file, found none") + startIdx := -1 + for i := range lines { + if strings.TrimLeft(lines[i], "# ") == header { + startIdx = i + 1 + break + } + } + if startIdx == -1 { + return "" + } + require.True(t, startIdx < len(lines), fmt.Sprintf("Expected to find the error message after %q, but found none", header)) + return strings.TrimLeft(lines[startIdx], "# ") +} diff --git a/plugins/processors/starlark/testdata/fail.star b/plugins/processors/starlark/testdata/fail.star new file mode 100644 index 0000000000000..484217aad9dba --- /dev/null +++ b/plugins/processors/starlark/testdata/fail.star @@ -0,0 +1,13 @@ +# Example of the way to return a custom error thanks to the built-in function fail +# Returning an error will drop the current metric. Consider using logging instead if you want to keep the metric. +# +# Example Input: +# fail value=1 1465839830100400201 +# +# Example Output Error: +# fail: The field value should be greater than 1 + +def apply(metric): + if metric.fields["value"] <= 1: + return fail("The field value should be greater than 1") + return metric From 521caf3995070a137d4c30ddeb5dece3a2e07225 Mon Sep 17 00:00:00 2001 From: Roy Lenferink Date: Fri, 20 Nov 2020 19:54:56 +0100 Subject: [PATCH 070/761] Update mdlayher/apcupsd dependency (#8444) --- go.mod | 2 +- go.sum | 5 +++-- plugins/inputs/apcupsd/apcupsd_test.go | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 81464d821d18e..ba5f17e8a5b48 100644 --- a/go.mod +++ b/go.mod @@ -87,7 +87,7 @@ require ( github.com/lib/pq v1.3.0 // indirect github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe + github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b github.com/miekg/dns v1.0.14 github.com/mitchellh/go-testing-interface v1.0.0 // indirect github.com/morikuni/aec v1.0.0 // indirect diff --git a/go.sum b/go.sum index 866e6d15d95a1..23952fba5a959 100644 --- a/go.sum +++ b/go.sum @@ -280,6 +280,7 @@ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= @@ -418,8 +419,8 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe h1:yMrL+YorbzaBpj/h3BbLMP+qeslPZYMbzcpHFBNy1Yk= -github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw= +github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b h1:Kcr+kPbkWZHFHXwl87quXUAmavS4/IMgu2zck3aiE7k= +github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= diff --git a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go index e749d5137daba..a7dbd2c7de1b7 100644 --- a/plugins/inputs/apcupsd/apcupsd_test.go +++ b/plugins/inputs/apcupsd/apcupsd_test.go @@ -140,7 +140,7 @@ func TestApcupsdGather(t *testing.T) { "nominal_battery_voltage": float64(12), "nominal_power": int(865), "firmware": string("857.L3 .I USB FW:L3"), - "battery_date": time.Date(2016, time.September, 06, 0, 0, 0, 0, time.UTC), + "battery_date": string("2016-09-06"), }, out: genOutput, }, From 245bef2f3a1193db657cfb351554b04c9595b0de Mon Sep 17 00:00:00 2001 From: Olli-Pekka Lehto Date: Fri, 20 Nov 2020 14:53:51 -0600 Subject: [PATCH 071/761] Add rate and interval to the basicstats aggregator plugin (#8428) --- plugins/aggregators/basicstats/README.md | 9 ++- plugins/aggregators/basicstats/basicstats.go | 69 +++++++++++----- .../aggregators/basicstats/basicstats_test.go | 79 ++++++++++++++++++- 3 files changed, 134 insertions(+), 23 deletions(-) diff --git a/plugins/aggregators/basicstats/README.md b/plugins/aggregators/basicstats/README.md index 8fef0c6f4886a..f13dd8f375682 100644 --- a/plugins/aggregators/basicstats/README.md +++ b/plugins/aggregators/basicstats/README.md @@ -16,7 +16,7 @@ emitting the aggregate every `period` seconds. drop_original = false ## Configures which basic stats to push as fields - # stats = ["count","diff","min","max","mean","non_negative_diff","stdev","s2","sum"] + # stats = ["count","diff","rate","min","max","mean","non_negative_diff","non_negative_rate","stdev","s2","sum","interval"] ``` - stats @@ -28,13 +28,16 @@ emitting the aggregate every `period` seconds. - measurement1 - field1_count - field1_diff (difference) + - field1_rate (rate per second) - field1_max - field1_min - field1_mean - field1_non_negative_diff (non-negative difference) + - field1_non_negative_rate (non-negative rate per second) - field1_sum - field1_s2 (variance) - field1_stdev (standard deviation) + - field1_interval (interval in nanoseconds) ### Tags: @@ -46,8 +49,8 @@ No tags are applied by this aggregator. $ telegraf --config telegraf.conf --quiet system,host=tars load1=1 1475583980000000000 system,host=tars load1=1 1475583990000000000 -system,host=tars load1_count=2,load1_diff=0,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000 +system,host=tars load1_count=2,load1_diff=0,load1_rate=0,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0,load1_interval=10000000000i 1475584010000000000 system,host=tars load1=1 1475584020000000000 system,host=tars load1=3 1475584030000000000 -system,host=tars load1_count=2,load1_diff=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000 +system,host=tars load1_count=2,load1_diff=2,load1_rate=0.2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162,load1_interval=10000000000i 1475584010000000000 ``` diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index 4e62ee31123a4..67cee50c4609b 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -2,6 +2,7 @@ package basicstats import ( "math" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/aggregators" @@ -25,6 +26,9 @@ type configuredStats struct { sum bool diff bool non_negative_diff bool + rate bool + non_negative_rate bool + interval bool } func NewBasicStats() *BasicStats { @@ -40,14 +44,17 @@ type aggregate struct { } type basicstats struct { - count float64 - min float64 - max float64 - sum float64 - mean float64 - diff float64 - M2 float64 //intermediate value for variance/stdev - LAST float64 //intermediate value for diff + count float64 + min float64 + max float64 + sum float64 + mean float64 + diff float64 + rate float64 + interval time.Duration + M2 float64 //intermediate value for variance/stdev + LAST float64 //intermediate value for diff + TIME time.Time //intermediate value for rate } var sampleConfig = ` @@ -88,8 +95,10 @@ func (b *BasicStats) Add(in telegraf.Metric) { mean: fv, sum: fv, diff: 0.0, + rate: 0.0, M2: 0.0, LAST: fv, + TIME: in.Time(), } } } @@ -100,14 +109,17 @@ func (b *BasicStats) Add(in telegraf.Metric) { if _, ok := b.cache[id].fields[field.Key]; !ok { // hit an uncached field of a cached metric b.cache[id].fields[field.Key] = basicstats{ - count: 1, - min: fv, - max: fv, - mean: fv, - sum: fv, - diff: 0.0, - M2: 0.0, - LAST: fv, + count: 1, + min: fv, + max: fv, + mean: fv, + sum: fv, + diff: 0.0, + rate: 0.0, + interval: 0, + M2: 0.0, + LAST: fv, + TIME: in.Time(), } continue } @@ -138,6 +150,12 @@ func (b *BasicStats) Add(in telegraf.Metric) { tmp.sum += fv //diff compute tmp.diff = fv - tmp.LAST + //interval compute + tmp.interval = in.Time().Sub(tmp.TIME) + //rate compute + if !in.Time().Equal(tmp.TIME) { + tmp.rate = tmp.diff / tmp.interval.Seconds() + } //store final data b.cache[id].fields[field.Key] = tmp } @@ -182,7 +200,15 @@ func (b *BasicStats) Push(acc telegraf.Accumulator) { if b.statsConfig.non_negative_diff && v.diff >= 0 { fields[k+"_non_negative_diff"] = v.diff } - + if b.statsConfig.rate { + fields[k+"_rate"] = v.rate + } + if b.statsConfig.non_negative_rate && v.diff >= 0 { + fields[k+"_non_negative_rate"] = v.rate + } + if b.statsConfig.interval { + fields[k+"_interval"] = v.interval.Nanoseconds() + } } //if count == 1 StdDev = infinite => so I won't send data } @@ -217,7 +243,12 @@ func (b *BasicStats) parseStats() *configuredStats { parsed.diff = true case "non_negative_diff": parsed.non_negative_diff = true - + case "rate": + parsed.rate = true + case "non_negative_rate": + parsed.non_negative_rate = true + case "interval": + parsed.interval = true default: b.Log.Warnf("Unrecognized basic stat %q, ignoring", name) } @@ -237,6 +268,8 @@ func (b *BasicStats) getConfiguredStats() { stdev: true, sum: false, non_negative_diff: false, + rate: false, + non_negative_rate: false, } } else { b.statsConfig = b.parseStats() diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go index c5a093840abc7..8b2e9c7397872 100644 --- a/plugins/aggregators/basicstats/basicstats_test.go +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -19,7 +19,7 @@ var m1, _ = metric.New("m1", "d": float64(2), "g": int64(3), }, - time.Now(), + time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), ) var m2, _ = metric.New("m1", map[string]string{"foo": "bar"}, @@ -34,7 +34,7 @@ var m2, _ = metric.New("m1", "andme": true, "g": int64(1), }, - time.Now(), + time.Date(2000, 1, 1, 0, 0, 0, 1e6, time.UTC), ) func BenchmarkApply(b *testing.B) { @@ -498,6 +498,81 @@ func TestBasicStatsWithDiff(t *testing.T) { acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) } +func TestBasicStatsWithRate(t *testing.T) { + + aggregator := NewBasicStats() + aggregator.Stats = []string{"rate"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + aggregator.Push(&acc) + expectedFields := map[string]interface{}{ + "a_rate": float64(0), + "b_rate": float64(2000), + "c_rate": float64(2000), + "d_rate": float64(4000), + "g_rate": float64(-2000), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +func TestBasicStatsWithNonNegativeRate(t *testing.T) { + + aggregator := NewBasicStats() + aggregator.Stats = []string{"non_negative_rate"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + aggregator.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_non_negative_rate": float64(0), + "b_non_negative_rate": float64(2000), + "c_non_negative_rate": float64(2000), + "d_non_negative_rate": float64(4000), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} +func TestBasicStatsWithInterval(t *testing.T) { + + aggregator := NewBasicStats() + aggregator.Stats = []string{"interval"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + aggregator.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_interval": int64(time.Millisecond), + "b_interval": int64(time.Millisecond), + "c_interval": int64(time.Millisecond), + "d_interval": int64(time.Millisecond), + "g_interval": int64(time.Millisecond), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + // Test only aggregating non_negative_diff func TestBasicStatsWithNonNegativeDiff(t *testing.T) { From b39c1974a23507a1892979c4401d135778a52fad Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 20 Nov 2020 17:53:44 -0500 Subject: [PATCH 072/761] update to go 1.15.5 (#8446) --- .circleci/config.yml | 2 +- Makefile | 4 ++-- scripts/alpine.docker | 2 +- scripts/buster.docker | 2 +- scripts/ci-1.15.docker | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 2d3c152fedc8b..9e5b1aaaefa87 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ defaults: - image: 'quay.io/influxdb/telegraf-ci:1.14.9' go-1_15: &go-1_15 docker: - - image: 'quay.io/influxdb/telegraf-ci:1.15.2' + - image: 'quay.io/influxdb/telegraf-ci:1.15.5' mac: &mac macos: xcode: 11.3.1 diff --git a/Makefile b/Makefile index eebd15c30bffc..44375dbcc5107 100644 --- a/Makefile +++ b/Makefile @@ -172,8 +172,8 @@ plugin-%: .PHONY: ci-1.15 ci-1.15: - docker build -t quay.io/influxdb/telegraf-ci:1.15.2 - < scripts/ci-1.15.docker - docker push quay.io/influxdb/telegraf-ci:1.15.2 + docker build -t quay.io/influxdb/telegraf-ci:1.15.5 - < scripts/ci-1.15.docker + docker push quay.io/influxdb/telegraf-ci:1.15.5 .PHONY: ci-1.14 ci-1.14: diff --git a/scripts/alpine.docker b/scripts/alpine.docker index 4c83e322d277e..39075571fa9d9 100644 --- a/scripts/alpine.docker +++ b/scripts/alpine.docker @@ -1,4 +1,4 @@ -FROM golang:1.15.2 as builder +FROM golang:1.15.5 as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/buster.docker b/scripts/buster.docker index 3919d8ca5fd20..e39cf63cc828c 100644 --- a/scripts/buster.docker +++ b/scripts/buster.docker @@ -1,4 +1,4 @@ -FROM golang:1.15.2-buster as builder +FROM golang:1.15.5-buster as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/ci-1.15.docker b/scripts/ci-1.15.docker index 65230db5f6f3b..afef08606a94d 100644 --- a/scripts/ci-1.15.docker +++ b/scripts/ci-1.15.docker @@ -1,4 +1,4 @@ -FROM golang:1.15.2 +FROM golang:1.15.5 RUN chmod -R 755 "$GOPATH" From d64c72294a0c755921435011410fe5b3ad5e375b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Tosser?= Date: Mon, 23 Nov 2020 19:35:00 +0400 Subject: [PATCH 073/761] Add response_time to monit plugin (#8056) --- plugins/inputs/monit/README.md | 2 ++ plugins/inputs/monit/monit.go | 12 +++++++----- plugins/inputs/monit/monit_test.go | 1 + 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/monit/README.md b/plugins/inputs/monit/README.md index be116394d6609..aa4a08b31bbc8 100644 --- a/plugins/inputs/monit/README.md +++ b/plugins/inputs/monit/README.md @@ -128,6 +128,7 @@ Minimum Version of Monit tested with is 5.16. - hostname - port_number - request + - response_time - protocol - type @@ -232,4 +233,5 @@ monit_file,monitoring_mode=active,monitoring_status=monitored,pending_action=non monit_process,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog,source=xyzzy.local,status=running,version=5.20.0 children=0i,cpu_percent=0,cpu_percent_total=0,mem_kb=3148i,mem_kb_total=3148i,mem_percent=0.2,mem_percent_total=0.2,monitoring_mode_code=0i,monitoring_status_code=1i,parent_pid=1i,pending_action_code=0i,pid=318i,status_code=0i,threads=4i 1579735047000000000 monit_program,monitoring_mode=active,monitoring_status=initializing,pending_action=none,platform_name=Linux,service=echo,source=xyzzy.local,status=running,version=5.20.0 monitoring_mode_code=0i,monitoring_status_code=2i,pending_action_code=0i,program_started=0i,program_status=0i,status_code=0i 1579735047000000000 monit_system,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=debian-stretch-monit.virt,source=xyzzy.local,status=running,version=5.20.0 cpu_load_avg_15m=0,cpu_load_avg_1m=0,cpu_load_avg_5m=0,cpu_system=0,cpu_user=0,cpu_wait=0,mem_kb=42852i,mem_percent=2.1,monitoring_mode_code=0i,monitoring_status_code=1i,pending_action_code=0i,status_code=0i,swap_kb=0,swap_percent=0 1579735047000000000 +monit_remote_host,dc=new-12,host=palladium,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,rack=rack-0,service=blog.kalvad.com,source=palladium,status=running,version=5.27.0 monitoring_status_code=1i,monitoring_mode_code=0i,response_time=0.664412,type="TCP",pending_action_code=0i,remote_hostname="blog.kalvad.com",port_number=443i,request="/",protocol="HTTP",status_code=0i 1599138990000000000 ``` diff --git a/plugins/inputs/monit/monit.go b/plugins/inputs/monit/monit.go index a17042bf5e3a9..00b2d96f93889 100644 --- a/plugins/inputs/monit/monit.go +++ b/plugins/inputs/monit/monit.go @@ -114,11 +114,12 @@ type Upload struct { } type Port struct { - Hostname string `xml:"hostname"` - PortNumber int64 `xml:"portnumber"` - Request string `xml:"request"` - Protocol string `xml:"protocol"` - Type string `xml:"type"` + Hostname string `xml:"hostname"` + PortNumber int64 `xml:"portnumber"` + Request string `xml:"request"` + ResponseTime float64 `xml:"responsetime"` + Protocol string `xml:"protocol"` + Type string `xml:"type"` } type Block struct { @@ -301,6 +302,7 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error { fields["remote_hostname"] = service.Port.Hostname fields["port_number"] = service.Port.PortNumber fields["request"] = service.Port.Request + fields["response_time"] = service.Port.ResponseTime fields["protocol"] = service.Port.Protocol fields["type"] = service.Port.Type acc.AddFields("monit_remote_host", fields, tags) diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index 1d95b45a51bc5..2739240f1be8a 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -179,6 +179,7 @@ func TestServiceType(t *testing.T) { "request": "", "protocol": "DEFAULT", "type": "TCP", + "response_time": 0.000145, }, time.Unix(0, 0), ), From 0fcfee0caf908adc6c2ff4711d791b6cef181de9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Mon, 23 Nov 2020 16:40:32 +0100 Subject: [PATCH 074/761] Fix tests for Windows - part 1 (#8414) --- .gitattributes | 1 + Makefile | 7 +- appveyor.yml | 2 +- config/config_test.go | 3 +- config/testdata/telegraf-agent.toml | 8 -- internal/globpath/globpath_test.go | 54 ++++++++----- plugins/inputs/bcache/README.md | 18 ++--- plugins/inputs/bcache/bcache.go | 6 +- plugins/inputs/bcache/bcache_test.go | 2 + plugins/inputs/bcache/bcache_windows.go | 3 + plugins/inputs/ceph/ceph_test.go | 8 +- plugins/inputs/disk/disk_test.go | 11 +-- plugins/inputs/exec/exec_test.go | 5 ++ plugins/inputs/execd/execd_test.go | 11 +-- plugins/inputs/execd/shim/shim_posix_test.go | 5 -- plugins/inputs/file/file_test.go | 5 ++ plugins/inputs/filecount/filecount_test.go | 5 ++ .../filecount/filesystem_helpers_test.go | 5 ++ plugins/inputs/filestat/filestat_test.go | 78 ++++++++++--------- plugins/inputs/haproxy/haproxy_test.go | 11 ++- .../http_response/http_response_test.go | 60 +++++++------- plugins/inputs/leofs/leofs_test.go | 15 +++- plugins/inputs/logparser/logparser_test.go | 74 +++++++++--------- plugins/inputs/lustre2/lustre2.go | 6 +- plugins/inputs/lustre2/lustre2_test.go | 2 + plugins/inputs/lustre2/lustre2_windows.go | 3 + plugins/inputs/monit/monit_test.go | 9 +-- .../inputs/net_response/net_response_test.go | 1 + plugins/inputs/passenger/passenger_test.go | 51 ++++++++---- plugins/inputs/phpfpm/phpfpm_test.go | 5 ++ plugins/inputs/postfix/postfix.go | 4 + plugins/inputs/postfix/postfix_test.go | 2 + plugins/inputs/postfix/postfix_windows.go | 3 + plugins/inputs/powerdns/powerdns_test.go | 11 ++- .../powerdns_recursor_test.go | 4 +- plugins/inputs/redfish/redfish_test.go | 11 ++- plugins/inputs/snmp/snmp_test.go | 4 +- .../socket_listener/socket_listener_test.go | 8 +- plugins/inputs/syslog/nontransparent_test.go | 3 +- plugins/inputs/syslog/octetcounting_test.go | 3 +- plugins/inputs/syslog/rfc5426_test.go | 12 ++- plugins/inputs/syslog/syslog.go | 7 +- plugins/inputs/syslog/syslog_test.go | 19 +++-- plugins/inputs/tail/tail_test.go | 53 +++++++------ plugins/inputs/x509_cert/x509_cert.go | 4 + plugins/inputs/x509_cert/x509_cert_test.go | 12 ++- .../socket_writer/socket_writer_test.go | 5 ++ plugins/processors/ifname/ifname_test.go | 3 +- plugins/processors/ifname/ttl_cache.go | 10 +++ plugins/processors/reverse_dns/rdnscache.go | 8 +- .../processors/reverse_dns/reversedns_test.go | 9 ++- 51 files changed, 416 insertions(+), 253 deletions(-) create mode 100644 plugins/inputs/bcache/bcache_windows.go create mode 100644 plugins/inputs/lustre2/lustre2_windows.go create mode 100644 plugins/inputs/postfix/postfix_windows.go diff --git a/.gitattributes b/.gitattributes index 21bc439bf797e..7769daa83cb06 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3,3 +3,4 @@ README.md merge=union go.sum merge=union plugins/inputs/all/all.go merge=union plugins/outputs/all/all.go merge=union +**/testdata/** test eol=lf diff --git a/Makefile b/Makefile index 44375dbcc5107..92e94772576b9 100644 --- a/Makefile +++ b/Makefile @@ -114,12 +114,7 @@ fmtcheck: .PHONY: test-windows test-windows: - go test -short $(race_detector) ./plugins/inputs/ping/... - go test -short $(race_detector) ./plugins/inputs/win_perf_counters/... - go test -short $(race_detector) ./plugins/inputs/win_services/... - go test -short $(race_detector) ./plugins/inputs/procstat/... - go test -short $(race_detector) ./plugins/inputs/ntpq/... - go test -short $(race_detector) ./plugins/processors/port_name/... + go test -short ./... .PHONY: vet vet: diff --git a/appveyor.yml b/appveyor.yml index b454c8dc8d9dd..6f5f6e94828c5 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -12,7 +12,7 @@ clone_folder: C:\gopath\src\github.com\influxdata\telegraf environment: GOPATH: C:\gopath -stack: go 1.14 +stack: go 1.15 platform: x64 diff --git a/config/config_test.go b/config/config_test.go index 5543c60e7da70..79d74e83b5a43 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -2,6 +2,7 @@ package config import ( "os" + "strings" "testing" "time" @@ -201,7 +202,7 @@ func TestConfig_LoadSpecialTypes(t *testing.T) { // Tests telegraf size parsing. assert.Equal(t, internal.Size{Size: 1024 * 1024}, inputHTTPListener.MaxBodySize) // Tests toml multiline basic strings. - assert.Equal(t, "/path/to/my/cert\n", inputHTTPListener.TLSCert) + assert.Equal(t, "/path/to/my/cert", strings.TrimRight(inputHTTPListener.TLSCert, "\r\n")) } func TestConfig_FieldNotDefined(t *testing.T) { diff --git a/config/testdata/telegraf-agent.toml b/config/testdata/telegraf-agent.toml index f71b98206e5e8..6967d6e862277 100644 --- a/config/testdata/telegraf-agent.toml +++ b/config/testdata/telegraf-agent.toml @@ -176,14 +176,6 @@ # If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port. servers = ["127.0.0.1:4021"] -# Read metrics from local Lustre service on OST, MDS -[[inputs.lustre2]] - # An array of /proc globs to search for Lustre stats - # If not specified, the default will work on Lustre 2.5.x - # - # ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"] - # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"] - # Read metrics about memory usage [[inputs.mem]] # no configuration diff --git a/internal/globpath/globpath_test.go b/internal/globpath/globpath_test.go index 60562d8f8f1ae..92af2d20b88f1 100644 --- a/internal/globpath/globpath_test.go +++ b/internal/globpath/globpath_test.go @@ -1,29 +1,38 @@ +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package globpath import ( + "os" + "path/filepath" "runtime" - "strings" "testing" "github.com/stretchr/testify/require" ) +var ( + testdataDir = getTestdataDir() +) + func TestCompileAndMatch(t *testing.T) { - dir := getTestdataDir() // test super asterisk - g1, err := Compile(dir + "/**") + g1, err := Compile(filepath.Join(testdataDir, "**")) require.NoError(t, err) // test single asterisk - g2, err := Compile(dir + "/*.log") + g2, err := Compile(filepath.Join(testdataDir, "*.log")) require.NoError(t, err) // test no meta characters (file exists) - g3, err := Compile(dir + "/log1.log") + g3, err := Compile(filepath.Join(testdataDir, "log1.log")) require.NoError(t, err) // test file that doesn't exist - g4, err := Compile(dir + "/i_dont_exist.log") + g4, err := Compile(filepath.Join(testdataDir, "i_dont_exist.log")) require.NoError(t, err) // test super asterisk that doesn't exist - g5, err := Compile(dir + "/dir_doesnt_exist/**") + g5, err := Compile(filepath.Join(testdataDir, "dir_doesnt_exist", "**")) require.NoError(t, err) matches := g1.Match() @@ -39,15 +48,14 @@ func TestCompileAndMatch(t *testing.T) { } func TestRootGlob(t *testing.T) { - dir := getTestdataDir() tests := []struct { input string output string }{ - {dir + "/**", dir + "/*"}, - {dir + "/nested?/**", dir + "/nested?/*"}, - {dir + "/ne**/nest*", dir + "/ne*"}, - {dir + "/nested?/*", ""}, + {filepath.Join(testdataDir, "**"), filepath.Join(testdataDir, "*")}, + {filepath.Join(testdataDir, "nested?", "**"), filepath.Join(testdataDir, "nested?", "*")}, + {filepath.Join(testdataDir, "ne**", "nest*"), filepath.Join(testdataDir, "ne*")}, + {filepath.Join(testdataDir, "nested?", "*"), ""}, } for _, test := range tests { @@ -57,21 +65,19 @@ func TestRootGlob(t *testing.T) { } func TestFindNestedTextFile(t *testing.T) { - dir := getTestdataDir() // test super asterisk - g1, err := Compile(dir + "/**.txt") + g1, err := Compile(filepath.Join(testdataDir, "**.txt")) require.NoError(t, err) matches := g1.Match() require.Len(t, matches, 1) } -func getTestdataDir() string { - _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "globpath_test.go", "testdata", 1) -} - func TestMatch_ErrPermission(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping Unix only test") + } + tests := []struct { input string expected []string @@ -98,3 +104,13 @@ func TestWindowsSeparator(t *testing.T) { ok := glob.MatchString("testdata\\nested1") require.True(t, ok) } + +func getTestdataDir() string { + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + return filepath.Join(dir, "testdata") +} diff --git a/plugins/inputs/bcache/README.md b/plugins/inputs/bcache/README.md index 11d567ec5616b..88c9f14f9236a 100644 --- a/plugins/inputs/bcache/README.md +++ b/plugins/inputs/bcache/README.md @@ -56,15 +56,15 @@ cache_readaheads Using this configuration: ```toml -[bcache] - # Bcache sets path - # If not specified, then default is: - # bcachePath = "/sys/fs/bcache" - # - # By default, telegraf gather stats for all bcache devices - # Setting devices will restrict the stats to the specified - # bcache devices. - # bcacheDevs = ["bcache0", ...] +[[inputs.bcache]] + ## Bcache sets path + ## If not specified, then default is: + bcachePath = "/sys/fs/bcache" + + ## By default, Telegraf gather stats for all bcache devices + ## Setting devices will restrict the stats to the specified + ## bcache devices. + bcacheDevs = ["bcache0"] ``` When run with: diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 849e6dd37de0d..c94af73f93dd4 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -1,3 +1,7 @@ +// +build !windows + +// bcache doesn't aim for Windows + package bcache import ( @@ -22,7 +26,7 @@ var sampleConfig = ` ## If not specified, then default is: bcachePath = "/sys/fs/bcache" - ## By default, telegraf gather stats for all bcache devices + ## By default, Telegraf gather stats for all bcache devices ## Setting devices will restrict the stats to the specified ## bcache devices. bcacheDevs = ["bcache0"] diff --git a/plugins/inputs/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go index bd191528fd014..4646963c4bfe1 100644 --- a/plugins/inputs/bcache/bcache_test.go +++ b/plugins/inputs/bcache/bcache_test.go @@ -1,3 +1,5 @@ +// +build !windows + package bcache import ( diff --git a/plugins/inputs/bcache/bcache_windows.go b/plugins/inputs/bcache/bcache_windows.go new file mode 100644 index 0000000000000..9a580cc940106 --- /dev/null +++ b/plugins/inputs/bcache/bcache_windows.go @@ -0,0 +1,3 @@ +// +build windows + +package bcache diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index 78da3438de691..f57cda4679ce4 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -4,7 +4,7 @@ import ( "fmt" "io/ioutil" "os" - "path" + "path/filepath" "strconv" "strings" "testing" @@ -163,7 +163,7 @@ func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*soc } else { prefix = monPrefix } - expected := path.Join(dir, sockFile(prefix, i)) + expected := filepath.Join(dir, sockFile(prefix, i)) found := false for _, s := range sockets { fmt.Printf("Checking %s\n", s.socket) @@ -183,7 +183,7 @@ func sockFile(prefix string, i int) string { func createTestFiles(dir string, st *SockTest) { writeFile := func(prefix string, i int) { f := sockFile(prefix, i) - fpath := path.Join(dir, f) + fpath := filepath.Join(dir, f) ioutil.WriteFile(fpath, []byte(""), 0777) } tstFileApply(st, writeFile) @@ -192,7 +192,7 @@ func createTestFiles(dir string, st *SockTest) { func cleanupTestFiles(dir string, st *SockTest) { rmFile := func(prefix string, i int) { f := sockFile(prefix, i) - fpath := path.Join(dir, f) + fpath := filepath.Join(dir, f) err := os.Remove(fpath) if err != nil { fmt.Printf("Error removing test file %s: %v\n", fpath, err) diff --git a/plugins/inputs/disk/disk_test.go b/plugins/inputs/disk/disk_test.go index aeb2ae92bd77f..13180fffb1c37 100644 --- a/plugins/inputs/disk/disk_test.go +++ b/plugins/inputs/disk/disk_test.go @@ -1,6 +1,7 @@ package disk import ( + "fmt" "os" "testing" @@ -74,13 +75,13 @@ func TestDiskUsage(t *testing.T) { assert.Equal(t, expectedAllDiskMetrics, numDiskMetrics) tags1 := map[string]string{ - "path": "/", + "path": string(os.PathSeparator), "fstype": "ext4", "device": "sda", "mode": "ro", } tags2 := map[string]string{ - "path": "/home", + "path": fmt.Sprintf("%chome", os.PathSeparator), "fstype": "ext4", "device": "sdb", "mode": "rw", @@ -144,7 +145,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, }, expectedTags: map[string]string{ - "path": "/", + "path": string(os.PathSeparator), "device": "sda", "fstype": "ext4", "mode": "ro", @@ -177,7 +178,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, hostMountPrefix: "/hostfs", expectedTags: map[string]string{ - "path": "/var", + "path": fmt.Sprintf("%cvar", os.PathSeparator), "device": "sda", "fstype": "ext4", "mode": "ro", @@ -210,7 +211,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, hostMountPrefix: "/hostfs", expectedTags: map[string]string{ - "path": "/", + "path": string(os.PathSeparator), "device": "sda", "fstype": "ext4", "mode": "ro", diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index d0fcc71f668e5..38503a7c069d8 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -1,3 +1,8 @@ +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package exec import ( diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index a7be617da3a48..6cb254eb5b8f4 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -1,5 +1,3 @@ -// +build !windows - package execd import ( @@ -11,17 +9,16 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/models" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/serializers" - - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" ) func TestSettingConfigWorks(t *testing.T) { diff --git a/plugins/inputs/execd/shim/shim_posix_test.go b/plugins/inputs/execd/shim/shim_posix_test.go index 873ef89bf655f..594985d23ffc1 100644 --- a/plugins/inputs/execd/shim/shim_posix_test.go +++ b/plugins/inputs/execd/shim/shim_posix_test.go @@ -7,7 +7,6 @@ import ( "context" "io" "os" - "runtime" "syscall" "testing" "time" @@ -16,10 +15,6 @@ import ( ) func TestShimUSR1SignalingWorks(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip() - return - } stdinReader, stdinWriter := io.Pipe() stdoutReader, stdoutWriter := io.Pipe() diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go index 427ff25d8c789..a5cacec21a03c 100644 --- a/plugins/inputs/file/file_test.go +++ b/plugins/inputs/file/file_test.go @@ -1,3 +1,8 @@ +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package file import ( diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 568ee07b5d458..39bbafb36de99 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -1,3 +1,8 @@ +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package filecount import ( diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go index 08bb15a2e59cf..62e7e2f814531 100644 --- a/plugins/inputs/filecount/filesystem_helpers_test.go +++ b/plugins/inputs/filecount/filesystem_helpers_test.go @@ -1,3 +1,8 @@ +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package filecount import ( diff --git a/plugins/inputs/filestat/filestat_test.go b/plugins/inputs/filestat/filestat_test.go index a38d3b0aacdc4..79a111ffb849a 100644 --- a/plugins/inputs/filestat/filestat_test.go +++ b/plugins/inputs/filestat/filestat_test.go @@ -1,102 +1,108 @@ +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package filestat import ( - "runtime" - "strings" + "os" + "path/filepath" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" +) + +var ( + testdataDir = getTestdataDir() ) func TestGatherNoMd5(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Files = []string{ - dir + "log1.log", - dir + "log2.log", - "/non/existant/file", + filepath.Join(testdataDir, "log1.log"), + filepath.Join(testdataDir, "log2.log"), + filepath.Join(testdataDir, "non_existent_file"), } acc := testutil.Accumulator{} acc.GatherError(fs.Gather) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) tags2 := map[string]string{ - "file": dir + "log2.log", + "file": filepath.Join(testdataDir, "log2.log"), } require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1))) tags3 := map[string]string{ - "file": "/non/existant/file", + "file": filepath.Join(testdataDir, "non_existent_file"), } require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(0))) } func TestGatherExplicitFiles(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ - dir + "log1.log", - dir + "log2.log", - "/non/existant/file", + filepath.Join(testdataDir, "log1.log"), + filepath.Join(testdataDir, "log2.log"), + filepath.Join(testdataDir, "non_existent_file"), } acc := testutil.Accumulator{} acc.GatherError(fs.Gather) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags2 := map[string]string{ - "file": dir + "log2.log", + "file": filepath.Join(testdataDir, "log2.log"), } require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags3 := map[string]string{ - "file": "/non/existant/file", + "file": filepath.Join(testdataDir, "non_existent_file"), } require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(0))) } func TestGatherGlob(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ - dir + "*.log", + filepath.Join(testdataDir, "*.log"), } acc := testutil.Accumulator{} acc.GatherError(fs.Gather) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags2 := map[string]string{ - "file": dir + "log2.log", + "file": filepath.Join(testdataDir, "log2.log"), } require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1))) @@ -104,33 +110,32 @@ func TestGatherGlob(t *testing.T) { } func TestGatherSuperAsterisk(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ - dir + "**", + filepath.Join(testdataDir, "**"), } acc := testutil.Accumulator{} acc.GatherError(fs.Gather) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags2 := map[string]string{ - "file": dir + "log2.log", + "file": filepath.Join(testdataDir, "log2.log"), } require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags3 := map[string]string{ - "file": dir + "test.conf", + "file": filepath.Join(testdataDir, "test.conf"), } require.True(t, acc.HasPoint("filestat", tags3, "size_bytes", int64(104))) require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(1))) @@ -138,18 +143,17 @@ func TestGatherSuperAsterisk(t *testing.T) { } func TestModificationTime(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Files = []string{ - dir + "log1.log", + filepath.Join(testdataDir, "log1.log"), } acc := testutil.Accumulator{} acc.GatherError(fs.Gather) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) @@ -160,22 +164,21 @@ func TestNoModificationTime(t *testing.T) { fs := NewFileStat() fs.Log = testutil.Logger{} fs.Files = []string{ - "/non/existant/file", + filepath.Join(testdataDir, "non_existent_file"), } acc := testutil.Accumulator{} acc.GatherError(fs.Gather) tags1 := map[string]string{ - "file": "/non/existant/file", + "file": filepath.Join(testdataDir, "non_existent_file"), } require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(0))) require.False(t, acc.HasInt64Field("filestat", "modification_time")) } func TestGetMd5(t *testing.T) { - dir := getTestdataDir() - md5, err := getMd5(dir + "test.conf") + md5, err := getMd5(filepath.Join(testdataDir, "test.conf")) assert.NoError(t, err) assert.Equal(t, "5a7e9b77fa25e7bb411dbd17cf403c1f", md5) @@ -184,6 +187,11 @@ func TestGetMd5(t *testing.T) { } func getTestdataDir() string { - _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "filestat_test.go", "testdata/", 1) + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + return filepath.Join(dir, "testdata") } diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index e05031f192675..0a360c351a644 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -7,6 +7,8 @@ import ( "net" "net/http" "net/http/httptest" + "os" + "path/filepath" "strings" "testing" @@ -114,12 +116,13 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) { func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { var randomNumber int64 var sockets [5]net.Listener - _globmask := "/tmp/test-haproxy*.sock" - _badmask := "/tmp/test-fail-haproxy*.sock" + + _globmask := filepath.Join(os.TempDir(), "test-haproxy*.sock") + _badmask := filepath.Join(os.TempDir(), "test-fail-haproxy*.sock") for i := 0; i < 5; i++ { binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) - sockname := fmt.Sprintf("/tmp/test-haproxy%d.sock", randomNumber) + sockname := filepath.Join(os.TempDir(), fmt.Sprintf("test-haproxy%d.sock", randomNumber)) sock, err := net.Listen("unix", sockname) if err != nil { @@ -146,7 +149,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { for _, sock := range sockets { tags := map[string]string{ - "server": sock.Addr().String(), + "server": getSocketAddr(sock.Addr().String()), "proxy": "git", "sv": "www", "type": "server", diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index adf4e7999aa94..3c290c1539c31 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -1,3 +1,7 @@ +// +build !windows + +// TODO: Windows - should be enabled for Windows when https://github.com/influxdata/telegraf/issues/8451 is fixed + package http_response import ( @@ -162,7 +166,7 @@ func TestHeaders(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL, + URLs: []string{ts.URL}, Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 2}, Headers: map[string]string{ @@ -198,7 +202,7 @@ func TestFields(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -236,7 +240,7 @@ func TestResponseBodyField(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -271,7 +275,7 @@ func TestResponseBodyField(t *testing.T) { // Invalid UTF-8 String h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/invalidUTF8", + URLs: []string{ts.URL + "/invalidUTF8"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -305,7 +309,7 @@ func TestResponseBodyMaxSize(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -339,7 +343,7 @@ func TestHTTPHeaderTags(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -374,7 +378,7 @@ func TestHTTPHeaderTags(t *testing.T) { h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/noheader", + URLs: []string{ts.URL + "/noheader"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -400,7 +404,7 @@ func TestHTTPHeaderTags(t *testing.T) { // Connection failed h = &HTTPResponse{ Log: testutil.Logger{}, - Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here + URLs: []string{"https:/nonexistent.nonexistent"}, // Any non-routable IP works here Body: "", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 5}, @@ -456,7 +460,7 @@ func TestInterface(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -495,7 +499,7 @@ func TestRedirects(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/redirect", + URLs: []string{ts.URL + "/redirect"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -526,7 +530,7 @@ func TestRedirects(t *testing.T) { h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/badredirect", + URLs: []string{ts.URL + "/badredirect"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -563,7 +567,7 @@ func TestMethod(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/mustbepostmethod", + URLs: []string{ts.URL + "/mustbepostmethod"}, Body: "{ 'test': 'data'}", Method: "POST", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -594,7 +598,7 @@ func TestMethod(t *testing.T) { h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/mustbepostmethod", + URLs: []string{ts.URL + "/mustbepostmethod"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -626,7 +630,7 @@ func TestMethod(t *testing.T) { //check that lowercase methods work correctly h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/mustbepostmethod", + URLs: []string{ts.URL + "/mustbepostmethod"}, Body: "{ 'test': 'data'}", Method: "head", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -663,7 +667,7 @@ func TestBody(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/musthaveabody", + URLs: []string{ts.URL + "/musthaveabody"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -694,7 +698,7 @@ func TestBody(t *testing.T) { h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/musthaveabody", + URLs: []string{ts.URL + "/musthaveabody"}, Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, Headers: map[string]string{ @@ -728,7 +732,7 @@ func TestStringMatch(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "hit the good page", @@ -766,7 +770,7 @@ func TestStringMatchJson(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/jsonresponse", + URLs: []string{ts.URL + "/jsonresponse"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "\"service_status\": \"up\"", @@ -804,7 +808,7 @@ func TestStringMatchFail(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "hit the bad page", @@ -847,7 +851,7 @@ func TestTimeout(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/twosecondnap", + URLs: []string{ts.URL + "/twosecondnap"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second}, @@ -881,7 +885,7 @@ func TestBadRegex(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "bad regex:[[", @@ -905,7 +909,7 @@ func TestNetworkErrors(t *testing.T) { // DNS error h := &HTTPResponse{ Log: testutil.Logger{}, - Address: "https://nonexistent.nonexistent", // Any non-resolvable URL works here + URLs: []string{"https://nonexistent.nonexistent"}, // Any non-resolvable URL works here Body: "", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -932,7 +936,7 @@ func TestNetworkErrors(t *testing.T) { // Connection failed h = &HTTPResponse{ Log: testutil.Logger{}, - Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here + URLs: []string{"https:/nonexistent.nonexistent"}, // Any non-routable IP works here Body: "", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 5}, @@ -1082,7 +1086,7 @@ func TestBasicAuth(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -1121,7 +1125,7 @@ func TestStatusCodeMatchFail(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/nocontent", + URLs: []string{ts.URL + "/nocontent"}, ResponseStatusCode: http.StatusOK, ResponseTimeout: internal.Duration{Duration: time.Second * 20}, } @@ -1154,7 +1158,7 @@ func TestStatusCodeMatch(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/nocontent", + URLs: []string{ts.URL + "/nocontent"}, ResponseStatusCode: http.StatusNoContent, ResponseTimeout: internal.Duration{Duration: time.Second * 20}, } @@ -1187,7 +1191,7 @@ func TestStatusCodeAndStringMatch(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, ResponseStatusCode: http.StatusOK, ResponseStringMatch: "hit the good page", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -1222,7 +1226,7 @@ func TestStatusCodeAndStringMatchFail(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/nocontent", + URLs: []string{ts.URL + "/nocontent"}, ResponseStatusCode: http.StatusOK, ResponseStringMatch: "hit the good page", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, diff --git a/plugins/inputs/leofs/leofs_test.go b/plugins/inputs/leofs/leofs_test.go index f456a998e73a6..6d7799d0b8cdc 100644 --- a/plugins/inputs/leofs/leofs_test.go +++ b/plugins/inputs/leofs/leofs_test.go @@ -8,6 +8,7 @@ import ( "log" "os" "os/exec" + "runtime" "testing" ) @@ -133,19 +134,25 @@ func makeFakeSNMPSrc(code string) string { return path } -func buildFakeSNMPCmd(src string) { - err := exec.Command("go", "build", "-o", "snmpwalk", src).Run() +func buildFakeSNMPCmd(src string, executable string) { + err := exec.Command("go", "build", "-o", executable, src).Run() if err != nil { log.Fatalln(err) } } func testMain(t *testing.T, code string, endpoint string, serverType ServerType) { + executable := "snmpwalk" + if runtime.GOOS == "windows" { + executable = "snmpwalk.exe" + } + // Build the fake snmpwalk for test src := makeFakeSNMPSrc(code) defer os.Remove(src) - buildFakeSNMPCmd(src) - defer os.Remove("./snmpwalk") + buildFakeSNMPCmd(src, executable) + defer os.Remove("./" + executable) + envPathOrigin := os.Getenv("PATH") // Refer to the fake snmpwalk os.Setenv("PATH", ".") diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 142f78d464963..9bf4f125ae4f6 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -3,22 +3,26 @@ package logparser import ( "io/ioutil" "os" - "runtime" - "strings" + "path/filepath" "testing" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +var ( + testdataDir = getTestdataDir() ) func TestStartNoParsers(t *testing.T) { logparser := &LogParserPlugin{ Log: testutil.Logger{}, FromBeginning: true, - Files: []string{"testdata/*.log"}, + Files: []string{filepath.Join(testdataDir, "*.log")}, } acc := testutil.Accumulator{} @@ -26,15 +30,13 @@ func TestStartNoParsers(t *testing.T) { } func TestGrokParseLogFilesNonExistPattern(t *testing.T) { - thisdir := getCurrentDir() - logparser := &LogParserPlugin{ Log: testutil.Logger{}, FromBeginning: true, - Files: []string{thisdir + "testdata/*.log"}, + Files: []string{filepath.Join(testdataDir, "*.log")}, GrokConfig: GrokConfig{ Patterns: []string{"%{FOOBAR}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, } @@ -44,17 +46,15 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) { } func TestGrokParseLogFiles(t *testing.T) { - thisdir := getCurrentDir() - logparser := &LogParserPlugin{ Log: testutil.Logger{}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}", "%{TEST_LOG_C}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, FromBeginning: true, - Files: []string{thisdir + "testdata/*.log"}, + Files: []string{filepath.Join(testdataDir, "*.log")}, } acc := testutil.Accumulator{} @@ -68,7 +68,7 @@ func TestGrokParseLogFiles(t *testing.T) { "logparser_grok", map[string]string{ "response_code": "200", - "path": thisdir + "testdata/test_a.log", + "path": filepath.Join(testdataDir, "test_a.log"), }, map[string]interface{}{ "clientip": "192.168.1.1", @@ -81,7 +81,7 @@ func TestGrokParseLogFiles(t *testing.T) { testutil.MustMetric( "logparser_grok", map[string]string{ - "path": thisdir + "testdata/test_b.log", + "path": filepath.Join(testdataDir, "test_b.log"), }, map[string]interface{}{ "myfloat": 1.25, @@ -93,7 +93,7 @@ func TestGrokParseLogFiles(t *testing.T) { testutil.MustMetric( "logparser_grok", map[string]string{ - "path": thisdir + "testdata/test_c.log", + "path": filepath.Join(testdataDir, "test_c.log"), "response_code": "200", }, map[string]interface{}{ @@ -115,16 +115,14 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { defer os.RemoveAll(emptydir) assert.NoError(t, err) - thisdir := getCurrentDir() - logparser := &LogParserPlugin{ Log: testutil.Logger{}, FromBeginning: true, - Files: []string{emptydir + "/*.log"}, + Files: []string{filepath.Join(emptydir, "*.log")}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, } @@ -133,7 +131,12 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { assert.Equal(t, acc.NFields(), 0) - _ = os.Symlink(thisdir+"testdata/test_a.log", emptydir+"/test_a.log") + input, err := ioutil.ReadFile(filepath.Join(testdataDir, "test_a.log")) + assert.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(emptydir, "test_a.log"), input, 0644) + assert.NoError(t, err) + assert.NoError(t, acc.GatherError(logparser.Gather)) acc.Wait(1) @@ -148,23 +151,21 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { }, map[string]string{ "response_code": "200", - "path": emptydir + "/test_a.log", + "path": filepath.Join(emptydir, "test_a.log"), }) } // Test that test_a.log line gets parsed even though we don't have the correct // pattern available for test_b.log func TestGrokParseLogFilesOneBad(t *testing.T) { - thisdir := getCurrentDir() - logparser := &LogParserPlugin{ Log: testutil.Logger{}, FromBeginning: true, - Files: []string{thisdir + "testdata/test_a.log"}, + Files: []string{filepath.Join(testdataDir, "test_a.log")}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_BAD}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, } @@ -184,22 +185,20 @@ func TestGrokParseLogFilesOneBad(t *testing.T) { }, map[string]string{ "response_code": "200", - "path": thisdir + "testdata/test_a.log", + "path": filepath.Join(testdataDir, "test_a.log"), }) } func TestGrokParseLogFiles_TimestampInEpochMilli(t *testing.T) { - thisdir := getCurrentDir() - logparser := &LogParserPlugin{ Log: testutil.Logger{}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_C}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, FromBeginning: true, - Files: []string{thisdir + "testdata/test_c.log"}, + Files: []string{filepath.Join(testdataDir, "test_c.log")}, } acc := testutil.Accumulator{} @@ -218,11 +217,16 @@ func TestGrokParseLogFiles_TimestampInEpochMilli(t *testing.T) { }, map[string]string{ "response_code": "200", - "path": thisdir + "testdata/test_c.log", + "path": filepath.Join(testdataDir, "test_c.log"), }) } -func getCurrentDir() string { - _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "logparser_test.go", "", 1) +func getTestdataDir() string { + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + return filepath.Join(dir, "testdata") } diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 611ba294dbc5c..44e046c7337f0 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -1,5 +1,9 @@ +// +build !windows + +// lustre2 doesn't aim for Windows + /* -Lustre 2.x telegraf plugin +Lustre 2.x Telegraf plugin Lustre (http://lustre.org/) is an open-source, parallel file system for HPC environments. It stores statistics about its activity in diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 8e93da8e81726..1fb55d30491ce 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -1,3 +1,5 @@ +// +build !windows + package lustre2 import ( diff --git a/plugins/inputs/lustre2/lustre2_windows.go b/plugins/inputs/lustre2/lustre2_windows.go new file mode 100644 index 0000000000000..0c4d970608e09 --- /dev/null +++ b/plugins/inputs/lustre2/lustre2_windows.go @@ -0,0 +1,3 @@ +// +build windows + +package lustre2 diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index 2739240f1be8a..d17db8d2acc6d 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -4,6 +4,7 @@ import ( "errors" "net/http" "net/http/httptest" + "net/url" "testing" "time" @@ -573,21 +574,19 @@ func TestAllowHosts(t *testing.T) { } func TestConnection(t *testing.T) { - r := &Monit{ Address: "http://127.0.0.1:2812", Username: "test", Password: "test", } - var acc testutil.Accumulator - r.Init() + var acc testutil.Accumulator err := r.Gather(&acc) - if assert.Error(t, err) { - assert.Contains(t, err.Error(), "connect: connection refused") + _, ok := err.(*url.Error) + assert.True(t, ok) } } diff --git a/plugins/inputs/net_response/net_response_test.go b/plugins/inputs/net_response/net_response_test.go index ef4d0714a7a74..8f01d687927f3 100644 --- a/plugins/inputs/net_response/net_response_test.go +++ b/plugins/inputs/net_response/net_response_test.go @@ -91,6 +91,7 @@ func TestTCPError(t *testing.T) { c := NetResponse{ Protocol: "tcp", Address: ":9999", + Timeout: internal.Duration{Duration: time.Second * 30}, } // Error err1 := c.Gather(&acc) diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index c54239d39ecfd..ce1ebe462cfbe 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -4,20 +4,37 @@ import ( "fmt" "io/ioutil" "os" + "path/filepath" + "runtime" + "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) -func fakePassengerStatus(stat string) { - content := fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat) - ioutil.WriteFile("/tmp/passenger-status", []byte(content), 0700) +func fakePassengerStatus(stat string) string { + var fileExtension, content string + if runtime.GOOS == "windows" { + fileExtension = ".bat" + content = "@echo off\n" + for _, line := range strings.Split(strings.TrimSuffix(stat, "\n"), "\n") { + content += "for /f \"delims=\" %%A in (\"" + line + "\") do echo %%~A\n" //my eyes are bleeding + } + } else { + content = fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat) + } + + tempFilePath := filepath.Join(os.TempDir(), "passenger-status"+fileExtension) + ioutil.WriteFile(tempFilePath, []byte(content), 0700) + + return tempFilePath } -func teardown() { - os.Remove("/tmp/passenger-status") +func teardown(tempFilePath string) { + os.Remove(tempFilePath) } func Test_Invalid_Passenger_Status_Cli(t *testing.T) { @@ -29,28 +46,28 @@ func Test_Invalid_Passenger_Status_Cli(t *testing.T) { err := r.Gather(&acc) require.Error(t, err) - assert.Equal(t, err.Error(), `exec: "an-invalid-command": executable file not found in $PATH`) + assert.Contains(t, err.Error(), `exec: "an-invalid-command": executable file not found in `) } func Test_Invalid_Xml(t *testing.T) { - fakePassengerStatus("invalid xml") - defer teardown() + tempFilePath := fakePassengerStatus("invalid xml") + defer teardown(tempFilePath) r := &passenger{ - Command: "/tmp/passenger-status", + Command: tempFilePath, } var acc testutil.Accumulator err := r.Gather(&acc) require.Error(t, err) - assert.Equal(t, err.Error(), "Cannot parse input with error: EOF\n") + assert.Equal(t, "Cannot parse input with error: EOF\n", err.Error()) } // We test this by ensure that the error message match the path of default cli func Test_Default_Config_Load_Default_Command(t *testing.T) { - fakePassengerStatus("invalid xml") - defer teardown() + tempFilePath := fakePassengerStatus("invalid xml") + defer teardown(tempFilePath) r := &passenger{} @@ -58,16 +75,16 @@ func Test_Default_Config_Load_Default_Command(t *testing.T) { err := r.Gather(&acc) require.Error(t, err) - assert.Equal(t, err.Error(), "exec: \"passenger-status\": executable file not found in $PATH") + assert.Contains(t, err.Error(), "exec: \"passenger-status\": executable file not found in ") } func TestPassengerGenerateMetric(t *testing.T) { - fakePassengerStatus(sampleStat) - defer teardown() + tempFilePath := fakePassengerStatus(sampleStat) + defer teardown(tempFilePath) //Now we tested again above server, with our authentication data r := &passenger{ - Command: "/tmp/passenger-status", + Command: tempFilePath, } var acc testutil.Accumulator diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 7be2e6a27dbf8..6db740df45e66 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -1,3 +1,8 @@ +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package phpfpm import ( diff --git a/plugins/inputs/postfix/postfix.go b/plugins/inputs/postfix/postfix.go index 8700362d0d63f..87e11a195add0 100644 --- a/plugins/inputs/postfix/postfix.go +++ b/plugins/inputs/postfix/postfix.go @@ -1,3 +1,7 @@ +// +build !windows + +// postfix doesn't aim for Windows + package postfix import ( diff --git a/plugins/inputs/postfix/postfix_test.go b/plugins/inputs/postfix/postfix_test.go index 5dbc91d13e23f..ad997eebdbbe7 100644 --- a/plugins/inputs/postfix/postfix_test.go +++ b/plugins/inputs/postfix/postfix_test.go @@ -1,3 +1,5 @@ +// +build !windows + package postfix import ( diff --git a/plugins/inputs/postfix/postfix_windows.go b/plugins/inputs/postfix/postfix_windows.go new file mode 100644 index 0000000000000..122c1543da55d --- /dev/null +++ b/plugins/inputs/postfix/postfix_windows.go @@ -0,0 +1,3 @@ +// +build windows + +package postfix diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index fe64be5db62eb..0fe3e31058bd0 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -3,11 +3,14 @@ package powerdns import ( "fmt" "net" + "os" + "path/filepath" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) type statServer struct{} @@ -71,7 +74,8 @@ func (s statServer) serverSocket(l net.Listener) { func TestPowerdnsGeneratesMetrics(t *testing.T) { // We create a fake server to return test data randomNumber := int64(5239846799706671610) - socket, err := net.Listen("unix", fmt.Sprintf("/tmp/pdns%d.controlsocket", randomNumber)) + sockname := filepath.Join(os.TempDir(), fmt.Sprintf("pdns%d.controlsocket", randomNumber)) + socket, err := net.Listen("unix", sockname) if err != nil { t.Fatal("Cannot initialize server on port ") } @@ -82,11 +86,10 @@ func TestPowerdnsGeneratesMetrics(t *testing.T) { go s.serverSocket(socket) p := &Powerdns{ - UnixSockets: []string{fmt.Sprintf("/tmp/pdns%d.controlsocket", randomNumber)}, + UnixSockets: []string{sockname}, } var acc testutil.Accumulator - err = acc.GatherError(p.Gather) require.NoError(t, err) diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go index d0f5690cc31cb..25d39dcd45560 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go @@ -99,8 +99,8 @@ var intOverflowMetrics = "all-outqueries\t18446744073709550195\nanswers-slow\t36 "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n" func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { - if runtime.GOOS == "darwin" { - t.Skip("Skipping test on darwin") + if runtime.GOOS == "darwin" || runtime.GOOS == "windows" { + t.Skip("Skipping on windows and darwin, as unixgram sockets are not supported") } // We create a fake server to return test data controlSocket := "/tmp/pdns5724354148158589552.controlsocket" diff --git a/plugins/inputs/redfish/redfish_test.go b/plugins/inputs/redfish/redfish_test.go index 8821b3d97557f..c3c6f0d104719 100644 --- a/plugins/inputs/redfish/redfish_test.go +++ b/plugins/inputs/redfish/redfish_test.go @@ -8,9 +8,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestDellApis(t *testing.T) { @@ -643,7 +645,6 @@ func checkAuth(r *http.Request, username, password string) bool { } func TestConnection(t *testing.T) { - r := &Redfish{ Address: "http://127.0.0.1", Username: "test", @@ -654,8 +655,10 @@ func TestConnection(t *testing.T) { var acc testutil.Accumulator r.Init() err := r.Gather(&acc) - require.Error(t, err) - require.Contains(t, err.Error(), "connect: connection refused") + if assert.Error(t, err) { + _, ok := err.(*url.Error) + assert.True(t, ok) + } } func TestInvalidUsernameorPassword(t *testing.T) { diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 583b2dc847282..9d9d7df2351ee 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -640,8 +640,8 @@ func TestGather(t *testing.T) { assert.Len(t, m.Fields, 2) assert.Equal(t, 234, m.Fields["myfield2"]) assert.Equal(t, "baz", m.Fields["myfield3"]) - assert.True(t, tstart.Before(m.Time)) - assert.True(t, tstop.After(m.Time)) + assert.True(t, !tstart.After(m.Time)) + assert.True(t, !tstop.Before(m.Time)) m2 := acc.Metrics[1] assert.Equal(t, "myOtherTable", m2.Measurement) diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index a46add15cf61b..03d0c045307c9 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -9,6 +9,7 @@ import ( "net" "os" "path/filepath" + "runtime" "testing" "time" @@ -138,7 +139,8 @@ func TestSocketListener_unix(t *testing.T) { defer testEmptyLog(t)() - os.Create(sock) + f, _ := os.Create(sock) + f.Close() sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "unix://" + sock @@ -156,6 +158,10 @@ func TestSocketListener_unix(t *testing.T) { } func TestSocketListener_unixgram(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping on Windows, as unixgram sockets are not supported") + } + tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go index d0352c6ae1c7f..544b31929f123 100644 --- a/plugins/inputs/syslog/nontransparent_test.go +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -9,11 +9,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func getTestCasesForNonTransparent() []testCaseStream { diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index 210b64dbe11c8..87909fcec2dd3 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -10,11 +10,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func getTestCasesForOctetCounting() []testCaseStream { diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index 31007bad928a3..2a6d937fb288e 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -6,13 +6,15 @@ import ( "net" "os" "path/filepath" + "runtime" "sync/atomic" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func getTestCasesForRFC5426() []testCasePacket { @@ -284,6 +286,10 @@ func TestStrict_udp(t *testing.T) { } func TestBestEffort_unixgram(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping on Windows, as unixgram sockets are not supported") + } + tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) @@ -293,6 +299,10 @@ func TestBestEffort_unixgram(t *testing.T) { } func TestStrict_unixgram(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping on Windows, as unixgram sockets are not supported") + } + tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 6b3615a3e80ce..17b9b77a52c4f 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -7,6 +7,7 @@ import ( "net" "net/url" "os" + "path/filepath" "strings" "sync" "time" @@ -16,6 +17,7 @@ import ( "github.com/influxdata/go-syslog/v2/nontransparent" "github.com/influxdata/go-syslog/v2/octetcounting" "github.com/influxdata/go-syslog/v2/rfc5424" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" framing "github.com/influxdata/telegraf/internal/syslog" @@ -195,7 +197,10 @@ func getAddressParts(a string) (string, string, error) { return "", "", fmt.Errorf("missing protocol within address '%s'", a) } - u, _ := url.Parse(a) + u, err := url.Parse(filepath.ToSlash(a)) //convert backslashes to slashes (to make Windows path a valid URL) + if err != nil { + return "", "", fmt.Errorf("could not parse address '%s': %v", a, err) + } switch u.Scheme { case "unix", "unixpacket", "unixgram": return parts[0], parts[1], nil diff --git a/plugins/inputs/syslog/syslog_test.go b/plugins/inputs/syslog/syslog_test.go index 66568380e95a6..ac0539d30e1af 100644 --- a/plugins/inputs/syslog/syslog_test.go +++ b/plugins/inputs/syslog/syslog_test.go @@ -4,12 +4,14 @@ import ( "io/ioutil" "os" "path/filepath" + "runtime" "strings" "testing" "time" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const ( @@ -49,13 +51,16 @@ func TestAddress(t *testing.T) { require.NoError(t, err) sock := filepath.Join(tmpdir, "syslog.TestAddress.sock") - rec = &Syslog{ - Address: "unixgram://" + sock, + if runtime.GOOS != "windows" { + // Skipping on Windows, as unixgram sockets are not supported + rec = &Syslog{ + Address: "unixgram://" + sock, + } + err = rec.Start(&testutil.Accumulator{}) + require.NoError(t, err) + require.Equal(t, sock, rec.Address) + rec.Stop() } - err = rec.Start(&testutil.Accumulator{}) - require.NoError(t, err) - require.Equal(t, sock, rec.Address) - rec.Stop() // Default port is 6514 rec = &Syslog{ diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 5669fbf2e6ea7..83f6c9e7823ca 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -5,11 +5,13 @@ import ( "io/ioutil" "log" "os" - "runtime" - "strings" + "path/filepath" "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/parsers" @@ -17,8 +19,10 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" +) + +var ( + testdataDir = getTestdataDir() ) func TestTailBadLine(t *testing.T) { @@ -58,7 +62,7 @@ func TestTailBadLine(t *testing.T) { assert.Contains(t, buf.String(), "Malformed log line") } -func TestTailDosLineendings(t *testing.T) { +func TestTailDosLineEndings(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -92,14 +96,13 @@ func TestTailDosLineendings(t *testing.T) { } func TestGrokParseLogFilesWithMultiline(t *testing.T) { - thisdir := getCurrentDir() //we make sure the timeout won't kick in duration, _ := time.ParseDuration("100s") tt := NewTail() tt.Log = testutil.Logger{} tt.FromBeginning = true - tt.Files = []string{thisdir + "testdata/test_multiline.log"} + tt.Files = []string{filepath.Join(testdataDir, "test_multiline.log")} tt.MultilineConfig = MultilineConfig{ Pattern: `^[^\[]`, MatchWhichLine: Previous, @@ -117,7 +120,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { acc.Wait(3) - expectedPath := thisdir + "testdata/test_multiline.log" + expectedPath := filepath.Join(testdataDir, "test_multiline.log") acc.AssertContainsTaggedFields(t, "tail_grok", map[string]interface{}{ "message": "HelloExample: This is debug", @@ -151,7 +154,7 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { require.NoError(t, err) defer os.Remove(tmpfile.Name()) - // This seems neccessary in order to get the test to read the following lines. + // This seems necessary in order to get the test to read the following lines. _, err = tmpfile.WriteString("[04/Jun/2016:12:41:48 +0100] INFO HelloExample: This is fluff\r\n") require.NoError(t, err) require.NoError(t, tmpfile.Sync()) @@ -209,14 +212,13 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { } func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *testing.T) { - thisdir := getCurrentDir() //we make sure the timeout won't kick in duration := 100 * time.Second tt := NewTail() tt.Log = testutil.Logger{} tt.FromBeginning = true - tt.Files = []string{thisdir + "testdata/test_multiline.log"} + tt.Files = []string{filepath.Join(testdataDir, "test_multiline.log")} tt.MultilineConfig = MultilineConfig{ Pattern: `^[^\[]`, MatchWhichLine: Previous, @@ -236,7 +238,7 @@ func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *test tt.Stop() acc.Wait(4) - expectedPath := thisdir + "testdata/test_multiline.log" + expectedPath := filepath.Join(testdataDir, "test_multiline.log") acc.AssertContainsTaggedFields(t, "tail_grok", map[string]interface{}{ "message": "HelloExample: This is warn", @@ -251,7 +253,7 @@ func createGrokParser() (parsers.Parser, error) { grokConfig := &parsers.Config{ MetricName: "tail_grok", GrokPatterns: []string{"%{TEST_LOG_MULTILINE}"}, - GrokCustomPatternFiles: []string{getCurrentDir() + "testdata/test-patterns"}, + GrokCustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, DataFormat: "grok", } parser, err := parsers.NewParser(grokConfig) @@ -374,11 +376,6 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) { testutil.IgnoreTime()) } -func getCurrentDir() string { - _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "tail_test.go", "", 1) -} - func TestCharacterEncoding(t *testing.T) { full := []telegraf.Metric{ testutil.MustMetric("cpu", @@ -437,7 +434,7 @@ func TestCharacterEncoding(t *testing.T) { { name: "utf-8", plugin: &Tail{ - Files: []string{"testdata/cpu-utf-8.influx"}, + Files: []string{filepath.Join(testdataDir, "cpu-utf-8.influx")}, FromBeginning: true, MaxUndeliveredLines: 1000, Log: testutil.Logger{}, @@ -448,7 +445,7 @@ func TestCharacterEncoding(t *testing.T) { { name: "utf-8 seek", plugin: &Tail{ - Files: []string{"testdata/cpu-utf-8.influx"}, + Files: []string{filepath.Join(testdataDir, "cpu-utf-8.influx")}, MaxUndeliveredLines: 1000, Log: testutil.Logger{}, CharacterEncoding: "utf-8", @@ -459,7 +456,7 @@ func TestCharacterEncoding(t *testing.T) { { name: "utf-16le", plugin: &Tail{ - Files: []string{"testdata/cpu-utf-16le.influx"}, + Files: []string{filepath.Join(testdataDir, "cpu-utf-16le.influx")}, FromBeginning: true, MaxUndeliveredLines: 1000, Log: testutil.Logger{}, @@ -470,7 +467,7 @@ func TestCharacterEncoding(t *testing.T) { { name: "utf-16le seek", plugin: &Tail{ - Files: []string{"testdata/cpu-utf-16le.influx"}, + Files: []string{filepath.Join(testdataDir, "cpu-utf-16le.influx")}, MaxUndeliveredLines: 1000, Log: testutil.Logger{}, CharacterEncoding: "utf-16le", @@ -481,7 +478,7 @@ func TestCharacterEncoding(t *testing.T) { { name: "utf-16be", plugin: &Tail{ - Files: []string{"testdata/cpu-utf-16be.influx"}, + Files: []string{filepath.Join(testdataDir, "cpu-utf-16be.influx")}, FromBeginning: true, MaxUndeliveredLines: 1000, Log: testutil.Logger{}, @@ -565,3 +562,13 @@ func TestTailEOF(t *testing.T) { err = tmpfile.Close() require.NoError(t, err) } + +func getTestdataDir() string { + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + return filepath.Join(dir, "testdata") +} diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 983926af16aeb..e5c2f835baacf 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -10,6 +10,7 @@ import ( "io/ioutil" "net" "net/url" + "path/filepath" "strings" "time" @@ -60,6 +61,9 @@ func (c *X509Cert) locationToURL(location string) (*url.URL, error) { if strings.HasPrefix(location, "/") { location = "file://" + location } + if strings.Index(location, ":\\") == 1 { + location = "file://" + filepath.ToSlash(location) + } u, err := url.Parse(location) if err != nil { diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index c3452445739f1..041d20db787ea 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -7,6 +7,8 @@ import ( "io/ioutil" "math/big" "os" + "path/filepath" + "runtime" "testing" "time" @@ -51,7 +53,7 @@ func TestGatherRemote(t *testing.T) { {name: "wrong port", server: ":99999", error: true}, {name: "no server", timeout: 5}, {name: "successful https", server: "https://example.org:443", timeout: 5}, - {name: "successful file", server: "file://" + tmpfile.Name(), timeout: 5}, + {name: "successful file", server: "file://" + filepath.ToSlash(tmpfile.Name()), timeout: 5}, {name: "unsupported scheme", server: "foo://", timeout: 5, error: true}, {name: "no certificate", timeout: 5, unset: true, error: true}, {name: "closed connection", close: true, error: true}, @@ -166,9 +168,11 @@ func TestGatherLocal(t *testing.T) { t.Fatal(err) } - err = f.Chmod(test.mode) - if err != nil { - t.Fatal(err) + if runtime.GOOS != "windows" { + err = f.Chmod(test.mode) + if err != nil { + t.Fatal(err) + } } err = f.Close() diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go index 14b25e6c570ff..98ae51b8df79a 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -6,6 +6,7 @@ import ( "net" "os" "path/filepath" + "runtime" "sync" "testing" @@ -66,6 +67,10 @@ func TestSocketWriter_unix(t *testing.T) { } func TestSocketWriter_unixgram(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping on Windows, as unixgram sockets are not supported") + } + tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) diff --git a/plugins/processors/ifname/ifname_test.go b/plugins/processors/ifname/ifname_test.go index 85ddc767411c0..25e130e3a7ac7 100644 --- a/plugins/processors/ifname/ifname_test.go +++ b/plugins/processors/ifname/ifname_test.go @@ -6,12 +6,13 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/snmp" si "github.com/influxdata/telegraf/plugins/inputs/snmp" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestTable(t *testing.T) { diff --git a/plugins/processors/ifname/ttl_cache.go b/plugins/processors/ifname/ttl_cache.go index 8f9c4ae653499..99cbab1d5a9f1 100644 --- a/plugins/processors/ifname/ttl_cache.go +++ b/plugins/processors/ifname/ttl_cache.go @@ -1,6 +1,7 @@ package ifname import ( + "runtime" "time" ) @@ -30,6 +31,15 @@ func (c *TTLCache) Get(key keyType) (valType, bool, time.Duration) { if !ok { return valType{}, false, 0 } + + if runtime.GOOS == "windows" { + // Sometimes on Windows `c.now().Sub(v.time) == 0` due to clock resolution issues: + // https://github.com/golang/go/issues/17696 + // https://github.com/golang/go/issues/29485 + // Force clock to refresh: + time.Sleep(time.Nanosecond) + } + age := c.now().Sub(v.time) if age < c.validDuration { return v.val, ok, age diff --git a/plugins/processors/reverse_dns/rdnscache.go b/plugins/processors/reverse_dns/rdnscache.go index 1d86b5385d218..cc9574552dae8 100644 --- a/plugins/processors/reverse_dns/rdnscache.go +++ b/plugins/processors/reverse_dns/rdnscache.go @@ -111,7 +111,7 @@ func (d *ReverseDNSCache) lookup(ip string) ([]string, error) { // check if the value is cached d.rwLock.RLock() result, found := d.lockedGetFromCache(ip) - if found && result.completed && result.expiresAt.After(time.Now()) { + if found && result.completed && !result.expiresAt.Before(time.Now()) { defer d.rwLock.RUnlock() atomic.AddUint64(&d.stats.CacheHit, 1) // cache is valid @@ -176,7 +176,7 @@ func (d *ReverseDNSCache) subscribeTo(ip string) callbackChannelType { // the dnslookup that is returned until you clone it. func (d *ReverseDNSCache) lockedGetFromCache(ip string) (lookup *dnslookup, found bool) { lookup, found = d.cache[ip] - if found && lookup.expiresAt.Before(time.Now()) { + if found && !lookup.expiresAt.After(time.Now()) { return nil, false } return lookup, found @@ -185,7 +185,7 @@ func (d *ReverseDNSCache) lockedGetFromCache(ip string) (lookup *dnslookup, foun // lockedSaveToCache stores a lookup in the correct internal ip cache. // you MUST first do a write lock before calling it. func (d *ReverseDNSCache) lockedSaveToCache(lookup *dnslookup) { - if lookup.expiresAt.Before(time.Now()) { + if !lookup.expiresAt.After(time.Now()) { return // don't cache. } d.cache[lookup.ip] = lookup @@ -277,7 +277,7 @@ func (d *ReverseDNSCache) cleanup() { } ipsToDelete := []string{} for i := 0; i < len(d.expireList); i++ { - if d.expireList[i].expiresAt.After(now) { + if !d.expireList[i].expiresAt.Before(now) { break // done. Nothing after this point is expired. } ipsToDelete = append(ipsToDelete, d.expireList[i].ip) diff --git a/plugins/processors/reverse_dns/reversedns_test.go b/plugins/processors/reverse_dns/reversedns_test.go index 499dffb77e08b..660c25e3015e8 100644 --- a/plugins/processors/reverse_dns/reversedns_test.go +++ b/plugins/processors/reverse_dns/reversedns_test.go @@ -1,13 +1,15 @@ package reverse_dns import ( + "runtime" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestSimpleReverseLookup(t *testing.T) { @@ -40,7 +42,10 @@ func TestSimpleReverseLookup(t *testing.T) { processedMetric := acc.GetTelegrafMetrics()[0] f, ok := processedMetric.GetField("source_name") require.True(t, ok) - require.EqualValues(t, "localhost", f) + if runtime.GOOS != "windows" { + // lookupAddr on Windows works differently than on Linux so `source_name` won't be "localhost" on every environment + require.EqualValues(t, "localhost", f) + } tag, ok := processedMetric.GetTag("dest_name") require.True(t, ok) From 8b30bb9534a7d348232d2011b79d66422d293322 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 23 Nov 2020 15:51:58 -0500 Subject: [PATCH 075/761] Enable kafka zstd compression and idempotent writes (#8435) --- plugins/common/kafka/config.go | 94 ++++++++++++++++ plugins/inputs/kafka_consumer/README.md | 10 +- .../inputs/kafka_consumer/kafka_consumer.go | 58 +++------- .../kafka_consumer/kafka_consumer_test.go | 54 ++++++---- plugins/outputs/kafka/README.md | 17 +-- plugins/outputs/kafka/kafka.go | 102 +++++------------- plugins/outputs/kafka/kafka_test.go | 11 +- 7 files changed, 194 insertions(+), 152 deletions(-) create mode 100644 plugins/common/kafka/config.go diff --git a/plugins/common/kafka/config.go b/plugins/common/kafka/config.go new file mode 100644 index 0000000000000..f68030403b3c3 --- /dev/null +++ b/plugins/common/kafka/config.go @@ -0,0 +1,94 @@ +package kafka + +import ( + "log" + + "github.com/Shopify/sarama" + "github.com/influxdata/telegraf/plugins/common/tls" +) + +// ReadConfig for kafka clients meaning to read from Kafka. +type ReadConfig struct { + Config +} + +// SetConfig on the sarama.Config object from the ReadConfig struct. +func (k *ReadConfig) SetConfig(config *sarama.Config) error { + config.Consumer.Return.Errors = true + + return k.Config.SetConfig(config) +} + +// WriteConfig for kafka clients meaning to write to kafka +type WriteConfig struct { + Config + + RequiredAcks int `toml:"required_acks"` + MaxRetry int `toml:"max_retry"` + MaxMessageBytes int `toml:"max_message_bytes"` + IdempotentWrites bool `toml:"idempotent_writes"` +} + +// SetConfig on the sarama.Config object from the WriteConfig struct. +func (k *WriteConfig) SetConfig(config *sarama.Config) error { + config.Producer.Return.Successes = true + config.Producer.Idempotent = k.IdempotentWrites + config.Producer.Retry.Max = k.MaxRetry + if k.MaxMessageBytes > 0 { + config.Producer.MaxMessageBytes = k.MaxMessageBytes + } + config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks) + return k.Config.SetConfig(config) +} + +// Config common to all Kafka clients. +type Config struct { + SASLAuth + tls.ClientConfig + + Version string `toml:"version"` + ClientID string `toml:"client_id"` + CompressionCodec int `toml:"compression_codec"` + + // EnableTLS deprecated + EnableTLS *bool `toml:"enable_tls"` +} + +// SetConfig on the sarama.Config object from the Config struct. +func (k *Config) SetConfig(config *sarama.Config) error { + if k.EnableTLS != nil { + log.Printf("W! [kafka] enable_tls is deprecated, and the setting does nothing, you can safely remove it from the config") + } + if k.Version != "" { + version, err := sarama.ParseKafkaVersion(k.Version) + if err != nil { + return err + } + + config.Version = version + } + + if k.ClientID != "" { + config.ClientID = k.ClientID + } else { + config.ClientID = "Telegraf" + } + + config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec) + + tlsConfig, err := k.ClientConfig.TLSConfig() + if err != nil { + return err + } + + if tlsConfig != nil { + config.Net.TLS.Config = tlsConfig + config.Net.TLS.Enable = true + } + + if err := k.SetSASLConfig(config); err != nil { + return err + } + + return nil +} diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 3535f8fce5b5a..ac04925a23d14 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -35,7 +35,7 @@ and use the old zookeeper connection method. # insecure_skip_verify = false ## SASL authentication credentials. These settings should typically be used - ## with TLS encryption enabled using the "enable_tls" option. + ## with TLS encryption enabled # sasl_username = "kafka" # sasl_password = "secret" @@ -62,6 +62,14 @@ and use the old zookeeper connection method. ## Name of the consumer group. # consumer_group = "telegraf_metrics_consumers" + ## Compression codec represents the various compression codecs recognized by + ## Kafka in messages. + ## 0 : None + ## 1 : Gzip + ## 2 : Snappy + ## 3 : LZ4 + ## 4 : ZSTD + # compression_codec = 0 ## Initial offset position; one of "oldest" or "newest". # offset = "oldest" diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index a0b4b41cf6167..78feacdd30850 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -12,7 +12,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/kafka" - "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) @@ -36,7 +35,6 @@ const sampleConfig = ` # version = "" ## Optional TLS Config - # enable_tls = true # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" @@ -44,7 +42,7 @@ const sampleConfig = ` # insecure_skip_verify = false ## SASL authentication credentials. These settings should typically be used - ## with TLS encryption enabled using the "enable_tls" option. + ## with TLS encryption enabled # sasl_username = "kafka" # sasl_password = "secret" @@ -71,6 +69,15 @@ const sampleConfig = ` ## Name of the consumer group. # consumer_group = "telegraf_metrics_consumers" + ## Compression codec represents the various compression codecs recognized by + ## Kafka in messages. + ## 0 : None + ## 1 : Gzip + ## 2 : Snappy + ## 3 : LZ4 + ## 4 : ZSTD + # compression_codec = 0 + ## Initial offset position; one of "oldest" or "newest". # offset = "oldest" @@ -110,7 +117,6 @@ type semaphore chan empty type KafkaConsumer struct { Brokers []string `toml:"brokers"` - ClientID string `toml:"client_id"` ConsumerGroup string `toml:"consumer_group"` MaxMessageLen int `toml:"max_message_len"` MaxUndeliveredMessages int `toml:"max_undelivered_messages"` @@ -118,12 +124,8 @@ type KafkaConsumer struct { BalanceStrategy string `toml:"balance_strategy"` Topics []string `toml:"topics"` TopicTag string `toml:"topic_tag"` - Version string `toml:"version"` - kafka.SASLAuth - - EnableTLS *bool `toml:"enable_tls"` - tls.ClientConfig + kafka.ReadConfig Log telegraf.Logger `toml:"-"` @@ -173,50 +175,14 @@ func (k *KafkaConsumer) Init() error { } config := sarama.NewConfig() - config.Consumer.Return.Errors = true // Kafka version 0.10.2.0 is required for consumer groups. config.Version = sarama.V0_10_2_0 - if k.Version != "" { - version, err := sarama.ParseKafkaVersion(k.Version) - if err != nil { - return err - } - - config.Version = version - } - - if k.EnableTLS != nil && *k.EnableTLS { - config.Net.TLS.Enable = true - } - - tlsConfig, err := k.ClientConfig.TLSConfig() - if err != nil { - return err - } - - if tlsConfig != nil { - config.Net.TLS.Config = tlsConfig - - // To maintain backwards compatibility, if the enable_tls option is not - // set TLS is enabled if a non-default TLS config is used. - if k.EnableTLS == nil { - k.Log.Warnf("Use of deprecated configuration: enable_tls should be set when using TLS") - config.Net.TLS.Enable = true - } - } - - if err := k.SetSASLConfig(config); err != nil { + if err := k.SetConfig(config); err != nil { return err } - if k.ClientID != "" { - config.ClientID = k.ClientID - } else { - config.ClientID = "Telegraf" - } - switch strings.ToLower(k.Offset) { case "oldest", "": config.Consumer.Offsets.Initial = sarama.OffsetOldest diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 5973fa82a6629..d7804a01b87e1 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -7,6 +7,7 @@ import ( "github.com/Shopify/sarama" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/testutil" @@ -68,8 +69,12 @@ func TestInit(t *testing.T) { { name: "parses valid version string", plugin: &KafkaConsumer{ - Version: "1.0.0", - Log: testutil.Logger{}, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + Version: "1.0.0", + }, + }, + Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { require.Equal(t, plugin.config.Version, sarama.V1_0_0_0) @@ -78,16 +83,24 @@ func TestInit(t *testing.T) { { name: "invalid version string", plugin: &KafkaConsumer{ - Version: "100", - Log: testutil.Logger{}, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + Version: "100", + }, + }, + Log: testutil.Logger{}, }, initError: true, }, { name: "custom client_id", plugin: &KafkaConsumer{ - ClientID: "custom", - Log: testutil.Logger{}, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + ClientID: "custom", + }, + }, + Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { require.Equal(t, plugin.config.ClientID, "custom") @@ -123,8 +136,12 @@ func TestInit(t *testing.T) { { name: "default tls with a tls config", plugin: &KafkaConsumer{ - ClientConfig: tls.ClientConfig{ - InsecureSkipVerify: true, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, + }, }, Log: testutil.Logger{}, }, @@ -133,24 +150,17 @@ func TestInit(t *testing.T) { }, }, { - name: "disable tls", + name: "Insecure tls", plugin: &KafkaConsumer{ - EnableTLS: func() *bool { v := false; return &v }(), - ClientConfig: tls.ClientConfig{ - InsecureSkipVerify: true, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, + }, }, Log: testutil.Logger{}, }, - check: func(t *testing.T, plugin *KafkaConsumer) { - require.False(t, plugin.config.Net.TLS.Enable) - }, - }, - { - name: "enable tls", - plugin: &KafkaConsumer{ - EnableTLS: func() *bool { v := true; return &v }(), - Log: testutil.Logger{}, - }, check: func(t *testing.T, plugin *KafkaConsumer) { require.True(t, plugin.config.Net.TLS.Enable) }, diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 8c16ee0541f61..e76522018fb4a 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -72,13 +72,18 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## routing_key = "telegraf" # routing_key = "" - ## CompressionCodec represents the various compression codecs recognized by + ## Compression codec represents the various compression codecs recognized by ## Kafka in messages. - ## 0 : No compression - ## 1 : Gzip compression - ## 2 : Snappy compression - ## 3 : LZ4 compression - # compression_codec = 0 + ## 0 : None + ## 1 : Gzip + ## 2 : Snappy + ## 3 : LZ4 + ## 4 : ZSTD + # compression_codec = 0 + + ## Idempotent Writes + ## If enabled, exactly one copy of each message is written. + # idempotent_writes = false ## RequiredAcks is used in Produce Requests to tell the broker how many ## replica acknowledgements it must see before responding diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 5fdfae48d221b..ceb2b93a6e14b 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -11,7 +11,6 @@ import ( "github.com/gofrs/uuid" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/kafka" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) @@ -25,20 +24,13 @@ var ValidTopicSuffixMethods = []string{ var zeroTime = time.Unix(0, 0) type Kafka struct { - Brokers []string `toml:"brokers"` - Topic string `toml:"topic"` - TopicTag string `toml:"topic_tag"` - ExcludeTopicTag bool `toml:"exclude_topic_tag"` - ClientID string `toml:"client_id"` - TopicSuffix TopicSuffix `toml:"topic_suffix"` - RoutingTag string `toml:"routing_tag"` - RoutingKey string `toml:"routing_key"` - CompressionCodec int `toml:"compression_codec"` - RequiredAcks int `toml:"required_acks"` - MaxRetry int `toml:"max_retry"` - MaxMessageBytes int `toml:"max_message_bytes"` - - Version string `toml:"version"` + Brokers []string `toml:"brokers"` + Topic string `toml:"topic"` + TopicTag string `toml:"topic_tag"` + ExcludeTopicTag bool `toml:"exclude_topic_tag"` + TopicSuffix TopicSuffix `toml:"topic_suffix"` + RoutingTag string `toml:"routing_tag"` + RoutingKey string `toml:"routing_key"` // Legacy TLS config options // TLS client certificate @@ -48,10 +40,7 @@ type Kafka struct { // TLS certificate authority CA string - EnableTLS *bool `toml:"enable_tls"` - tlsint.ClientConfig - - kafka.SASLAuth + kafka.WriteConfig Log telegraf.Logger `toml:"-"` @@ -158,14 +147,19 @@ var sampleConfig = ` ## routing_key = "telegraf" # routing_key = "" - ## CompressionCodec represents the various compression codecs recognized by + ## Compression codec represents the various compression codecs recognized by ## Kafka in messages. - ## 0 : No compression - ## 1 : Gzip compression - ## 2 : Snappy compression - ## 3 : LZ4 compression + ## 0 : None + ## 1 : Gzip + ## 2 : Snappy + ## 3 : LZ4 + ## 4 : ZSTD # compression_codec = 0 + ## Idempotent Writes + ## If enabled, exactly one copy of each message is written. + # idempotent_writes = false + ## RequiredAcks is used in Produce Requests to tell the broker how many ## replica acknowledgements it must see before responding ## 0 : the producer never waits for an acknowledgement from the broker. @@ -191,7 +185,6 @@ var sampleConfig = ` # max_message_bytes = 1000000 ## Optional TLS Config - # enable_tls = true # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" @@ -278,34 +271,15 @@ func (k *Kafka) SetSerializer(serializer serializers.Serializer) { k.serializer = serializer } -func (k *Kafka) Connect() error { +func (k *Kafka) Init() error { err := ValidateTopicSuffixMethod(k.TopicSuffix.Method) if err != nil { return err } config := sarama.NewConfig() - if k.Version != "" { - version, err := sarama.ParseKafkaVersion(k.Version) - if err != nil { - return err - } - config.Version = version - } - - if k.ClientID != "" { - config.ClientID = k.ClientID - } else { - config.ClientID = "Telegraf" - } - - config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks) - config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec) - config.Producer.Retry.Max = k.MaxRetry - config.Producer.Return.Successes = true - - if k.MaxMessageBytes > 0 { - config.Producer.MaxMessageBytes = k.MaxMessageBytes + if err := k.SetConfig(config); err != nil { + return err } // Legacy support ssl config @@ -315,30 +289,6 @@ func (k *Kafka) Connect() error { k.TLSKey = k.Key } - if k.EnableTLS != nil && *k.EnableTLS { - config.Net.TLS.Enable = true - } - - tlsConfig, err := k.ClientConfig.TLSConfig() - if err != nil { - return err - } - - if tlsConfig != nil { - config.Net.TLS.Config = tlsConfig - - // To maintain backwards compatibility, if the enable_tls option is not - // set TLS is enabled if a non-default TLS config is used. - if k.EnableTLS == nil { - k.Log.Warnf("Use of deprecated configuration: enable_tls should be set when using TLS") - config.Net.TLS.Enable = true - } - } - - if err := k.SetSASLConfig(config); err != nil { - return err - } - producer, err := k.producerFunc(k.Brokers, config) if err != nil { return err @@ -347,6 +297,10 @@ func (k *Kafka) Connect() error { return nil } +func (k *Kafka) Connect() error { + return nil +} + func (k *Kafka) Close() error { return k.producer.Close() } @@ -436,8 +390,10 @@ func init() { sarama.Logger = &DebugLogger{} outputs.Add("kafka", func() telegraf.Output { return &Kafka{ - MaxRetry: 3, - RequiredAcks: -1, + WriteConfig: kafka.WriteConfig{ + MaxRetry: 3, + RequiredAcks: -1, + }, producerFunc: sarama.NewSyncProducer, } }) diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index 070eea3f91d9c..4e93515febc4b 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -25,13 +25,16 @@ func TestConnectAndWrite(t *testing.T) { brokers := []string{testutil.GetLocalHost() + ":9092"} s, _ := serializers.NewInfluxSerializer() k := &Kafka{ - Brokers: brokers, - Topic: "Test", - serializer: s, + Brokers: brokers, + Topic: "Test", + serializer: s, + producerFunc: sarama.NewSyncProducer, } // Verify that we can connect to the Kafka broker - err := k.Connect() + err := k.Init() + require.NoError(t, err) + err = k.Connect() require.NoError(t, err) // Verify that we can successfully write data to the kafka broker From 832925d9c894c6477fb0c34b3ba3913d28d0513e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Mon, 23 Nov 2020 23:21:36 +0100 Subject: [PATCH 076/761] [php-fpm] Fix possible "index out of range" (#8461) --- plugins/inputs/phpfpm/phpfpm.go | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 52907bb50749e..dd7d6a63074a3 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -7,7 +7,6 @@ import ( "io" "net/http" "net/url" - "os" "strconv" "strings" "sync" @@ -301,25 +300,18 @@ func globUnixSocket(url string) ([]string, error) { } paths := glob.Match() if len(paths) == 0 { - if _, err := os.Stat(paths[0]); err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Socket doesn't exist '%s': %s", pattern, err) - } - return nil, err - } - return nil, nil + return nil, fmt.Errorf("socket doesn't exist %q: %v", pattern, err) } - addrs := make([]string, 0, len(paths)) - + addresses := make([]string, 0, len(paths)) for _, path := range paths { if status != "" { path = path + ":" + status } - addrs = append(addrs, path) + addresses = append(addresses, path) } - return addrs, nil + return addresses, nil } func unixSocketPaths(addr string) (string, string) { From e28cccf2015b81184edb9828d219c9a1b2675af0 Mon Sep 17 00:00:00 2001 From: Igor Kuchmienko <56545352+IgorKuchmienko@users.noreply.github.com> Date: Tue, 24 Nov 2020 22:45:09 +0300 Subject: [PATCH 077/761] Fix typo in column name (#8468) --- plugins/inputs/sqlserver/azuresqlqueries.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go index fa4eb197723b1..4e936b98d3c8e 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqlqueries.go @@ -96,7 +96,7 @@ END SELECT 'sqlserver_azuredb_waitstats' AS [measurement] ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name'] + ,DB_NAME() as [database_name] ,dbws.[wait_type] ,dbws.[wait_time_ms] ,dbws.[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] From d312a5e6a8be0988c4048e7efac3388b9530e966 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 26 Nov 2020 12:16:17 -0500 Subject: [PATCH 078/761] Update CONTRIBUTING.md --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 897ac1377e6e7..cc89134c8eebb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,7 +13,7 @@ 1. Ensure you have added proper unit tests and documentation. 1. Open a new [pull request][]. -#### Contributing an External Plugin *(experimental)* +#### Contributing an External Plugin *(new)* Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](plugins/inputs/execd), [Execd Output](/plugins/inputs/execd), and [Execd Processor](plugins/processors/execd) Plugins without having to change the plugin code. Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. From a7096c8128f1181b902b7f051fb29e7ce8ed34b4 Mon Sep 17 00:00:00 2001 From: Tuamas Date: Thu, 26 Nov 2020 20:21:57 +0200 Subject: [PATCH 079/761] Translate snmp field values (#8466) --- plugins/inputs/snmp/README.md | 4 ++++ plugins/inputs/snmp/snmp.go | 13 +++++++++++++ plugins/inputs/snmp/snmp_mocks_generate.go | 1 + plugins/inputs/snmp/snmp_mocks_test.go | 1 + plugins/inputs/snmp/snmp_test.go | 13 +++++++++++++ 5 files changed, 32 insertions(+) diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index a0c9155db5432..ea6e7a95bbfa4 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -184,6 +184,10 @@ One [metric][] is created for each row of the SNMP table. ## path segments). Truncates the index after this point to remove non-fixed ## value or length index suffixes. # oid_index_length = 0 + + ## Specifies if the value of given field should be snmptranslated + ## by default no field values are translated + # translate = true ``` ### Troubleshooting diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 103b23d214485..623c9ba61ce23 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -237,6 +237,8 @@ type Field struct { // "hwaddr" will convert a 6-byte string to a MAC address. // "ipaddr" will convert the value to an IPv4 or IPv6 address. Conversion string + // Translate tells if the value of the field should be snmptranslated + Translate bool initialized bool } @@ -460,6 +462,17 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { }, idx) } + // snmptranslate table field value here + if f.Translate { + if entOid, ok := ent.Value.(string); ok { + _, _, oidText, _, err := SnmpTranslate(entOid) + if err == nil { + // If no error translating, the original value for ent.Value should be replaced + ent.Value = oidText + } + } + } + fv, err := fieldConvert(f.Conversion, ent.Value) if err != nil { return &walkError{ diff --git a/plugins/inputs/snmp/snmp_mocks_generate.go b/plugins/inputs/snmp/snmp_mocks_generate.go index c09dd004580da..7227771a7e4fa 100644 --- a/plugins/inputs/snmp/snmp_mocks_generate.go +++ b/plugins/inputs/snmp/snmp_mocks_generate.go @@ -23,6 +23,7 @@ var mockedCommands = [][]string{ {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1.0"}, {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.5"}, {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.2.3"}, + {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.7"}, {"snmptranslate", "-Td", "-Ob", ".iso.2.3"}, {"snmptranslate", "-Td", "-Ob", "-m", "all", ".999"}, {"snmptranslate", "-Td", "-Ob", "TEST::server"}, diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go index 56d9326f1d639..5b3bbd767e67d 100644 --- a/plugins/inputs/snmp/snmp_mocks_test.go +++ b/plugins/inputs/snmp/snmp_mocks_test.go @@ -69,6 +69,7 @@ var mockedCommandResults = map[string]mockedCommandResult{ "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": {stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": {stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.7": {stdout: "TEST::testTableEntry.7\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) std(0) testOID(0) testTable(0) testTableEntry(1) 7 }\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": {stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00TEST::server": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 9d9d7df2351ee..8368ed7385de7 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -81,6 +81,7 @@ var tsc = &testSNMPConnection{ ".1.0.0.1.3": []byte("byte slice"), ".1.0.0.2.1.5.0.9.9": 11, ".1.0.0.2.1.5.1.9.9": 22, + ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", }, } @@ -493,6 +494,16 @@ func TestTableBuild_walk(t *testing.T) { Oid: ".1.0.0.2.1.5", OidIndexLength: 1, }, + { + Name: "myfield6", + Oid: ".1.0.0.0.1.6", + Translate: true, + }, + { + Name: "myfield7", + Oid: ".1.0.0.0.1.6", + Translate: false, + }, }, } @@ -510,6 +521,8 @@ func TestTableBuild_walk(t *testing.T) { "myfield3": float64(0.123), "myfield4": 11, "myfield5": 11, + "myfield6": "testTableEntry.7", + "myfield7": ".1.0.0.0.1.7", }, } rtr2 := RTableRow{ From 4090c77275457648c2df3403b6f56ca5e8d1f410 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 26 Nov 2020 17:16:25 -0500 Subject: [PATCH 080/761] proxy support for http input (#8477) --- plugins/common/proxy/proxy.go | 24 ++++++++++++++++++++++++ plugins/inputs/http/README.md | 3 +++ plugins/inputs/http/http.go | 23 ++++++++++++++++++----- 3 files changed, 45 insertions(+), 5 deletions(-) create mode 100644 plugins/common/proxy/proxy.go diff --git a/plugins/common/proxy/proxy.go b/plugins/common/proxy/proxy.go new file mode 100644 index 0000000000000..4ef97f1eb52e8 --- /dev/null +++ b/plugins/common/proxy/proxy.go @@ -0,0 +1,24 @@ +package proxy + +import ( + "fmt" + "net/http" + "net/url" +) + +type HTTPProxy struct { + HTTPProxyURL string `toml:"http_proxy_url"` +} + +type proxyFunc func(req *http.Request) (*url.URL, error) + +func (p *HTTPProxy) Proxy() (proxyFunc, error) { + if len(p.HTTPProxyURL) > 0 { + url, err := url.Parse(p.HTTPProxyURL) + if err != nil { + return nil, fmt.Errorf("error parsing proxy url %q: %w", p.HTTPProxyURL, err) + } + return http.ProxyURL(url), nil + } + return http.ProxyFromEnvironment, nil +} diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md index 59abd82562672..a9c554cadbfb4 100644 --- a/plugins/inputs/http/README.md +++ b/plugins/inputs/http/README.md @@ -34,6 +34,9 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The # username = "username" # password = "pa$$word" + ## HTTP Proxy support + # http_proxy_url = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index c247d40076620..58a5bd51040ed 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -11,6 +11,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" @@ -29,6 +30,8 @@ type HTTP struct { Password string `toml:"password"` tls.ClientConfig + proxy.HTTPProxy + // Absolute path to file with Bearer token BearerToken string `toml:"bearer_token"` @@ -70,6 +73,9 @@ var sampleConfig = ` ## compress body or "identity" to apply no encoding. # content_encoding = "identity" + ## HTTP Proxy support + # http_proxy_url = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -106,12 +112,19 @@ func (h *HTTP) Init() error { return err } + proxy, err := h.HTTPProxy.Proxy() + if err != nil { + return err + } + + transport := &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: proxy, + } + h.client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsCfg, - Proxy: http.ProxyFromEnvironment, - }, - Timeout: h.Timeout.Duration, + Transport: transport, + Timeout: h.Timeout.Duration, } // Set default as [200] From f5d5a51c2179b4cc6741293ad995ec1b60397027 Mon Sep 17 00:00:00 2001 From: Olli-Pekka Lehto Date: Fri, 27 Nov 2020 10:24:26 -0600 Subject: [PATCH 081/761] Added "name" parameter to NATS output plugin (#8429) --- plugins/outputs/nats/README.md | 4 ++++ plugins/outputs/nats/nats.go | 8 ++++++++ plugins/outputs/nats/nats_test.go | 1 + 3 files changed, 13 insertions(+) diff --git a/plugins/outputs/nats/README.md b/plugins/outputs/nats/README.md index c5539900b02e0..1fb1a2b4b96ae 100644 --- a/plugins/outputs/nats/README.md +++ b/plugins/outputs/nats/README.md @@ -6,6 +6,10 @@ This plugin writes to a (list of) specified NATS instance(s). [[outputs.nats]] ## URLs of NATS servers servers = ["nats://localhost:4222"] + + ## Optional client name + # name = "" + ## Optional credentials # username = "" # password = "" diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go index bf1baae339876..50102b43a47dd 100644 --- a/plugins/outputs/nats/nats.go +++ b/plugins/outputs/nats/nats.go @@ -15,6 +15,7 @@ import ( type NATS struct { Servers []string `toml:"servers"` Secure bool `toml:"secure"` + Name string `toml:"name"` Username string `toml:"username"` Password string `toml:"password"` Credentials string `toml:"credentials"` @@ -30,6 +31,9 @@ var sampleConfig = ` ## URLs of NATS servers servers = ["nats://localhost:4222"] + ## Optional client name + # name = "" + ## Optional credentials # username = "" # password = "" @@ -73,6 +77,10 @@ func (n *NATS) Connect() error { opts = append(opts, nats.UserInfo(n.Username, n.Password)) } + if n.Name != "" { + opts = append(opts, nats.Name(n.Name)) + } + if n.Secure { tlsConfig, err := n.ClientConfig.TLSConfig() if err != nil { diff --git a/plugins/outputs/nats/nats_test.go b/plugins/outputs/nats/nats_test.go index 773dbaa6efdbd..432c9241875c4 100644 --- a/plugins/outputs/nats/nats_test.go +++ b/plugins/outputs/nats/nats_test.go @@ -17,6 +17,7 @@ func TestConnectAndWrite(t *testing.T) { s, _ := serializers.NewInfluxSerializer() n := &NATS{ Servers: server, + Name: "telegraf", Subject: "telegraf", serializer: s, } From 0ce55bbd4ad592d42a239e7f0640bc7c9790d8fa Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 27 Nov 2020 17:28:12 +0100 Subject: [PATCH 082/761] Starlark example dropbytype (#8438) --- plugins/processors/starlark/README.md | 2 + plugins/processors/starlark/starlark_test.go | 65 +++++++++++++++++++ .../drop_fields_with_unexpected_type.star | 26 ++++++++ .../starlark/testdata/drop_string_fields.star | 14 ++++ 4 files changed, 107 insertions(+) create mode 100644 plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star create mode 100644 plugins/processors/starlark/testdata/drop_string_fields.star diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index 1194845ea1ad8..c660342f456d4 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -179,6 +179,8 @@ def failing(metric): ### Examples +- [drop string fields](/plugins/processors/starlark/testdata/drop_string_fields.star) - Drop fields containing string values. +- [drop fields with unexpected type](/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star) - Drop fields containing unexpected value types. - [json](/plugins/processors/starlark/testdata/json.star) - an example of processing JSON from a field in a metric - [number logic](/plugins/processors/starlark/testdata/number_logic.star) - transform a numerical value to another numerical value - [pivot](/plugins/processors/starlark/testdata/pivot.star) - Pivots a key's value to be the key for another key. diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 8328c0bb0bb8b..afcb721025d55 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -2486,6 +2486,71 @@ func TestScript(t *testing.T) { ), }, }, + { + name: "drop fields by type", + plugin: &Starlark{ + Script: "testdata/drop_string_fields.star", + Log: testutil.Logger{}, + }, + input: []telegraf.Metric{ + testutil.MustMetric("device", + map[string]string{}, + map[string]interface{}{ + "a": 42, + "b": "42", + "c": 42.0, + "d": "42.0", + "e": true, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("device", + map[string]string{}, + map[string]interface{}{ + "a": 42, + "c": 42.0, + "e": true, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "drop fields with unexpected type", + plugin: &Starlark{ + Script: "testdata/drop_fields_with_unexpected_type.star", + Log: testutil.Logger{}, + }, + input: []telegraf.Metric{ + testutil.MustMetric("device", + map[string]string{}, + map[string]interface{}{ + "a": 42, + "b": "42", + "c": 42.0, + "d": "42.0", + "e": true, + "f": 23.0, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("device", + map[string]string{}, + map[string]interface{}{ + "a": 42, + "c": 42.0, + "d": "42.0", + "e": true, + "f": 23.0, + }, + time.Unix(0, 0), + ), + }, + }, { name: "scale", plugin: &Starlark{ diff --git a/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star b/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star new file mode 100644 index 0000000000000..601ce631f906b --- /dev/null +++ b/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star @@ -0,0 +1,26 @@ +# Drop fields if they NOT contain values of an expected type. +# +# In this example we ignore fields with an unknown expected type and do not drop them. +# +# Example Input: +# measurement,host=hostname a=1i,b=4.2,c=42.0,d="v3.14",e=true,f=23.0 1597255410000000000 +# measurement,host=hostname a=1i,b="somestring",c=42.0,d="v3.14",e=true,f=23.0 1597255410000000000 +# +# Example Output: +# measurement,host=hostname a=1i,b=4.2,c=42.0,d="v3.14",e=true,f=23.0 1597255410000000000 +# measurement,host=hostname a=1i,c=42.0,d="v3.14",e=true,f=23.0 1597255410000000000 + +expected_type = { + "a": "int", + "b": "float", + "c": "float", + "d": "string", + "e": "bool" +} + +def apply(metric): + for k, v in metric.fields.items(): + if type(v) != expected_type.get(k, type(v)): + metric.fields.pop(k) + + return metric diff --git a/plugins/processors/starlark/testdata/drop_string_fields.star b/plugins/processors/starlark/testdata/drop_string_fields.star new file mode 100644 index 0000000000000..d5c44e497c77c --- /dev/null +++ b/plugins/processors/starlark/testdata/drop_string_fields.star @@ -0,0 +1,14 @@ +# Drop fields if they contain a string. +# +# Example Input: +# measurement,host=hostname a=1,b="somestring" 1597255410000000000 +# +# Example Output: +# measurement,host=hostname a=1 1597255410000000000 + +def apply(metric): + for k, v in metric.fields.items(): + if type(v) == "string": + metric.fields.pop(k) + + return metric From d536f610cd5fd716e6619405ba723fb6ca8cced6 Mon Sep 17 00:00:00 2001 From: Vipin Menon Date: Fri, 27 Nov 2020 22:00:45 +0530 Subject: [PATCH 083/761] Support Riemann-Protobuff Listener (#8163) --- docs/LICENSE_OF_DEPENDENCIES.md | 2 + go.mod | 3 +- go.sum | 6 + plugins/inputs/all/all.go | 1 + plugins/inputs/riemann_listener/README.md | 42 ++ .../riemann_listener/riemann_listener.go | 399 ++++++++++++++++++ .../riemann_listener/riemann_listener_test.go | 55 +++ 7 files changed, 507 insertions(+), 1 deletion(-) create mode 100644 plugins/inputs/riemann_listener/README.md create mode 100644 plugins/inputs/riemann_listener/riemann_listener.go create mode 100644 plugins/inputs/riemann_listener/riemann_listener_test.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 66dc38b43eb08..971a95584f678 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -123,6 +123,7 @@ following works: - github.com/prometheus/common [Apache License 2.0](https://github.com/prometheus/common/blob/master/LICENSE) - github.com/prometheus/procfs [Apache License 2.0](https://github.com/prometheus/procfs/blob/master/LICENSE) - github.com/rcrowley/go-metrics [MIT License](https://github.com/rcrowley/go-metrics/blob/master/LICENSE) +- github.com/riemann/riemann-go-client [MIT License](https://github.com/riemann/riemann-go-client/blob/master/LICENSE) - github.com/safchain/ethtool [Apache License 2.0](https://github.com/safchain/ethtool/blob/master/LICENSE) - github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) - github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE) @@ -172,6 +173,7 @@ following works: - gopkg.in/mgo.v2 [BSD 2-Clause "Simplified" License](https://github.com/go-mgo/mgo/blob/v2/LICENSE) - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) +- gopkg.in/tomb.v2 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v2/LICENSE) - gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE) - gopkg.in/yaml.v3 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v3/LICENSE) - modernc.org/libc [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/libc/-/blob/master/LICENSE) diff --git a/go.mod b/go.mod index ba5f17e8a5b48..84d4fd30ee9eb 100644 --- a/go.mod +++ b/go.mod @@ -108,6 +108,7 @@ require ( github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.9.1 github.com/prometheus/procfs v0.0.8 + github.com/riemann/riemann-go-client v0.5.0 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect @@ -149,7 +150,7 @@ require ( gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/olivere/elastic.v5 v5.0.70 gopkg.in/yaml.v2 v2.2.8 - gotest.tools v2.2.0+incompatible // indirect + gotest.tools v2.2.0+incompatible honnef.co/go/tools v0.0.1-2020.1.3 // indirect k8s.io/apimachinery v0.17.1 // indirect modernc.org/sqlite v1.7.4 diff --git a/go.sum b/go.sum index 23952fba5a959..a5632a2e0a752 100644 --- a/go.sum +++ b/go.sum @@ -257,6 +257,7 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= @@ -539,6 +540,8 @@ github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/riemann/riemann-go-client v0.5.0 h1:yPP7tz1vSYJkSZvZFCsMiDsHHXX57x8/fEX3qyEXuAA= +github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2eCbV1o7yqVwOBLbShQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= @@ -674,6 +677,7 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -906,6 +910,8 @@ gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 1d1b8eb58b463..6eb5dbb7aafef 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -146,6 +146,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/redis" _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" _ "github.com/influxdata/telegraf/plugins/inputs/riak" + _ "github.com/influxdata/telegraf/plugins/inputs/riemann_listener" _ "github.com/influxdata/telegraf/plugins/inputs/salesforce" _ "github.com/influxdata/telegraf/plugins/inputs/sensors" _ "github.com/influxdata/telegraf/plugins/inputs/sflow" diff --git a/plugins/inputs/riemann_listener/README.md b/plugins/inputs/riemann_listener/README.md new file mode 100644 index 0000000000000..54e70be6ecb71 --- /dev/null +++ b/plugins/inputs/riemann_listener/README.md @@ -0,0 +1,42 @@ +# Riemann Listener Input Plugin + +The Riemann Listener is a simple input plugin that listens for messages from +client that use riemann clients using riemann-protobuff format. + + +### Configuration: + +This is a sample configuration for the plugin. + +```toml +[[inputs.rimann_listener]] + ## URL to listen on + ## Default is "tcp://:5555" + # service_address = "tcp://:8094" + # service_address = "tcp://127.0.0.1:http" + # service_address = "tcp4://:8094" + # service_address = "tcp6://:8094" + # service_address = "tcp6://[2001:db8::1]:8094" + + ## Maximum number of concurrent connections. + ## 0 (default) is unlimited. + # max_connections = 1024 + ## Read timeout. + ## 0 (default) is unlimited. + # read_timeout = "30s" + ## Optional TLS configuration. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Enables client authentication if set. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + ## Maximum socket buffer size (in bytes when no unit specified). + # read_buffer_size = "64KiB" + ## Period between keep alive probes. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" +``` +Just like Riemann the default port is 5555. This can be configured, refer configuration above. + +Riemann `Service` is mapped as `measurement`. `metric` and `TTL` are converted into field values. +As Riemann tags as simply an array, they are converted into the `influx_line` format key-value, where both key and value are the tags. diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go new file mode 100644 index 0000000000000..45d1ef4db27f2 --- /dev/null +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -0,0 +1,399 @@ +package riemann_listener + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/binary" + "fmt" + "io" + "log" + "net" + "os" + "os/signal" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf/metric" + + "github.com/gogo/protobuf/proto" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + tlsint "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" + riemanngo "github.com/riemann/riemann-go-client" + riemangoProto "github.com/riemann/riemann-go-client/proto" +) + +type RiemannSocketListener struct { + ServiceAddress string `toml:"service_address"` + MaxConnections int `toml:"max_connections"` + ReadBufferSize internal.Size `toml:"read_buffer_size"` + ReadTimeout *internal.Duration `toml:"read_timeout"` + KeepAlivePeriod *internal.Duration `toml:"keep_alive_period"` + SocketMode string `toml:"socket_mode"` + tlsint.ServerConfig + + wg sync.WaitGroup + + Log telegraf.Logger + + telegraf.Accumulator +} +type setReadBufferer interface { + SetReadBuffer(bytes int) error +} + +type riemannListener struct { + net.Listener + *RiemannSocketListener + + sockType string + + connections map[string]net.Conn + connectionsMtx sync.Mutex +} + +func (rsl *riemannListener) listen(ctx context.Context) { + rsl.connections = map[string]net.Conn{} + + wg := sync.WaitGroup{} + + select { + case <-ctx.Done(): + rsl.closeAllConnections() + wg.Wait() + return + default: + for { + c, err := rsl.Accept() + if err != nil { + if !strings.HasSuffix(err.Error(), ": use of closed network connection") { + rsl.Log.Error(err.Error()) + } + break + } + + if rsl.ReadBufferSize.Size > 0 { + if srb, ok := c.(setReadBufferer); ok { + srb.SetReadBuffer(int(rsl.ReadBufferSize.Size)) + } else { + rsl.Log.Warnf("Unable to set read buffer on a %s socket", rsl.sockType) + } + } + + rsl.connectionsMtx.Lock() + if rsl.MaxConnections > 0 && len(rsl.connections) >= rsl.MaxConnections { + rsl.connectionsMtx.Unlock() + c.Close() + continue + } + rsl.connections[c.RemoteAddr().String()] = c + rsl.connectionsMtx.Unlock() + + if err := rsl.setKeepAlive(c); err != nil { + rsl.Log.Errorf("Unable to configure keep alive %q: %s", rsl.ServiceAddress, err.Error()) + } + + wg.Add(1) + go func() { + defer wg.Done() + rsl.read(c) + }() + } + rsl.closeAllConnections() + wg.Wait() + } +} + +func (rsl *riemannListener) closeAllConnections() { + rsl.connectionsMtx.Lock() + for _, c := range rsl.connections { + c.Close() + } + rsl.connectionsMtx.Unlock() +} + +func (rsl *riemannListener) setKeepAlive(c net.Conn) error { + if rsl.KeepAlivePeriod == nil { + return nil + } + tcpc, ok := c.(*net.TCPConn) + if !ok { + return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(rsl.ServiceAddress, "://", 2)[0]) + } + if rsl.KeepAlivePeriod.Duration == 0 { + return tcpc.SetKeepAlive(false) + } + if err := tcpc.SetKeepAlive(true); err != nil { + return err + } + return tcpc.SetKeepAlivePeriod(rsl.KeepAlivePeriod.Duration) +} + +func (rsl *riemannListener) removeConnection(c net.Conn) { + rsl.connectionsMtx.Lock() + delete(rsl.connections, c.RemoteAddr().String()) + rsl.connectionsMtx.Unlock() +} + +//Utilities + +/* +readMessages will read Riemann messages in binary format +from the TCP connection. byte Array p size will depend on the size +of the riemann message as sent by the cleint +*/ +func readMessages(r io.Reader, p []byte) error { + for len(p) > 0 { + n, err := r.Read(p) + p = p[n:] + if err != nil { + return err + } + } + return nil +} + +func checkError(err error) { + log.Println("The error is") + if err != nil { + log.Println(err.Error()) + } +} + +func (rsl *riemannListener) read(conn net.Conn) { + defer rsl.removeConnection(conn) + defer conn.Close() + var err error + + for { + if rsl.ReadTimeout != nil && rsl.ReadTimeout.Duration > 0 { + + err = conn.SetDeadline(time.Now().Add(rsl.ReadTimeout.Duration)) + } + + messagePb := &riemangoProto.Msg{} + var header uint32 + // First obtain the size of the riemann event from client and acknowledge + if err = binary.Read(conn, binary.BigEndian, &header); err != nil { + if err.Error() != "EOF" { + rsl.Log.Debugf("Failed to read header") + riemannReturnErrorResponse(conn, err.Error()) + return + } + return + } + data := make([]byte, header) + + if err = readMessages(conn, data); err != nil { + rsl.Log.Debugf("Failed to read body: %s", err.Error()) + riemannReturnErrorResponse(conn, "Failed to read body") + return + } + if err = proto.Unmarshal(data, messagePb); err != nil { + rsl.Log.Debugf("Failed to unmarshal: %s", err.Error()) + riemannReturnErrorResponse(conn, "Failed to unmarshal") + return + } + riemannEvents := riemanngo.ProtocolBuffersToEvents(messagePb.Events) + + for _, m := range riemannEvents { + if m.Service == "" { + riemannReturnErrorResponse(conn, "No Service Name") + return + } + tags := make(map[string]string) + fieldValues := map[string]interface{}{} + for _, tag := range m.Tags { + tags[strings.ReplaceAll(tag, " ", "_")] = tag + } + tags["Host"] = m.Host + tags["Description"] = m.Description + tags["State"] = m.State + fieldValues["Metric"] = m.Metric + fieldValues["TTL"] = m.TTL.Seconds() + singleMetric, err := metric.New(m.Service, tags, fieldValues, m.Time, telegraf.Untyped) + if err != nil { + rsl.Log.Debugf("Could not create metric for service %s at %s", m.Service, m.Time.String()) + riemannReturnErrorResponse(conn, "Could not create metric") + return + } + + rsl.AddMetric(singleMetric) + } + riemannReturnResponse(conn) + + } + +} + +func riemannReturnResponse(conn net.Conn) { + t := true + message := new(riemangoProto.Msg) + message.Ok = &t + returnData, err := proto.Marshal(message) + if err != nil { + checkError(err) + return + } + b := new(bytes.Buffer) + if err = binary.Write(b, binary.BigEndian, uint32(len(returnData))); err != nil { + checkError(err) + } + // send the msg length + if _, err = conn.Write(b.Bytes()); err != nil { + checkError(err) + } + if _, err = conn.Write(returnData); err != nil { + checkError(err) + } +} + +func riemannReturnErrorResponse(conn net.Conn, errorMessage string) { + t := false + message := new(riemangoProto.Msg) + message.Ok = &t + message.Error = &errorMessage + returnData, err := proto.Marshal(message) + if err != nil { + checkError(err) + return + } + b := new(bytes.Buffer) + if err = binary.Write(b, binary.BigEndian, uint32(len(returnData))); err != nil { + checkError(err) + } + // send the msg length + if _, err = conn.Write(b.Bytes()); err != nil { + checkError(err) + } + if _, err = conn.Write(returnData); err != nil { + log.Println("Somethign") + checkError(err) + } +} + +func (rsl *RiemannSocketListener) Description() string { + return "Riemann protobuff listener." +} + +func (rsl *RiemannSocketListener) SampleConfig() string { + return ` + ## URL to listen on. + ## Default is "tcp://:5555" + # service_address = "tcp://:8094" + # service_address = "tcp://127.0.0.1:http" + # service_address = "tcp4://:8094" + # service_address = "tcp6://:8094" + # service_address = "tcp6://[2001:db8::1]:8094" + + ## Maximum number of concurrent connections. + ## 0 (default) is unlimited. + # max_connections = 1024 + ## Read timeout. + ## 0 (default) is unlimited. + # read_timeout = "30s" + ## Optional TLS configuration. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Enables client authentication if set. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + ## Maximum socket buffer size (in bytes when no unit specified). + # read_buffer_size = "64KiB" + ## Period between keep alive probes. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" +` +} + +func (rsl *RiemannSocketListener) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (rsl *RiemannSocketListener) Start(acc telegraf.Accumulator) error { + ctx, cancelFunc := context.WithCancel(context.Background()) + go processOsSignals(cancelFunc) + rsl.Accumulator = acc + if rsl.ServiceAddress == "" { + rsl.Log.Warnf("Using default service_address tcp://:5555") + rsl.ServiceAddress = "tcp://:5555" + } + spl := strings.SplitN(rsl.ServiceAddress, "://", 2) + if len(spl) != 2 { + return fmt.Errorf("invalid service address: %s", rsl.ServiceAddress) + } + + protocol := spl[0] + addr := spl[1] + + switch protocol { + case "tcp", "tcp4", "tcp6": + tlsCfg, err := rsl.ServerConfig.TLSConfig() + if err != nil { + return err + } + + var l net.Listener + if tlsCfg == nil { + l, err = net.Listen(protocol, addr) + } else { + l, err = tls.Listen(protocol, addr, tlsCfg) + } + if err != nil { + return err + } + + rsl.Log.Infof("Listening on %s://%s", protocol, l.Addr()) + + rsl := &riemannListener{ + Listener: l, + RiemannSocketListener: rsl, + sockType: spl[0], + } + + rsl.wg = sync.WaitGroup{} + rsl.wg.Add(1) + go func() { + defer rsl.wg.Done() + rsl.listen(ctx) + + }() + default: + return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, rsl.ServiceAddress) + } + + return nil +} + +// Handle cancellations from the process +func processOsSignals(cancelFunc context.CancelFunc) { + signalChan := make(chan os.Signal) + signal.Notify(signalChan, os.Interrupt) + for { + sig := <-signalChan + switch sig { + case os.Interrupt: + log.Println("Signal SIGINT is received, probably due to `Ctrl-C`, exiting ...") + cancelFunc() + return + } + } + +} + +func (rsl *RiemannSocketListener) Stop() { + rsl.wg.Done() + rsl.wg.Wait() + os.Exit(0) +} + +func newRiemannSocketListener() *RiemannSocketListener { + return &RiemannSocketListener{} +} + +func init() { + inputs.Add("riemann_listener", func() telegraf.Input { return newRiemannSocketListener() }) +} diff --git a/plugins/inputs/riemann_listener/riemann_listener_test.go b/plugins/inputs/riemann_listener/riemann_listener_test.go new file mode 100644 index 0000000000000..f1ce824c6a731 --- /dev/null +++ b/plugins/inputs/riemann_listener/riemann_listener_test.go @@ -0,0 +1,55 @@ +package riemann_listener + +import ( + "log" + "testing" + "time" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + riemanngo "github.com/riemann/riemann-go-client" + "github.com/stretchr/testify/require" + "gotest.tools/assert" +) + +func TestSocketListener_tcp(t *testing.T) { + log.Println("Entering") + + sl := newRiemannSocketListener() + sl.Log = testutil.Logger{} + sl.ServiceAddress = "tcp://127.0.0.1:5555" + sl.ReadBufferSize = internal.Size{Size: 1024} + + acc := &testutil.Accumulator{} + err := sl.Start(acc) + require.NoError(t, err) + defer sl.Stop() + + testStats(t, sl) + testMissingService(t, sl) +} +func testStats(t *testing.T, sl *RiemannSocketListener) { + c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second) + err := c.Connect() + if err != nil { + log.Println("Error") + panic(err) + } + defer c.Close() + result, err := riemanngo.SendEvent(c, &riemanngo.Event{ + Service: "hello", + }) + assert.Equal(t, result.GetOk(), true) + +} +func testMissingService(t *testing.T, sl *RiemannSocketListener) { + c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second) + err := c.Connect() + if err != nil { + panic(err) + } + defer c.Close() + result, err := riemanngo.SendEvent(c, &riemanngo.Event{}) + assert.Equal(t, result.GetOk(), false) + +} From 11b83dbd54b8a612ac43c91d18bd2b200556e91e Mon Sep 17 00:00:00 2001 From: Karrick McDermott Date: Fri, 27 Nov 2020 11:56:17 -0500 Subject: [PATCH 084/761] update godirwalk to v1.16.1 (#7987) 1. Most importantly, this version runs on Dragonfly BSD, which might be an issue that potential users would care about. 2. Re-issues syscall request after EINTR. More resiliant on Go v1.14 and above, where syscall is more likely to return syscall.EINTR. Also, alongside the place where it would instead call golang.org/x/sys/unix, but it is commented out, I include the equivalent code for checking for unix.EINTR, to make it less likely that I forget to change the EINTR check if I ever convert it to use golang.org/x/sys/unix rather than syscall. 3. Performance improvements for unix and Windows. Co-authored-by: Karrick S. McDermott --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 84d4fd30ee9eb..0b2223ff4e491 100644 --- a/go.mod +++ b/go.mod @@ -79,7 +79,7 @@ require ( github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect github.com/jackc/pgx v3.6.0+incompatible github.com/kardianos/service v1.0.0 - github.com/karrick/godirwalk v1.12.0 + github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee github.com/kylelemons/godebug v1.1.0 // indirect diff --git a/go.sum b/go.sum index a5632a2e0a752..6d85076759f9e 100644 --- a/go.sum +++ b/go.sum @@ -382,8 +382,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0= github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= -github.com/karrick/godirwalk v1.12.0 h1:nkS4xxsjiZMvVlazd0mFyiwD4BR9f3m6LXGhM2TUx3Y= -github.com/karrick/godirwalk v1.12.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= +github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= From 42eacb3a42d180f024b82024b6c2b8add1af5777 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 27 Nov 2020 12:20:21 -0500 Subject: [PATCH 085/761] add log warning to starlark drop-fields example --- .../starlark/testdata/drop_fields_with_unexpected_type.star | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star b/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star index 601ce631f906b..2b122e19e258a 100644 --- a/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star +++ b/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star @@ -10,6 +10,9 @@ # measurement,host=hostname a=1i,b=4.2,c=42.0,d="v3.14",e=true,f=23.0 1597255410000000000 # measurement,host=hostname a=1i,c=42.0,d="v3.14",e=true,f=23.0 1597255410000000000 +load("logging.star", "log") +# loads log.debug(), log.info(), log.warn(), log.error() + expected_type = { "a": "int", "b": "float", @@ -17,10 +20,11 @@ expected_type = { "d": "string", "e": "bool" } - + def apply(metric): for k, v in metric.fields.items(): if type(v) != expected_type.get(k, type(v)): metric.fields.pop(k) + log.warn("Unexpected field type dropped: metric {} had field {} with type {}, but it is expected to be {}".format(metric.name, k, type(v), expected_type.get(k, type(v)))) return metric From ef91f96de9d3d8855cacb8014f32cc4db6182d9e Mon Sep 17 00:00:00 2001 From: Enzo Hamelin Date: Fri, 27 Nov 2020 19:58:32 +0100 Subject: [PATCH 086/761] Add dataset metrics to zfs input (#8383) --- plugins/inputs/zfs/README.md | 23 ++++++++++- plugins/inputs/zfs/zfs.go | 22 ++++++++--- plugins/inputs/zfs/zfs_freebsd.go | 55 +++++++++++++++++++++++++- plugins/inputs/zfs/zfs_freebsd_test.go | 54 +++++++++++++++++++++++++ 4 files changed, 144 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/zfs/README.md b/plugins/inputs/zfs/README.md index f0e71a47d714c..1f3f125d391ec 100644 --- a/plugins/inputs/zfs/README.md +++ b/plugins/inputs/zfs/README.md @@ -2,7 +2,7 @@ This ZFS plugin provides metrics from your ZFS filesystems. It supports ZFS on Linux and FreeBSD. It gets ZFS stat from `/proc/spl/kstat/zfs` on Linux and -from `sysctl` and `zpool` on FreeBSD. +from `sysctl`, 'zfs' and `zpool` on FreeBSD. ### Configuration: @@ -22,11 +22,14 @@ from `sysctl` and `zpool` on FreeBSD. ## By default, don't gather zpool stats # poolMetrics = false + + ## By default, don't gather dataset stats + # datasetMetrics = false ``` ### Measurements & Fields: -By default this plugin collects metrics about ZFS internals and pool. +By default this plugin collects metrics about ZFS internals pool and dataset. These metrics are either counters or measure sizes in bytes. These metrics will be in the `zfs` measurement with the field names listed bellow. @@ -34,6 +37,9 @@ names listed bellow. If `poolMetrics` is enabled then additional metrics will be gathered for each pool. +If `datasetMetrics` is enabled then additional metrics will be gathered for +each dataset. + - zfs With fields listed bellow. @@ -206,21 +212,34 @@ On FreeBSD: - size (integer, bytes) - fragmentation (integer, percent) +#### Dataset Metrics (optional, only on FreeBSD) + +- zfs_dataset + - avail (integer, bytes) + - used (integer, bytes) + - usedsnap (integer, bytes + - usedds (integer, bytes) + ### Tags: - ZFS stats (`zfs`) will have the following tag: - pools - A `::` concatenated list of all ZFS pools on the machine. + - datasets - A `::` concatenated list of all ZFS datasets on the machine. - Pool metrics (`zfs_pool`) will have the following tag: - pool - with the name of the pool which the metrics are for. - health - the health status of the pool. (FreeBSD only) +- Dataset metrics (`zfs_dataset`) will have the following tag: + - dataset - with the name of the dataset which the metrics are for. + ### Example Output: ``` $ ./telegraf --config telegraf.conf --input-filter zfs --test * Plugin: zfs, Collection 1 > zfs_pool,health=ONLINE,pool=zroot allocated=1578590208i,capacity=2i,dedupratio=1,fragmentation=1i,free=64456531968i,size=66035122176i 1464473103625653908 +> zfs_dataset,dataset=zata avail=10741741326336,used=8564135526400,usedsnap=0,usedds=90112 > zfs,pools=zroot arcstats_allocated=4167764i,arcstats_anon_evictable_data=0i,arcstats_anon_evictable_metadata=0i,arcstats_anon_size=16896i,arcstats_arc_meta_limit=10485760i,arcstats_arc_meta_max=115269568i,arcstats_arc_meta_min=8388608i,arcstats_arc_meta_used=51977456i,arcstats_c=16777216i,arcstats_c_max=41943040i,arcstats_c_min=16777216i,arcstats_data_size=0i,arcstats_deleted=1699340i,arcstats_demand_data_hits=14836131i,arcstats_demand_data_misses=2842945i,arcstats_demand_hit_predictive_prefetch=0i,arcstats_demand_metadata_hits=1655006i,arcstats_demand_metadata_misses=830074i,arcstats_duplicate_buffers=0i,arcstats_duplicate_buffers_size=0i,arcstats_duplicate_reads=123i,arcstats_evict_l2_cached=0i,arcstats_evict_l2_eligible=332172623872i,arcstats_evict_l2_ineligible=6168576i,arcstats_evict_l2_skip=0i,arcstats_evict_not_enough=12189444i,arcstats_evict_skip=195190764i,arcstats_hash_chain_max=2i,arcstats_hash_chains=10i,arcstats_hash_collisions=43134i,arcstats_hash_elements=2268i,arcstats_hash_elements_max=6136i,arcstats_hdr_size=565632i,arcstats_hits=16515778i,arcstats_l2_abort_lowmem=0i,arcstats_l2_asize=0i,arcstats_l2_cdata_free_on_write=0i,arcstats_l2_cksum_bad=0i,arcstats_l2_compress_failures=0i,arcstats_l2_compress_successes=0i,arcstats_l2_compress_zeros=0i,arcstats_l2_evict_l1cached=0i,arcstats_l2_evict_lock_retry=0i,arcstats_l2_evict_reading=0i,arcstats_l2_feeds=0i,arcstats_l2_free_on_write=0i,arcstats_l2_hdr_size=0i,arcstats_l2_hits=0i,arcstats_l2_io_error=0i,arcstats_l2_misses=0i,arcstats_l2_read_bytes=0i,arcstats_l2_rw_clash=0i,arcstats_l2_size=0i,arcstats_l2_write_buffer_bytes_scanned=0i,arcstats_l2_write_buffer_iter=0i,arcstats_l2_write_buffer_list_iter=0i,arcstats_l2_write_buffer_list_null_iter=0i,arcstats_l2_write_bytes=0i,arcstats_l2_write_full=0i,arcstats_l2_write_in_l2=0i,arcstats_l2_write_io_in_progress=0i,arcstats_l2_write_not_cacheable=380i,arcstats_l2_write_passed_headroom=0i,arcstats_l2_write_pios=0i,arcstats_l2_write_spa_mismatch=0i,arcstats_l2_write_trylock_fail=0i,arcstats_l2_writes_done=0i,arcstats_l2_writes_error=0i,arcstats_l2_writes_lock_retry=0i,arcstats_l2_writes_sent=0i,arcstats_memory_throttle_count=0i,arcstats_metadata_size=17014784i,arcstats_mfu_evictable_data=0i,arcstats_mfu_evictable_metadata=16384i,arcstats_mfu_ghost_evictable_data=5723648i,arcstats_mfu_ghost_evictable_metadata=10709504i,arcstats_mfu_ghost_hits=1315619i,arcstats_mfu_ghost_size=16433152i,arcstats_mfu_hits=7646611i,arcstats_mfu_size=305152i,arcstats_misses=3676993i,arcstats_mru_evictable_data=0i,arcstats_mru_evictable_metadata=0i,arcstats_mru_ghost_evictable_data=0i,arcstats_mru_ghost_evictable_metadata=80896i,arcstats_mru_ghost_hits=324250i,arcstats_mru_ghost_size=80896i,arcstats_mru_hits=8844526i,arcstats_mru_size=16693248i,arcstats_mutex_miss=354023i,arcstats_other_size=34397040i,arcstats_p=4172800i,arcstats_prefetch_data_hits=0i,arcstats_prefetch_data_misses=0i,arcstats_prefetch_metadata_hits=24641i,arcstats_prefetch_metadata_misses=3974i,arcstats_size=51977456i,arcstats_sync_wait_for_async=0i,vdev_cache_stats_delegations=779i,vdev_cache_stats_hits=323123i,vdev_cache_stats_misses=59929i,zfetchstats_hits=0i,zfetchstats_max_streams=0i,zfetchstats_misses=0i 1464473103634124908 ``` diff --git a/plugins/inputs/zfs/zfs.go b/plugins/inputs/zfs/zfs.go index 8e6bec4644932..297e3cc07ec42 100644 --- a/plugins/inputs/zfs/zfs.go +++ b/plugins/inputs/zfs/zfs.go @@ -1,14 +1,22 @@ package zfs +import ( + "github.com/influxdata/telegraf" +) + type Sysctl func(metric string) ([]string, error) type Zpool func() ([]string, error) +type Zdataset func(properties []string) ([]string, error) type Zfs struct { - KstatPath string - KstatMetrics []string - PoolMetrics bool - sysctl Sysctl - zpool Zpool + KstatPath string + KstatMetrics []string + PoolMetrics bool + DatasetMetrics bool + sysctl Sysctl + zpool Zpool + zdataset Zdataset + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -24,6 +32,8 @@ var sampleConfig = ` # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] ## By default, don't gather zpool stats # poolMetrics = false + ## By default, don't gather zdataset stats + # datasetMetrics = false ` func (z *Zfs) SampleConfig() string { @@ -31,5 +41,5 @@ func (z *Zfs) SampleConfig() string { } func (z *Zfs) Description() string { - return "Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools" + return "Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets" } diff --git a/plugins/inputs/zfs/zfs_freebsd.go b/plugins/inputs/zfs/zfs_freebsd.go index 51c20682e832b..491388147d93c 100644 --- a/plugins/inputs/zfs/zfs_freebsd.go +++ b/plugins/inputs/zfs/zfs_freebsd.go @@ -87,6 +87,47 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) { return strings.Join(pools, "::"), nil } +func (z *Zfs) gatherDatasetStats(acc telegraf.Accumulator) (string, error) { + properties := []string{"name", "avail", "used", "usedsnap", "usedds"} + + lines, err := z.zdataset(properties) + if err != nil { + return "", err + } + + datasets := []string{} + for _, line := range lines { + col := strings.Split(line, "\t") + + datasets = append(datasets, col[0]) + } + + if z.DatasetMetrics { + for _, line := range lines { + col := strings.Split(line, "\t") + if len(col) != len(properties) { + z.Log.Warnf("Invalid number of columns for line: %s", line) + continue + } + + tags := map[string]string{"dataset": col[0]} + fields := map[string]interface{}{} + + for i, key := range properties[1:] { + value, err := strconv.ParseInt(col[i+1], 10, 64) + if err != nil { + return "", fmt.Errorf("Error parsing %s %q: %s", key, col[i+1], err) + } + fields[key] = value + } + + acc.AddFields("zfs_dataset", fields, tags) + } + } + + return strings.Join(datasets, "::"), nil +} + func (z *Zfs) Gather(acc telegraf.Accumulator) error { kstatMetrics := z.KstatMetrics if len(kstatMetrics) == 0 { @@ -99,6 +140,11 @@ func (z *Zfs) Gather(acc telegraf.Accumulator) error { return err } tags["pools"] = poolNames + datasetNames, err := z.gatherDatasetStats(acc) + if err != nil { + return err + } + tags["datasets"] = datasetNames fields := make(map[string]interface{}) for _, metric := range kstatMetrics { @@ -137,6 +183,10 @@ func zpool() ([]string, error) { return run("zpool", []string{"list", "-Hp", "-o", "name,health,size,alloc,free,fragmentation,capacity,dedupratio"}...) } +func zdataset(properties []string) ([]string, error) { + return run("zfs", []string{"list", "-Hp", "-o", strings.Join(properties, ",")}...) +} + func sysctl(metric string) ([]string, error) { return run("sysctl", []string{"-q", fmt.Sprintf("kstat.zfs.misc.%s", metric)}...) } @@ -144,8 +194,9 @@ func sysctl(metric string) ([]string, error) { func init() { inputs.Add("zfs", func() telegraf.Input { return &Zfs{ - sysctl: sysctl, - zpool: zpool, + sysctl: sysctl, + zpool: zpool, + zdataset: zdataset, } }) } diff --git a/plugins/inputs/zfs/zfs_freebsd_test.go b/plugins/inputs/zfs/zfs_freebsd_test.go index 87f21f67245f4..4d1fea0ae483a 100644 --- a/plugins/inputs/zfs/zfs_freebsd_test.go +++ b/plugins/inputs/zfs/zfs_freebsd_test.go @@ -31,6 +31,18 @@ func mock_zpool_unavail() ([]string, error) { return zpool_output_unavail, nil } +// $ zfs list -Hp -o name,avail,used,usedsnap,usedds +var zdataset_output = []string{ + "zata 10741741326336 8564135526400 0 90112", + "zata/home 10741741326336 2498560 212992 2285568", + "zata/import 10741741326336 196608 81920 114688", + "zata/storage 10741741326336 8556084379648 3601138999296 4954945380352", +} + +func mock_zdataset() ([]string, error) { + return zdataset_output, nil +} + // sysctl -q kstat.zfs.misc.arcstats // sysctl -q kstat.zfs.misc.vdev_cache_stats @@ -126,6 +138,39 @@ func TestZfsPoolMetrics_unavail(t *testing.T) { acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags) } +func TestZfsDatasetMetrics(t *testing.T) { + var acc testutil.Accumulator + + z := &Zfs{ + KstatMetrics: []string{"vdev_cache_stats"}, + sysctl: mock_sysctl, + zdataset: mock_zdataset, + } + err := z.Gather(&acc) + require.NoError(t, err) + + require.False(t, acc.HasMeasurement("zfs_dataset")) + acc.Metrics = nil + + z = &Zfs{ + KstatMetrics: []string{"vdev_cache_stats"}, + DatasetMetrics: true, + sysctl: mock_sysctl, + zdataset: mock_zdataset, + } + err = z.Gather(&acc) + require.NoError(t, err) + + //one pool, all metrics + tags := map[string]string{ + "dataset": "zata", + } + + datasetMetrics := getZataDatasetMetrics() + + acc.AssertContainsTaggedFields(t, "zfs_dataset", datasetMetrics, tags) +} + func TestZfsGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator @@ -178,6 +223,15 @@ func getTemp2PoolMetrics() map[string]interface{} { } } +func getZataDatasetMetrics() map[string]interface{} { + return map[string]interface{}{ + "avail": int64(10741741326336), + "used": int64(8564135526400), + "usedsnap": int64(0), + "usedds": int64(90112), + } +} + func getKstatMetricsVdevOnly() map[string]interface{} { return map[string]interface{}{ "vdev_cache_stats_misses": int64(87789), From 143cabc4b7b04c49092075e5afe9e1dc2b364adf Mon Sep 17 00:00:00 2001 From: Ben Carlton Date: Fri, 27 Nov 2020 16:15:11 -0500 Subject: [PATCH 087/761] Removed outdated information about Windows support (#7971) Newer versions of Telegraf for Windows do not appear to support using WMI query patterns as values for the "pattern" option. Regular expressions appear to be the only patterns now supported. --- plugins/inputs/procstat/README.md | 9 --------- 1 file changed, 9 deletions(-) diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 3803215697ec7..73c40ef79213e 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -64,15 +64,6 @@ Processes can be selected for monitoring using one of several methods: Preliminary support for Windows has been added, however you may prefer using the `win_perf_counters` input plugin as a more mature alternative. -When using the `pid_finder = "native"` in Windows, the pattern lookup method is -implemented as a WMI query. The pattern allows fuzzy matching using only -[WMI query patterns](https://msdn.microsoft.com/en-us/library/aa392263(v=vs.85).aspx): -```toml -[[inputs.procstat]] - pattern = "%influx%" - pid_finder = "native" -``` - ### Metrics: - procstat From 6be3bd8c9cd812b9cc5135eaf0cc496de157db23 Mon Sep 17 00:00:00 2001 From: Mike Dalrymple Date: Mon, 30 Nov 2020 09:12:10 -0800 Subject: [PATCH 088/761] Log SubscribeResponse_Error message and code. #8482 (#8483) --- plugins/inputs/gnmi/gnmi.go | 13 ++++++++----- plugins/inputs/gnmi/gnmi_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 09332cc29f532..694ca7851f2be 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -240,14 +240,17 @@ func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Co return nil } -// HandleSubscribeResponse message from gNMI and parse contained telemetry data func (c *GNMI) handleSubscribeResponse(address string, reply *gnmi.SubscribeResponse) { - // Check if response is a gNMI Update and if we have a prefix to derive the measurement name - response, ok := reply.Response.(*gnmi.SubscribeResponse_Update) - if !ok { - return + switch response := reply.Response.(type) { + case *gnmi.SubscribeResponse_Update: + c.handleSubscribeResponseUpdate(address, response) + case *gnmi.SubscribeResponse_Error: + c.Log.Errorf("Subscribe error (%d), %q", response.Error.Code, response.Error.Message) } +} +// Handle SubscribeResponse_Update message from gNMI and parse contained telemetry data +func (c *GNMI) handleSubscribeResponseUpdate(address string, response *gnmi.SubscribeResponse_Update) { var prefix, prefixAliasPath string grouper := metric.NewSeriesGrouper() timestamp := time.Unix(0, response.Update.Timestamp) diff --git a/plugins/inputs/gnmi/gnmi_test.go b/plugins/inputs/gnmi/gnmi_test.go index c74fbcd4a5164..1846fd67a9951 100644 --- a/plugins/inputs/gnmi/gnmi_test.go +++ b/plugins/inputs/gnmi/gnmi_test.go @@ -403,6 +403,30 @@ func TestNotification(t *testing.T) { } } +type MockLogger struct { + telegraf.Logger + lastFormat string + lastArgs []interface{} +} + +func (l *MockLogger) Errorf(format string, args ...interface{}) { + l.lastFormat = format + l.lastArgs = args +} + +func TestSubscribeResponseError(t *testing.T) { + me := "mock error message" + var mc uint32 = 7 + ml := &MockLogger{} + plugin := &GNMI{Log: ml} + errorResponse := &gnmi.SubscribeResponse_Error{ + Error: &gnmi.Error{Message: me, Code: mc}} + plugin.handleSubscribeResponse( + "127.0.0.1:0", &gnmi.SubscribeResponse{Response: errorResponse}) + require.NotEmpty(t, ml.lastFormat) + require.Equal(t, ml.lastArgs, []interface{}{mc, me}) +} + func TestRedial(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) From 01fc69da4736aba0fa5e58cd96b3c32268002195 Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Mon, 30 Nov 2020 21:44:21 +0100 Subject: [PATCH 089/761] Add the shared state to the global scope to get previous data (#8447) --- plugins/processors/starlark/README.md | 1 + plugins/processors/starlark/starlark.go | 3 +++ .../starlark/testdata/compare_metrics.star | 25 +++++++++++++++++++ 3 files changed, 29 insertions(+) create mode 100644 plugins/processors/starlark/testdata/compare_metrics.star diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index c660342f456d4..e66cb4a23bd49 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -192,6 +192,7 @@ def failing(metric): - [multiple metrics](/plugins/processors/starlark/testdata/multiple_metrics.star) - Return multiple metrics by using [a list](https://docs.bazel.build/versions/master/skylark/lib/list.html) of metrics. - [multiple metrics from json array](/plugins/processors/starlark/testdata/multiple_metrics_with_json.star) - Builds a new metric from each element of a json array then returns all the created metrics. - [custom error](/plugins/processors/starlark/testdata/fail.star) - Return a custom error with [fail](https://docs.bazel.build/versions/master/skylark/lib/globals.html#fail). +- [compare with previous metric](/plugins/processors/starlark/testdata/compare_metrics.star) - Compare the current metric with the previous one using the shared state. [All examples](/plugins/processors/starlark/testdata) are in the testdata folder. diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go index 4835f06dee5a4..9a055ce56db6f 100644 --- a/plugins/processors/starlark/starlark.go +++ b/plugins/processors/starlark/starlark.go @@ -73,6 +73,9 @@ func (s *Starlark) Init() error { return err } + // Make available a shared state to the apply function + globals["state"] = starlark.NewDict(0) + // Freeze the global state. This prevents modifications to the processor // state and prevents scripts from containing errors storing tracking // metrics. Tasks that require global state will not be possible due to diff --git a/plugins/processors/starlark/testdata/compare_metrics.star b/plugins/processors/starlark/testdata/compare_metrics.star new file mode 100644 index 0000000000000..79555729d1814 --- /dev/null +++ b/plugins/processors/starlark/testdata/compare_metrics.star @@ -0,0 +1,25 @@ +# Example showing how to keep the last metric in order to compare it with the new one. +# +# Example Input: +# cpu value=10i 1465839830100400201 +# cpu value=8i 1465839830100400301 +# +# Example Output: +# cpu_diff value=2i 1465839830100400301 + +state = { + "last": None +} + +def apply(metric): + # Load from the shared state the metric assigned to the key "last" + last = state["last"] + # Store the deepcopy of the new metric into the shared state and assign it to the key "last" + # NB: To store a metric into the shared state you have to deep copy it + state["last"] = deepcopy(metric) + if last != None: + # Create a new metric named "cpu_diff" + result = Metric("cpu_diff") + # Set the field "value" to the difference between the value of the last metric and the current one + result.fields["value"] = last.fields["value"] - metric.fields["value"] + return result From 05378980a3e3d8755ba4bafe27bbe21d9b4292a5 Mon Sep 17 00:00:00 2001 From: SoerMan Date: Mon, 30 Nov 2020 22:22:57 +0100 Subject: [PATCH 090/761] Add FLOAT64-IEEE support to inputs.modbus (#8361) (by @Nemecsek) (#8474) --- plugins/inputs/modbus/README.md | 8 ++-- plugins/inputs/modbus/modbus.go | 13 +++++- plugins/inputs/modbus/modbus_test.go | 60 ++++++++++++++++++++++++++++ 3 files changed, 75 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md index 3c568b5e6e5e7..7fe8f8fda4205 100644 --- a/plugins/inputs/modbus/README.md +++ b/plugins/inputs/modbus/README.md @@ -67,7 +67,7 @@ Registers via Modbus TCP or Modbus RTU/ASCII. ## |---BA, DCBA - Little Endian ## |---BADC - Mid-Big Endian ## |---CDAB - Mid-Little Endian - ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation) + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) ## FLOAT32 (deprecated), FIXED, UFIXED (fixed-point representation on input) ## scale - the final numeric variable representation ## address - variable address @@ -105,10 +105,10 @@ and cannot be configured. These types are used for integer input values. Select the one that matches your modbus data source. -#### Floating Point: `FLOAT32-IEEE` +#### Floating Point: `FLOAT32-IEEE`, `FLOAT64-IEEE` -Use this type if your modbus registers contain a value that is encoded in this format. This type -always includes the sign and therefore there exists no variant. +Use these types if your modbus registers contain a value that is encoded in this format. These types +always include the sign and therefore there exists no variant. #### Fixed Point: `FIXED`, `UFIXED` (`FLOAT32`) diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index ec68890c5eb91..21bd8a977da7b 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -132,7 +132,8 @@ const sampleConfig = ` ## |---BA, DCBA - Little Endian ## |---BADC - Mid-Big Endian ## |---CDAB - Mid-Little Endian - ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation) + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, + ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) ## scale - the final numeric variable representation ## address - variable address @@ -355,7 +356,7 @@ func validateFieldContainers(t []fieldContainer, n string) error { // search data type switch item.DataType { - case "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT32-IEEE", "FLOAT32", "FIXED", "UFIXED": + case "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT32-IEEE", "FLOAT64-IEEE", "FLOAT32", "FIXED", "UFIXED": break default: return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, n, item.Name) @@ -512,6 +513,10 @@ func convertDataType(t fieldContainer, bytes []byte) interface{} { e32 := convertEndianness32(t.ByteOrder, bytes) f32 := math.Float32frombits(e32) return scaleFloat32(t.Scale, f32) + case "FLOAT64-IEEE": + e64 := convertEndianness64(t.ByteOrder, bytes) + f64 := math.Float64frombits(e64) + return scaleFloat64(t.Scale, f64) case "FIXED": if len(bytes) == 2 { e16 := convertEndianness16(t.ByteOrder, bytes) @@ -662,6 +667,10 @@ func scaleFloat32(s float64, v float32) float32 { return float32(float64(v) * s) } +func scaleFloat64(s float64, v float64) float64 { + return v * s +} + func scaleUint64(s float64, v uint64) uint64 { return uint64(float64(v) * float64(s)) } diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index 8c5241dc2aaee..07af3369a66ec 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -549,6 +549,66 @@ func TestHoldingRegisters(t *testing.T) { write: []byte{0xF6, 0x84, 0xF9, 0x45, 0xFE, 0xBC, 0xFF, 0xFF}, read: uint64(18446742686322259968), }, + { + name: "register214_to_register217_abcdefgh_float64_ieee", + address: []uint16{214, 215, 216, 217}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0xBF, 0x9C, 0x6A, 0x40, 0xC3, 0x47, 0x8F, 0x55}, + read: float64(-0.02774907295123737), + }, + { + name: "register214_to_register217_abcdefgh_float64_ieee_scaled", + address: []uint16{214, 215, 216, 217}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "FLOAT64-IEEE", + scale: 0.1, + write: []byte{0xBF, 0x9C, 0x6A, 0x40, 0xC3, 0x47, 0x8F, 0x55}, + read: float64(-0.002774907295123737), + }, + { + name: "register218_to_register221_abcdefgh_float64_ieee_pos", + address: []uint16{218, 219, 220, 221}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0x3F, 0x9C, 0x6A, 0x40, 0xC3, 0x47, 0x8F, 0x55}, + read: float64(0.02774907295123737), + }, + { + name: "register222_to_register225_hgfecdba_float64_ieee", + address: []uint16{222, 223, 224, 225}, + quantity: 4, + byteOrder: "HGFEDCBA", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0x55, 0x8F, 0x47, 0xC3, 0x40, 0x6A, 0x9C, 0xBF}, + read: float64(-0.02774907295123737), + }, + { + name: "register226_to_register229_badcfehg_float64_ieee", + address: []uint16{226, 227, 228, 229}, + quantity: 4, + byteOrder: "BADCFEHG", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0x9C, 0xBF, 0x40, 0x6A, 0x47, 0xC3, 0x55, 0x8F}, + read: float64(-0.02774907295123737), + }, + { + name: "register230_to_register233_ghefcdab_float64_ieee", + address: []uint16{230, 231, 232, 233}, + quantity: 4, + byteOrder: "GHEFCDAB", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0x8F, 0x55, 0xC3, 0x47, 0x6A, 0x40, 0xBF, 0x9C}, + read: float64(-0.02774907295123737), + }, } serv := mbserver.NewServer() From a7484211235c5d1750c6ea717bec85020b6dbced Mon Sep 17 00:00:00 2001 From: Andrey Klyachkin Date: Mon, 30 Nov 2020 22:58:03 +0100 Subject: [PATCH 091/761] add support for linux/ppc64le (#8432) --- Makefile | 8 ++++++++ README.md | 2 ++ scripts/release.sh | 2 ++ 3 files changed, 12 insertions(+) diff --git a/Makefile b/Makefile index 92e94772576b9..284cbaf86d938 100644 --- a/Makefile +++ b/Makefile @@ -207,12 +207,14 @@ debs += telegraf_$(deb_version)_i386.deb debs += telegraf_$(deb_version)_mips.deb debs += telegraf_$(deb_version)_mipsel.deb debs += telegraf_$(deb_version)_s390x.deb +debs += telegraf_$(deb_version)_ppc64el.deb rpms += telegraf-$(rpm_version).aarch64.rpm rpms += telegraf-$(rpm_version).armel.rpm rpms += telegraf-$(rpm_version).armv6hl.rpm rpms += telegraf-$(rpm_version).i386.rpm rpms += telegraf-$(rpm_version).s390x.rpm +rpms += telegraf-$(rpm_version).ppc64le.rpm rpms += telegraf-$(rpm_version).x86_64.rpm tars += telegraf-$(tar_version)_darwin_amd64.tar.gz @@ -226,6 +228,7 @@ tars += telegraf-$(tar_version)_linux_i386.tar.gz tars += telegraf-$(tar_version)_linux_mips.tar.gz tars += telegraf-$(tar_version)_linux_mipsel.tar.gz tars += telegraf-$(tar_version)_linux_s390x.tar.gz +tars += telegraf-$(tar_version)_linux_ppc64le.tar.gz tars += telegraf-$(tar_version)_static_linux_amd64.tar.gz zips += telegraf-$(tar_version)_windows_amd64.zip @@ -239,6 +242,7 @@ package: $(dists) rpm_amd64 := amd64 rpm_386 := i386 rpm_s390x := s390x +rpm_ppc64le := ppc64le rpm_arm5 := armel rpm_arm6 := armv6hl rpm_arm647 := aarch64 @@ -275,6 +279,7 @@ $(rpms): deb_amd64 := amd64 deb_386 := i386 deb_s390x := s390x +deb_ppc64le := ppc64el deb_arm5 := armel deb_arm6 := armhf deb_arm647 := arm64 @@ -360,6 +365,9 @@ upload-nightly: %s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOOS := linux %s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOARCH := s390x +%ppc64el.deb %ppc64le.rpm %linux_ppc64le.tar.gz: export GOOS := linux +%ppc64el.deb %ppc64le.rpm %linux_ppc64le.tar.gz: export GOARCH := ppc64le + %freebsd_amd64.tar.gz: export GOOS := freebsd %freebsd_amd64.tar.gz: export GOARCH := amd64 diff --git a/README.md b/README.md index ca969132d5eb2..6093a253f479f 100644 --- a/README.md +++ b/README.md @@ -94,6 +94,8 @@ These builds are generated from the master branch: - [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) - [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) - [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) +- [telegraf_nightly_ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) +- [telegraf-nightly.ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) - [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) - [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) - [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) diff --git a/scripts/release.sh b/scripts/release.sh index cf29b5c23a0e8..41cb0cd7fddac 100644 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -65,6 +65,8 @@ arch() { echo amd64;; *s390x.*) echo s390x;; + *ppc64le.*) + echo ppc64le;; *mipsel.*) echo mipsel;; *mips.*) From 0eeab49efdcdc3e2f2628a439d9c879907728185 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 1 Dec 2020 14:47:50 -0500 Subject: [PATCH 092/761] add to starlark readme --- plugins/processors/starlark/README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index e66cb4a23bd49..a22296f48f3da 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -102,6 +102,12 @@ If you would like to see support for something else here, please open an issue. ### Common Questions +**What's the performance cost to using Starlark?** + +In local tests, it takes about 1µs (1 microsecond) to run a modest script to process one +metric. This is going to vary with the size of your script, but the total impact is minimal. +At this pace, it's likely not going to be the bottleneck in your Telegraf setup. + **How can I drop/delete a metric?** If you don't return the metric it will be deleted. Usually this means the From 6fcc12a9b60ce1e2ad6a0ef9f69a42023f881baf Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Tue, 1 Dec 2020 14:16:31 -0700 Subject: [PATCH 093/761] Update changelog (cherry picked from commit acdc002bb1ab13a96ec5428ca76f11debe456623) --- CHANGELOG.md | 20 ++++++++++++++++++++ build_version.txt | 2 +- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 805fd53fa88a8..f7552b5aef103 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,23 @@ +## v1.16.3 [2020-12-01] + +#### Bugfixes + + - [#8483](https://github.com/influxdata/telegraf/pull/8483) `inputs.gnmi` Log SubscribeResponse_Error message and code. #8482 + - [#7987](https://github.com/influxdata/telegraf/pull/7987) update godirwalk to v1.16.1 + - [#8438](https://github.com/influxdata/telegraf/pull/8438) `processors.starlark` Starlark example dropbytype + - [#8468](https://github.com/influxdata/telegraf/pull/8468) `inputs.sqlserver` Fix typo in column name + - [#8461](https://github.com/influxdata/telegraf/pull/8461) `inputs.phpfpm` [php-fpm] Fix possible "index out of range" + - [#8444](https://github.com/influxdata/telegraf/pull/8444) `inputs.apcupsd` Update mdlayher/apcupsd dependency + - [#8439](https://github.com/influxdata/telegraf/pull/8439) `processors.starlark` Show how to return a custom error with the Starlark processor + - [#8440](https://github.com/influxdata/telegraf/pull/8440) `parsers.csv` keep field name as is for csv timestamp column + - [#8436](https://github.com/influxdata/telegraf/pull/8436) `inputs.nvidia_smi` Add DriverVersion and CUDA Version to output + - [#8423](https://github.com/influxdata/telegraf/pull/8423) `processors.starlark` Show how to return several metrics with the Starlark processor + - [#8408](https://github.com/influxdata/telegraf/pull/8408) `processors.starlark` Support logging in starlark + - [#8315](https://github.com/influxdata/telegraf/pull/8315) add kinesis output to external plugins list + - [#8406](https://github.com/influxdata/telegraf/pull/8406) `outputs.wavefront` #8405 add non-retryable debug logging + - [#8404](https://github.com/influxdata/telegraf/pull/8404) `outputs.wavefront` Wavefront output should distinguish between retryable and non-retryable errors + - [#8401](https://github.com/influxdata/telegraf/pull/8401) `processors.starlark` Allow to catch errors that occur in the apply function + ## v1.16.2 [2020-11-13] #### Bugfixes diff --git a/build_version.txt b/build_version.txt index 4a02d2c3170bd..c807441cfed77 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.16.2 +1.16.3 From a5f3121f6d07977d0ea47cae360c0a989c9142ab Mon Sep 17 00:00:00 2001 From: Hong Date: Thu, 3 Dec 2020 02:11:35 +0800 Subject: [PATCH 094/761] Update grok package to support for field names containing '-' and '.' (#8276) --- go.mod | 2 +- go.sum | 4 ++-- plugins/parsers/grok/parser_test.go | 19 +++++++++++++++++++ 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0b2223ff4e491..f2d7a3ec6a95e 100644 --- a/go.mod +++ b/go.mod @@ -123,7 +123,7 @@ require ( github.com/tidwall/gjson v1.6.0 github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect - github.com/vjeantet/grok v1.0.0 + github.com/vjeantet/grok v1.0.1-0.20180213041522-5a86c829f3c3 github.com/vmware/govmomi v0.19.0 github.com/wavefronthq/wavefront-sdk-go v0.9.2 github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf diff --git a/go.sum b/go.sum index 6d85076759f9e..ce815505833e4 100644 --- a/go.sum +++ b/go.sum @@ -588,8 +588,8 @@ github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Su github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vjeantet/grok v1.0.0 h1:uxMqatJP6MOFXsj6C1tZBnqqAThQEeqnizUZ48gSJQQ= -github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= +github.com/vjeantet/grok v1.0.1-0.20180213041522-5a86c829f3c3 h1:T3ATR84Xk4b9g0QbGgLJVpRYWm/jvixqLTWRsR108sI= +github.com/vjeantet/grok v1.0.1-0.20180213041522-5a86c829f3c3/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY= github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk= diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 1c409e8a542b6..5aaa0c967c1ce 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -1115,3 +1115,22 @@ func TestTrimRegression(t *testing.T) { ) require.Equal(t, expected, actual) } + +func TestAdvanceFieldName(t *testing.T) { + p := &Parser{ + Patterns: []string{`rts=%{NUMBER:response-time.s} local=%{IP:local-ip} remote=%{IP:remote.ip}`}, + } + assert.NoError(t, p.Compile()) + + metricA, err := p.ParseLine(`rts=1.283 local=127.0.0.1 remote=10.0.0.1`) + require.NotNil(t, metricA) + assert.NoError(t, err) + assert.Equal(t, + map[string]interface{}{ + "response-time.s": "1.283", + "local-ip": "127.0.0.1", + "remote.ip": "10.0.0.1", + }, + metricA.Fields()) + assert.Equal(t, map[string]string{}, metricA.Tags()) +} From 0ccb134ae427f13a4db1faab7021d14742b7779f Mon Sep 17 00:00:00 2001 From: Dmitry Senin Date: Wed, 2 Dec 2020 22:48:44 +0300 Subject: [PATCH 095/761] Add a parser plugin for prometheus (#7778) --- docs/DATA_FORMATS_INPUT.md | 3 +- plugins/inputs/prometheus/parser.go | 209 +---------- plugins/inputs/prometheus/prometheus.go | 4 +- plugins/parsers/prometheus/README.md | 17 + plugins/parsers/prometheus/common/helpers.go | 36 ++ plugins/parsers/prometheus/parser.go | 179 ++++++++++ plugins/parsers/prometheus/parser_test.go | 346 +++++++++++++++++++ plugins/parsers/registry.go | 9 + 8 files changed, 609 insertions(+), 194 deletions(-) create mode 100644 plugins/parsers/prometheus/README.md create mode 100644 plugins/parsers/prometheus/common/helpers.go create mode 100644 plugins/parsers/prometheus/parser.go create mode 100644 plugins/parsers/prometheus/parser_test.go diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index b716501683bf8..ff660ab204f8f 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -5,15 +5,16 @@ using a configurable parser into [metrics][]. This allows, for example, the `kafka_consumer` input plugin to process messages in either InfluxDB Line Protocol or in JSON format. -- [InfluxDB Line Protocol](/plugins/parsers/influx) - [Collectd](/plugins/parsers/collectd) - [CSV](/plugins/parsers/csv) - [Dropwizard](/plugins/parsers/dropwizard) - [Graphite](/plugins/parsers/graphite) - [Grok](/plugins/parsers/grok) +- [InfluxDB Line Protocol](/plugins/parsers/influx) - [JSON](/plugins/parsers/json) - [Logfmt](/plugins/parsers/logfmt) - [Nagios](/plugins/parsers/nagios) +- [Prometheus](/plugins/parsers/prometheus) - [Value](/plugins/parsers/value), ie: 45 or "booyah" - [Wavefront](/plugins/parsers/wavefront) diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index 0726c87713b0a..c2235c6929d3d 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -1,8 +1,5 @@ package prometheus -// Parser inspired from -// https://github.com/prometheus/prom2json/blob/master/main.go - import ( "bufio" "bytes" @@ -15,168 +12,27 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + . "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" + "github.com/matttproud/golang_protobuf_extensions/pbutil" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" ) -// Parse returns a slice of Metrics from a text representation of a -// metrics -func ParseV2(buf []byte, header http.Header) ([]telegraf.Metric, error) { - var metrics []telegraf.Metric - var parser expfmt.TextParser - // parse even if the buffer begins with a newline - buf = bytes.TrimPrefix(buf, []byte("\n")) - // Read raw data - buffer := bytes.NewBuffer(buf) - reader := bufio.NewReader(buffer) - - mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type")) - // Prepare output - metricFamilies := make(map[string]*dto.MetricFamily) - - if err == nil && mediatype == "application/vnd.google.protobuf" && - params["encoding"] == "delimited" && - params["proto"] == "io.prometheus.client.MetricFamily" { - for { - mf := &dto.MetricFamily{} - if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil { - if ierr == io.EOF { - break - } - return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr) - } - metricFamilies[mf.GetName()] = mf - } - } else { - metricFamilies, err = parser.TextToMetricFamilies(reader) - if err != nil { - return nil, fmt.Errorf("reading text format failed: %s", err) - } - } - - // make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds - now := time.Now() - // read metrics - for metricName, mf := range metricFamilies { - for _, m := range mf.Metric { - // reading tags - tags := makeLabels(m) - - if mf.GetType() == dto.MetricType_SUMMARY { - // summary metric - telegrafMetrics := makeQuantilesV2(m, tags, metricName, mf.GetType(), now) - metrics = append(metrics, telegrafMetrics...) - } else if mf.GetType() == dto.MetricType_HISTOGRAM { - // histogram metric - telegrafMetrics := makeBucketsV2(m, tags, metricName, mf.GetType(), now) - metrics = append(metrics, telegrafMetrics...) - } else { - // standard metric - // reading fields - fields := getNameAndValueV2(m, metricName) - // converting to telegraf metric - if len(fields) > 0 { - var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { - t = time.Unix(0, *m.TimestampMs*1000000) - } else { - t = now - } - metric, err := metric.New("prometheus", tags, fields, t, valueType(mf.GetType())) - if err == nil { - metrics = append(metrics, metric) - } - } - } - } - } - - return metrics, err -} - -// Get Quantiles for summary metric & Buckets for histogram -func makeQuantilesV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { - var metrics []telegraf.Metric - fields := make(map[string]interface{}) - var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { - t = time.Unix(0, *m.TimestampMs*1000000) - } else { - t = now - } - fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) - fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) - met, err := metric.New("prometheus", tags, fields, t, valueType(metricType)) - if err == nil { - metrics = append(metrics, met) - } - - for _, q := range m.GetSummary().Quantile { - newTags := tags - fields = make(map[string]interface{}) - - newTags["quantile"] = fmt.Sprint(q.GetQuantile()) - fields[metricName] = float64(q.GetValue()) - - quantileMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType)) - if err == nil { - metrics = append(metrics, quantileMetric) - } - } - return metrics -} - -// Get Buckets from histogram metric -func makeBucketsV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { - var metrics []telegraf.Metric - fields := make(map[string]interface{}) - var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { - t = time.Unix(0, *m.TimestampMs*1000000) - } else { - t = now - } - fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) - fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) - - met, err := metric.New("prometheus", tags, fields, t, valueType(metricType)) - if err == nil { - metrics = append(metrics, met) - } - - for _, b := range m.GetHistogram().Bucket { - newTags := tags - fields = make(map[string]interface{}) - newTags["le"] = fmt.Sprint(b.GetUpperBound()) - fields[metricName+"_bucket"] = float64(b.GetCumulativeCount()) - - histogramMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType)) - if err == nil { - metrics = append(metrics, histogramMetric) - } - } - return metrics -} - -// Parse returns a slice of Metrics from a text representation of a -// metrics func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { - var metrics []telegraf.Metric var parser expfmt.TextParser + var metrics []telegraf.Metric + var err error // parse even if the buffer begins with a newline buf = bytes.TrimPrefix(buf, []byte("\n")) // Read raw data buffer := bytes.NewBuffer(buf) reader := bufio.NewReader(buffer) - mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type")) // Prepare output metricFamilies := make(map[string]*dto.MetricFamily) - if err == nil && mediatype == "application/vnd.google.protobuf" && - params["encoding"] == "delimited" && - params["proto"] == "io.prometheus.client.MetricFamily" { + if isProtobuf(header) { for { mf := &dto.MetricFamily{} if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil { @@ -194,13 +50,13 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { } } - // make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds now := time.Now() // read metrics for metricName, mf := range metricFamilies { for _, m := range mf.Metric { // reading tags - tags := makeLabels(m) + tags := MakeLabels(m, nil) + // reading fields var fields map[string]interface{} if mf.GetType() == dto.MetricType_SUMMARY { @@ -226,7 +82,7 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { } else { t = now } - metric, err := metric.New(metricName, tags, fields, t, valueType(mf.GetType())) + metric, err := metric.New(metricName, tags, fields, t, ValueType(mf.GetType())) if err == nil { metrics = append(metrics, metric) } @@ -237,19 +93,16 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { return metrics, err } -func valueType(mt dto.MetricType) telegraf.ValueType { - switch mt { - case dto.MetricType_COUNTER: - return telegraf.Counter - case dto.MetricType_GAUGE: - return telegraf.Gauge - case dto.MetricType_SUMMARY: - return telegraf.Summary - case dto.MetricType_HISTOGRAM: - return telegraf.Histogram - default: - return telegraf.Untyped +func isProtobuf(header http.Header) bool { + mediatype, params, error := mime.ParseMediaType(header.Get("Content-Type")) + + if error != nil { + return false } + + return mediatype == "application/vnd.google.protobuf" && + params["encoding"] == "delimited" && + params["proto"] == "io.prometheus.client.MetricFamily" } // Get Quantiles from summary metric @@ -272,15 +125,6 @@ func makeBuckets(m *dto.Metric) map[string]interface{} { return fields } -// Get labels from metric -func makeLabels(m *dto.Metric) map[string]string { - result := map[string]string{} - for _, lp := range m.Label { - result[lp.GetName()] = lp.GetValue() - } - return result -} - // Get name and value from metric func getNameAndValue(m *dto.Metric) map[string]interface{} { fields := make(map[string]interface{}) @@ -299,22 +143,3 @@ func getNameAndValue(m *dto.Metric) map[string]interface{} { } return fields } - -// Get name and value from metric -func getNameAndValueV2(m *dto.Metric, metricName string) map[string]interface{} { - fields := make(map[string]interface{}) - if m.Gauge != nil { - if !math.IsNaN(m.GetGauge().GetValue()) { - fields[metricName] = float64(m.GetGauge().GetValue()) - } - } else if m.Counter != nil { - if !math.IsNaN(m.GetCounter().GetValue()) { - fields[metricName] = float64(m.GetCounter().GetValue()) - } - } else if m.Untyped != nil { - if !math.IsNaN(m.GetUntyped().GetValue()) { - fields[metricName] = float64(m.GetUntyped().GetValue()) - } - } - return fields -} diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 70d72e0b0a379..5a7891ceb60ef 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -15,6 +15,7 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" + parser_v2 "github.com/influxdata/telegraf/plugins/parsers/prometheus" ) const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1` @@ -329,7 +330,8 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error } if p.MetricVersion == 2 { - metrics, err = ParseV2(body, resp.Header) + parser := parser_v2.Parser{} + metrics, err = parser.Parse(body) } else { metrics, err = Parse(body, resp.Header) } diff --git a/plugins/parsers/prometheus/README.md b/plugins/parsers/prometheus/README.md new file mode 100644 index 0000000000000..931008e88696d --- /dev/null +++ b/plugins/parsers/prometheus/README.md @@ -0,0 +1,17 @@ +# Prometheus Text-Based Format + +There are no additional configuration options for [Prometheus Text-Based Format][]. The metrics are parsed directly into Telegraf metrics. It is used internally in [prometheus input](/plugins/inputs/prometheus) or can be used in [http_listener_v2](/plugins/inputs/http_listener_v2) to simulate Pushgateway. + +[Prometheus Text-Based Format]: https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "prometheus" + +``` diff --git a/plugins/parsers/prometheus/common/helpers.go b/plugins/parsers/prometheus/common/helpers.go new file mode 100644 index 0000000000000..bc1be0339dfee --- /dev/null +++ b/plugins/parsers/prometheus/common/helpers.go @@ -0,0 +1,36 @@ +package common + +import ( + "github.com/influxdata/telegraf" + dto "github.com/prometheus/client_model/go" +) + +func ValueType(mt dto.MetricType) telegraf.ValueType { + switch mt { + case dto.MetricType_COUNTER: + return telegraf.Counter + case dto.MetricType_GAUGE: + return telegraf.Gauge + case dto.MetricType_SUMMARY: + return telegraf.Summary + case dto.MetricType_HISTOGRAM: + return telegraf.Histogram + default: + return telegraf.Untyped + } +} + +// Get labels from metric +func MakeLabels(m *dto.Metric, defaultTags map[string]string) map[string]string { + result := map[string]string{} + + for key, value := range defaultTags { + result[key] = value + } + + for _, lp := range m.Label { + result[lp.GetName()] = lp.GetValue() + } + + return result +} diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go new file mode 100644 index 0000000000000..c5355ffe07a8f --- /dev/null +++ b/plugins/parsers/prometheus/parser.go @@ -0,0 +1,179 @@ +package prometheus + +import ( + "bufio" + "bytes" + "fmt" + "math" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + . "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" +) + +type Parser struct { + DefaultTags map[string]string +} + +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { + var parser expfmt.TextParser + var metrics []telegraf.Metric + var err error + // parse even if the buffer begins with a newline + buf = bytes.TrimPrefix(buf, []byte("\n")) + // Read raw data + buffer := bytes.NewBuffer(buf) + reader := bufio.NewReader(buffer) + + // Prepare output + metricFamilies := make(map[string]*dto.MetricFamily) + metricFamilies, err = parser.TextToMetricFamilies(reader) + if err != nil { + return nil, fmt.Errorf("reading text format failed: %s", err) + } + + now := time.Now() + + // read metrics + for metricName, mf := range metricFamilies { + for _, m := range mf.Metric { + // reading tags + tags := MakeLabels(m, p.DefaultTags) + + if mf.GetType() == dto.MetricType_SUMMARY { + // summary metric + telegrafMetrics := makeQuantiles(m, tags, metricName, mf.GetType(), now) + metrics = append(metrics, telegrafMetrics...) + } else if mf.GetType() == dto.MetricType_HISTOGRAM { + // histogram metric + telegrafMetrics := makeBuckets(m, tags, metricName, mf.GetType(), now) + metrics = append(metrics, telegrafMetrics...) + } else { + // standard metric + // reading fields + fields := make(map[string]interface{}) + fields = getNameAndValue(m, metricName) + // converting to telegraf metric + if len(fields) > 0 { + t := getTimestamp(m, now) + metric, err := metric.New("prometheus", tags, fields, t, ValueType(mf.GetType())) + if err == nil { + metrics = append(metrics, metric) + } + } + } + } + } + + return metrics, err +} + +func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { + metrics, err := p.Parse([]byte(line)) + if err != nil { + return nil, err + } + + if len(metrics) < 1 { + return nil, fmt.Errorf("No metrics in line") + } + + if len(metrics) > 1 { + return nil, fmt.Errorf("More than one metric in line") + } + + return metrics[0], nil +} + +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +// Get Quantiles for summary metric & Buckets for histogram +func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { + var metrics []telegraf.Metric + fields := make(map[string]interface{}) + t := getTimestamp(m, now) + + fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) + fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) + met, err := metric.New("prometheus", tags, fields, t, ValueType(metricType)) + if err == nil { + metrics = append(metrics, met) + } + + for _, q := range m.GetSummary().Quantile { + newTags := tags + fields = make(map[string]interface{}) + + newTags["quantile"] = fmt.Sprint(q.GetQuantile()) + fields[metricName] = float64(q.GetValue()) + + quantileMetric, err := metric.New("prometheus", newTags, fields, t, ValueType(metricType)) + if err == nil { + metrics = append(metrics, quantileMetric) + } + } + return metrics +} + +// Get Buckets from histogram metric +func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { + var metrics []telegraf.Metric + fields := make(map[string]interface{}) + t := getTimestamp(m, now) + + fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) + fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) + + met, err := metric.New("prometheus", tags, fields, t, ValueType(metricType)) + if err == nil { + metrics = append(metrics, met) + } + + for _, b := range m.GetHistogram().Bucket { + newTags := tags + fields = make(map[string]interface{}) + newTags["le"] = fmt.Sprint(b.GetUpperBound()) + fields[metricName+"_bucket"] = float64(b.GetCumulativeCount()) + + histogramMetric, err := metric.New("prometheus", newTags, fields, t, ValueType(metricType)) + if err == nil { + metrics = append(metrics, histogramMetric) + } + } + return metrics +} + +// Get name and value from metric +func getNameAndValue(m *dto.Metric, metricName string) map[string]interface{} { + fields := make(map[string]interface{}) + if m.Gauge != nil { + if !math.IsNaN(m.GetGauge().GetValue()) { + fields[metricName] = float64(m.GetGauge().GetValue()) + } + } else if m.Counter != nil { + if !math.IsNaN(m.GetCounter().GetValue()) { + fields[metricName] = float64(m.GetCounter().GetValue()) + } + } else if m.Untyped != nil { + if !math.IsNaN(m.GetUntyped().GetValue()) { + fields[metricName] = float64(m.GetUntyped().GetValue()) + } + } + return fields +} + +func getTimestamp(m *dto.Metric, now time.Time) time.Time { + var t time.Time + if m.TimestampMs != nil && *m.TimestampMs > 0 { + t = time.Unix(0, m.GetTimestampMs()*1000000) + } else { + t = now + } + return t +} diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go new file mode 100644 index 0000000000000..74530ef1b9233 --- /dev/null +++ b/plugins/parsers/prometheus/parser_test.go @@ -0,0 +1,346 @@ +package prometheus + +import ( + "fmt" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" +) + +const ( + validUniqueGauge = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision. +# TYPE cadvisor_version_info gauge +cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1 +` + validUniqueCounter = `# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source +# TYPE get_token_fail_count counter +get_token_fail_count 0 +` + + validUniqueLine = `# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source +` + + validUniqueSummary = `# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. +# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 5.876804288e+06 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 5.876804288e+06 +http_request_duration_microseconds_sum{handler="prometheus"} 1.8909097205e+07 +http_request_duration_microseconds_count{handler="prometheus"} 9 +` + + validUniqueHistogram = `# HELP apiserver_request_latencies Response latency distribution in microseconds for each verb, resource and client. +# TYPE apiserver_request_latencies histogram +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="125000"} 1994 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="250000"} 1997 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="500000"} 2000 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="1e+06"} 2005 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="2e+06"} 2012 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="4e+06"} 2017 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="8e+06"} 2024 +apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="+Inf"} 2025 +apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08 +apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 +` +) + +func TestParsingValidGauge(t *testing.T) { + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "osVersion": "CentOS Linux 7 (Core)", + "cadvisorRevision": "", + "cadvisorVersion": "", + "dockerVersion": "1.8.2", + "kernelVersion": "3.10.0-229.20.1.el7.x86_64", + }, + map[string]interface{}{ + "cadvisor_version_info": float64(1), + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + } + + metrics, err := parse([]byte(validUniqueGauge)) + + assert.NoError(t, err) + assert.Len(t, metrics, 1) + testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func TestParsingValieCounter(t *testing.T) { + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "get_token_fail_count": float64(0), + }, + time.Unix(0, 0), + telegraf.Counter, + ), + } + + metrics, err := parse([]byte(validUniqueCounter)) + + assert.NoError(t, err) + assert.Len(t, metrics, 1) + testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func TestParsingValidSummary(t *testing.T) { + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "handler": "prometheus", + }, + map[string]interface{}{ + "http_request_duration_microseconds_sum": float64(1.8909097205e+07), + "http_request_duration_microseconds_count": float64(9.0), + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "handler": "prometheus", + "quantile": "0.5", + }, + map[string]interface{}{ + "http_request_duration_microseconds": float64(552048.506), + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "handler": "prometheus", + "quantile": "0.9", + }, + map[string]interface{}{ + "http_request_duration_microseconds": float64(5.876804288e+06), + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "handler": "prometheus", + "quantile": "0.99", + }, + map[string]interface{}{ + "http_request_duration_microseconds": float64(5.876804288e+6), + }, + time.Unix(0, 0), + telegraf.Summary, + ), + } + + metrics, err := parse([]byte(validUniqueSummary)) + + assert.NoError(t, err) + assert.Len(t, metrics, 4) + testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func TestParsingValidHistogram(t *testing.T) { + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "verb": "POST", + "resource": "bindings", + }, + map[string]interface{}{ + "apiserver_request_latencies_count": float64(2025.0), + "apiserver_request_latencies_sum": float64(1.02726334e+08), + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "verb": "POST", + "resource": "bindings", + "le": "125000", + }, + map[string]interface{}{ + "apiserver_request_latencies_bucket": float64(1994.0), + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "verb": "POST", + "resource": "bindings", + "le": "250000", + }, + map[string]interface{}{ + "apiserver_request_latencies_bucket": float64(1997.0), + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "verb": "POST", + "resource": "bindings", + "le": "500000", + }, + map[string]interface{}{ + "apiserver_request_latencies_bucket": float64(2000.0), + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "verb": "POST", + "resource": "bindings", + "le": "1e+06", + }, + map[string]interface{}{ + "apiserver_request_latencies_bucket": float64(2005.0), + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "verb": "POST", + "resource": "bindings", + "le": "2e+06", + }, + map[string]interface{}{ + "apiserver_request_latencies_bucket": float64(2012.0), + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "verb": "POST", + "resource": "bindings", + "le": "4e+06", + }, + map[string]interface{}{ + "apiserver_request_latencies_bucket": float64(2017.0), + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "verb": "POST", + "resource": "bindings", + "le": "8e+06", + }, + map[string]interface{}{ + "apiserver_request_latencies_bucket": float64(2024.0), + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "verb": "POST", + "resource": "bindings", + "le": "+Inf", + }, + map[string]interface{}{ + "apiserver_request_latencies_bucket": float64(2025.0), + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + } + + metrics, err := parse([]byte(validUniqueHistogram)) + + assert.NoError(t, err) + assert.Len(t, metrics, 9) + testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func TestDefautTags(t *testing.T) { + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "osVersion": "CentOS Linux 7 (Core)", + "cadvisorRevision": "", + "cadvisorVersion": "", + "dockerVersion": "1.8.2", + "kernelVersion": "3.10.0-229.20.1.el7.x86_64", + "defaultTag": "defaultTagValue", + }, + map[string]interface{}{ + "cadvisor_version_info": float64(1), + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + } + + parser := Parser{ + DefaultTags: map[string]string{ + "defaultTag": "defaultTagValue", + "dockerVersion": "to_be_overriden", + }, + } + metrics, err := parser.Parse([]byte(validUniqueGauge)) + + assert.NoError(t, err) + assert.Len(t, metrics, 1) + testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func TestMetricsWithTimestamp(t *testing.T) { + testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC) + testTimeUnix := testTime.UnixNano() / int64(time.Millisecond) + metricsWithTimestamps := fmt.Sprintf(` +# TYPE test_counter counter +test_counter{label="test"} 1 %d +`, testTimeUnix) + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "label": "test", + }, + map[string]interface{}{ + "test_counter": float64(1.0), + }, + testTime, + telegraf.Counter, + ), + } + + metrics, _ := parse([]byte(metricsWithTimestamps)) + + testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics()) +} + +func parse(buf []byte) ([]telegraf.Metric, error) { + parser := Parser{} + return parser.Parse(buf) +} diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 729ed048c0720..ac31a374dd75d 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -14,6 +14,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/plugins/parsers/logfmt" "github.com/influxdata/telegraf/plugins/parsers/nagios" + "github.com/influxdata/telegraf/plugins/parsers/prometheus" "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/plugins/parsers/wavefront" ) @@ -232,6 +233,8 @@ func NewParser(config *Config) (Parser, error) { config.DefaultTags, config.FormUrlencodedTagKeys, ) + case "prometheus": + parser, err = NewPrometheusParser(config.DefaultTags) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } @@ -339,3 +342,9 @@ func NewFormUrlencodedParser( TagKeys: tagKeys, }, nil } + +func NewPrometheusParser(defaultTags map[string]string) (Parser, error) { + return &prometheus.Parser{ + DefaultTags: defaultTags, + }, nil +} From 498a6da75f7a9825f666eb8f6119c2508fcd98c9 Mon Sep 17 00:00:00 2001 From: reimda Date: Wed, 2 Dec 2020 17:06:47 -0700 Subject: [PATCH 096/761] Add node groups to opcua input plugin (#8389) --- plugins/inputs/opcua/README.md | 101 ++++++-- plugins/inputs/opcua/opcua_client.go | 281 ++++++++++++++++------ plugins/inputs/opcua/opcua_client_test.go | 181 ++++++++++++-- 3 files changed, 449 insertions(+), 114 deletions(-) diff --git a/plugins/inputs/opcua/README.md b/plugins/inputs/opcua/README.md index 173d98b6fac98..d6530c0839b18 100644 --- a/plugins/inputs/opcua/README.md +++ b/plugins/inputs/opcua/README.md @@ -9,8 +9,8 @@ Plugin minimum tested version: 1.16 ```toml [[inputs.opcua]] - ## Device name - # name = "localhost" + ## Metric name + # name = "opcua" # ## OPC UA Endpoint URL # endpoint = "opc.tcp://localhost:4840" @@ -47,34 +47,97 @@ Plugin minimum tested version: 1.16 # password = "" # ## Node ID configuration - ## name - the variable name - ## namespace - integer value 0 thru 3 - ## identifier_type - s=string, i=numeric, g=guid, b=opaque - ## identifier - tag as shown in opcua browser - ## data_type - boolean, byte, short, int, uint, uint16, int16, - ## uint32, int32, float, double, string, datetime, number + ## name - field name to use in the output + ## namespace - OPC UA namespace of the node (integer value 0 thru 3) + ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) + ## identifier - OPC UA ID (tag as shown in opcua browser) + ## tags - extra tags to be added to the output metric (optional) ## Example: - ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"} - nodes = [ - {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, - {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, - ] + ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", tags=[["tag1","value1"],["tag2","value2]]} + # nodes = [ + # {name="", namespace="", identifier_type="", identifier=""}, + # {name="", namespace="", identifier_type="", identifier=""}, + #] + # + ## Node Group + ## Sets defaults for OPC UA namespace and ID type so they aren't required in + ## every node. A group can also have a metric name that overrides the main + ## plugin metric name. + ## + ## Multiple node groups are allowed + #[[inputs.opcua.group]] + ## Group Metric name. Overrides the top level name. If unset, the + ## top level name is used. + # name = + # + ## Group default namespace. If a node in the group doesn't set its + ## namespace, this is used. + # namespace = + # + ## Group default identifier type. If a node in the group doesn't set its + ## namespace, this is used. + # identifier_type = + # + ## Node ID Configuration. Array of nodes with the same settings as above. + # nodes = [ + # {name="", namespace="", identifier_type="", identifier=""}, + # {name="", namespace="", identifier_type="", identifier=""}, + #] ``` -### Example Node Configuration -An OPC UA node ID may resemble: "n=3,s=Temperature". In this example: +### Node Configuration +An OPC UA node ID may resemble: "n=3;s=Temperature". In this example: - n=3 is indicating the `namespace` is 3 - s=Temperature is indicting that the `identifier_type` is a string and `identifier` value is 'Temperature' -- This example temperature node has a value of 79.0, which makes the `data_type` a 'float'. +- This example temperature node has a value of 79.0 To gather data from this node enter the following line into the 'nodes' property above: ``` -{name="LabelName", namespace="3", identifier_type="s", identifier="Temperature", data_type="float", description="Description of node"}, +{field_name="temp", namespace="3", identifier_type="s", identifier="Temperature"}, +``` + +This node configuration produces a metric like this: +``` +opcua,id=n\=3;s\=Temperature temp=79.0,quality="OK (0x0)" 1597820490000000000 + ``` +### Group Configuration +Groups can set default values for the namespace, identifier type, and +tags settings. The default values apply to all the nodes in the +group. If a default is set, a node may omit the setting altogether. +This simplifies node configuration, especially when many nodes share +the same namespace or identifier type. -### Example Output +The output metric will include tags set in the group and the node. If +a tag with the same name is set in both places, the tag value from the +node is used. +This example group configuration has two groups with two nodes each: +``` + [[inputs.opcua.group]] + name="group1_metric_name" + namespace="3" + identifier_type="i" + tags=[["group1_tag", "val1"]] + nodes = [ + {name="name", identifier="1001", tags=[["node1_tag", "val2"]]}, + {name="name", identifier="1002", tags=[["node1_tag", "val3"]]}, + ] + [[inputs.opcua.group]] + name="group2_metric_name" + namespace="3" + identifier_type="i" + tags=[["group2_tag", "val3"]] + nodes = [ + {name="saw", identifier="1003", tags=[["node2_tag", "val4"]]}, + {name="sin", identifier="1004"}, + ] ``` -opcua,host=3c70aee0901e,name=Random,type=double Random=0.018158170305814902 1597820490000000000 +It produces metrics like these: +``` +group1_metric_name,group1_tag=val1,id=ns\=3;i\=1001,node1_tag=val2 name=0,Quality="OK (0x0)" 1606893246000000000 +group1_metric_name,group1_tag=val1,id=ns\=3;i\=1002,node1_tag=val3 name=-1.389117,Quality="OK (0x0)" 1606893246000000000 +group2_metric_name,group2_tag=val3,id=ns\=3;i\=1003,node2_tag=val4 Quality="OK (0x0)",saw=-1.6 1606893246000000000 +group2_metric_name,group2_tag=val3,id=ns\=3;i\=1004 sin=1.902113,Quality="OK (0x0)" 1606893246000000000 ``` diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index 87647e2b9d5f8..0481a3b08241e 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "net/url" + "sort" "strings" "time" @@ -13,11 +14,12 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/selfstat" ) // OpcUA type type OpcUA struct { - Name string `toml:"name"` + MetricName string `toml:"name"` Endpoint string `toml:"endpoint"` SecurityPolicy string `toml:"security_policy"` SecurityMode string `toml:"security_mode"` @@ -28,18 +30,18 @@ type OpcUA struct { AuthMethod string `toml:"auth_method"` ConnectTimeout config.Duration `toml:"connect_timeout"` RequestTimeout config.Duration `toml:"request_timeout"` - NodeList []OPCTag `toml:"nodes"` + RootNodes []NodeSettings `toml:"nodes"` + Groups []GroupSettings `toml:"group"` - Nodes []string `toml:"-"` - NodeData []OPCData `toml:"-"` - NodeIDs []*ua.NodeID `toml:"-"` - NodeIDerror []error `toml:"-"` + nodes []Node + nodeData []OPCData + nodeIDs []*ua.NodeID + nodeIDerror []error state ConnectionState // status - ReadSuccess int `toml:"-"` - ReadError int `toml:"-"` - NumberOfTags int `toml:"-"` + ReadSuccess selfstat.Stat `toml:"-"` + ReadError selfstat.Stat `toml:"-"` // internal values client *opcua.Client @@ -48,13 +50,29 @@ type OpcUA struct { } // OPCTag type -type OPCTag struct { - Name string `toml:"name"` - Namespace string `toml:"namespace"` - IdentifierType string `toml:"identifier_type"` - Identifier string `toml:"identifier"` - DataType string `toml:"data_type"` - Description string `toml:"description"` +type NodeSettings struct { + FieldName string `toml:"name"` + Namespace string `toml:"namespace"` + IdentifierType string `toml:"identifier_type"` + Identifier string `toml:"identifier"` + DataType string `toml:"data_type"` // Kept for backward compatibility but was never used. + Description string `toml:"description"` // Kept for backward compatibility but was never used. + TagsSlice [][]string `toml:"tags"` +} + +type Node struct { + tag NodeSettings + idStr string + metricName string + metricTags map[string]string +} + +type GroupSettings struct { + MetricName string `toml:"name"` // Overrides plugin's setting + Namespace string `toml:"namespace"` // Can be overridden by node setting + IdentifierType string `toml:"identifier_type"` // Can be overridden by node setting + Nodes []NodeSettings `toml:"nodes"` + TagsSlice [][]string `toml:"tags"` } // OPCData type @@ -81,9 +99,8 @@ const ( const description = `Retrieve data from OPCUA devices` const sampleConfig = ` -[[inputs.opcua]] - ## Device name - # name = "localhost" + ## Metric name + # name = "opcua" # ## OPC UA Endpoint URL # endpoint = "opc.tcp://localhost:4840" @@ -120,18 +137,41 @@ const sampleConfig = ` # password = "" # ## Node ID configuration - ## name - the variable name - ## namespace - integer value 0 thru 3 - ## identifier_type - s=string, i=numeric, g=guid, b=opaque - ## identifier - tag as shown in opcua browser - ## data_type - boolean, byte, short, int, uint, uint16, int16, - ## uint32, int32, float, double, string, datetime, number + ## name - field name to use in the output + ## namespace - OPC UA namespace of the node (integer value 0 thru 3) + ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) + ## identifier - OPC UA ID (tag as shown in opcua browser) ## Example: - ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"} - nodes = [ - {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, - {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, - ] + ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262"} + # nodes = [ + # {name="", namespace="", identifier_type="", identifier=""}, + # {name="", namespace="", identifier_type="", identifier=""}, + #] + # + ## Node Group + ## Sets defaults for OPC UA namespace and ID type so they aren't required in + ## every node. A group can also have a metric name that overrides the main + ## plugin metric name. + ## + ## Multiple node groups are allowed + #[[inputs.opcua.group]] + ## Group Metric name. Overrides the top level name. If unset, the + ## top level name is used. + # name = + # + ## Group default namespace. If a node in the group doesn't set its + ## namespace, this is used. + # namespace = + # + ## Group default identifier type. If a node in the group doesn't set its + ## namespace, this is used. + # identifier_type = + # + ## Node ID Configuration. Array of nodes with the same settings as above. + # nodes = [ + # {name="", namespace="", identifier_type="", identifier=""}, + # {name="", namespace="", identifier_type="", identifier=""}, + #] ` // Description will appear directly above the plugin definition in the config file @@ -157,16 +197,21 @@ func (o *OpcUA) Init() error { if err != nil { return err } - o.NumberOfTags = len(o.NodeList) o.setupOptions() + tags := map[string]string{ + "endpoint": o.Endpoint, + } + o.ReadError = selfstat.Register("opcua", "read_error", tags) + o.ReadSuccess = selfstat.Register("opcua", "read_success", tags) + return nil } func (o *OpcUA) validateEndpoint() error { - if o.Name == "" { + if o.MetricName == "" { return fmt.Errorf("device name is empty") } @@ -184,22 +229,79 @@ func (o *OpcUA) validateEndpoint() error { case "None", "Basic128Rsa15", "Basic256", "Basic256Sha256", "auto": break default: - return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityPolicy, o.Name) + return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityPolicy, o.MetricName) } //search security mode type switch o.SecurityMode { case "None", "Sign", "SignAndEncrypt", "auto": break default: - return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityMode, o.Name) + return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityMode, o.MetricName) } return nil } +func tagsSliceToMap(tags [][]string) (map[string]string, error) { + m := make(map[string]string) + for i, tag := range tags { + if len(tag) != 2 { + return nil, fmt.Errorf("tag %d needs 2 values, has %d: %v", i+1, len(tag), tag) + } + if tag[0] == "" { + return nil, fmt.Errorf("tag %d has empty name", i+1) + } + if tag[1] == "" { + return nil, fmt.Errorf("tag %d has empty value", i+1) + } + if _, ok := m[tag[0]]; ok { + return nil, fmt.Errorf("tag %d has duplicate key: %v", i+1, tag[0]) + } + m[tag[0]] = tag[1] + } + return m, nil +} + //InitNodes Method on OpcUA func (o *OpcUA) InitNodes() error { - if len(o.NodeList) == 0 { - return nil + for _, node := range o.RootNodes { + o.nodes = append(o.nodes, Node{ + metricName: o.MetricName, + tag: node, + }) + } + + for _, group := range o.Groups { + if group.MetricName == "" { + group.MetricName = o.MetricName + } + groupTags, err := tagsSliceToMap(group.TagsSlice) + if err != nil { + return err + } + for _, node := range group.Nodes { + if node.Namespace == "" { + node.Namespace = group.Namespace + } + if node.IdentifierType == "" { + node.IdentifierType = group.IdentifierType + } + nodeTags, err := tagsSliceToMap(node.TagsSlice) + if err != nil { + return err + } + mergedTags := make(map[string]string) + for k, v := range groupTags { + mergedTags[k] = v + } + for k, v := range nodeTags { + mergedTags[k] = v + } + o.nodes = append(o.nodes, Node{ + metricName: group.MetricName, + tag: node, + metricTags: mergedTags, + }) + } } err := o.validateOPCTags() @@ -210,50 +312,74 @@ func (o *OpcUA) InitNodes() error { return nil } +type metricParts struct { + metricName string + fieldName string + tags string // sorted by tag name and in format tag1=value1, tag2=value2 +} + +func newMP(n *Node) metricParts { + var keys []string + for key := range n.metricTags { + keys = append(keys, key) + } + sort.Strings(keys) + var sb strings.Builder + for i, key := range keys { + if i != 0 { + sb.WriteString(", ") + } + sb.WriteString(key) + sb.WriteString("=") + sb.WriteString(n.metricTags[key]) + } + x := metricParts{ + metricName: n.metricName, + fieldName: n.tag.FieldName, + tags: sb.String(), + } + return x +} + func (o *OpcUA) validateOPCTags() error { - nameEncountered := map[string]bool{} - for i, item := range o.NodeList { + nameEncountered := map[metricParts]struct{}{} + for _, node := range o.nodes { + mp := newMP(&node) //check empty name - if item.Name == "" { - return fmt.Errorf("empty name in '%s'", item.Name) + if node.tag.FieldName == "" { + return fmt.Errorf("empty name in '%s'", node.tag.FieldName) } //search name duplicate - if nameEncountered[item.Name] { - return fmt.Errorf("name '%s' is duplicated in '%s'", item.Name, item.Name) + if _, ok := nameEncountered[mp]; ok { + return fmt.Errorf("name '%s' is duplicated (metric name '%s', tags '%s')", + mp.fieldName, mp.metricName, mp.tags) } else { - nameEncountered[item.Name] = true + //add it to the set + nameEncountered[mp] = struct{}{} } //search identifier type - switch item.IdentifierType { + switch node.tag.IdentifierType { case "s", "i", "g", "b": break default: - return fmt.Errorf("invalid identifier type '%s' in '%s'", item.IdentifierType, item.Name) - } - // search data type - switch item.DataType { - case "boolean", "byte", "short", "int", "uint", "uint16", "int16", "uint32", "int32", "float", "double", "string", "datetime", "number": - break - default: - return fmt.Errorf("invalid data type '%s' in '%s'", item.DataType, item.Name) + return fmt.Errorf("invalid identifier type '%s' in '%s'", node.tag.IdentifierType, node.tag.FieldName) } - // build nodeid - o.Nodes = append(o.Nodes, BuildNodeID(item)) + node.idStr = BuildNodeID(node.tag) //parse NodeIds and NodeIds errors - nid, niderr := ua.ParseNodeID(o.Nodes[i]) + nid, niderr := ua.ParseNodeID(node.idStr) // build NodeIds and Errors - o.NodeIDs = append(o.NodeIDs, nid) - o.NodeIDerror = append(o.NodeIDerror, niderr) + o.nodeIDs = append(o.nodeIDs, nid) + o.nodeIDerror = append(o.nodeIDerror, niderr) // Grow NodeData for later input - o.NodeData = append(o.NodeData, OPCData{}) + o.nodeData = append(o.nodeData, OPCData{}) } return nil } // BuildNodeID build node ID from OPC tag -func BuildNodeID(tag OPCTag) string { +func BuildNodeID(tag NodeSettings) string { return "ns=" + tag.Namespace + ";" + tag.IdentifierType + "=" + tag.Identifier } @@ -280,7 +406,7 @@ func Connect(o *OpcUA) error { } regResp, err := o.client.RegisterNodes(&ua.RegisterNodesRequest{ - NodesToRegister: o.NodeIDs, + NodesToRegister: o.nodeIDs, }) if err != nil { return fmt.Errorf("RegisterNodes failed: %v", err) @@ -325,22 +451,22 @@ func (o *OpcUA) setupOptions() error { func (o *OpcUA) getData() error { resp, err := o.client.Read(o.req) if err != nil { - o.ReadError++ + o.ReadError.Incr(1) return fmt.Errorf("RegisterNodes Read failed: %v", err) } - o.ReadSuccess++ + o.ReadSuccess.Incr(1) for i, d := range resp.Results { if d.Status != ua.StatusOK { return fmt.Errorf("Status not OK: %v", d.Status) } - o.NodeData[i].TagName = o.NodeList[i].Name + o.nodeData[i].TagName = o.nodes[i].tag.FieldName if d.Value != nil { - o.NodeData[i].Value = d.Value.Value() - o.NodeData[i].DataType = d.Value.Type() + o.nodeData[i].Value = d.Value.Value() + o.nodeData[i].DataType = d.Value.Type() } - o.NodeData[i].Quality = d.Status - o.NodeData[i].TimeStamp = d.ServerTimestamp.String() - o.NodeData[i].Time = d.SourceTimestamp.String() + o.nodeData[i].Quality = d.Status + o.nodeData[i].TimeStamp = d.ServerTimestamp.String() + o.nodeData[i].Time = d.SourceTimestamp.String() } return nil } @@ -359,9 +485,6 @@ func disconnect(o *OpcUA) error { return err } - o.ReadError = 0 - o.ReadSuccess = 0 - switch u.Scheme { case "opc.tcp": o.state = Disconnected @@ -392,16 +515,18 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { return err } - for i, n := range o.NodeList { + for i, n := range o.nodes { fields := make(map[string]interface{}) tags := map[string]string{ - "name": n.Name, - "id": BuildNodeID(n), + "id": n.idStr, + } + for k, v := range n.metricTags { + tags[k] = v } - fields[o.NodeData[i].TagName] = o.NodeData[i].Value - fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.NodeData[i].Quality)) - acc.AddFields(o.Name, fields, tags) + fields[o.nodeData[i].TagName] = o.nodeData[i].Value + fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.nodeData[i].Quality)) + acc.AddFields(n.metricName, fields, tags) } return nil } @@ -410,7 +535,7 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("opcua", func() telegraf.Input { return &OpcUA{ - Name: "localhost", + MetricName: "opcua", Endpoint: "opc.tcp://localhost:4840", SecurityPolicy: "auto", SecurityMode: "auto", diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index 637ac87bc0afa..26dd2fbd4f40d 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/influxdata/telegraf/config" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -15,7 +16,6 @@ type OPCTags struct { Namespace string IdentifierType string Identifier string - DataType string Want string } @@ -25,15 +25,15 @@ func TestClient1(t *testing.T) { } var testopctags = []OPCTags{ - {"ProductName", "0", "i", "2261", "string", "open62541 OPC UA Server"}, - {"ProductUri", "0", "i", "2262", "string", "http://open62541.org"}, - {"ManufacturerName", "0", "i", "2263", "string", "open62541"}, + {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, + {"ProductUri", "0", "i", "2262", "http://open62541.org"}, + {"ManufacturerName", "0", "i", "2263", "open62541"}, } var o OpcUA var err error - o.Name = "testing" + o.MetricName = "testing" o.Endpoint = "opc.tcp://opcua.rocks:4840" o.AuthMethod = "Anonymous" o.ConnectTimeout = config.Duration(10 * time.Second) @@ -41,7 +41,7 @@ func TestClient1(t *testing.T) { o.SecurityPolicy = "None" o.SecurityMode = "None" for _, tags := range testopctags { - o.NodeList = append(o.NodeList, MapOPCTag(tags)) + o.RootNodes = append(o.RootNodes, MapOPCTag(tags)) } err = o.Init() if err != nil { @@ -52,26 +52,25 @@ func TestClient1(t *testing.T) { t.Fatalf("Connect Error: %s", err) } - for i, v := range o.NodeData { + for i, v := range o.nodeData { if v.Value != nil { types := reflect.TypeOf(v.Value) value := reflect.ValueOf(v.Value) compare := fmt.Sprintf("%v", value.Interface()) if compare != testopctags[i].Want { - t.Errorf("Tag %s: Values %v for type %s does not match record", o.NodeList[i].Name, value.Interface(), types) + t.Errorf("Tag %s: Values %v for type %s does not match record", o.nodes[i].tag.FieldName, value.Interface(), types) } } else { - t.Errorf("Tag: %s has value: %v", o.NodeList[i].Name, v.Value) + t.Errorf("Tag: %s has value: %v", o.nodes[i].tag.FieldName, v.Value) } } } -func MapOPCTag(tags OPCTags) (out OPCTag) { - out.Name = tags.Name +func MapOPCTag(tags OPCTags) (out NodeSettings) { + out.FieldName = tags.Name out.Namespace = tags.Namespace out.IdentifierType = tags.IdentifierType out.Identifier = tags.Identifier - out.DataType = tags.DataType return out } @@ -90,9 +89,21 @@ auth_method = "Anonymous" username = "" password = "" nodes = [ - {name="name", namespace="", identifier_type="", identifier="", data_type="", description=""}, - {name="name2", namespace="", identifier_type="", identifier="", data_type="", description=""}, + {name="name", namespace="1", identifier_type="s", identifier="one"}, + {name="name2", namespace="2", identifier_type="s", identifier="two"}, ] +[[inputs.opcua.group]] +name = "foo" +namespace = "3" +identifier_type = "i" +tags = [["tag1", "val1"], ["tag2", "val2"]] +nodes = [{name="name3", identifier="3000", tags=[["tag3", "val3"]]}] +[[inputs.opcua.group]] +name = "bar" +namespace = "0" +identifier_type = "i" +tags = [["tag1", "val1"], ["tag2", "val2"]] +nodes = [{name="name4", identifier="4000", tags=[["tag1", "override"]]}] ` c := config.NewConfig() @@ -104,7 +115,143 @@ nodes = [ o, ok := c.Inputs[0].Input.(*OpcUA) require.True(t, ok) - require.Len(t, o.NodeList, 2) - require.Equal(t, o.NodeList[0].Name, "name") - require.Equal(t, o.NodeList[1].Name, "name2") + require.Len(t, o.RootNodes, 2) + require.Equal(t, o.RootNodes[0].FieldName, "name") + require.Equal(t, o.RootNodes[1].FieldName, "name2") + + require.Len(t, o.Groups, 2) + require.Equal(t, o.Groups[0].MetricName, "foo") + require.Len(t, o.Groups[0].Nodes, 1) + require.Equal(t, o.Groups[0].Nodes[0].Identifier, "3000") + + require.NoError(t, o.InitNodes()) + require.Len(t, o.nodes, 4) + require.Len(t, o.nodes[2].metricTags, 3) + require.Len(t, o.nodes[3].metricTags, 2) +} + +func TestTagsSliceToMap(t *testing.T) { + m, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"baz", "bat"}}) + assert.NoError(t, err) + assert.Len(t, m, 2) + assert.Equal(t, m["foo"], "bar") + assert.Equal(t, m["baz"], "bat") +} + +func TestTagsSliceToMap_twoStrings(t *testing.T) { + var err error + _, err = tagsSliceToMap([][]string{{"foo", "bar", "baz"}}) + assert.Error(t, err) + _, err = tagsSliceToMap([][]string{{"foo"}}) + assert.Error(t, err) +} + +func TestTagsSliceToMap_dupeKey(t *testing.T) { + _, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"foo", "bat"}}) + assert.Error(t, err) +} + +func TestTagsSliceToMap_empty(t *testing.T) { + _, err := tagsSliceToMap([][]string{{"foo", ""}}) + assert.Equal(t, fmt.Errorf("tag 1 has empty value"), err) + _, err = tagsSliceToMap([][]string{{"", "bar"}}) + assert.Equal(t, fmt.Errorf("tag 1 has empty name"), err) +} + +func TestValidateOPCTags(t *testing.T) { + tests := []struct { + name string + nodes []Node + err error + }{ + { + "same", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "v1", "t2": "v2"}, + }, + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "v1", "t2": "v2"}, + }, + }, + fmt.Errorf("name 'fn' is duplicated (metric name 'mn', tags 't1=v1, t2=v2')"), + }, + { + "different metric tag names", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t3": ""}, + }, + }, + nil, + }, + { + "different metric tag values", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "foo", "t2": ""}, + }, + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "bar", "t2": ""}, + }, + }, + nil, + }, + { + "different metric names", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + { + metricName: "mn2", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + }, + nil, + }, + { + "different field names", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn2", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + }, + nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := OpcUA{ + nodes: tt.nodes, + } + require.Equal(t, tt.err, o.validateOPCTags()) + }) + } } From a267570ae306f91151959476ab492a8858450b8b Mon Sep 17 00:00:00 2001 From: Igor Kuchmienko <56545352+IgorKuchmienko@users.noreply.github.com> Date: Thu, 3 Dec 2020 19:05:43 +0300 Subject: [PATCH 097/761] Add column measurement_db_type to output of all queries if not empty (#8464) --- plugins/inputs/sqlserver/sqlserver.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 2ed4df266598f..4b0bd5e3f199c 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -307,6 +307,10 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e } } + if s.DatabaseType != "" { + tags["measurement_db_type"] = s.DatabaseType + } + if query.ResultByRow { // add measurement to Accumulator acc.AddFields(measurement, From def6963b5e514b0ffd0c73d594311b6099ce1e5b Mon Sep 17 00:00:00 2001 From: Igor Kuchmienko <56545352+IgorKuchmienko@users.noreply.github.com> Date: Thu, 3 Dec 2020 19:06:19 +0300 Subject: [PATCH 098/761] sqlAzureMIRequests - remove duplicate column [session_db_name] (#8462) --- plugins/inputs/sqlserver/azuresqlqueries.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go index 4e936b98d3c8e..0a90ab80669de 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqlqueries.go @@ -1093,8 +1093,7 @@ SELECT ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] ,DB_NAME() as [database_name] ,s.[session_id] - ,ISNULL(r.[request_id], 0) as [request_id] - ,DB_NAME(s.[database_id]) as [session_db_name] + ,ISNULL(r.[request_id], 0) as [request_id] ,COALESCE(r.[status], s.[status]) AS [status] ,COALESCE(r.[cpu_time], s.[cpu_time]) AS [cpu_time_ms] ,COALESCE(r.[total_elapsed_time], s.[total_elapsed_time]) AS [total_elapsed_time_ms] From f7d94430d29d15c42d8d186c99bfd065b9a21fa0 Mon Sep 17 00:00:00 2001 From: bhsu-ms <72472578+bhsu-ms@users.noreply.github.com> Date: Thu, 3 Dec 2020 08:07:08 -0800 Subject: [PATCH 099/761] Added is_primary_replica for monitoring readable secondaries for Azure SQL DB (#8368) --- plugins/inputs/sqlserver/azuresqlqueries.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go index 0a90ab80669de..3504658c99f20 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqlqueries.go @@ -31,6 +31,7 @@ SELECT TOP(1) ,[end_time] ,cast([avg_instance_memory_percent] as float) as [avg_instance_memory_percent] ,cast([avg_instance_cpu_percent] as float) as [avg_instance_cpu_percent] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_db_resource_stats WITH (NOLOCK) ORDER BY @@ -80,6 +81,7 @@ SELECT ,[volume_type_external_xstore_iops] ,[volume_pfs_iops] ,[volume_type_pfs_iops] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_user_db_resource_governance WITH (NOLOCK); ` @@ -103,6 +105,7 @@ SELECT ,dbws.[signal_wait_time_ms] ,dbws.[max_wait_time_ms] ,dbws.[waiting_tasks_count] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_db_wait_stats AS dbws WITH (NOLOCK) WHERE @@ -180,6 +183,7 @@ SELECT END AS [file_type] ,ISNULL([size],0)/128 AS [current_size_mb] ,ISNULL(FILEPROPERTY(b.[logical_filename],'SpaceUsed')/128,0) as [space_used_mb] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM [sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs -- needed to get Tempdb file names on Azure SQL DB so you can join appropriately. Without this had a bug where join was only on file_id @@ -237,6 +241,7 @@ SELECT ) END AS [available_storage_mb] ,(select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as [uptime] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.[databases] AS d -- sys.databases.database_id may not match current DB_ID on Azure SQL DB CROSS JOIN sys.[database_service_objectives] AS slo @@ -320,6 +325,7 @@ SELECT 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' ELSE 'Other' END as [wait_category] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) WHERE ws.[wait_type] NOT IN ( @@ -374,6 +380,7 @@ SELECT ,DB_NAME() AS [database_name] ,mc.[type] AS [clerk_type] ,SUM(mc.[pages_kb]) AS [size_kb] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) GROUP BY mc.[type] @@ -542,6 +549,7 @@ SELECT END AS [instance] ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability from @PCounters pc LEFT OUTER JOIN @PCounters AS pc1 ON ( @@ -611,6 +619,7 @@ SELECT ,DB_NAME(qt.[dbid]) [stmt_db_name] ,CONVERT(varchar(20),[query_hash],1) as [query_hash] ,CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_exec_sessions AS s LEFT OUTER JOIN sys.dm_exec_requests AS r ON s.[session_id] = r.[session_id] @@ -653,6 +662,7 @@ SELECT ,s.[yield_count] ,s.[total_cpu_usage_ms] ,s.[total_scheduler_delay_ms] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_os_schedulers AS s ` From 7f3773e8e77446f79df09f7df711c4d9397a196c Mon Sep 17 00:00:00 2001 From: Stephanie Engel <22456349+stephanie-engel@users.noreply.github.com> Date: Thu, 3 Dec 2020 13:42:50 -0600 Subject: [PATCH 100/761] Add configurable timeout to bind input plugin http call (#8508) --- plugins/inputs/bind/README.md | 1 + plugins/inputs/bind/bind.go | 19 +++++++++++++++---- plugins/inputs/bind/bind_test.go | 10 ++++++++++ plugins/inputs/bind/json_stats.go | 2 +- plugins/inputs/bind/xml_stats_v2.go | 2 +- plugins/inputs/bind/xml_stats_v3.go | 2 +- 6 files changed, 29 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/bind/README.md b/plugins/inputs/bind/README.md index e3bcf6a75b252..d67a02020f527 100644 --- a/plugins/inputs/bind/README.md +++ b/plugins/inputs/bind/README.md @@ -20,6 +20,7 @@ not enable support for JSON statistics in their BIND packages. trailing slash in the URL. Default is "http://localhost:8053/xml/v3". - **gather_memory_contexts** bool: Report per-context memory statistics. - **gather_views** bool: Report per-view query statistics. +- **timeout** Timeout for http requests made by bind nameserver (example: "4s"). The following table summarizes the URL formats which should be used, depending on your BIND version and configured statistics channel. diff --git a/plugins/inputs/bind/bind.go b/plugins/inputs/bind/bind.go index 967c9031a2634..e27fdfc38ec71 100644 --- a/plugins/inputs/bind/bind.go +++ b/plugins/inputs/bind/bind.go @@ -8,6 +8,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -15,6 +16,9 @@ type Bind struct { Urls []string GatherMemoryContexts bool GatherViews bool + Timeout config.Duration `toml:"timeout"` + + client http.Client } var sampleConfig = ` @@ -23,11 +27,10 @@ var sampleConfig = ` # urls = ["http://localhost:8053/xml/v3"] # gather_memory_contexts = false # gather_views = false -` -var client = &http.Client{ - Timeout: time.Duration(4 * time.Second), -} + ## Timeout for http requests made by bind nameserver + # timeout = "4s" +` func (b *Bind) Description() string { return "Read BIND nameserver XML statistics" @@ -37,6 +40,14 @@ func (b *Bind) SampleConfig() string { return sampleConfig } +func (b *Bind) Init() error { + b.client = http.Client{ + Timeout: time.Duration(b.Timeout), + } + + return nil +} + func (b *Bind) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup diff --git a/plugins/inputs/bind/bind_test.go b/plugins/inputs/bind/bind_test.go index 6ed953b691dd3..7ca79c1ef19a4 100644 --- a/plugins/inputs/bind/bind_test.go +++ b/plugins/inputs/bind/bind_test.go @@ -5,6 +5,7 @@ import ( "net/http" "net/http/httptest" "testing" + "time" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -20,6 +21,9 @@ func TestBindJsonStats(t *testing.T) { Urls: []string{ts.URL + "/json/v1"}, GatherMemoryContexts: true, GatherViews: true, + client: http.Client{ + Timeout: 4 * time.Second, + }, } var acc testutil.Accumulator @@ -190,6 +194,9 @@ func TestBindXmlStatsV2(t *testing.T) { Urls: []string{ts.URL + "/xml/v2"}, GatherMemoryContexts: true, GatherViews: true, + client: http.Client{ + Timeout: 4 * time.Second, + }, } var acc testutil.Accumulator @@ -392,6 +399,9 @@ func TestBindXmlStatsV3(t *testing.T) { Urls: []string{ts.URL + "/xml/v3"}, GatherMemoryContexts: true, GatherViews: true, + client: http.Client{ + Timeout: 4 * time.Second, + }, } var acc testutil.Accumulator diff --git a/plugins/inputs/bind/json_stats.go b/plugins/inputs/bind/json_stats.go index 87b6065e2eb1c..906ab21d97a69 100644 --- a/plugins/inputs/bind/json_stats.go +++ b/plugins/inputs/bind/json_stats.go @@ -155,7 +155,7 @@ func (b *Bind) readStatsJSON(addr *url.URL, acc telegraf.Accumulator) error { for _, suffix := range [...]string{"/server", "/net", "/mem"} { scrapeUrl := addr.String() + suffix - resp, err := client.Get(scrapeUrl) + resp, err := b.client.Get(scrapeUrl) if err != nil { return err } diff --git a/plugins/inputs/bind/xml_stats_v2.go b/plugins/inputs/bind/xml_stats_v2.go index 5e17851fb671c..ce7116a199d9e 100644 --- a/plugins/inputs/bind/xml_stats_v2.go +++ b/plugins/inputs/bind/xml_stats_v2.go @@ -89,7 +89,7 @@ func addXMLv2Counter(acc telegraf.Accumulator, commonTags map[string]string, sta func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error { var stats v2Root - resp, err := client.Get(addr.String()) + resp, err := b.client.Get(addr.String()) if err != nil { return err } diff --git a/plugins/inputs/bind/xml_stats_v3.go b/plugins/inputs/bind/xml_stats_v3.go index 89e4ea0b8fcb6..7d36e000b9d95 100644 --- a/plugins/inputs/bind/xml_stats_v3.go +++ b/plugins/inputs/bind/xml_stats_v3.go @@ -140,7 +140,7 @@ func (b *Bind) readStatsXMLv3(addr *url.URL, acc telegraf.Accumulator) error { for _, suffix := range [...]string{"/server", "/net", "/mem"} { scrapeUrl := addr.String() + suffix - resp, err := client.Get(scrapeUrl) + resp, err := b.client.Get(scrapeUrl) if err != nil { return err } From ef6ce2c9d9091493a60e7546613e4ca55401a0c9 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Fri, 4 Dec 2020 12:08:11 -0500 Subject: [PATCH 101/761] fixed network test (#8498) --- plugins/inputs/http_response/http_response.go | 6 +++++- plugins/inputs/http_response/http_response_test.go | 13 +++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index bd3078e490c33..434dccca8d9c6 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -52,7 +52,11 @@ type HTTPResponse struct { Log telegraf.Logger compiledStringMatch *regexp.Regexp - client *http.Client + client httpClient +} + +type httpClient interface { + Do(req *http.Request) (*http.Response, error) } // Description returns the plugin Description diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 3c290c1539c31..7b25b4be57220 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -11,6 +11,7 @@ import ( "net" "net/http" "net/http/httptest" + "net/url" "testing" "time" @@ -905,6 +906,17 @@ func TestBadRegex(t *testing.T) { checkOutput(t, &acc, nil, nil, absentFields, absentTags) } +type fakeClient struct { + statusCode int + err error +} + +func (f *fakeClient) Do(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: f.statusCode, + }, f.err +} + func TestNetworkErrors(t *testing.T) { // DNS error h := &HTTPResponse{ @@ -914,6 +926,7 @@ func TestNetworkErrors(t *testing.T) { Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, FollowRedirects: false, + client: &fakeClient{err: &url.Error{Err: &net.OpError{Err: &net.DNSError{Err: "DNS error"}}}}, } var acc testutil.Accumulator From 7c5754ef8d853e45ea933292ec7391cc5db5bcc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Fri, 4 Dec 2020 18:37:18 +0100 Subject: [PATCH 102/761] Fix carbon2 tests (#8254) --- plugins/serializers/carbon2/carbon2_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/serializers/carbon2/carbon2_test.go b/plugins/serializers/carbon2/carbon2_test.go index 7ed98d6e6d6da..1d6359858dd9e 100644 --- a/plugins/serializers/carbon2/carbon2_test.go +++ b/plugins/serializers/carbon2/carbon2_test.go @@ -228,34 +228,34 @@ func TestSerializeMetricBool(t *testing.T) { testcases := []struct { metric telegraf.Metric - format string + format format expected string }{ { metric: requireMetric(t, now, false), - format: string(Carbon2FormatFieldSeparate), + format: Carbon2FormatFieldSeparate, expected: fmt.Sprintf("metric=cpu field=java_lang_GarbageCollector_Valid tag_name=tag_value 0 %d\n", now.Unix()), }, { metric: requireMetric(t, now, false), - format: string(Carbon2FormatMetricIncludesField), + format: Carbon2FormatMetricIncludesField, expected: fmt.Sprintf("metric=cpu_java_lang_GarbageCollector_Valid tag_name=tag_value 0 %d\n", now.Unix()), }, { metric: requireMetric(t, now, true), - format: string(Carbon2FormatFieldSeparate), + format: Carbon2FormatFieldSeparate, expected: fmt.Sprintf("metric=cpu field=java_lang_GarbageCollector_Valid tag_name=tag_value 1 %d\n", now.Unix()), }, { metric: requireMetric(t, now, true), - format: string(Carbon2FormatMetricIncludesField), + format: Carbon2FormatMetricIncludesField, expected: fmt.Sprintf("metric=cpu_java_lang_GarbageCollector_Valid tag_name=tag_value 1 %d\n", now.Unix()), }, } for _, tc := range testcases { - t.Run(tc.format, func(t *testing.T) { - s, err := NewSerializer(tc.format) + t.Run(string(tc.format), func(t *testing.T) { + s, err := NewSerializer(string(tc.format)) require.NoError(t, err) buf, err := s.Serialize(tc.metric) From 2187baceea458e571560b5a0b9f16ba382137ac4 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Fri, 4 Dec 2020 14:39:00 -0500 Subject: [PATCH 103/761] Add configurable Max TTL duration for statsd input plugin entries (#8509) * Adding max TTL duration for all metric caches in the statsd input plugin * Update README.md was missing type in readme --- plugins/inputs/statsd/README.md | 4 + plugins/inputs/statsd/statsd.go | 121 +++++++++++++++++++-------- plugins/inputs/statsd/statsd_test.go | 69 ++++++++++++++- 3 files changed, 156 insertions(+), 38 deletions(-) diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 57953eed72600..26cbe26289615 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -68,6 +68,9 @@ ## Maximum socket buffer size in bytes, once the buffer fills up, metrics ## will start dropping. Defaults to the OS default. # read_buffer_size = 65535 + + ## Max duration (TTL) for each metric to stay cached/reported without being updated. + # max_ttl = "10h" ``` ### Description @@ -192,6 +195,7 @@ the accuracy of percentiles but also increases the memory usage and cpu time. measurements and tags. - **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) - **datadog_extensions** boolean: Enable parsing of DataDog's extensions to dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) +- **max_ttl** config.Duration: Max duration (TTL) for each metric to stay cached/reported without being updated. ### Statsd bucket -> InfluxDB line-protocol Templates diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 9c5780d00a596..f74eb0ef4dc38 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -13,6 +13,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers/graphite" @@ -117,6 +118,9 @@ type Statsd struct { TCPKeepAlive bool `toml:"tcp_keep_alive"` TCPKeepAlivePeriod *internal.Duration `toml:"tcp_keep_alive_period"` + // Max duration for each metric to stay cached without being updated. + MaxTTL config.Duration `toml:"max_ttl"` + graphiteParser *graphite.GraphiteParser acc telegraf.Accumulator @@ -131,7 +135,7 @@ type Statsd struct { UDPBytesRecv selfstat.Stat ParseTimeNS selfstat.Stat - Log telegraf.Logger + Log telegraf.Logger `toml:"-"` // A pool of byte slices to handle parsing bufPool sync.Pool @@ -159,27 +163,31 @@ type metric struct { } type cachedset struct { - name string - fields map[string]map[string]bool - tags map[string]string + name string + fields map[string]map[string]bool + tags map[string]string + expiresAt time.Time } type cachedgauge struct { - name string - fields map[string]interface{} - tags map[string]string + name string + fields map[string]interface{} + tags map[string]string + expiresAt time.Time } type cachedcounter struct { - name string - fields map[string]interface{} - tags map[string]string + name string + fields map[string]interface{} + tags map[string]string + expiresAt time.Time } type cachedtimings struct { - name string - fields map[string]RunningStats - tags map[string]string + name string + fields map[string]RunningStats + tags map[string]string + expiresAt time.Time } func (_ *Statsd) Description() string { @@ -243,6 +251,9 @@ const sampleConfig = ` ## calculation of percentiles. Raising this limit increases the accuracy ## of percentiles but also increases the memory usage and cpu time. percentile_limit = 1000 + + ## Max duration (TTL) for each metric to stay cached/reported without being updated. + #max_ttl = "1000h" ` func (_ *Statsd) SampleConfig() string { @@ -306,6 +317,9 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { if s.DeleteSets { s.sets = make(map[string]cachedset) } + + s.expireCachedMetrics() + return nil } @@ -527,9 +541,6 @@ func (s *Statsd) parser() error { // parseStatsdLine will parse the given statsd line, validating it as it goes. // If the line is valid, it will be cached for the next call to Gather() func (s *Statsd) parseStatsdLine(line string) error { - s.Lock() - defer s.Unlock() - lineTags := make(map[string]string) if s.DataDogExtensions { recombinedSegments := make([]string, 0) @@ -734,6 +745,9 @@ func parseKeyValue(keyvalue string) (string, string) { // aggregates and caches the current value(s). It does not deal with the // Delete* options, because those are dealt with in the Gather function. func (s *Statsd) aggregate(m metric) { + s.Lock() + defer s.Unlock() + switch m.mtype { case "ms", "h": // Check if the measurement exists @@ -761,61 +775,67 @@ func (s *Statsd) aggregate(m metric) { field.AddValue(m.floatvalue) } cached.fields[m.field] = field + cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) s.timings[m.hash] = cached case "c": // check if the measurement exists - _, ok := s.counters[m.hash] + cached, ok := s.counters[m.hash] if !ok { - s.counters[m.hash] = cachedcounter{ + cached = cachedcounter{ name: m.name, fields: make(map[string]interface{}), tags: m.tags, } } // check if the field exists - _, ok = s.counters[m.hash].fields[m.field] + _, ok = cached.fields[m.field] if !ok { - s.counters[m.hash].fields[m.field] = int64(0) + cached.fields[m.field] = int64(0) } - s.counters[m.hash].fields[m.field] = - s.counters[m.hash].fields[m.field].(int64) + m.intvalue + cached.fields[m.field] = cached.fields[m.field].(int64) + m.intvalue + cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) + s.counters[m.hash] = cached case "g": // check if the measurement exists - _, ok := s.gauges[m.hash] + cached, ok := s.gauges[m.hash] if !ok { - s.gauges[m.hash] = cachedgauge{ + cached = cachedgauge{ name: m.name, fields: make(map[string]interface{}), tags: m.tags, } } // check if the field exists - _, ok = s.gauges[m.hash].fields[m.field] + _, ok = cached.fields[m.field] if !ok { - s.gauges[m.hash].fields[m.field] = float64(0) + cached.fields[m.field] = float64(0) } if m.additive { - s.gauges[m.hash].fields[m.field] = - s.gauges[m.hash].fields[m.field].(float64) + m.floatvalue + cached.fields[m.field] = cached.fields[m.field].(float64) + m.floatvalue } else { - s.gauges[m.hash].fields[m.field] = m.floatvalue + cached.fields[m.field] = m.floatvalue } + + cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) + s.gauges[m.hash] = cached case "s": // check if the measurement exists - _, ok := s.sets[m.hash] + cached, ok := s.sets[m.hash] if !ok { - s.sets[m.hash] = cachedset{ + cached = cachedset{ name: m.name, fields: make(map[string]map[string]bool), tags: m.tags, } } // check if the field exists - _, ok = s.sets[m.hash].fields[m.field] + _, ok = cached.fields[m.field] if !ok { - s.sets[m.hash].fields[m.field] = make(map[string]bool) + cached.fields[m.field] = make(map[string]bool) } - s.sets[m.hash].fields[m.field][m.strvalue] = true + cached.fields[m.field][m.strvalue] = true + cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) + s.sets[m.hash] = cached } } @@ -932,6 +952,39 @@ func (s *Statsd) isUDP() bool { return strings.HasPrefix(s.Protocol, "udp") } +func (s *Statsd) expireCachedMetrics() { + // If Max TTL wasn't configured, skip expiration. + if s.MaxTTL == 0 { + return + } + + now := time.Now() + + for key, cached := range s.gauges { + if now.After(cached.expiresAt) { + delete(s.gauges, key) + } + } + + for key, cached := range s.sets { + if now.After(cached.expiresAt) { + delete(s.sets, key) + } + } + + for key, cached := range s.timings { + if now.After(cached.expiresAt) { + delete(s.timings, key) + } + } + + for key, cached := range s.counters { + if now.After(cached.expiresAt) { + delete(s.counters, key) + } + } +} + func init() { inputs.Add("statsd", func() telegraf.Input { return &Statsd{ diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index f76681134a094..fd3b49b9203f0 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -2,15 +2,17 @@ package statsd import ( "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "net" "sync" "testing" "time" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" ) @@ -1077,6 +1079,65 @@ func TestParse_MeasurementsWithSameName(t *testing.T) { } } +// Test that the metric caches expire (clear) an entry after the entry hasn't been updated for the configurable MaxTTL duration. +func TestCachesExpireAfterMaxTTL(t *testing.T) { + s := NewTestStatsd() + s.MaxTTL = config.Duration(100 * time.Microsecond) + + acc := &testutil.Accumulator{} + s.parseStatsdLine("valid:45|c") + s.parseStatsdLine("valid:45|c") + require.NoError(t, s.Gather(acc)) + + // Max TTL goes by, our 'valid' entry is cleared. + time.Sleep(100 * time.Microsecond) + require.NoError(t, s.Gather(acc)) + + // Now when we gather, we should have a counter that is reset to zero. + s.parseStatsdLine("valid:45|c") + require.NoError(t, s.Gather(acc)) + + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + testutil.MustMetric( + "valid", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 90, + }, + time.Now(), + telegraf.Counter, + ), + testutil.MustMetric( + "valid", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 90, + }, + time.Now(), + telegraf.Counter, + ), + testutil.MustMetric( + "valid", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 45, + }, + time.Now(), + telegraf.Counter, + ), + }, + acc.GetTelegrafMetrics(), + testutil.IgnoreTime(), + ) +} + // Test that measurements with multiple bits, are treated as different outputs // but are equal to their single-measurement representation func TestParse_MeasurementsWithMultipleValues(t *testing.T) { From f7950be1075849122cde68445c24953d5160d6b8 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Fri, 4 Dec 2020 16:47:58 -0500 Subject: [PATCH 104/761] Adding support for new lines in influx line protocol fields. (#8499) --- plugins/processors/execd/execd.go | 34 ++++++++++++++++++ plugins/processors/execd/execd_test.go | 50 ++++++++++++++++++++++++++ 2 files changed, 84 insertions(+) diff --git a/plugins/processors/execd/execd.go b/plugins/processors/execd/execd.go index 7aeb285a44fc5..3d11bac4969fe 100644 --- a/plugins/processors/execd/execd.go +++ b/plugins/processors/execd/execd.go @@ -12,6 +12,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/process" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/processors" "github.com/influxdata/telegraf/plugins/serializers" ) @@ -117,6 +118,12 @@ func (e *Execd) Stop() error { } func (e *Execd) cmdReadOut(out io.Reader) { + // Prefer using the StreamParser when parsing influx format. + if _, isInfluxParser := e.parser.(*influx.Parser); isInfluxParser { + e.cmdReadOutStream(out) + return + } + scanner := bufio.NewScanner(out) scanBuf := make([]byte, 4096) scanner.Buffer(scanBuf, 262144) @@ -137,6 +144,33 @@ func (e *Execd) cmdReadOut(out io.Reader) { } } +func (e *Execd) cmdReadOutStream(out io.Reader) { + parser := influx.NewStreamParser(out) + + for { + metric, err := parser.Next() + + if err != nil { + // Stop parsing when we've reached the end. + if err == influx.EOF { + break + } + + if parseErr, isParseError := err.(*influx.ParseError); isParseError { + // Continue past parse errors. + e.acc.AddError(parseErr) + continue + } + + // Stop reading on any non-recoverable error. + e.acc.AddError(err) + return + } + + e.acc.AddMetric(metric) + } +} + func (e *Execd) cmdReadErr(out io.Reader) { scanner := bufio.NewScanner(out) diff --git a/plugins/processors/execd/execd_test.go b/plugins/processors/execd/execd_test.go index 451669ec6a130..3cccc9fbb156e 100644 --- a/plugins/processors/execd/execd_test.go +++ b/plugins/processors/execd/execd_test.go @@ -79,6 +79,56 @@ func TestExternalProcessorWorks(t *testing.T) { } } +func TestParseLinesWithNewLines(t *testing.T) { + e := New() + e.Log = testutil.Logger{} + + exe, err := os.Executable() + require.NoError(t, err) + t.Log(exe) + e.Command = []string{exe, "-countmultiplier"} + e.RestartDelay = config.Duration(5 * time.Second) + + acc := &testutil.Accumulator{} + + require.NoError(t, e.Start(acc)) + + now := time.Now() + orig := now + + m, err := metric.New("test", + map[string]string{ + "author": "Mr. Gopher", + }, + map[string]interface{}{ + "phrase": "Gophers are amazing creatures.\nAbsolutely amazing.", + "count": 3, + }, + now) + + require.NoError(t, err) + + e.Add(m, acc) + + acc.Wait(1) + require.NoError(t, e.Stop()) + + processedMetric := acc.GetTelegrafMetrics()[0] + + expectedMetric := testutil.MustMetric("test", + map[string]string{ + "author": "Mr. Gopher", + }, + map[string]interface{}{ + "phrase": "Gophers are amazing creatures.\nAbsolutely amazing.", + "count": 6, + }, + orig, + ) + + testutil.RequireMetricEqual(t, expectedMetric, processedMetric) +} + var countmultiplier = flag.Bool("countmultiplier", false, "if true, act like line input program instead of test") From 73986acc878e095ea1f1f51877bfab5da4573cf0 Mon Sep 17 00:00:00 2001 From: reimda Date: Fri, 4 Dec 2020 16:53:57 -0700 Subject: [PATCH 105/761] Update circleci mac golang version (#8516) --- .circleci/config.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9e5b1aaaefa87..2b55149d0abc2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,7 +12,7 @@ defaults: - image: 'quay.io/influxdb/telegraf-ci:1.15.5' mac: &mac macos: - xcode: 11.3.1 + xcode: 12.1.0 working_directory: '~/go/src/github.com/influxdata/telegraf' environment: HOMEBREW_NO_AUTO_UPDATE: 1 @@ -42,13 +42,13 @@ jobs: steps: - checkout - restore_cache: - key: mac-go-mod-v1-{{ checksum "go.sum" }} + key: mac-go-mod-v2-{{ checksum "go.sum" }} - run: 'brew install go' # latest - run: 'make deps' - run: 'make tidy' - save_cache: name: 'go module cache' - key: mac-go-mod-v1-{{ checksum "go.sum" }} + key: mac-go-mod-v2-{{ checksum "go.sum" }} paths: - '~/go/pkg/mod' - '/usr/local/Cellar/go' From 97469f6d85b69ddff50d3ffd0734151f527f5642 Mon Sep 17 00:00:00 2001 From: Joe Wang Date: Mon, 7 Dec 2020 10:59:32 -0800 Subject: [PATCH 106/761] Update kube_inventory readme (#8510) --- plugins/inputs/kube_inventory/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index dbed6d6f01edb..d65bfcc38f05d 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -229,9 +229,9 @@ subjects: - state_code - state_reason - terminated_reason (string, deprecated in 1.15: use `state_reason` instead) - - resource_requests_cpu_units + - resource_requests_millicpu_units - resource_requests_memory_bytes - - resource_limits_cpu_units + - resource_limits_millicpu_units - resource_limits_memory_bytes - kubernetes_service From 139498937a97c258581bf5efe1c2925d348f828d Mon Sep 17 00:00:00 2001 From: Wiard van Rij <5786097+wiardvanrij@users.noreply.github.com> Date: Mon, 7 Dec 2020 22:45:06 +0100 Subject: [PATCH 107/761] Add support to convert snmp hex strings to integers (#8426) --- plugins/inputs/snmp/README.md | 24 ++++++++++------ plugins/inputs/snmp/snmp.go | 47 ++++++++++++++++++++++++++++---- plugins/inputs/snmp/snmp_test.go | 6 ++++ 3 files changed, 62 insertions(+), 15 deletions(-) diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index ea6e7a95bbfa4..0eb0ac31a0c97 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -113,15 +113,21 @@ option operate similar to the `snmpget` utility. # is_tag = false ## Apply one of the following conversions to the variable value: - ## float(X) Convert the input value into a float and divides by the - ## Xth power of 10. Effectively just moves the decimal left - ## X places. For example a value of `123` with `float(2)` - ## will result in `1.23`. - ## float: Convert the value into a float with no adjustment. Same - ## as `float(0)`. - ## int: Convert the value into an integer. - ## hwaddr: Convert the value to a MAC address. - ## ipaddr: Convert the value to an IP address. + ## float(X): Convert the input value into a float and divides by the + ## Xth power of 10. Effectively just moves the decimal left + ## X places. For example a value of `123` with `float(2)` + ## will result in `1.23`. + ## float: Convert the value into a float with no adjustment. Same + ## as `float(0)`. + ## int: Convert the value into an integer. + ## hwaddr: Convert the value to a MAC address. + ## ipaddr: Convert the value to an IP address. + ## hextoint:X:Y Convert a hex string value to integer. Where X is the Endian + ## and Y the bit size. For example: hextoint:LittleEndian:uint64 + ## or hextoint:BigEndian:uint32. Valid options for the Endian are: + ## BigEndian and LittleEndian. For the bit size: uint16, uint32 + ## and uint64. + ## # conversion = "" ``` diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 623c9ba61ce23..f8fa500043e9b 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -3,6 +3,7 @@ package snmp import ( "bufio" "bytes" + "encoding/binary" "fmt" "log" "math" @@ -574,12 +575,6 @@ func (s *Snmp) getConnection(idx int) (snmpConnection, error) { } // fieldConvert converts from any type according to the conv specification -// "float"/"float(0)" will convert the value into a float. -// "float(X)" will convert the value into a float, and then move the decimal before Xth right-most digit. -// "int" will convert the value into an integer. -// "hwaddr" will convert the value into a MAC address. -// "ipaddr" will convert the value into into an IP address. -// "" will convert a byte slice into a string. func fieldConvert(conv string, v interface{}) (interface{}, error) { if conv == "" { if bs, ok := v.([]byte); ok { @@ -671,6 +666,46 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { return v, nil } + split := strings.Split(conv, ":") + if split[0] == "hextoint" && len(split) == 3 { + + endian := split[1] + bit := split[2] + + bv, ok := v.([]byte) + if !ok { + return v, nil + } + + if endian == "LittleEndian" { + switch bit { + case "uint64": + v = binary.LittleEndian.Uint64(bv) + case "uint32": + v = binary.LittleEndian.Uint32(bv) + case "uint16": + v = binary.LittleEndian.Uint16(bv) + default: + return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) + } + } else if endian == "BigEndian" { + switch bit { + case "uint64": + v = binary.BigEndian.Uint64(bv) + case "uint32": + v = binary.BigEndian.Uint32(bv) + case "uint16": + v = binary.BigEndian.Uint16(bv) + default: + return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) + } + } else { + return nil, fmt.Errorf("invalid Endian value (%s) for hex to int conversion", endian) + } + + return v, nil + } + if conv == "ipaddr" { var ipbs []byte diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 8368ed7385de7..199fbe83c156f 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -739,6 +739,12 @@ func TestFieldConvert(t *testing.T) { {[]byte("abcd"), "ipaddr", "97.98.99.100"}, {"abcd", "ipaddr", "97.98.99.100"}, {[]byte("abcdefghijklmnop"), "ipaddr", "6162:6364:6566:6768:696a:6b6c:6d6e:6f70"}, + {[]byte{0x00, 0x09, 0x3E, 0xE3, 0xF6, 0xD5, 0x3B, 0x60}, "hextoint:BigEndian:uint64", uint64(2602423610063712)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3}, "hextoint:BigEndian:uint32", uint32(605923)}, + {[]byte{0x00, 0x09}, "hextoint:BigEndian:uint16", uint16(9)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3, 0xF6, 0xD5, 0x3B, 0x60}, "hextoint:LittleEndian:uint64", uint64(6934371307618175232)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3}, "hextoint:LittleEndian:uint32", uint32(3812493568)}, + {[]byte{0x00, 0x09}, "hextoint:LittleEndian:uint16", uint16(2304)}, } for _, tc := range testTable { From d712bd13d6c86d03252c2435ade4c8a52a13fc56 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Mon, 7 Dec 2020 23:43:15 +0100 Subject: [PATCH 108/761] Add initialization example to mock-plugin. (#8520) --- plugins/inputs/mock_Plugin.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/plugins/inputs/mock_Plugin.go b/plugins/inputs/mock_Plugin.go index 4dec121bc7b6f..7270954dc5f8d 100644 --- a/plugins/inputs/mock_Plugin.go +++ b/plugins/inputs/mock_Plugin.go @@ -9,6 +9,8 @@ import ( // MockPlugin struct should be named the same as the Plugin type MockPlugin struct { mock.Mock + + constructedVariable string } // Description will appear directly above the plugin definition in the config file @@ -21,6 +23,12 @@ func (m *MockPlugin) SampleConfig() string { return ` sampleVar = 'foo'` } +// Init can be implemented to do one-time processing stuff like initializing variables +func (m *MockPlugin) Init() error { + m.constructedVariable = "I'm initialized now." + return nil +} + // Gather defines what data the plugin will gather. func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error { ret := m.Called(_a0) From e687cd7a5f8ae2684b152c041ad45b107fe6cac3 Mon Sep 17 00:00:00 2001 From: Avinash Nigam <56562150+avinash-nigam@users.noreply.github.com> Date: Tue, 8 Dec 2020 12:40:11 -0800 Subject: [PATCH 109/761] SQL Server HA/DR Availability Group queries (#8379) --- etc/telegraf.conf | 94 ++++++++++---------- plugins/inputs/sqlserver/README.md | 70 ++++++++++----- plugins/inputs/sqlserver/sqlserver.go | 90 +++++++++---------- plugins/inputs/sqlserver/sqlserverqueries.go | 94 ++++++++++++++++++++ 4 files changed, 233 insertions(+), 115 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index f67ddfbf19dcd..50224cb008dd0 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -5132,62 +5132,58 @@ # ## See https://github.com/denisenkom/go-mssqldb for detailed connection # ## parameters, in particular, tls connections can be created like so: # ## "encrypt=true;certificate=;hostNameInCertificate=" -# # servers = [ -# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", -# # ] -# -# ## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 -# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -# ## Possible values for database_type are -# ## "AzureSQLDB" -# ## "SQLServer" -# ## "AzureSQLManagedInstance" +# servers = [ +# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# ] + +# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 +# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. +# ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" + +# ## Queries enabled by default for database_type = "AzureSQLDB" are - +# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers + # # database_type = "AzureSQLDB" -# -# + +# ## A list of queries to include. If not specified, all the above listed queries are used. +# # include_query = [] + +# ## A list of queries to explicitly ignore. +# # exclude_query = [] + +# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers + +# # database_type = "AzureSQLManagedInstance" + +# # include_query = [] + +# # exclude_query = [] + +# ## Queries enabled by default for database_type = "SQLServer" are - +# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, +# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu + +# database_type = "SQLServer" + +# include_query = [] + +# ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default +# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] + +# ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use +# ## the new mechanism of identifying the database_type there by use it's corresponding queries + # ## Optional parameter, setting this to 2 will use a new version # ## of the collection queries that break compatibility with the original # ## dashboards. # ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB -# query_version = 2 -# +# # query_version = 2 + # ## If you are using AzureDB, setting this to true will gather resource utilization metrics # # azuredb = false -# -# ## Possible queries -# ## Version 2: -# ## - PerformanceCounters -# ## - WaitStatsCategorized -# ## - DatabaseIO -# ## - ServerProperties -# ## - MemoryClerk -# ## - Schedulers -# ## - SqlRequests -# ## - VolumeSpace -# ## - Cpu -# -# ## Version 1: -# ## - PerformanceCounters -# ## - WaitStatsCategorized -# ## - CPUHistory -# ## - DatabaseIO -# ## - DatabaseSize -# ## - DatabaseStats -# ## - DatabaseProperties -# ## - MemoryClerk -# ## - VolumeSpace -# ## - PerformanceMetrics -# -# -# ## Queries enabled by default for specific Database Type -# ## database_type = AzureSQLDB -# ## AzureDBWaitStats, AzureDBResourceStats, AzureDBResourceGovernance, sqlAzureDBDatabaseIO -# -# ## A list of queries to include. If not specified, all the above listed queries are used. -# # include_query = [] -# -# ## A list of queries to explicitly ignore. -# exclude_query = [ 'Schedulers' , 'SqlRequests'] # # Gather timeseries from Google Cloud Platform v3 monitoring API diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index be5b98aa8d2b6..db15c4af755a6 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -48,22 +48,54 @@ GO ## See https://github.com/denisenkom/go-mssqldb for detailed connection ## parameters, in particular, tls connections can be created like so: ## "encrypt=true;certificate=;hostNameInCertificate=" - # servers = [ - # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", - # ] - - ## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 - ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. - ## Possible values for database_type are - ## "AzureSQLDB" - ## "SQLServer" - ## "AzureSQLManagedInstance" + servers = [ + "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", + ] + + ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 + ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. + ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" + + ## Queries enabled by default for database_type = "AzureSQLDB" are - + ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, + ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers + # database_type = "AzureSQLDB" + ## A list of queries to include. If not specified, all the above listed queries are used. + # include_query = [] + + ## A list of queries to explicitly ignore. + # exclude_query = [] + + ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - + ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, + ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers + + # database_type = "AzureSQLManagedInstance" + + # include_query = [] + + # exclude_query = [] + + ## Queries enabled by default for database_type = "SQLServer" are - + ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, + ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu + + database_type = "SQLServer" + + include_query = [] + + ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default + exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] + + ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use + ## the new mechanism of identifying the database_type there by use it's corresponding queries + ## Optional parameter, setting this to 2 will use a new version - ## of the collection queries that break compatibility with the original dashboards. - ## Version 2 - is compatible from SQL Server 2008 Sp3 and later versions and also for SQL Azure DB - ## Version 2 is in the process of being deprecated, please consider using database_type. + ## of the collection queries that break compatibility with the original + ## dashboards. + ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB # query_version = 2 ## If you are using AzureDB, setting this to true will gather resource utilization metrics @@ -105,6 +137,9 @@ GO ## - SQLServerRequests ## - SQLServerVolumeSpace ## - SQLServerCpu + ## and following as optional (if mentioned in the include_query list) + ## - SQLServerAvailabilityReplicaStates + ## - SQLServerDatabaseReplicaStates ## Version 2 by default collects the following queries ## Version 2 is being deprecated, please consider using database_type. @@ -133,13 +168,6 @@ GO - ## A list of queries to include. If not specified, all the above listed queries are used. - # include_query = [] - - ## A list of queries to explicitly ignore. - exclude_query = [ 'Schedulers' , 'SqlRequests' ] - - ``` @@ -240,6 +268,8 @@ These are metrics for Azure SQL Managed instance, are very similar to version 2 blocking sessions. - SQLServerVolumeSpace - uses `sys.dm_os_volume_stats` to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem. - SQLServerCpu - uses the buffer ring (`sys.dm_os_ring_buffers`) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). +- SQLServerAvailabilityReplicaStates: Collects availability replica state information from `sys.dm_hadr_availability_replica_states` for a High Availability / Disaster Recovery (HADR) setup +- SQLServerDatabaseReplicaStates: Collects database replica state information from `sys.dm_hadr_database_replica_states` for a High Availability / Disaster Recovery (HADR) setup #### Output Measures diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 4b0bd5e3f199c..3baa5ed6aafbb 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -46,62 +46,58 @@ const sampleConfig = ` ## See https://github.com/denisenkom/go-mssqldb for detailed connection ## parameters, in particular, tls connections can be created like so: ## "encrypt=true;certificate=;hostNameInCertificate=" -# servers = [ -# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", -# ] - -## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 -## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -## Possible values for database_type are -## "AzureSQLDB" -## "SQLServer" -## "AzureSQLManagedInstance" +servers = [ + "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +] + +## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 +## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. +## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" + +## Queries enabled by default for database_type = "AzureSQLDB" are - +## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers + # database_type = "AzureSQLDB" +## A list of queries to include. If not specified, all the above listed queries are used. +# include_query = [] + +## A list of queries to explicitly ignore. +# exclude_query = [] + +## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers + +# database_type = "AzureSQLManagedInstance" + +# include_query = [] + +# exclude_query = [] + +## Queries enabled by default for database_type = "SQLServer" are - +## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, +## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu + +database_type = "SQLServer" + +include_query = [] + +## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default +exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] + +## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use +## the new mechanism of identifying the database_type there by use it's corresponding queries ## Optional parameter, setting this to 2 will use a new version ## of the collection queries that break compatibility with the original ## dashboards. ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB -query_version = 2 +# query_version = 2 ## If you are using AzureDB, setting this to true will gather resource utilization metrics # azuredb = false - -## Possible queries -## Version 2: -## - PerformanceCounters -## - WaitStatsCategorized -## - DatabaseIO -## - ServerProperties -## - MemoryClerk -## - Schedulers -## - SqlRequests -## - VolumeSpace -## - Cpu - -## Version 1: -## - PerformanceCounters -## - WaitStatsCategorized -## - CPUHistory -## - DatabaseIO -## - DatabaseSize -## - DatabaseStats -## - DatabaseProperties -## - MemoryClerk -## - VolumeSpace -## - PerformanceMetrics - - -## Queries enabled by default for specific Database Type -## database_type = AzureSQLDB - ## AzureDBWaitStats, AzureDBResourceStats, AzureDBResourceGovernance, sqlAzureDBDatabaseIO - -## A list of queries to include. If not specified, all the above listed queries are used. -# include_query = [] - -## A list of queries to explicitly ignore. -exclude_query = [ 'Schedulers' , 'SqlRequests'] ` // SampleConfig return the sample configuration @@ -159,6 +155,8 @@ func initQueries(s *SQLServer) error { queries["SQLServerRequests"] = Query{ScriptName: "SQLServerRequests", Script: sqlServerRequests, ResultByRow: false} queries["SQLServerVolumeSpace"] = Query{ScriptName: "SQLServerVolumeSpace", Script: sqlServerVolumeSpace, ResultByRow: false} queries["SQLServerCpu"] = Query{ScriptName: "SQLServerCpu", Script: sqlServerRingBufferCpu, ResultByRow: false} + queries["SQLServerAvailabilityReplicaStates"] = Query{ScriptName: "SQLServerAvailabilityReplicaStates", Script: sqlServerAvailabilityReplicaStates, ResultByRow: false} + queries["SQLServerDatabaseReplicaStates"] = Query{ScriptName: "SQLServerDatabaseReplicaStates", Script: sqlServerDatabaseReplicaStates, ResultByRow: false} } else { // If this is an AzureDB instance, grab some extra metrics if s.AzureDB { diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index 3fdbd5d34ab69..2af8e1eb775cf 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -1150,3 +1150,97 @@ FROM ( ORDER BY [record_id] DESC ) AS z ` + +const sqlServerAvailabilityReplicaStates string = ` +IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +IF SERVERPROPERTY('IsHadrEnabled') = 1 BEGIN + SELECT + 'sqlserver_hadr_replica_states' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + convert(nvarchar(36), hars.replica_id) as replica_id, + ar.replica_server_name, + convert(nvarchar(36), hars.group_id) as group_id, + ag.name AS group_name, + ag.basic_features, + ag.is_distributed, + hags.synchronization_health_desc AS ag_synchronization_health_desc, + ar.replica_metadata_id, + ar.availability_mode, + ar.availability_mode_desc, + ar.failover_mode, + ar.failover_mode_desc, + ar.session_timeout, + ar.primary_role_allow_connections, + ar.primary_role_allow_connections_desc, + ar.secondary_role_allow_connections, + ar.secondary_role_allow_connections_desc, + ar.seeding_mode, + ar.seeding_mode_desc, + hars.is_local, + hars.role, + hars.role_desc, + hars.operational_state, + hars.operational_state_desc, + hars.connected_state, + hars.connected_state_desc, + hars.recovery_health, + hars.recovery_health_desc, + hars.synchronization_health AS replica_synchronization_health, + hars.synchronization_health_desc AS replica_synchronization_health_desc, + hars.last_connect_error_number, + hars.last_connect_error_description, + hars.last_connect_error_timestamp + from sys.dm_hadr_availability_replica_states AS hars + inner join sys.availability_replicas AS ar on hars.replica_id = ar.replica_id + inner join sys.availability_groups AS ag on ar.group_id = ag.group_id + inner join sys.dm_hadr_availability_group_states AS hags ON hags.group_id = ag.group_id +END +` + +const sqlServerDatabaseReplicaStates string = ` +IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +IF SERVERPROPERTY('IsHadrEnabled') = 1 BEGIN + SELECT + 'sqlserver_hadr_dbreplica_states' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + database_id, + db_name(database_id) as database_name, + convert(nvarchar(36), drs.replica_id) as replica_id, + ar.replica_server_name, + convert(nvarchar(36), drs.group_database_id) as group_database_id, + is_primary_replica, + synchronization_state, + synchronization_state_desc, + is_commit_participant, + synchronization_health, + synchronization_health_desc, + database_state, + database_state_desc, + is_suspended, + suspend_reason, + suspend_reason_desc, + last_sent_time, + last_received_time, + last_hardened_time, + last_redone_time, + log_send_queue_size, + log_send_rate, + redo_queue_size, + redo_rate, + filestream_send_rate, + last_commit_time, + secondary_lag_seconds + from sys.dm_hadr_database_replica_states AS drs + inner join sys.availability_replicas AS ar on drs.replica_id = ar.replica_id +END +` From 752a4b77e1a43956c79564aa8fe1e2d98ed2a267 Mon Sep 17 00:00:00 2001 From: bhsu-ms <72472578+bhsu-ms@users.noreply.github.com> Date: Tue, 8 Dec 2020 12:42:31 -0800 Subject: [PATCH 110/761] Added tags for monitoring readable secondaries for Azure SQL MI (#8369) --- plugins/inputs/sqlserver/azuresqlqueries.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go index 3504658c99f20..2358a12c39614 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqlqueries.go @@ -694,6 +694,7 @@ SELECT TOP 1 ,[db_recovering] ,[db_recoveryPending] ,[db_suspect] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.server_resource_stats CROSS APPLY ( SELECT @@ -720,6 +721,7 @@ SELECT TOP(1) 'sqlserver_azure_db_resource_stats' AS [measurement] ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] ,cast([avg_cpu_percent] as float) as [avg_cpu_percent] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.server_resource_stats ORDER BY @@ -747,6 +749,7 @@ SELECT ,[volume_type_managed_xstore_iops] as [voltype_man_xtore_iops] ,[volume_type_external_xstore_iops] as [voltype_ext_xtore_iops] ,[volume_external_xstore_iops] as [vol_ext_xtore_iops] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_instance_resource_governance; ` @@ -772,6 +775,7 @@ SELECT ,vfs.[num_of_bytes_written] AS [write_bytes] ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs LEFT OUTER JOIN sys.master_files AS mf WITH (NOLOCK) ON vfs.[database_id] = mf.[database_id] @@ -792,6 +796,7 @@ SELECT ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] ,mc.[type] AS [clerk_type] ,SUM(mc.[pages_kb]) AS [size_kb] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) GROUP BY mc.[type] @@ -874,6 +879,7 @@ SELECT 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' ELSE 'Other' END as [wait_category] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) WHERE ws.[wait_type] NOT IN ( @@ -1074,6 +1080,7 @@ SELECT END AS [instance] ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability from @PCounters pc LEFT OUTER JOIN @PCounters AS pc1 ON ( @@ -1143,6 +1150,7 @@ SELECT ,CONVERT(varchar(20),[query_hash],1) as [query_hash] ,CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash] ,DB_NAME(COALESCE(r.[database_id], s.[database_id])) AS [session_db_name] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_exec_sessions AS s LEFT OUTER JOIN sys.dm_exec_requests AS r ON s.[session_id] = r.[session_id] @@ -1185,5 +1193,6 @@ SELECT ,s.[yield_count] ,s.[total_cpu_usage_ms] ,s.[total_scheduler_delay_ms] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_os_schedulers AS s ` From 97de3981bd0c288dad19e9c08f07f81ead07b03b Mon Sep 17 00:00:00 2001 From: Frank Riley Date: Wed, 9 Dec 2020 10:54:51 -0700 Subject: [PATCH 111/761] Add percentiles to the ping plugin (#7345) --- plugins/inputs/ping/README.md | 12 +++-- plugins/inputs/ping/ping.go | 77 +++++++++++++++++++++++++++----- plugins/inputs/ping/ping_test.go | 4 ++ 3 files changed, 79 insertions(+), 14 deletions(-) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 91af1b2ae33ed..83a91a2eeb96d 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -57,6 +57,9 @@ native Go by the Telegraf process, eliminating the need to execute the system ## option of the ping command. # interface = "" + ## Percentiles to calculate. This only works with the native method. + # percentiles = [50, 95, 99] + ## Specify the ping executable binary. # binary = "ping" @@ -147,10 +150,11 @@ sockets and the `ping_group_range` setting. - packets_received (integer) - percent_packet_loss (float) - ttl (integer, Not available on Windows) - - average_response_ms (integer) - - minimum_response_ms (integer) - - maximum_response_ms (integer) - - standard_deviation_ms (integer, Available on Windows only with native ping) + - average_response_ms (float) + - minimum_response_ms (float) + - maximum_response_ms (float) + - standard_deviation_ms (float, Available on Windows only with method = "native") + - percentile\_ms (float, Where `` is the percentile specified in `percentiles`. Available with method = "native" only) - errors (float, Windows only) - reply_received (integer, Windows with method = "exec" only) - percent_reply_loss (float, Windows with method = "exec" only) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 008cfceacc5b9..da9ab8698e83b 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -3,11 +3,13 @@ package ping import ( "context" "errors" + "fmt" "log" "math" "net" "os/exec" "runtime" + "sort" "strings" "sync" "time" @@ -69,6 +71,9 @@ type Ping struct { // listenAddr is the address associated with the interface defined. listenAddr string + + // Calculate the given percentiles when using native method + Percentiles []int } func (*Ping) Description() string { @@ -108,6 +113,9 @@ const sampleConfig = ` ## option of the ping command. # interface = "" + ## Percentiles to calculate. This only works with the native method. + # percentiles = [50, 95, 99] + ## Specify the ping executable binary. # binary = "ping" @@ -345,11 +353,41 @@ finish: log.Printf("D! [inputs.ping] %s", doErr.Error()) } - tags, fields := onFin(packetsSent, rsps, doErr, destination) + tags, fields := onFin(packetsSent, rsps, doErr, destination, p.Percentiles) acc.AddFields("ping", fields, tags) } -func onFin(packetsSent int, resps []*ping.Response, err error, destination string) (map[string]string, map[string]interface{}) { +type durationSlice []time.Duration + +func (p durationSlice) Len() int { return len(p) } +func (p durationSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p durationSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// R7 from Hyndman and Fan (1996), which matches Excel +func percentile(values durationSlice, perc int) time.Duration { + if perc < 0 { + perc = 0 + } + if perc > 100 { + perc = 100 + } + var percFloat = float64(perc) / 100.0 + + var count = len(values) + var rank = percFloat * float64(count-1) + var rankInteger = int(rank) + var rankFraction = rank - math.Floor(rank) + + if rankInteger >= count-1 { + return values[count-1] + } else { + upper := values[rankInteger+1] + lower := values[rankInteger] + return lower + time.Duration(rankFraction*float64(upper-lower)) + } +} + +func onFin(packetsSent int, resps []*ping.Response, err error, destination string, percentiles []int) (map[string]string, map[string]interface{}) { packetsRcvd := len(resps) tags := map[string]string{"url": destination} @@ -378,17 +416,35 @@ func onFin(packetsSent int, resps []*ping.Response, err error, destination strin ttl := resps[0].TTL var min, max, avg, total time.Duration - min = resps[0].RTT - max = resps[0].RTT - for _, res := range resps { - if res.RTT < min { - min = res.RTT + if len(percentiles) > 0 { + var rtt []time.Duration + for _, resp := range resps { + rtt = append(rtt, resp.RTT) + total += resp.RTT + } + sort.Sort(durationSlice(rtt)) + min = rtt[0] + max = rtt[len(rtt)-1] + + for _, perc := range percentiles { + var value = percentile(durationSlice(rtt), perc) + var field = fmt.Sprintf("percentile%v_ms", perc) + fields[field] = float64(value.Nanoseconds()) / float64(time.Millisecond) } - if res.RTT > max { - max = res.RTT + } else { + min = resps[0].RTT + max = resps[0].RTT + + for _, res := range resps { + if res.RTT < min { + min = res.RTT + } + if res.RTT > max { + max = res.RTT + } + total += res.RTT } - total += res.RTT } avg = total / time.Duration(packetsRcvd) @@ -433,6 +489,7 @@ func init() { Method: "exec", Binary: "ping", Arguments: []string{}, + Percentiles: []int{}, } }) } diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 0c8cfb0939daa..e3d725de33253 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -413,11 +413,15 @@ func TestPingGatherNative(t *testing.T) { Method: "native", Count: 5, resolveHost: mockHostResolver, + Percentiles: []int{50, 95, 99}, } assert.NoError(t, acc.GatherError(p.Gather)) assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) + assert.True(t, acc.HasField("ping", "percentile50_ms")) + assert.True(t, acc.HasField("ping", "percentile95_ms")) + assert.True(t, acc.HasField("ping", "percentile99_ms")) } func mockHostResolverError(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) { From d3031a467dd2674080de1af4dc4d09dd0c808cfb Mon Sep 17 00:00:00 2001 From: alespour <42931850+alespour@users.noreply.github.com> Date: Thu, 10 Dec 2020 18:02:07 +0100 Subject: [PATCH 112/761] feat: add build number field to jenkins_job measurement (#8038) --- plugins/inputs/jenkins/README.md | 1 + plugins/inputs/jenkins/jenkins.go | 3 +++ plugins/inputs/jenkins/jenkins_test.go | 14 ++++++++++++++ 3 files changed, 18 insertions(+) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index f4e9f94ac22a7..dc9889fe628fc 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -88,6 +88,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - port - fields: - duration (ms) + - number - result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILD, 3 = UNSTABLE, 4 = ABORTED) ### Sample Queries: diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index a2d3e3500bc30..46637836b2cb2 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -419,6 +419,7 @@ type jobBuild struct { type buildResponse struct { Building bool `json:"building"` Duration int64 `json:"duration"` + Number int64 `json:"number"` Result string `json:"result"` Timestamp int64 `json:"timestamp"` } @@ -436,6 +437,7 @@ type jobRequest struct { name string parents []string layer int + number int64 } func (jr jobRequest) combined() []string { @@ -473,6 +475,7 @@ func (j *Jenkins) gatherJobBuild(jr jobRequest, b *buildResponse, acc telegraf.A fields := make(map[string]interface{}) fields["duration"] = b.Duration fields["result_code"] = mapResultCode(b.Result) + fields["number"] = b.Number acc.AddFields(measurementJob, fields, tags, b.GetTimestamp()) } diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index be899476d8595..f09f5f9a936bf 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -530,12 +530,14 @@ func TestGatherJobs(t *testing.T) { Building: false, Result: "SUCCESS", Duration: 25558, + Number: 3, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, "/job/job2/1/api/json": &buildResponse{ Building: false, Result: "FAILURE", Duration: 1558, + Number: 1, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, }, @@ -549,6 +551,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(25558), + "number": int64(3), "result_code": 0, }, }, @@ -559,6 +562,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(1558), + "number": int64(1), "result_code": 1, }, }, @@ -583,6 +587,7 @@ func TestGatherJobs(t *testing.T) { Building: false, Result: "SUCCESS", Duration: 25558, + Number: 3, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, }, @@ -596,6 +601,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(25558), + "number": int64(3), "result_code": 0, }, }, @@ -711,24 +717,28 @@ func TestGatherJobs(t *testing.T) { Building: false, Result: "FAILURE", Duration: 1558, + Number: 1, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, "/job/apps/job/k8s-cloud/job/PR-101/4/api/json": &buildResponse{ Building: false, Result: "SUCCESS", Duration: 76558, + Number: 4, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, "/job/apps/job/k8s-cloud/job/PR-100/1/api/json": &buildResponse{ Building: false, Result: "SUCCESS", Duration: 91558, + Number: 1, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, "/job/apps/job/k8s-cloud/job/PR%201/1/api/json": &buildResponse{ Building: false, Result: "SUCCESS", Duration: 87832, + Number: 1, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, }, @@ -743,6 +753,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(87832), + "number": int64(1), "result_code": 0, }, }, @@ -754,6 +765,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(91558), + "number": int64(1), "result_code": 0, }, }, @@ -765,6 +777,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(76558), + "number": int64(4), "result_code": 0, }, }, @@ -776,6 +789,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(1558), + "number": int64(1), "result_code": 1, }, }, From 6062265f94ff65c6239e365454ae30db645b3271 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Thu, 10 Dec 2020 12:02:36 -0500 Subject: [PATCH 113/761] Fixed misspelled check for datacenter (#8505) --- plugins/inputs/vsphere/endpoint.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 49c875d93268b..9c1837713977b 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -439,7 +439,7 @@ func (e *Endpoint) discover(ctx context.Context) error { } // Fill in datacenter names where available (no need to do it for Datacenters) - if res.name != "Datacenter" { + if res.name != "datacenter" { for k, obj := range objects { if obj.parentRef != nil { obj.dcname, _ = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef) From a96c8b49e4f7050bcf17e1948da47dee42ca5b91 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 10 Dec 2020 13:20:37 -0500 Subject: [PATCH 114/761] disable flakey grok test for now --- plugins/parsers/grok/parser_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 5aaa0c967c1ce..c918969d6fc60 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -1117,14 +1117,15 @@ func TestTrimRegression(t *testing.T) { } func TestAdvanceFieldName(t *testing.T) { + t.Skip("waiting for grok package fix") p := &Parser{ Patterns: []string{`rts=%{NUMBER:response-time.s} local=%{IP:local-ip} remote=%{IP:remote.ip}`}, } assert.NoError(t, p.Compile()) metricA, err := p.ParseLine(`rts=1.283 local=127.0.0.1 remote=10.0.0.1`) - require.NotNil(t, metricA) assert.NoError(t, err) + require.NotNil(t, metricA) assert.Equal(t, map[string]interface{}{ "response-time.s": "1.283", From d79a2464d359e414185ec7e360e9f1a9030e5a64 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 10 Dec 2020 13:21:58 -0500 Subject: [PATCH 115/761] docs update --- docker-compose.yml | 2 +- docs/AGGREGATORS_AND_PROCESSORS.md | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 4e94b8f012eab..092a7b9144c3e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -53,7 +53,7 @@ services: - "6432:6432" postgres: image: postgres:alpine - environment: + environment: - POSTGRES_HOST_AUTH_METHOD=trust ports: - "5432:5432" diff --git a/docs/AGGREGATORS_AND_PROCESSORS.md b/docs/AGGREGATORS_AND_PROCESSORS.md index 7be34aed5cef4..934a4b0cf7706 100644 --- a/docs/AGGREGATORS_AND_PROCESSORS.md +++ b/docs/AGGREGATORS_AND_PROCESSORS.md @@ -17,8 +17,8 @@ metrics as they pass through Telegraf: │ Memory │───┤ ┌──▶│ InfluxDB │ │ │ │ │ │ │ └───────────┘ │ ┌─────────────┐ ┌─────────────┐ │ └───────────┘ - │ │ │ │Aggregate │ │ -┌───────────┐ │ │Process │ │ - mean │ │ ┌───────────┐ + │ │ │ │Aggregators │ │ +┌───────────┐ │ │Processors │ │ - mean │ │ ┌───────────┐ │ │ │ │ - transform │ │ - quantiles │ │ │ │ │ MySQL │───┼───▶│ - decorate │────▶│ - min/max │───┼──▶│ File │ │ │ │ │ - filter │ │ - count │ │ │ │ @@ -62,6 +62,6 @@ emit the aggregates and not the original metrics. Since aggregates are created for each measurement, field, and unique tag combination the plugin receives, you can make use of `taginclude` to group -aggregates by specific tags only. +aggregates by specific tags only. **Note:** Aggregator plugins only aggregate metrics within their periods (`now() - period`). Data with a timestamp earlier than `now() - period` cannot be included. From 99287d89e07816bd4f0b60d83edeca4d034961b9 Mon Sep 17 00:00:00 2001 From: Joe Wang Date: Thu, 10 Dec 2020 11:38:01 -0800 Subject: [PATCH 116/761] Update string parsing of allocatable cpu cores in kube_inventory (#8512) --- plugins/inputs/kube_inventory/README.md | 2 ++ plugins/inputs/kube_inventory/node.go | 6 ++++-- plugins/inputs/kube_inventory/node_test.go | 16 +++++++++------- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index d65bfcc38f05d..276a90110bc8f 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -191,9 +191,11 @@ subjects: - node_name - fields: - capacity_cpu_cores + - capacity_millicpu_cores - capacity_memory_bytes - capacity_pods - allocatable_cpu_cores + - allocatable_millicpu_cores - allocatable_memory_bytes - allocatable_pods diff --git a/plugins/inputs/kube_inventory/node.go b/plugins/inputs/kube_inventory/node.go index cccf6897f8aa3..cb123c458c592 100644 --- a/plugins/inputs/kube_inventory/node.go +++ b/plugins/inputs/kube_inventory/node.go @@ -31,7 +31,8 @@ func (ki *KubernetesInventory) gatherNode(n v1.Node, acc telegraf.Accumulator) e for resourceName, val := range n.Status.Capacity { switch resourceName { case "cpu": - fields["capacity_cpu_cores"] = atoi(val.GetString_()) + fields["capacity_cpu_cores"] = convertQuantity(val.GetString_(), 1) + fields["capacity_millicpu_cores"] = convertQuantity(val.GetString_(), 1000) case "memory": fields["capacity_memory_bytes"] = convertQuantity(val.GetString_(), 1) case "pods": @@ -42,7 +43,8 @@ func (ki *KubernetesInventory) gatherNode(n v1.Node, acc telegraf.Accumulator) e for resourceName, val := range n.Status.Allocatable { switch resourceName { case "cpu": - fields["allocatable_cpu_cores"] = atoi(val.GetString_()) + fields["allocatable_cpu_cores"] = convertQuantity(val.GetString_(), 1) + fields["allocatable_millicpu_cores"] = convertQuantity(val.GetString_(), 1000) case "memory": fields["allocatable_memory_bytes"] = convertQuantity(val.GetString_(), 1) case "pods": diff --git a/plugins/inputs/kube_inventory/node_test.go b/plugins/inputs/kube_inventory/node_test.go index 7573dd2c06f6d..68cf463b07e43 100644 --- a/plugins/inputs/kube_inventory/node_test.go +++ b/plugins/inputs/kube_inventory/node_test.go @@ -56,7 +56,7 @@ func TestNode(t *testing.T) { "pods": {String_: toStrPtr("110")}, }, Allocatable: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("16")}, + "cpu": {String_: toStrPtr("1000m")}, "ephemeral_storage_bytes": {String_: toStrPtr("44582761194")}, "hugepages_1Gi_bytes": {String_: toStrPtr("0")}, "hugepages_2Mi_bytes": {String_: toStrPtr("0")}, @@ -103,12 +103,14 @@ func TestNode(t *testing.T) { { Measurement: nodeMeasurement, Fields: map[string]interface{}{ - "capacity_cpu_cores": int64(16), - "capacity_memory_bytes": int64(1.28837533696e+11), - "capacity_pods": int64(110), - "allocatable_cpu_cores": int64(16), - "allocatable_memory_bytes": int64(1.28732676096e+11), - "allocatable_pods": int64(110), + "capacity_cpu_cores": int64(16), + "capacity_millicpu_cores": int64(16000), + "capacity_memory_bytes": int64(1.28837533696e+11), + "capacity_pods": int64(110), + "allocatable_cpu_cores": int64(1), + "allocatable_millicpu_cores": int64(1000), + "allocatable_memory_bytes": int64(1.28732676096e+11), + "allocatable_pods": int64(110), }, Tags: map[string]string{ "node_name": "node1", From 9166a16577502d7accc3c209c9dbd9e4fa7e5c5a Mon Sep 17 00:00:00 2001 From: MaciejMis <66726049+MaciejMis@users.noreply.github.com> Date: Thu, 10 Dec 2020 21:23:27 +0100 Subject: [PATCH 117/761] New Intel PowerStat input plugin (#8488) --- README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/intel_powerstat/README.md | 206 ++++++++ plugins/inputs/intel_powerstat/dto.go | 37 ++ plugins/inputs/intel_powerstat/file.go | 154 ++++++ .../inputs/intel_powerstat/file_mock_test.go | 132 +++++ .../inputs/intel_powerstat/intel_powerstat.go | 486 +++++++++++++++++ .../intel_powerstat_notlinux.go | 3 + .../intel_powerstat/intel_powerstat_test.go | 494 ++++++++++++++++++ plugins/inputs/intel_powerstat/msr.go | 207 ++++++++ .../inputs/intel_powerstat/msr_mock_test.go | 61 +++ plugins/inputs/intel_powerstat/msr_test.go | 134 +++++ plugins/inputs/intel_powerstat/rapl.go | 238 +++++++++ .../inputs/intel_powerstat/rapl_mock_test.go | 66 +++ plugins/inputs/intel_powerstat/rapl_test.go | 115 ++++ .../inputs/intel_powerstat/unit_converter.go | 49 ++ 16 files changed, 2384 insertions(+) create mode 100644 plugins/inputs/intel_powerstat/README.md create mode 100644 plugins/inputs/intel_powerstat/dto.go create mode 100644 plugins/inputs/intel_powerstat/file.go create mode 100644 plugins/inputs/intel_powerstat/file_mock_test.go create mode 100644 plugins/inputs/intel_powerstat/intel_powerstat.go create mode 100644 plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go create mode 100644 plugins/inputs/intel_powerstat/intel_powerstat_test.go create mode 100644 plugins/inputs/intel_powerstat/msr.go create mode 100644 plugins/inputs/intel_powerstat/msr_mock_test.go create mode 100644 plugins/inputs/intel_powerstat/msr_test.go create mode 100644 plugins/inputs/intel_powerstat/rapl.go create mode 100644 plugins/inputs/intel_powerstat/rapl_mock_test.go create mode 100644 plugins/inputs/intel_powerstat/rapl_test.go create mode 100644 plugins/inputs/intel_powerstat/unit_converter.go diff --git a/README.md b/README.md index 6093a253f479f..1999f635fba9d 100644 --- a/README.md +++ b/README.md @@ -214,6 +214,7 @@ For documentation on the latest development code see the [documentation index][d * [influxdb](./plugins/inputs/influxdb) * [influxdb_listener](./plugins/inputs/influxdb_listener) * [influxdb_v2_listener](./plugins/inputs/influxdb_v2_listener) +* [intel_powerstat](plugins/inputs/intel_powerstat) * [intel_rdt](./plugins/inputs/intel_rdt) * [internal](./plugins/inputs/internal) * [interrupts](./plugins/inputs/interrupts) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 6eb5dbb7aafef..6ad302d668e47 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -63,6 +63,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_v2_listener" + _ "github.com/influxdata/telegraf/plugins/inputs/intel_powerstat" _ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt" _ "github.com/influxdata/telegraf/plugins/inputs/internal" _ "github.com/influxdata/telegraf/plugins/inputs/interrupts" diff --git a/plugins/inputs/intel_powerstat/README.md b/plugins/inputs/intel_powerstat/README.md new file mode 100644 index 0000000000000..9efb4176d525b --- /dev/null +++ b/plugins/inputs/intel_powerstat/README.md @@ -0,0 +1,206 @@ +# Intel PowerStat Input Plugin + +Telemetry frameworks allow users to monitor critical platform level metrics. +Key source of platform telemetry is power domain that is beneficial for MANO/Monitoring&Analytics systems +to take preventive/corrective actions based on platform busyness, CPU temperature, actual CPU utilization +and power statistics. Main use cases are power saving and workload migration. + +Intel PowerStat plugin supports Intel based platforms and assumes presence of Linux based OS. + +### Configuration: +```toml +# Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and per-CPU metrics like temperature, power and utilization. +[[inputs.intel_powerstat]] + ## All global metrics are always collected by Intel PowerStat plugin. + ## User can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array. + ## Empty array means no per-CPU specific metrics will be collected by the plugin - in this case only platform level + ## telemetry will be exposed by Intel PowerStat plugin. + ## Supported options: + ## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles" + # cpu_metrics = [] +``` +### Example: Configuration with no per-CPU telemetry +This configuration allows getting global metrics (processor package specific), no per-CPU metrics are collected: +```toml +[[inputs.intel_powerstat]] + cpu_metrics = [] +``` + +### Example: Configuration with no per-CPU telemetry - equivalent case +This configuration allows getting global metrics (processor package specific), no per-CPU metrics are collected: +```toml +[[inputs.intel_powerstat]] +``` + +### Example: Configuration for CPU Temperature and Frequency only +This configuration allows getting global metrics plus subset of per-CPU metrics (CPU Temperature and Current Frequency): +```toml +[[inputs.intel_powerstat]] + cpu_metrics = ["cpu_frequency", "cpu_temperature"] +``` + +### Example: Configuration with all available metrics +This configuration allows getting global metrics and all per-CPU metrics: +```toml +[[inputs.intel_powerstat]] + cpu_metrics = ["cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles"] +``` + +### SW Dependencies: +Plugin is based on Linux Kernel modules that expose specific metrics over `sysfs` or `devfs` interfaces. +The following dependencies are expected by plugin: +- _intel-rapl_ module which exposes Intel Runtime Power Limiting metrics over `sysfs` (`/sys/devices/virtual/powercap/intel-rapl`), +- _msr_ kernel module that provides access to processor model specific registers over `devfs` (`/dev/cpu/cpu%d/msr`), +- _cpufreq_ kernel module - which exposes per-CPU Frequency over `sysfs` (`/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq`). + +Minimum kernel version required is 3.13 to satisfy all requirements. + +Please make sure that kernel modules are loaded and running. You might have to manually enable them by using `modprobe`. +Exact commands to be executed are: +``` +sudo modprobe cpufreq-stats +sudo modprobe msr +sudo modprobe intel_rapl +``` + +**Telegraf with Intel PowerStat plugin enabled may require root access to read model specific registers (MSRs)** +to retrieve data for calculation of most critical per-CPU specific metrics: +- `cpu_busy_frequency_mhz` +- `cpu_temperature_celsius` +- `cpu_c1_state_residency_percent` +- `cpu_c6_state_residency_percent` +- `cpu_busy_cycles_percent` + +To expose other Intel PowerStat metrics root access may or may not be required (depending on OS type or configuration). + +### HW Dependencies: +Specific metrics require certain processor features to be present, otherwise Intel PowerStat plugin won't be able to +read them. When using Linux Kernel based OS, user can detect supported processor features reading `/proc/cpuinfo` file. +Plugin assumes crucial properties are the same for all CPU cores in the system. +The following processor properties are examined in more detail in this section: +processor _cpu family_, _model_ and _flags_. +The following processor properties are required by the plugin: +- Processor _cpu family_ must be Intel (0x6) - since data used by the plugin assumes Intel specific +model specific registers for all features +- The following processor flags shall be present: + - "_msr_" shall be present for plugin to read platform data from processor model specific registers and collect + the following metrics: _powerstat_core.cpu_temperature_, _powerstat_core.cpu_busy_frequency_, + _powerstat_core.cpu_busy_cycles_, _powerstat_core.cpu_c1_state_residency_, _powerstat_core._cpu_c6_state_residency_ + - "_aperfmperf_" shall be present to collect the following metrics: _powerstat_core.cpu_busy_frequency_, + _powerstat_core.cpu_busy_cycles_, _powerstat_core.cpu_c1_state_residency_ + - "_dts_" shall be present to collect _powerstat_core.cpu_temperature_ +- Processor _Model number_ must be one of the following values for plugin to read _powerstat_core.cpu_c1_state_residency_ +and _powerstat_core.cpu_c6_state_residency_ metrics: + +| Model number | Processor name | +|-----|-------------| +| 0x37 | Intel Atom® Bay Trail | +| 0x4D | Intel Atom® Avaton | +| 0x5C | Intel Atom® Apollo Lake | +| 0x5F | Intel Atom® Denverton | +| 0x7A | Intel Atom® Goldmont | +| 0x4C | Intel Atom® Airmont | +| 0x86 | Intel Atom® Jacobsville | +| 0x96 | Intel Atom® Elkhart Lake | +| 0x9C | Intel Atom® Jasper Lake | +| 0x1A | Intel Nehalem-EP | +| 0x1E | Intel Nehalem | +| 0x1F | Intel Nehalem-G | +| 0x2E | Intel Nehalem-EX | +| 0x25 | Intel Westmere | +| 0x2C | Intel Westmere-EP | +| 0x2F | Intel Westmere-EX | +| 0x2A | Intel Sandybridge | +| 0x2D | Intel Sandybridge-X | +| 0x3A | Intel Ivybridge | +| 0x3E | Intel Ivybridge-X | +| 0x4E | Intel Atom® Silvermont-MID | +| 0x5E | Intel Skylake | +| 0x55 | Intel Skylake-X | +| 0x8E | Intel Kabylake-L | +| 0x9E | Intel Kabylake | +| 0x6A | Intel Icelake-X | +| 0x6C | Intel Icelake-D | +| 0x7D | Intel Icelake | +| 0x7E | Intel Icelake-L | +| 0x9D | Intel Icelake-NNPI | +| 0x3C | Intel Haswell | +| 0x3F | Intel Haswell-X | +| 0x45 | Intel Haswell-L | +| 0x46 | Intel Haswell-G | +| 0x3D | Intel Broadwell | +| 0x47 | Intel Broadwell-G | +| 0x4F | Intel Broadwell-X | +| 0x56 | Intel Broadwell-D | +| 0x66 | Intel Cannonlake-L | +| 0x57 | Intel Xeon® PHI Knights Landing | +| 0x85 | Intel Xeon® PHI Knights Mill | +| 0xA5 | Intel CometLake | +| 0xA6 | Intel CometLake-L | +| 0x8F | Intel Sapphire Rapids X | +| 0x8C | Intel TigerLake-L | +| 0x8D | Intel TigerLake | + +### Metrics +All metrics collected by Intel PowerStat plugin are collected in fixed intervals. +Metrics that reports processor C-state residency or power are calculated over elapsed intervals. +When starting to measure metrics, plugin skips first iteration of metrics if they are based on deltas with previous value. + +**The following measurements are supported by Intel PowerStat plugin:** +- powerstat_core + + - The following Tags are returned by plugin with powerstat_core measurements: + + | Tag | Description | + |-----|-------------| + | `package_id` | ID of platform package/socket | + | `core_id` | ID of physical processor core | + | `cpu_id` | ID of logical processor core | + Measurement powerstat_core metrics are collected per-CPU (cpu_id is the key) + while core_id and package_id tags are additional topology information. + + - Available metrics for powerstat_core measurement + + | Metric name (field) | Description | Units | + |-----|-------------|-----| + | `cpu_frequency_mhz` | Current operational frequency of CPU Core | MHz | + | `cpu_busy_frequency_mhz` | CPU Core Busy Frequency measured as frequency adjusted to CPU Core busy cycles | MHz | + | `cpu_temperature_celsius` | Current temperature of CPU Core | Celsius degrees | + | `cpu_c1_state_residency_percent` | Percentage of time that CPU Core spent in C1 Core residency state | % | + | `cpu_c6_state_residency_percent` | Percentage of time that CPU Core spent in C6 Core residency state | % | + | `cpu_busy_cycles_percent` | CPU Core Busy cycles as a ratio of Cycles spent in C0 state residency to all cycles executed by CPU Core | % | + + + +- powerstat_package + + - The following Tags are returned by plugin with powerstat_package measurements: + + | Tag | Description | + |-----|-------------| + | `package_id` | ID of platform package/socket | + Measurement powerstat_package metrics are collected per processor package - _package_id_ tag indicates which + package metric refers to. + + - Available metrics for powerstat_package measurement + + | Metric name (field) | Description | Units | + |-----|-------------|-----| + | `thermal_design_power_watts` | Maximum Thermal Design Power (TDP) available for processor package | Watts | + | `current_power_consumption_watts` | Current power consumption of processor package | Watts | + | `current_dram_power_consumption_watts` | Current power consumption of processor package DRAM subsystem | Watts | + + +### Example Output: + +``` +powerstat_package,host=ubuntu,package_id=0 thermal_design_power_watts=160 1606494744000000000 +powerstat_package,host=ubuntu,package_id=0 current_power_consumption_watts=35 1606494744000000000 +powerstat_package,host=ubuntu,package_id=0 current_dram_power_consumption_watts=13.94 1606494744000000000 +powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_frequency_mhz=1200.29 1606494744000000000 +powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_temperature_celsius=34i 1606494744000000000 +powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_c6_state_residency_percent=92.52 1606494744000000000 +powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_busy_cycles_percent=0.8 1606494744000000000 +powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_c1_state_residency_percent=6.68 1606494744000000000 +powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_busy_frequency_mhz=1213.24 1606494744000000000 +``` diff --git a/plugins/inputs/intel_powerstat/dto.go b/plugins/inputs/intel_powerstat/dto.go new file mode 100644 index 0000000000000..eb3da0bc269f7 --- /dev/null +++ b/plugins/inputs/intel_powerstat/dto.go @@ -0,0 +1,37 @@ +package intel_powerstat + +type msrData struct { + mperf uint64 + aperf uint64 + timeStampCounter uint64 + c3 uint64 + c6 uint64 + c7 uint64 + throttleTemp uint64 + temp uint64 + mperfDelta uint64 + aperfDelta uint64 + timeStampCounterDelta uint64 + c3Delta uint64 + c6Delta uint64 + c7Delta uint64 + readDate int64 +} + +type raplData struct { + dramCurrentEnergy float64 + socketCurrentEnergy float64 + socketEnergy float64 + dramEnergy float64 + readDate int64 +} + +type cpuInfo struct { + physicalID string + coreID string + cpuID string + vendorID string + cpuFamily string + model string + flags string +} diff --git a/plugins/inputs/intel_powerstat/file.go b/plugins/inputs/intel_powerstat/file.go new file mode 100644 index 0000000000000..7953726fd9ba8 --- /dev/null +++ b/plugins/inputs/intel_powerstat/file.go @@ -0,0 +1,154 @@ +// +build linux + +package intel_powerstat + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" +) + +// fileService is responsible for handling operations on files. +type fileService interface { + getCPUInfoStats() (map[string]*cpuInfo, error) + getStringsMatchingPatternOnPath(path string) ([]string, error) + readFile(path string) ([]byte, error) + readFileToFloat64(reader io.Reader) (float64, int64, error) + readFileAtOffsetToUint64(reader io.ReaderAt, offset int64) (uint64, error) +} + +type fileServiceImpl struct { +} + +// getCPUInfoStats retrieves basic information about CPU from /proc/cpuinfo. +func (fs *fileServiceImpl) getCPUInfoStats() (map[string]*cpuInfo, error) { + path := "/proc/cpuinfo" + cpuInfoFile, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("error while reading %s, err: %v", path, err) + } + defer cpuInfoFile.Close() + + scanner := bufio.NewScanner(cpuInfoFile) + + processorRegexp := regexp.MustCompile(`^processor\t+:\s([0-9]+)\n*$`) + physicalIDRegexp := regexp.MustCompile(`^physical id\t+:\s([0-9]+)\n*$`) + coreIDRegexp := regexp.MustCompile(`^core id\t+:\s([0-9]+)\n*$`) + vendorIDRegexp := regexp.MustCompile(`^vendor_id\t+:\s([a-zA-Z]+)\n*$`) + cpuFamilyRegexp := regexp.MustCompile(`^cpu\sfamily\t+:\s([0-9]+)\n*$`) + modelRegexp := regexp.MustCompile(`^model\t+:\s([0-9]+)\n*$`) + flagsRegexp := regexp.MustCompile(`^flags\t+:\s(.+)\n*$`) + + stats := make(map[string]*cpuInfo) + currentInfo := &cpuInfo{} + + for scanner.Scan() { + line := scanner.Text() + + processorRes := processorRegexp.FindStringSubmatch(line) + if len(processorRes) > 1 { + currentInfo = &cpuInfo{ + cpuID: processorRes[1], + } + } + + vendorIDRes := vendorIDRegexp.FindStringSubmatch(line) + if len(vendorIDRes) > 1 { + currentInfo.vendorID = vendorIDRes[1] + } + + physicalIDRes := physicalIDRegexp.FindStringSubmatch(line) + if len(physicalIDRes) > 1 { + currentInfo.physicalID = physicalIDRes[1] + } + + coreIDRes := coreIDRegexp.FindStringSubmatch(line) + if len(coreIDRes) > 1 { + currentInfo.coreID = coreIDRes[1] + } + + cpuFamilyRes := cpuFamilyRegexp.FindStringSubmatch(line) + if len(cpuFamilyRes) > 1 { + currentInfo.cpuFamily = cpuFamilyRes[1] + } + + modelRes := modelRegexp.FindStringSubmatch(line) + if len(modelRes) > 1 { + currentInfo.model = modelRes[1] + } + + flagsRes := flagsRegexp.FindStringSubmatch(line) + if len(flagsRes) > 1 { + currentInfo.flags = flagsRes[1] + + // Flags is the last value we have to acquire, so currentInfo is added to map. + stats[currentInfo.cpuID] = currentInfo + } + } + + return stats, nil +} + +// getStringsMatchingPatternOnPath looks for filenames and directory names on path matching given regexp. +// It ignores file system errors such as I/O errors reading directories. The only possible returned error +// is ErrBadPattern, when pattern is malformed. +func (fs *fileServiceImpl) getStringsMatchingPatternOnPath(path string) ([]string, error) { + return filepath.Glob(path) +} + +// readFile reads file on path and return string content. +func (fs *fileServiceImpl) readFile(path string) ([]byte, error) { + out, err := ioutil.ReadFile(path) + if err != nil { + return make([]byte, 0), err + } + return out, nil +} + +// readFileToFloat64 reads file on path and tries to parse content to float64. +func (fs *fileServiceImpl) readFileToFloat64(reader io.Reader) (float64, int64, error) { + read, err := ioutil.ReadAll(reader) + if err != nil { + return 0, 0, err + } + + readDate := time.Now().UnixNano() + + // Remove new line character + trimmedString := strings.TrimRight(string(read), "\n") + // Parse result to float64 + parsedValue, err := strconv.ParseFloat(trimmedString, 64) + if err != nil { + return 0, 0, fmt.Errorf("error parsing string to float for %s", trimmedString) + } + + return parsedValue, readDate, nil +} + +// readFileAtOffsetToUint64 reads 8 bytes from passed file at given offset. +func (fs *fileServiceImpl) readFileAtOffsetToUint64(reader io.ReaderAt, offset int64) (uint64, error) { + buffer := make([]byte, 8) + + if offset == 0 { + return 0, fmt.Errorf("file offset %d should not be 0", offset) + } + + _, err := reader.ReadAt(buffer, offset) + if err != nil { + return 0, fmt.Errorf("error on reading file at offset %d, err: %v", offset, err) + } + + return binary.LittleEndian.Uint64(buffer), nil +} + +func newFileService() *fileServiceImpl { + return &fileServiceImpl{} +} diff --git a/plugins/inputs/intel_powerstat/file_mock_test.go b/plugins/inputs/intel_powerstat/file_mock_test.go new file mode 100644 index 0000000000000..ab4bd8c57baa6 --- /dev/null +++ b/plugins/inputs/intel_powerstat/file_mock_test.go @@ -0,0 +1,132 @@ +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. + +package intel_powerstat + +import ( + io "io" + + mock "github.com/stretchr/testify/mock" +) + +// mockFileService is an autogenerated mock type for the fileService type +type mockFileService struct { + mock.Mock +} + +// getCPUInfoStats provides a mock function with given fields: +func (_m *mockFileService) getCPUInfoStats() (map[string]*cpuInfo, error) { + ret := _m.Called() + + var r0 map[string]*cpuInfo + if rf, ok := ret.Get(0).(func() map[string]*cpuInfo); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*cpuInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// getStringsMatchingPatternOnPath provides a mock function with given fields: path +func (_m *mockFileService) getStringsMatchingPatternOnPath(path string) ([]string, error) { + ret := _m.Called(path) + + var r0 []string + if rf, ok := ret.Get(0).(func(string) []string); ok { + r0 = rf(path) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// readFile provides a mock function with given fields: path +func (_m *mockFileService) readFile(path string) ([]byte, error) { + ret := _m.Called(path) + + var r0 []byte + if rf, ok := ret.Get(0).(func(string) []byte); ok { + r0 = rf(path) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// readFileAtOffsetToUint64 provides a mock function with given fields: reader, offset +func (_m *mockFileService) readFileAtOffsetToUint64(reader io.ReaderAt, offset int64) (uint64, error) { + ret := _m.Called(reader, offset) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(io.ReaderAt, int64) uint64); ok { + r0 = rf(reader, offset) + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(io.ReaderAt, int64) error); ok { + r1 = rf(reader, offset) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// readFileToFloat64 provides a mock function with given fields: reader +func (_m *mockFileService) readFileToFloat64(reader io.Reader) (float64, int64, error) { + ret := _m.Called(reader) + + var r0 float64 + if rf, ok := ret.Get(0).(func(io.Reader) float64); ok { + r0 = rf(reader) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 int64 + if rf, ok := ret.Get(1).(func(io.Reader) int64); ok { + r1 = rf(reader) + } else { + r1 = ret.Get(1).(int64) + } + + var r2 error + if rf, ok := ret.Get(2).(func(io.Reader) error); ok { + r2 = rf(reader) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} diff --git a/plugins/inputs/intel_powerstat/intel_powerstat.go b/plugins/inputs/intel_powerstat/intel_powerstat.go new file mode 100644 index 0000000000000..9340fdec814b1 --- /dev/null +++ b/plugins/inputs/intel_powerstat/intel_powerstat.go @@ -0,0 +1,486 @@ +// +build linux + +package intel_powerstat + +import ( + "fmt" + "math/big" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const ( + cpuFrequency = "cpu_frequency" + cpuBusyFrequency = "cpu_busy_frequency" + cpuTemperature = "cpu_temperature" + cpuC1StateResidency = "cpu_c1_state_residency" + cpuC6StateResidency = "cpu_c6_state_residency" + cpuBusyCycles = "cpu_busy_cycles" + percentageMultiplier = 100 +) + +// PowerStat plugin enables monitoring of platform metrics (power, TDP) and Core metrics like temperature, power and utilization. +type PowerStat struct { + CPUMetrics []string `toml:"cpu_metrics"` + Log telegraf.Logger `toml:"-"` + + fs fileService + rapl raplService + msr msrService + + cpuFrequency bool + cpuBusyFrequency bool + cpuTemperature bool + cpuC1StateResidency bool + cpuC6StateResidency bool + cpuBusyCycles bool + cpuInfo map[string]*cpuInfo + skipFirstIteration bool +} + +// Description returns a one-sentence description on the plugin. +func (p *PowerStat) Description() string { + return `Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and Core metrics like temperature, power and utilization.` +} + +// SampleConfig returns the default configuration of the plugin. +func (p *PowerStat) SampleConfig() string { + return ` + ## All global metrics are always collected by Intel PowerStat plugin. + ## User can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array. + ## Empty array means no per-CPU specific metrics will be collected by the plugin - in this case only platform level + ## telemetry will be exposed by Intel PowerStat plugin. + ## Supported options: + ## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles" + # cpu_metrics = [] +` +} + +// Init performs one time setup of the plugin. +func (p *PowerStat) Init() error { + p.parseCPUMetricsConfig() + err := p.verifyProcessor() + if err != nil { + return err + } + // Initialize MSR service only when there is at least one core metric enabled. + if p.cpuFrequency || p.cpuBusyFrequency || p.cpuTemperature || p.cpuC1StateResidency || + p.cpuC6StateResidency || p.cpuBusyCycles { + p.msr = newMsrServiceWithFs(p.Log, p.fs) + } + p.rapl = newRaplServiceWithFs(p.Log, p.fs) + + return nil +} + +// Gather takes in an accumulator and adds the metrics that the Input gathers. +func (p *PowerStat) Gather(acc telegraf.Accumulator) error { + p.addGlobalMetrics(acc) + + if p.areCoreMetricsEnabled() { + p.addPerCoreMetrics(acc) + } + + // Gathering the first iteration of metrics was skipped for most of them because they are based on delta calculations. + p.skipFirstIteration = false + + return nil +} + +func (p *PowerStat) addGlobalMetrics(acc telegraf.Accumulator) { + // Prepare RAPL data each gather because there is a possibility to disable rapl kernel module + p.rapl.initializeRaplData() + + for socketID := range p.rapl.getRaplData() { + err := p.rapl.retrieveAndCalculateData(socketID) + if err != nil { + // In case of an error skip calculating metrics for this socket + p.Log.Errorf("error fetching rapl data for socket %s, err: %v", socketID, err) + continue + } + p.addThermalDesignPowerMetric(socketID, acc) + if p.skipFirstIteration { + continue + } + p.addCurrentSocketPowerConsumption(socketID, acc) + p.addCurrentDramPowerConsumption(socketID, acc) + } +} + +func (p *PowerStat) addThermalDesignPowerMetric(socketID string, acc telegraf.Accumulator) { + maxPower, err := p.rapl.getConstraintMaxPowerWatts(socketID) + if err != nil { + p.Log.Errorf("error while retrieving TDP of the socket %s, err: %v", socketID, err) + return + } + + tags := map[string]string{ + "package_id": socketID, + } + + fields := map[string]interface{}{ + "thermal_design_power_watts": roundFloatToNearestTwoDecimalPlaces(maxPower), + } + + acc.AddGauge("powerstat_package", fields, tags) +} + +func (p *PowerStat) addCurrentSocketPowerConsumption(socketID string, acc telegraf.Accumulator) { + tags := map[string]string{ + "package_id": socketID, + } + + fields := map[string]interface{}{ + "current_power_consumption_watts": roundFloatToNearestTwoDecimalPlaces(p.rapl.getRaplData()[socketID].socketCurrentEnergy), + } + + acc.AddGauge("powerstat_package", fields, tags) +} + +func (p *PowerStat) addCurrentDramPowerConsumption(socketID string, acc telegraf.Accumulator) { + tags := map[string]string{ + "package_id": socketID, + } + + fields := map[string]interface{}{ + "current_dram_power_consumption_watts": roundFloatToNearestTwoDecimalPlaces(p.rapl.getRaplData()[socketID].dramCurrentEnergy), + } + + acc.AddGauge("powerstat_package", fields, tags) +} + +func (p *PowerStat) addPerCoreMetrics(acc telegraf.Accumulator) { + var wg sync.WaitGroup + wg.Add(len(p.msr.getCPUCoresData())) + + for cpuID := range p.msr.getCPUCoresData() { + go p.addMetricsForSingleCore(cpuID, acc, &wg) + } + + wg.Wait() +} + +func (p *PowerStat) addMetricsForSingleCore(cpuID string, acc telegraf.Accumulator, wg *sync.WaitGroup) { + defer wg.Done() + + if p.cpuFrequency { + p.addCPUFrequencyMetric(cpuID, acc) + } + + // Read data from MSR only if required + if p.cpuC1StateResidency || p.cpuC6StateResidency || p.cpuBusyCycles || p.cpuTemperature || + p.cpuBusyFrequency { + err := p.msr.openAndReadMsr(cpuID) + if err != nil { + // In case of an error exit the function. All metrics past this point are dependant on MSR. + p.Log.Debugf("error while reading msr: %v", err) + return + } + } + + if p.cpuTemperature { + p.addCPUTemperatureMetric(cpuID, acc) + } + + // cpuBusyFrequency metric does some calculations inside that are required in another plugin cycle. + if p.cpuBusyFrequency { + p.addCPUBusyFrequencyMetric(cpuID, acc) + } + + if !p.skipFirstIteration { + if p.cpuC1StateResidency { + p.addCPUC1StateResidencyMetric(cpuID, acc) + } + + if p.cpuC6StateResidency { + p.addCPUC6StateResidencyMetric(cpuID, acc) + } + + if p.cpuBusyCycles { + p.addCPUBusyCyclesMetric(cpuID, acc) + } + } +} + +func (p *PowerStat) addCPUFrequencyMetric(cpuID string, acc telegraf.Accumulator) { + frequency, err := p.msr.retrieveCPUFrequencyForCore(cpuID) + + // In case of an error leave func + if err != nil { + p.Log.Debugf("error while reading file: %v", err) + return + } + + cpu := p.cpuInfo[cpuID] + tags := map[string]string{ + "package_id": cpu.physicalID, + "core_id": cpu.coreID, + "cpu_id": cpu.cpuID, + } + + fields := map[string]interface{}{ + "cpu_frequency_mhz": roundFloatToNearestTwoDecimalPlaces(frequency), + } + + acc.AddGauge("powerstat_core", fields, tags) +} + +func (p *PowerStat) addCPUTemperatureMetric(cpuID string, acc telegraf.Accumulator) { + coresData := p.msr.getCPUCoresData() + temp := coresData[cpuID].throttleTemp - coresData[cpuID].temp + + cpu := p.cpuInfo[cpuID] + tags := map[string]string{ + "package_id": cpu.physicalID, + "core_id": cpu.coreID, + "cpu_id": cpu.cpuID, + } + fields := map[string]interface{}{ + "cpu_temperature_celsius": temp, + } + + acc.AddGauge("powerstat_core", fields, tags) +} + +func (p *PowerStat) addCPUBusyFrequencyMetric(cpuID string, acc telegraf.Accumulator) { + coresData := p.msr.getCPUCoresData() + mperfDelta := coresData[cpuID].mperfDelta + // Avoid division by 0 + if mperfDelta == 0 { + p.Log.Errorf("mperf delta should not equal 0 on core %s", cpuID) + return + } + aperfMperf := float64(coresData[cpuID].aperfDelta) / float64(mperfDelta) + tsc := convertProcessorCyclesToHertz(coresData[cpuID].timeStampCounterDelta) + timeNow := time.Now().UnixNano() + interval := convertNanoSecondsToSeconds(timeNow - coresData[cpuID].readDate) + coresData[cpuID].readDate = timeNow + + if p.skipFirstIteration { + return + } + + if interval == 0 { + p.Log.Errorf("interval between last two Telegraf cycles is 0") + return + } + + busyMhzValue := roundFloatToNearestTwoDecimalPlaces(tsc * aperfMperf / interval) + + cpu := p.cpuInfo[cpuID] + tags := map[string]string{ + "package_id": cpu.physicalID, + "core_id": cpu.coreID, + "cpu_id": cpu.cpuID, + } + fields := map[string]interface{}{ + "cpu_busy_frequency_mhz": busyMhzValue, + } + + acc.AddGauge("powerstat_core", fields, tags) +} + +func (p *PowerStat) addCPUC1StateResidencyMetric(cpuID string, acc telegraf.Accumulator) { + coresData := p.msr.getCPUCoresData() + timestampDeltaBig := new(big.Int).SetUint64(coresData[cpuID].timeStampCounterDelta) + // Avoid division by 0 + if timestampDeltaBig.Sign() < 1 { + p.Log.Errorf("timestamp delta value %v should not be lower than 1", timestampDeltaBig) + return + } + + // Since counter collection is not atomic it may happen that sum of C0, C1, C3, C6 and C7 + // is bigger value than TSC, in such case C1 residency shall be set to 0. + // Operating on big.Int to avoid overflow + mperfDeltaBig := new(big.Int).SetUint64(coresData[cpuID].mperfDelta) + c3DeltaBig := new(big.Int).SetUint64(coresData[cpuID].c3Delta) + c6DeltaBig := new(big.Int).SetUint64(coresData[cpuID].c6Delta) + c7DeltaBig := new(big.Int).SetUint64(coresData[cpuID].c7Delta) + + c1Big := new(big.Int).Sub(timestampDeltaBig, mperfDeltaBig) + c1Big.Sub(c1Big, c3DeltaBig) + c1Big.Sub(c1Big, c6DeltaBig) + c1Big.Sub(c1Big, c7DeltaBig) + + if c1Big.Sign() < 0 { + c1Big = c1Big.SetInt64(0) + } + c1Value := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * float64(c1Big.Uint64()) / float64(timestampDeltaBig.Uint64())) + + cpu := p.cpuInfo[cpuID] + tags := map[string]string{ + "package_id": cpu.physicalID, + "core_id": cpu.coreID, + "cpu_id": cpu.cpuID, + } + fields := map[string]interface{}{ + "cpu_c1_state_residency_percent": c1Value, + } + + acc.AddGauge("powerstat_core", fields, tags) +} + +func (p *PowerStat) addCPUC6StateResidencyMetric(cpuID string, acc telegraf.Accumulator) { + coresData := p.msr.getCPUCoresData() + // Avoid division by 0 + if coresData[cpuID].timeStampCounterDelta == 0 { + p.Log.Errorf("timestamp counter on offset %s should not equal 0 on cpuID %s", + timestampCounterLocation, cpuID) + return + } + c6Value := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * + float64(coresData[cpuID].c6Delta) / float64(coresData[cpuID].timeStampCounterDelta)) + + cpu := p.cpuInfo[cpuID] + tags := map[string]string{ + "package_id": cpu.physicalID, + "core_id": cpu.coreID, + "cpu_id": cpu.cpuID, + } + fields := map[string]interface{}{ + "cpu_c6_state_residency_percent": c6Value, + } + + acc.AddGauge("powerstat_core", fields, tags) +} + +func (p *PowerStat) addCPUBusyCyclesMetric(cpuID string, acc telegraf.Accumulator) { + coresData := p.msr.getCPUCoresData() + // Avoid division by 0 + if coresData[cpuID].timeStampCounterDelta == 0 { + p.Log.Errorf("timestamp counter on offset %s should not equal 0 on cpuID %s", + timestampCounterLocation, cpuID) + return + } + busyCyclesValue := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * + float64(coresData[cpuID].mperfDelta) / float64(coresData[cpuID].timeStampCounterDelta)) + cpu := p.cpuInfo[cpuID] + tags := map[string]string{ + "package_id": cpu.physicalID, + "core_id": cpu.coreID, + "cpu_id": cpu.cpuID, + } + fields := map[string]interface{}{ + "cpu_busy_cycles_percent": busyCyclesValue, + } + + acc.AddGauge("powerstat_core", fields, tags) +} + +func (p *PowerStat) parseCPUMetricsConfig() { + if len(p.CPUMetrics) == 0 { + return + } + + if contains(p.CPUMetrics, cpuFrequency) { + p.cpuFrequency = true + } + + if contains(p.CPUMetrics, cpuC1StateResidency) { + p.cpuC1StateResidency = true + } + + if contains(p.CPUMetrics, cpuC6StateResidency) { + p.cpuC6StateResidency = true + } + + if contains(p.CPUMetrics, cpuBusyCycles) { + p.cpuBusyCycles = true + } + + if contains(p.CPUMetrics, cpuBusyFrequency) { + p.cpuBusyFrequency = true + } + + if contains(p.CPUMetrics, cpuTemperature) { + p.cpuTemperature = true + } +} + +func (p *PowerStat) verifyProcessor() error { + allowedProcessorModelsForC1C6 := []int64{0x37, 0x4D, 0x5C, 0x5F, 0x7A, 0x4C, 0x86, 0x96, 0x9C, + 0x1A, 0x1E, 0x1F, 0x2E, 0x25, 0x2C, 0x2F, 0x2A, 0x2D, 0x3A, 0x3E, 0x4E, 0x5E, 0x55, 0x8E, + 0x9E, 0x6A, 0x6C, 0x7D, 0x7E, 0x9D, 0x3C, 0x3F, 0x45, 0x46, 0x3D, 0x47, 0x4F, 0x56, + 0x66, 0x57, 0x85, 0xA5, 0xA6, 0x8F, 0x8C, 0x8D} + stats, err := p.fs.getCPUInfoStats() + if err != nil { + return err + } + + p.cpuInfo = stats + + // First CPU is sufficient for verification. + firstCPU := p.cpuInfo["0"] + if firstCPU == nil { + return fmt.Errorf("first core not found while parsing /proc/cpuinfo") + } + + if firstCPU.vendorID != "GenuineIntel" || firstCPU.cpuFamily != "6" { + return fmt.Errorf("Intel processor not found, vendorId: %s", firstCPU.vendorID) + } + + if !contains(convertIntegerArrayToStringArray(allowedProcessorModelsForC1C6), firstCPU.model) { + p.cpuC1StateResidency = false + p.cpuC6StateResidency = false + } + + if !strings.Contains(firstCPU.flags, "msr") { + p.cpuTemperature = false + p.cpuC6StateResidency = false + p.cpuBusyCycles = false + p.cpuBusyFrequency = false + p.cpuC1StateResidency = false + } + + if !strings.Contains(firstCPU.flags, "aperfmperf") { + p.cpuBusyFrequency = false + p.cpuBusyCycles = false + p.cpuC1StateResidency = false + } + + if !strings.Contains(firstCPU.flags, "dts") { + p.cpuTemperature = false + } + + return nil +} + +func contains(slice []string, str string) bool { + for _, v := range slice { + if v == str { + return true + } + } + + return false +} + +func (p *PowerStat) areCoreMetricsEnabled() bool { + return p.msr != nil && len(p.msr.getCPUCoresData()) > 0 +} + +// newPowerStat creates and returns PowerStat struct. +func newPowerStat(fs fileService) *PowerStat { + p := &PowerStat{ + cpuFrequency: false, + cpuC1StateResidency: false, + cpuC6StateResidency: false, + cpuBusyCycles: false, + cpuTemperature: false, + cpuBusyFrequency: false, + skipFirstIteration: true, + fs: fs, + } + + return p +} + +func init() { + inputs.Add("intel_powerstat", func() telegraf.Input { + return newPowerStat(newFileService()) + }) +} diff --git a/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go b/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go new file mode 100644 index 0000000000000..f46755cee92b7 --- /dev/null +++ b/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go @@ -0,0 +1,3 @@ +// +build !linux + +package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/intel_powerstat_test.go b/plugins/inputs/intel_powerstat/intel_powerstat_test.go new file mode 100644 index 0000000000000..13006de3c6e81 --- /dev/null +++ b/plugins/inputs/intel_powerstat/intel_powerstat_test.go @@ -0,0 +1,494 @@ +// +build linux + +package intel_powerstat + +import ( + "errors" + "strconv" + "sync" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestInitPlugin(t *testing.T) { + cores := []string{"cpu0", "cpu1", "cpu2", "cpu3"} + power, fsMock, _, _ := getPowerWithMockedServices() + + fsMock.On("getCPUInfoStats", mock.Anything). + Return(nil, errors.New("error getting cpu stats")).Once() + require.Error(t, power.Init()) + + fsMock.On("getCPUInfoStats", mock.Anything). + Return(make(map[string]*cpuInfo), nil).Once() + require.Error(t, power.Init()) + + fsMock.On("getCPUInfoStats", mock.Anything). + Return(map[string]*cpuInfo{"0": { + vendorID: "GenuineIntel", + cpuFamily: "test", + }}, nil).Once() + require.Error(t, power.Init()) + + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(cores, nil).Once(). + On("getCPUInfoStats", mock.Anything). + Return(map[string]*cpuInfo{"0": { + vendorID: "GenuineIntel", + cpuFamily: "6", + }}, nil) + // Verify MSR service initialization. + power.cpuFrequency = true + require.NoError(t, power.Init()) + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, len(cores), len(power.msr.getCPUCoresData())) + + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(nil, errors.New("error during getStringsMatchingPatternOnPath")).Once() + + // In case of an error when fetching cpu cores plugin should proceed with execution. + require.NoError(t, power.Init()) + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, 0, len(power.msr.getCPUCoresData())) +} + +func TestParseCPUMetricsConfig(t *testing.T) { + power, _, _, _ := getPowerWithMockedServices() + disableCoreMetrics(power) + + power.CPUMetrics = []string{ + "cpu_frequency", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles", "cpu_temperature", + "cpu_busy_frequency", + } + power.parseCPUMetricsConfig() + verifyCoreMetrics(t, power, true) + disableCoreMetrics(power) + verifyCoreMetrics(t, power, false) + + power.CPUMetrics = []string{} + power.parseCPUMetricsConfig() + + power.CPUMetrics = []string{"cpu_c6_state_residency", "#@$sdkjdfsdf3@", "1pu_c1_state_residency"} + power.parseCPUMetricsConfig() + require.Equal(t, false, power.cpuC1StateResidency) + require.Equal(t, true, power.cpuC6StateResidency) + disableCoreMetrics(power) + verifyCoreMetrics(t, power, false) + + power.CPUMetrics = []string{"#@$sdkjdfsdf3@", "1pu_c1_state_residency", "123"} + power.parseCPUMetricsConfig() + verifyCoreMetrics(t, power, false) +} + +func verifyCoreMetrics(t *testing.T, power *PowerStat, enabled bool) { + require.Equal(t, enabled, power.cpuFrequency) + require.Equal(t, enabled, power.cpuC1StateResidency) + require.Equal(t, enabled, power.cpuC6StateResidency) + require.Equal(t, enabled, power.cpuBusyCycles) + require.Equal(t, enabled, power.cpuBusyFrequency) + require.Equal(t, enabled, power.cpuTemperature) +} + +func TestGather(t *testing.T) { + var acc testutil.Accumulator + packageIDs := []string{"0", "1"} + coreIDs := []string{"0", "1", "2", "3"} + socketCurrentEnergy := 13213852.2 + dramCurrentEnergy := 784552.0 + preparedCPUData := getPreparedCPUData(coreIDs) + raplDataMap := prepareRaplDataMap(packageIDs, socketCurrentEnergy, dramCurrentEnergy) + + power, _, raplMock, msrMock := getPowerWithMockedServices() + prepareCPUInfo(power, coreIDs, packageIDs) + enableCoreMetrics(power) + power.skipFirstIteration = false + + raplMock.On("initializeRaplData", mock.Anything). + On("getRaplData").Return(raplDataMap). + On("retrieveAndCalculateData", mock.Anything).Return(nil).Times(len(raplDataMap)). + On("getConstraintMaxPowerWatts", mock.Anything).Return(546783852.3, nil) + msrMock.On("getCPUCoresData").Return(preparedCPUData). + On("openAndReadMsr", mock.Anything).Return(nil). + On("retrieveCPUFrequencyForCore", mock.Anything).Return(1200000.2, nil) + + require.NoError(t, power.Gather(&acc)) + // Number of global metrics : 3 + // Number of per core metrics : 6 + require.Equal(t, 3*len(packageIDs)+6*len(coreIDs), len(acc.GetTelegrafMetrics())) +} + +func TestAddGlobalMetricsNegative(t *testing.T) { + var acc testutil.Accumulator + socketCurrentEnergy := 13213852.2 + dramCurrentEnergy := 784552.0 + raplDataMap := prepareRaplDataMap([]string{"0", "1"}, socketCurrentEnergy, dramCurrentEnergy) + power, _, raplMock, _ := getPowerWithMockedServices() + power.skipFirstIteration = false + raplMock.On("initializeRaplData", mock.Anything).Once(). + On("getRaplData").Return(raplDataMap).Once(). + On("retrieveAndCalculateData", mock.Anything).Return(errors.New("error while calculating data")).Times(len(raplDataMap)) + + power.addGlobalMetrics(&acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) + raplMock.AssertNumberOfCalls(t, "retrieveAndCalculateData", len(raplDataMap)) + + raplMock.On("initializeRaplData", mock.Anything).Once(). + On("getRaplData").Return(make(map[string]*raplData)).Once() + + power.addGlobalMetrics(&acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) + raplMock.AssertNotCalled(t, "retrieveAndCalculateData") + + raplMock.On("initializeRaplData", mock.Anything).Once(). + On("getRaplData").Return(raplDataMap). + On("retrieveAndCalculateData", mock.Anything).Return(nil).Once(). + On("retrieveAndCalculateData", mock.Anything).Return(errors.New("error while calculating data")).Once(). + On("getConstraintMaxPowerWatts", mock.Anything).Return(12313851.5, nil).Twice() + + power.addGlobalMetrics(&acc) + require.Equal(t, 3, len(acc.GetTelegrafMetrics())) +} + +func TestAddGlobalMetricsPositive(t *testing.T) { + var acc testutil.Accumulator + socketCurrentEnergy := 3644574.4 + dramCurrentEnergy := 124234872.5 + raplDataMap := prepareRaplDataMap([]string{"0", "1"}, socketCurrentEnergy, dramCurrentEnergy) + maxPower := 546783852.9 + power, _, raplMock, _ := getPowerWithMockedServices() + power.skipFirstIteration = false + + raplMock.On("initializeRaplData", mock.Anything). + On("getRaplData").Return(raplDataMap). + On("retrieveAndCalculateData", mock.Anything).Return(nil).Times(len(raplDataMap)). + On("getConstraintMaxPowerWatts", mock.Anything).Return(maxPower, nil).Twice(). + On("getCurrentDramPowerConsumption", mock.Anything).Return(dramCurrentEnergy) + + power.addGlobalMetrics(&acc) + require.Equal(t, 6, len(acc.GetTelegrafMetrics())) + + expectedResults := getGlobalMetrics(maxPower, socketCurrentEnergy, dramCurrentEnergy) + for _, test := range expectedResults { + acc.AssertContainsTaggedFields(t, "powerstat_package", test.fields, test.tags) + } +} + +func TestAddMetricsForSingleCoreNegative(t *testing.T) { + var wg sync.WaitGroup + var acc testutil.Accumulator + core := "0" + power, _, _, msrMock := getPowerWithMockedServices() + + msrMock.On("openAndReadMsr", core).Return(errors.New("error reading MSR file")).Once() + + // Skip generating metric for CPU frequency. + power.cpuFrequency = false + + wg.Add(1) + power.addMetricsForSingleCore(core, &acc, &wg) + wg.Wait() + + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddCPUFrequencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + frequency := 1200000.2 + power, _, _, msrMock := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + + msrMock.On("retrieveCPUFrequencyForCore", mock.Anything). + Return(float64(0), errors.New("error on reading file")).Once() + + power.addCPUFrequencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) + + msrMock.On("retrieveCPUFrequencyForCore", mock.Anything).Return(frequency, nil).Once() + + power.addCPUFrequencyMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedFrequency := roundFloatToNearestTwoDecimalPlaces(frequency) + expectedMetric := getPowerCoreMetric("cpu_frequency_mhz", expectedFrequency, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) +} + +func TestAddCoreCPUTemperatureMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, _, _, msrMock := getPowerWithMockedServices() + preparedData := getPreparedCPUData([]string{cpuID}) + expectedTemp := preparedData[cpuID].throttleTemp - preparedData[cpuID].temp + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + + msrMock.On("getCPUCoresData").Return(preparedData).Once() + power.addCPUTemperatureMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerCoreMetric("cpu_temperature_celsius", expectedTemp, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) +} + +func TestAddC6StateResidencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, _, _, msrMock := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + expectedC6 := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * + float64(preparedData[cpuID].c6Delta) / float64(preparedData[cpuID].timeStampCounterDelta)) + + msrMock.On("getCPUCoresData").Return(preparedData).Twice() + power.addCPUC6StateResidencyMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerCoreMetric("cpu_c6_state_residency_percent", expectedC6, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) + + acc.ClearMetrics() + preparedData[cpuID].timeStampCounterDelta = 0 + + power.addCPUC6StateResidencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddProcessorBusyCyclesMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, _, _, msrMock := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + expectedBusyCycles := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * float64(preparedData[cpuID].mperfDelta) / + float64(preparedData[cpuID].timeStampCounterDelta)) + + msrMock.On("getCPUCoresData").Return(preparedData).Twice() + power.addCPUBusyCyclesMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerCoreMetric("cpu_busy_cycles_percent", expectedBusyCycles, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) + + acc.ClearMetrics() + preparedData[cpuID].timeStampCounterDelta = 0 + power.addCPUBusyCyclesMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddProcessorBusyFrequencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, _, _, msrMock := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + power.skipFirstIteration = false + + msrMock.On("getCPUCoresData").Return(preparedData).Twice() + power.addCPUBusyFrequencyMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + acc.ClearMetrics() + preparedData[cpuID].mperfDelta = 0 + power.addCPUBusyFrequencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddC1StateResidencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, _, _, msrMock := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + c1 := preparedData[cpuID].timeStampCounterDelta - preparedData[cpuID].mperfDelta - preparedData[cpuID].c3Delta - + preparedData[cpuID].c6Delta - preparedData[cpuID].c7Delta + expectedC1 := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * float64(c1) / float64(preparedData[cpuID].timeStampCounterDelta)) + + msrMock.On("getCPUCoresData").Return(preparedData).Twice() + + power.addCPUC1StateResidencyMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerCoreMetric("cpu_c1_state_residency_percent", expectedC1, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) + + acc.ClearMetrics() + preparedData[cpuID].timeStampCounterDelta = 0 + power.addCPUC1StateResidencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddThermalDesignPowerMetric(t *testing.T) { + var acc testutil.Accumulator + sockets := []string{"0"} + maxPower := 195720672.1 + power, _, raplMock, _ := getPowerWithMockedServices() + + raplMock.On("getConstraintMaxPowerWatts", mock.Anything). + Return(float64(0), errors.New("getConstraintMaxPowerWatts error")).Once(). + On("getConstraintMaxPowerWatts", mock.Anything).Return(maxPower, nil).Once() + + power.addThermalDesignPowerMetric(sockets[0], &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) + + power.addThermalDesignPowerMetric(sockets[0], &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedTDP := roundFloatToNearestTwoDecimalPlaces(maxPower) + expectedMetric := getPowerGlobalMetric("thermal_design_power_watts", expectedTDP, sockets[0]) + acc.AssertContainsTaggedFields(t, "powerstat_package", expectedMetric.fields, expectedMetric.tags) +} + +func getPreparedCPUData(cores []string) map[string]*msrData { + msrDataMap := make(map[string]*msrData) + + for _, core := range cores { + msrDataMap[core] = &msrData{ + mperf: 43079, + aperf: 82001, + timeStampCounter: 15514, + c3: 52829, + c6: 86930, + c7: 25340, + throttleTemp: 88150, + temp: 40827, + mperfDelta: 23515, + aperfDelta: 33866, + timeStampCounterDelta: 13686000, + c3Delta: 20003, + c6Delta: 44518, + c7Delta: 20979, + } + } + + return msrDataMap +} + +func getGlobalMetrics(maxPower float64, socketCurrentEnergy float64, dramCurrentEnergy float64) []struct { + fields map[string]interface{} + tags map[string]string +} { + return []struct { + fields map[string]interface{} + tags map[string]string + }{ + getPowerGlobalMetric("thermal_design_power_watts", roundFloatToNearestTwoDecimalPlaces(maxPower), "0"), + getPowerGlobalMetric("thermal_design_power_watts", roundFloatToNearestTwoDecimalPlaces(maxPower), "1"), + getPowerGlobalMetric("current_power_consumption_watts", roundFloatToNearestTwoDecimalPlaces(socketCurrentEnergy), "0"), + getPowerGlobalMetric("current_power_consumption_watts", roundFloatToNearestTwoDecimalPlaces(socketCurrentEnergy), "1"), + getPowerGlobalMetric("current_dram_power_consumption_watts", roundFloatToNearestTwoDecimalPlaces(dramCurrentEnergy), "0"), + getPowerGlobalMetric("current_dram_power_consumption_watts", roundFloatToNearestTwoDecimalPlaces(dramCurrentEnergy), "1"), + } +} + +func getPowerCoreMetric(name string, value interface{}, coreID string, packageID string, cpuID string) struct { + fields map[string]interface{} + tags map[string]string +} { + return getPowerMetric(name, value, map[string]string{"package_id": packageID, "core_id": coreID, "cpu_id": cpuID}) +} + +func getPowerGlobalMetric(name string, value interface{}, socketID string) struct { + fields map[string]interface{} + tags map[string]string +} { + return getPowerMetric(name, value, map[string]string{"package_id": socketID}) +} + +func getPowerMetric(name string, value interface{}, tags map[string]string) struct { + fields map[string]interface{} + tags map[string]string +} { + return struct { + fields map[string]interface{} + tags map[string]string + }{ + map[string]interface{}{ + name: value, + }, + tags, + } +} + +func prepareCPUInfoForSingleCPU(power *PowerStat, cpuID string, coreID string, packageID string) { + power.cpuInfo = make(map[string]*cpuInfo) + power.cpuInfo[cpuID] = &cpuInfo{ + physicalID: packageID, + coreID: coreID, + cpuID: cpuID, + } +} + +func prepareCPUInfo(power *PowerStat, coreIDs []string, packageIDs []string) { + power.cpuInfo = make(map[string]*cpuInfo) + currentCPU := 0 + for _, packageID := range packageIDs { + for _, coreID := range coreIDs { + cpuID := strconv.Itoa(currentCPU) + power.cpuInfo[cpuID] = &cpuInfo{ + physicalID: packageID, + cpuID: cpuID, + coreID: coreID, + } + currentCPU++ + } + } +} + +func enableCoreMetrics(power *PowerStat) { + power.cpuC1StateResidency = true + power.cpuC6StateResidency = true + power.cpuTemperature = true + power.cpuBusyFrequency = true + power.cpuFrequency = true + power.cpuBusyCycles = true +} + +func disableCoreMetrics(power *PowerStat) { + power.cpuC1StateResidency = false + power.cpuC6StateResidency = false + power.cpuTemperature = false + power.cpuBusyFrequency = false + power.cpuFrequency = false + power.cpuBusyCycles = false +} + +func prepareRaplDataMap(socketIDs []string, socketCurrentEnergy float64, dramCurrentEnergy float64) map[string]*raplData { + raplDataMap := make(map[string]*raplData, len(socketIDs)) + for _, socketID := range socketIDs { + raplDataMap[socketID] = &raplData{ + socketCurrentEnergy: socketCurrentEnergy, + dramCurrentEnergy: dramCurrentEnergy, + } + } + + return raplDataMap +} + +func getPowerWithMockedServices() (*PowerStat, *mockFileService, *mockRaplService, *mockMsrService) { + fsMock := &mockFileService{} + msrMock := &mockMsrService{} + raplMock := &mockRaplService{} + logger := testutil.Logger{Name: "PowerPluginTest"} + p := newPowerStat(fsMock) + p.Log = logger + p.fs = fsMock + p.rapl = raplMock + p.msr = msrMock + + return p, fsMock, raplMock, msrMock +} diff --git a/plugins/inputs/intel_powerstat/msr.go b/plugins/inputs/intel_powerstat/msr.go new file mode 100644 index 0000000000000..8d39164d6e783 --- /dev/null +++ b/plugins/inputs/intel_powerstat/msr.go @@ -0,0 +1,207 @@ +// +build linux + +package intel_powerstat + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "golang.org/x/sync/errgroup" + + "github.com/influxdata/telegraf" +) + +const ( + systemCPUPath = "/sys/devices/system/cpu/" + cpuCurrentFreqPartialPath = "/sys/devices/system/cpu/cpu%s/cpufreq/scaling_cur_freq" + msrPartialPath = "/dev/cpu/%s/msr" + c3StateResidencyLocation = 0x3FC + c6StateResidencyLocation = 0x3FD + c7StateResidencyLocation = 0x3FE + maximumFrequencyClockCountLocation = 0xE7 + actualFrequencyClockCountLocation = 0xE8 + throttleTemperatureLocation = 0x1A2 + temperatureLocation = 0x19C + timestampCounterLocation = 0x10 +) + +// msrService is responsible for interactions with MSR. +type msrService interface { + getCPUCoresData() map[string]*msrData + retrieveCPUFrequencyForCore(core string) (float64, error) + openAndReadMsr(core string) error +} + +type msrServiceImpl struct { + cpuCoresData map[string]*msrData + msrOffsets []int64 + fs fileService + log telegraf.Logger +} + +func (m *msrServiceImpl) getCPUCoresData() map[string]*msrData { + return m.cpuCoresData +} + +func (m *msrServiceImpl) retrieveCPUFrequencyForCore(core string) (float64, error) { + cpuFreqPath := fmt.Sprintf(cpuCurrentFreqPartialPath, core) + cpuFreqFile, err := os.Open(cpuFreqPath) + if err != nil { + return 0, fmt.Errorf("error opening scaling_cur_freq file on path %s, err: %v", cpuFreqPath, err) + } + defer cpuFreqFile.Close() + + cpuFreq, _, err := m.fs.readFileToFloat64(cpuFreqFile) + return convertKiloHertzToMegaHertz(cpuFreq), err +} + +func (m *msrServiceImpl) openAndReadMsr(core string) error { + path := fmt.Sprintf(msrPartialPath, core) + msrFile, err := os.Open(path) + if err != nil { + return fmt.Errorf("error opening MSR file on path %s, err: %v", path, err) + } + defer msrFile.Close() + + err = m.readDataFromMsr(core, msrFile) + if err != nil { + return fmt.Errorf("error reading data from MSR for core %s, err: %v", core, err) + } + return nil +} + +func (m *msrServiceImpl) readDataFromMsr(core string, reader io.ReaderAt) error { + g, ctx := errgroup.WithContext(context.Background()) + + // Create and populate a map that contains msr offsets along with their respective channels + msrOffsetsWithChannels := make(map[int64]chan uint64) + for _, offset := range m.msrOffsets { + msrOffsetsWithChannels[offset] = make(chan uint64) + } + + // Start a goroutine for each msr offset + for offset, channel := range msrOffsetsWithChannels { + // Wrap around function to avoid race on loop counter + func(off int64, ch chan uint64) { + g.Go(func() error { + defer close(ch) + + err := m.readValueFromFileAtOffset(ctx, ch, reader, off) + if err != nil { + return fmt.Errorf("error reading MSR file, err: %v", err) + } + + return nil + }) + }(offset, channel) + } + + newC3 := <-msrOffsetsWithChannels[c3StateResidencyLocation] + newC6 := <-msrOffsetsWithChannels[c6StateResidencyLocation] + newC7 := <-msrOffsetsWithChannels[c7StateResidencyLocation] + newMperf := <-msrOffsetsWithChannels[maximumFrequencyClockCountLocation] + newAperf := <-msrOffsetsWithChannels[actualFrequencyClockCountLocation] + newTsc := <-msrOffsetsWithChannels[timestampCounterLocation] + newThrottleTemp := <-msrOffsetsWithChannels[throttleTemperatureLocation] + newTemp := <-msrOffsetsWithChannels[temperatureLocation] + + if err := g.Wait(); err != nil { + return fmt.Errorf("received error during reading MSR values in goroutines: %v", err) + } + + m.cpuCoresData[core].c3Delta = newC3 - m.cpuCoresData[core].c3 + m.cpuCoresData[core].c6Delta = newC6 - m.cpuCoresData[core].c6 + m.cpuCoresData[core].c7Delta = newC7 - m.cpuCoresData[core].c7 + m.cpuCoresData[core].mperfDelta = newMperf - m.cpuCoresData[core].mperf + m.cpuCoresData[core].aperfDelta = newAperf - m.cpuCoresData[core].aperf + m.cpuCoresData[core].timeStampCounterDelta = newTsc - m.cpuCoresData[core].timeStampCounter + + m.cpuCoresData[core].c3 = newC3 + m.cpuCoresData[core].c6 = newC6 + m.cpuCoresData[core].c7 = newC7 + m.cpuCoresData[core].mperf = newMperf + m.cpuCoresData[core].aperf = newAperf + m.cpuCoresData[core].timeStampCounter = newTsc + // MSR (1A2h) IA32_TEMPERATURE_TARGET bits 23:16. + m.cpuCoresData[core].throttleTemp = (newThrottleTemp >> 16) & 0xFF + // MSR (19Ch) IA32_THERM_STATUS bits 22:16. + m.cpuCoresData[core].temp = (newTemp >> 16) & 0x7F + + return nil +} + +func (m *msrServiceImpl) readValueFromFileAtOffset(ctx context.Context, ch chan uint64, reader io.ReaderAt, offset int64) error { + value, err := m.fs.readFileAtOffsetToUint64(reader, offset) + if err != nil { + return err + } + + // Detect context cancellation and return an error if other goroutine fails + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- value: + } + + return nil +} + +// setCPUCores initialize cpuCoresData map. +func (m *msrServiceImpl) setCPUCores() error { + m.cpuCoresData = make(map[string]*msrData) + cpuPrefix := "cpu" + cpuCore := fmt.Sprintf("%s%s", cpuPrefix, "[0-9]*") + cpuCorePattern := fmt.Sprintf("%s/%s", systemCPUPath, cpuCore) + cpuPaths, err := m.fs.getStringsMatchingPatternOnPath(cpuCorePattern) + if err != nil { + return err + } + if len(cpuPaths) == 0 { + m.log.Debugf("CPU core data wasn't found using pattern: %s", cpuCorePattern) + return nil + } + + for _, cpuPath := range cpuPaths { + core := strings.TrimPrefix(filepath.Base(cpuPath), cpuPrefix) + m.cpuCoresData[core] = &msrData{ + mperf: 0, + aperf: 0, + timeStampCounter: 0, + c3: 0, + c6: 0, + c7: 0, + throttleTemp: 0, + temp: 0, + mperfDelta: 0, + aperfDelta: 0, + timeStampCounterDelta: 0, + c3Delta: 0, + c6Delta: 0, + c7Delta: 0, + } + } + + return nil +} + +func newMsrServiceWithFs(logger telegraf.Logger, fs fileService) *msrServiceImpl { + msrService := &msrServiceImpl{ + fs: fs, + log: logger, + } + err := msrService.setCPUCores() + if err != nil { + // This error does not prevent plugin from working thus it is not returned. + msrService.log.Error(err) + } + + msrService.msrOffsets = []int64{c3StateResidencyLocation, c6StateResidencyLocation, c7StateResidencyLocation, + maximumFrequencyClockCountLocation, actualFrequencyClockCountLocation, timestampCounterLocation, + throttleTemperatureLocation, temperatureLocation} + + return msrService +} diff --git a/plugins/inputs/intel_powerstat/msr_mock_test.go b/plugins/inputs/intel_powerstat/msr_mock_test.go new file mode 100644 index 0000000000000..4ca80e8a871bf --- /dev/null +++ b/plugins/inputs/intel_powerstat/msr_mock_test.go @@ -0,0 +1,61 @@ +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. + +package intel_powerstat + +import mock "github.com/stretchr/testify/mock" + +// mockMsrService is an autogenerated mock type for the msrService type +type mockMsrService struct { + mock.Mock +} + +// getCPUCoresData provides a mock function with given fields: +func (_m *mockMsrService) getCPUCoresData() map[string]*msrData { + ret := _m.Called() + + var r0 map[string]*msrData + if rf, ok := ret.Get(0).(func() map[string]*msrData); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*msrData) + } + } + + return r0 +} + +// openAndReadMsr provides a mock function with given fields: core +func (_m *mockMsrService) openAndReadMsr(core string) error { + ret := _m.Called(core) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(core) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// retrieveCPUFrequencyForCore provides a mock function with given fields: core +func (_m *mockMsrService) retrieveCPUFrequencyForCore(core string) (float64, error) { + ret := _m.Called(core) + + var r0 float64 + if rf, ok := ret.Get(0).(func(string) float64); ok { + r0 = rf(core) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(core) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/plugins/inputs/intel_powerstat/msr_test.go b/plugins/inputs/intel_powerstat/msr_test.go new file mode 100644 index 0000000000000..945716e15a105 --- /dev/null +++ b/plugins/inputs/intel_powerstat/msr_test.go @@ -0,0 +1,134 @@ +// +build linux + +package intel_powerstat + +import ( + "context" + "errors" + "strings" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestReadDataFromMsrPositive(t *testing.T) { + firstValue := uint64(1000000) + secondValue := uint64(5000000) + delta := secondValue - firstValue + cpuCores := []string{"cpu0", "cpu1"} + msr, fsMock := getMsrServiceWithMockedFs() + prepareTestData(fsMock, cpuCores, msr, t) + cores := trimCPUFromCores(cpuCores) + + methodCallNumberForFirstValue := len(msr.msrOffsets) * len(cores) + methodCallNumberForSecondValue := methodCallNumberForFirstValue * 2 + + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(firstValue, nil).Times(methodCallNumberForFirstValue) + for _, core := range cores { + require.NoError(t, msr.readDataFromMsr(core, nil)) + } + fsMock.AssertNumberOfCalls(t, "readFileAtOffsetToUint64", methodCallNumberForFirstValue) + verifyCPUCoresData(cores, t, msr, firstValue, false, 0) + + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(secondValue, nil).Times(methodCallNumberForFirstValue) + for _, core := range cores { + require.NoError(t, msr.readDataFromMsr(core, nil)) + } + fsMock.AssertNumberOfCalls(t, "readFileAtOffsetToUint64", methodCallNumberForSecondValue) + verifyCPUCoresData(cores, t, msr, secondValue, true, delta) +} + +func trimCPUFromCores(cpuCores []string) []string { + cores := make([]string, 0) + for _, core := range cpuCores { + cores = append(cores, strings.TrimPrefix(core, "cpu")) + } + return cores +} + +func TestReadDataFromMsrNegative(t *testing.T) { + firstValue := uint64(1000000) + cpuCores := []string{"cpu0", "cpu1"} + msr, fsMock := getMsrServiceWithMockedFs() + + prepareTestData(fsMock, cpuCores, msr, t) + cores := trimCPUFromCores(cpuCores) + + methodCallNumberPerCore := len(msr.msrOffsets) + + // Normal execution for first core. + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(firstValue, nil).Times(methodCallNumberPerCore). + // Fail to read file for second core. + On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(uint64(0), errors.New("error reading file")).Times(methodCallNumberPerCore) + + require.NoError(t, msr.readDataFromMsr(cores[0], nil)) + require.Error(t, msr.readDataFromMsr(cores[1], nil)) +} + +func TestReadValueFromFileAtOffset(t *testing.T) { + cores := []string{"cpu0", "cpu1"} + msr, fsMock := getMsrServiceWithMockedFs() + ctx := context.Background() + testChannel := make(chan uint64, 1) + defer close(testChannel) + zero := uint64(0) + + prepareTestData(fsMock, cores, msr, t) + + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(zero, errors.New("error reading file")).Once() + require.Error(t, msr.readValueFromFileAtOffset(ctx, testChannel, nil, 0)) + + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(zero, nil).Once() + require.Equal(t, nil, msr.readValueFromFileAtOffset(ctx, testChannel, nil, 0)) + require.Equal(t, zero, <-testChannel) +} + +func prepareTestData(fsMock *mockFileService, cores []string, msr *msrServiceImpl, t *testing.T) { + // Prepare MSR offsets and CPUCoresData for test. + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(cores, nil).Once() + require.NoError(t, msr.setCPUCores()) + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) +} + +func verifyCPUCoresData(cores []string, t *testing.T, msr *msrServiceImpl, expectedValue uint64, verifyDelta bool, delta uint64) { + for _, core := range cores { + require.Equal(t, expectedValue, msr.cpuCoresData[core].c3) + require.Equal(t, expectedValue, msr.cpuCoresData[core].c6) + require.Equal(t, expectedValue, msr.cpuCoresData[core].c7) + require.Equal(t, expectedValue, msr.cpuCoresData[core].mperf) + require.Equal(t, expectedValue, msr.cpuCoresData[core].aperf) + require.Equal(t, expectedValue, msr.cpuCoresData[core].timeStampCounter) + require.Equal(t, (expectedValue>>16)&0xFF, msr.cpuCoresData[core].throttleTemp) + require.Equal(t, (expectedValue>>16)&0x7F, msr.cpuCoresData[core].temp) + + if verifyDelta { + require.Equal(t, delta, msr.cpuCoresData[core].c3Delta) + require.Equal(t, delta, msr.cpuCoresData[core].c6Delta) + require.Equal(t, delta, msr.cpuCoresData[core].c7Delta) + require.Equal(t, delta, msr.cpuCoresData[core].mperfDelta) + require.Equal(t, delta, msr.cpuCoresData[core].aperfDelta) + require.Equal(t, delta, msr.cpuCoresData[core].timeStampCounterDelta) + } + } +} + +func getMsrServiceWithMockedFs() (*msrServiceImpl, *mockFileService) { + cores := []string{"cpu0", "cpu1", "cpu2", "cpu3"} + logger := testutil.Logger{Name: "PowerPluginTest"} + fsMock := &mockFileService{} + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(cores, nil).Once() + msr := newMsrServiceWithFs(logger, fsMock) + + return msr, fsMock +} diff --git a/plugins/inputs/intel_powerstat/rapl.go b/plugins/inputs/intel_powerstat/rapl.go new file mode 100644 index 0000000000000..17d66ff3aea4b --- /dev/null +++ b/plugins/inputs/intel_powerstat/rapl.go @@ -0,0 +1,238 @@ +// +build linux + +package intel_powerstat + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/influxdata/telegraf" +) + +const ( + intelRaplPath = "/sys/devices/virtual/powercap/intel-rapl" + intelRaplSocketPartialPath = "%s/intel-rapl:%s" + energyUjPartialPath = "%s/energy_uj" + maxEnergyRangeUjPartialPath = "%s/max_energy_range_uj" + maxPowerUwPartialPath = "%s/constraint_0_max_power_uw" + intelRaplDramPartialPath = "%s/intel-rapl:%s/%s" + intelRaplDramNamePartialPath = "%s/name" +) + +// raplService is responsible for interactions with RAPL. +type raplService interface { + initializeRaplData() + getRaplData() map[string]*raplData + retrieveAndCalculateData(socketID string) error + getConstraintMaxPowerWatts(socketID string) (float64, error) +} + +type raplServiceImpl struct { + log telegraf.Logger + data map[string]*raplData + dramFolders map[string]string + fs fileService +} + +// initializeRaplData looks for RAPL folders and initializes data map with fetched information. +func (r *raplServiceImpl) initializeRaplData() { + r.prepareData() + r.findDramFolders() +} + +func (r *raplServiceImpl) getRaplData() map[string]*raplData { + return r.data +} + +func (r *raplServiceImpl) retrieveAndCalculateData(socketID string) error { + socketRaplPath := fmt.Sprintf(intelRaplSocketPartialPath, intelRaplPath, socketID) + socketEnergyUjPath := fmt.Sprintf(energyUjPartialPath, socketRaplPath) + socketEnergyUjFile, err := os.Open(socketEnergyUjPath) + if err != nil { + return fmt.Errorf("error opening socket energy_uj file on path %s, err: %v", socketEnergyUjPath, err) + } + defer socketEnergyUjFile.Close() + + dramRaplPath := fmt.Sprintf(intelRaplDramPartialPath, intelRaplPath, socketID, r.dramFolders[socketID]) + dramEnergyUjPath := fmt.Sprintf(energyUjPartialPath, dramRaplPath) + dramEnergyUjFile, err := os.Open(dramEnergyUjPath) + if err != nil { + return fmt.Errorf("error opening dram energy_uj file on path %s, err: %v", dramEnergyUjPath, err) + } + defer dramEnergyUjFile.Close() + + socketMaxEnergyUjPath := fmt.Sprintf(maxEnergyRangeUjPartialPath, socketRaplPath) + socketMaxEnergyUjFile, err := os.Open(socketMaxEnergyUjPath) + if err != nil { + return fmt.Errorf("error opening socket max_energy_range_uj file on path %s, err: %v", socketMaxEnergyUjPath, err) + } + defer socketMaxEnergyUjFile.Close() + + dramMaxEnergyUjPath := fmt.Sprintf(maxEnergyRangeUjPartialPath, dramRaplPath) + dramMaxEnergyUjFile, err := os.Open(dramMaxEnergyUjPath) + if err != nil { + return fmt.Errorf("error opening dram max_energy_range_uj file on path %s, err: %v", dramMaxEnergyUjPath, err) + } + defer dramMaxEnergyUjFile.Close() + + return r.calculateData(socketID, socketEnergyUjFile, dramEnergyUjFile, socketMaxEnergyUjFile, dramMaxEnergyUjFile) +} + +func (r *raplServiceImpl) getConstraintMaxPowerWatts(socketID string) (float64, error) { + socketRaplPath := fmt.Sprintf(intelRaplSocketPartialPath, intelRaplPath, socketID) + socketMaxPowerPath := fmt.Sprintf(maxPowerUwPartialPath, socketRaplPath) + socketMaxPowerFile, err := os.Open(socketMaxPowerPath) + if err != nil { + return 0, fmt.Errorf("error opening constraint_0_max_power_uw file on path %s, err: %v", socketMaxPowerPath, err) + } + defer socketMaxPowerFile.Close() + + socketMaxPower, _, err := r.fs.readFileToFloat64(socketMaxPowerFile) + return convertMicroWattToWatt(socketMaxPower), err + +} + +func (r *raplServiceImpl) prepareData() { + intelRaplPrefix := "intel-rapl:" + intelRapl := fmt.Sprintf("%s%s", intelRaplPrefix, "[0-9]*") + raplPattern := fmt.Sprintf("%s/%s", intelRaplPath, intelRapl) + + raplPaths, err := r.fs.getStringsMatchingPatternOnPath(raplPattern) + if err != nil { + r.log.Errorf("error while preparing RAPL data: %v", err) + r.data = make(map[string]*raplData) + return + } + if len(raplPaths) == 0 { + r.log.Debugf("RAPL data wasn't found using pattern: %s", raplPattern) + r.data = make(map[string]*raplData) + return + } + + // If RAPL exists initialize data map (if it wasn't initialized before). + if len(r.data) == 0 { + for _, raplPath := range raplPaths { + socketID := strings.TrimPrefix(filepath.Base(raplPath), intelRaplPrefix) + r.data[socketID] = &raplData{ + socketCurrentEnergy: 0, + dramCurrentEnergy: 0, + socketEnergy: 0, + dramEnergy: 0, + readDate: 0, + } + } + } +} + +func (r *raplServiceImpl) findDramFolders() { + intelRaplPrefix := "intel-rapl:" + intelRaplDram := fmt.Sprintf("%s%s", intelRaplPrefix, "[0-9]*[0-9]*") + // Clean existing map + r.dramFolders = make(map[string]string) + + for socketID := range r.data { + path := fmt.Sprintf(intelRaplSocketPartialPath, intelRaplPath, socketID) + raplFoldersPattern := fmt.Sprintf("%s/%s", path, intelRaplDram) + pathsToRaplFolders, err := r.fs.getStringsMatchingPatternOnPath(raplFoldersPattern) + if err != nil { + r.log.Errorf("error during lookup for rapl dram: %v", err) + continue + } + if len(pathsToRaplFolders) == 0 { + r.log.Debugf("RAPL folders weren't found using pattern: %s", raplFoldersPattern) + continue + } + + raplFolders := make([]string, 0) + for _, folderPath := range pathsToRaplFolders { + raplFolders = append(raplFolders, filepath.Base(folderPath)) + } + + r.findDramFolder(raplFolders, socketID) + } +} + +func (r *raplServiceImpl) findDramFolder(raplFolders []string, socketID string) { + for _, raplFolder := range raplFolders { + potentialDramPath := fmt.Sprintf(intelRaplDramPartialPath, intelRaplPath, socketID, raplFolder) + nameFilePath := fmt.Sprintf(intelRaplDramNamePartialPath, potentialDramPath) + read, err := r.fs.readFile(nameFilePath) + if err != nil { + r.log.Errorf("error reading file on path: %s, err: %v", nameFilePath, err) + continue + } + + // Remove new line character + trimmedString := strings.TrimRight(string(read), "\n") + if trimmedString == "dram" { + // There should be only one DRAM folder per socket + r.dramFolders[socketID] = raplFolder + return + } + } +} + +func (r *raplServiceImpl) calculateData(socketID string, socketEnergyUjFile io.Reader, dramEnergyUjFile io.Reader, + socketMaxEnergyUjFile io.Reader, dramMaxEnergyUjFile io.Reader) error { + + newSocketEnergy, _, err := r.readEnergyInJoules(socketEnergyUjFile) + if err != nil { + return err + } + + newDramEnergy, readDate, err := r.readEnergyInJoules(dramEnergyUjFile) + if err != nil { + return err + } + + interval := convertNanoSecondsToSeconds(readDate - r.data[socketID].readDate) + r.data[socketID].readDate = readDate + if interval == 0 { + return fmt.Errorf("interval between last two Telegraf cycles is 0") + } + + if newSocketEnergy > r.data[socketID].socketEnergy { + r.data[socketID].socketCurrentEnergy = (newSocketEnergy - r.data[socketID].socketEnergy) / interval + } else { + socketMaxEnergy, _, err := r.readEnergyInJoules(socketMaxEnergyUjFile) + if err != nil { + return err + } + // When socket energy_uj counter reaches maximum value defined in max_energy_range_uj file it + // starts counting from 0. + r.data[socketID].socketCurrentEnergy = (socketMaxEnergy - r.data[socketID].socketEnergy + newSocketEnergy) / interval + } + + if newDramEnergy > r.data[socketID].dramEnergy { + r.data[socketID].dramCurrentEnergy = (newDramEnergy - r.data[socketID].dramEnergy) / interval + } else { + dramMaxEnergy, _, err := r.readEnergyInJoules(dramMaxEnergyUjFile) + if err != nil { + return err + } + // When dram energy_uj counter reaches maximum value defined in max_energy_range_uj file it + // starts counting from 0. + r.data[socketID].dramCurrentEnergy = (dramMaxEnergy - r.data[socketID].dramEnergy + newDramEnergy) / interval + } + r.data[socketID].socketEnergy = newSocketEnergy + r.data[socketID].dramEnergy = newDramEnergy + + return nil +} + +func (r *raplServiceImpl) readEnergyInJoules(reader io.Reader) (float64, int64, error) { + currentEnergy, readDate, err := r.fs.readFileToFloat64(reader) + return convertMicroJoulesToJoules(currentEnergy), readDate, err +} + +func newRaplServiceWithFs(logger telegraf.Logger, fs fileService) *raplServiceImpl { + return &raplServiceImpl{ + log: logger, + data: make(map[string]*raplData), + dramFolders: make(map[string]string), + fs: fs, + } +} diff --git a/plugins/inputs/intel_powerstat/rapl_mock_test.go b/plugins/inputs/intel_powerstat/rapl_mock_test.go new file mode 100644 index 0000000000000..7742db140ccf1 --- /dev/null +++ b/plugins/inputs/intel_powerstat/rapl_mock_test.go @@ -0,0 +1,66 @@ +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. + +package intel_powerstat + +import mock "github.com/stretchr/testify/mock" + +// mockRaplService is an autogenerated mock type for the raplService type +type mockRaplService struct { + mock.Mock +} + +// getConstraintMaxPowerWatts provides a mock function with given fields: socketID +func (_m *mockRaplService) getConstraintMaxPowerWatts(socketID string) (float64, error) { + ret := _m.Called(socketID) + + var r0 float64 + if rf, ok := ret.Get(0).(func(string) float64); ok { + r0 = rf(socketID) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(socketID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// getRaplData provides a mock function with given fields: +func (_m *mockRaplService) getRaplData() map[string]*raplData { + ret := _m.Called() + + var r0 map[string]*raplData + if rf, ok := ret.Get(0).(func() map[string]*raplData); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*raplData) + } + } + + return r0 +} + +// initializeRaplData provides a mock function with given fields: +func (_m *mockRaplService) initializeRaplData() { + _m.Called() +} + +// retrieveAndCalculateData provides a mock function with given fields: socketID +func (_m *mockRaplService) retrieveAndCalculateData(socketID string) error { + ret := _m.Called(socketID) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(socketID) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/plugins/inputs/intel_powerstat/rapl_test.go b/plugins/inputs/intel_powerstat/rapl_test.go new file mode 100644 index 0000000000000..551f06f890ea4 --- /dev/null +++ b/plugins/inputs/intel_powerstat/rapl_test.go @@ -0,0 +1,115 @@ +// +build linux + +package intel_powerstat + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestPrepareData(t *testing.T) { + sockets := []string{"intel-rapl:0", "intel-rapl:1"} + rapl, fsMock := getRaplWithMockedFs() + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything).Return(sockets, nil).Twice() + rapl.prepareData() + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, len(sockets), len(rapl.getRaplData())) + + // Verify no data is wiped in the next calls + socketEnergy := 74563813417.0 + socketID := "0" + rapl.data[socketID].socketEnergy = socketEnergy + + rapl.prepareData() + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, len(sockets), len(rapl.getRaplData())) + require.Equal(t, socketEnergy, rapl.data[socketID].socketEnergy) + + // Verify data is wiped once there is no RAPL folders + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(nil, errors.New("missing RAPL")).Once() + rapl.prepareData() + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, 0, len(rapl.getRaplData())) +} + +func TestFindDramFolders(t *testing.T) { + sockets := []string{"0", "1"} + raplFolders := []string{"intel-rapl:0:1", "intel-rapl:0:2", "intel-rapl:0:3"} + rapl, fsMock := getRaplWithMockedFs() + + for _, socketID := range sockets { + rapl.data[socketID] = &raplData{} + } + + firstPath := fmt.Sprintf(intelRaplDramNamePartialPath, + fmt.Sprintf(intelRaplDramPartialPath, intelRaplPath, "0", raplFolders[2])) + secondPath := fmt.Sprintf(intelRaplDramNamePartialPath, + fmt.Sprintf(intelRaplDramPartialPath, intelRaplPath, "1", raplFolders[1])) + + fsMock. + On("getStringsMatchingPatternOnPath", mock.Anything).Return(raplFolders, nil).Twice(). + On("readFile", firstPath).Return([]byte("dram"), nil).Once(). + On("readFile", secondPath).Return([]byte("dram"), nil).Once(). + On("readFile", mock.Anything).Return([]byte("random"), nil) + + rapl.findDramFolders() + + require.Equal(t, len(sockets), len(rapl.dramFolders)) + require.Equal(t, raplFolders[2], rapl.dramFolders["0"]) + require.Equal(t, raplFolders[1], rapl.dramFolders["1"]) + fsMock.AssertNumberOfCalls(t, "readFile", 5) +} + +func TestCalculateDataOverflowCases(t *testing.T) { + socketID := "1" + rapl, fsMock := getRaplWithMockedFs() + + rapl.data[socketID] = &raplData{} + rapl.data[socketID].socketEnergy = convertMicroJoulesToJoules(23424123.1) + rapl.data[socketID].dramEnergy = convertMicroJoulesToJoules(345611233.2) + rapl.data[socketID].readDate = 54123 + + interval := int64(54343) + convertedInterval := convertNanoSecondsToSeconds(interval - rapl.data[socketID].readDate) + + newEnergy := 3343443.4 + maxEnergy := 234324546456.6 + convertedNewEnergy := convertMicroJoulesToJoules(newEnergy) + convertedMaxNewEnergy := convertMicroJoulesToJoules(maxEnergy) + + maxDramEnergy := 981230834098.3 + newDramEnergy := 4533311.1 + convertedMaxDramEnergy := convertMicroJoulesToJoules(maxDramEnergy) + convertedDramEnergy := convertMicroJoulesToJoules(newDramEnergy) + + expectedCurrentEnergy := (convertedMaxNewEnergy - rapl.data[socketID].socketEnergy + convertedNewEnergy) / convertedInterval + expectedDramCurrentEnergy := (convertedMaxDramEnergy - rapl.data[socketID].dramEnergy + convertedDramEnergy) / convertedInterval + + fsMock. + On("readFileToFloat64", mock.Anything).Return(newEnergy, int64(12321), nil).Once(). + On("readFileToFloat64", mock.Anything).Return(newDramEnergy, interval, nil).Once(). + On("readFileToFloat64", mock.Anything).Return(maxEnergy, int64(64534), nil).Once(). + On("readFileToFloat64", mock.Anything).Return(maxDramEnergy, int64(98342), nil).Once() + + require.NoError(t, rapl.calculateData(socketID, strings.NewReader(mock.Anything), strings.NewReader(mock.Anything), + strings.NewReader(mock.Anything), strings.NewReader(mock.Anything))) + + require.Equal(t, expectedCurrentEnergy, rapl.data[socketID].socketCurrentEnergy) + require.Equal(t, expectedDramCurrentEnergy, rapl.data[socketID].dramCurrentEnergy) +} + +func getRaplWithMockedFs() (*raplServiceImpl, *mockFileService) { + logger := testutil.Logger{Name: "PowerPluginTest"} + fsMock := &mockFileService{} + rapl := newRaplServiceWithFs(logger, fsMock) + + return rapl, fsMock +} diff --git a/plugins/inputs/intel_powerstat/unit_converter.go b/plugins/inputs/intel_powerstat/unit_converter.go new file mode 100644 index 0000000000000..4c3cba6b1b83a --- /dev/null +++ b/plugins/inputs/intel_powerstat/unit_converter.go @@ -0,0 +1,49 @@ +// +build linux + +package intel_powerstat + +import ( + "math" + "strconv" +) + +const ( + microJouleToJoule = 1.0 / 1000000 + microWattToWatt = 1.0 / 1000000 + kiloHertzToMegaHertz = 1.0 / 1000 + nanoSecondsToSeconds = 1.0 / 1000000000 + cyclesToHertz = 1.0 / 1000000 +) + +func convertMicroJoulesToJoules(mJ float64) float64 { + return mJ * microJouleToJoule +} + +func convertMicroWattToWatt(mW float64) float64 { + return mW * microWattToWatt +} + +func convertKiloHertzToMegaHertz(kHz float64) float64 { + return kHz * kiloHertzToMegaHertz +} + +func convertNanoSecondsToSeconds(ns int64) float64 { + return float64(ns) * nanoSecondsToSeconds +} + +func convertProcessorCyclesToHertz(pc uint64) float64 { + return float64(pc) * cyclesToHertz +} + +func roundFloatToNearestTwoDecimalPlaces(n float64) float64 { + return math.Round(n*100) / 100 +} + +func convertIntegerArrayToStringArray(array []int64) []string { + stringArray := make([]string, 0) + for _, value := range array { + stringArray = append(stringArray, strconv.FormatInt(value, 10)) + } + + return stringArray +} From 34151c47a64d94c9d70d3f55bd9dad7e413201fb Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 10 Dec 2020 21:23:57 +0100 Subject: [PATCH 118/761] [inputs.github] Add query of pull-request statistics (#8500) --- plugins/inputs/github/README.md | 20 ++++++++- plugins/inputs/github/github.go | 73 +++++++++++++++++++++++++++++---- 2 files changed, 83 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md index 46127082e69c5..a920a48f54e1d 100644 --- a/plugins/inputs/github/README.md +++ b/plugins/inputs/github/README.md @@ -23,6 +23,14 @@ alternative method for collecting repository information. ## Timeout for HTTP requests. # http_timeout = "5s" + + ## List of additional fields to query. + ## NOTE: Getting those fields might involve issuing additional API-calls, so please + ## make sure you do not exceed the rate-limit of GitHub. + ## + ## Available fields are: + ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) + # additional_fields = [] ``` ### Metrics @@ -52,11 +60,21 @@ When the [internal][] input is enabled: - remaining - How many requests you have remaining (per hour) - blocks - How many requests have been blocked due to rate limit +When specifying `additional_fields` the plugin will collect the specified properties. +**NOTE:** Querying this additional fields might require to perform additional API-calls. +Please make sure you don't exceed the query rate-limit by specifying too many additional fields. +In the following we list the available options with the required API-calls and the resulting fields + +- "pull-requests" (2 API-calls per repository) + - fields: + - open_pull_requests (int) + - closed_pull_requests (int) + ### Example Output ``` github_repository,language=Go,license=MIT\ License,name=telegraf,owner=influxdata forks=2679i,networks=2679i,open_issues=794i,size=23263i,stars=7091i,subscribers=316i,watchers=7091i 1563901372000000000 -internal_github,access_token=Unauthenticated rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i 1552653551000000000 +internal_github,access_token=Unauthenticated closed_pull_requests=3522i,rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i,open_pull_requests=260i 1552653551000000000 ``` [GitHub]: https://www.github.com diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go index a26923f3f305c..e9b48bc389709 100644 --- a/plugins/inputs/github/github.go +++ b/plugins/inputs/github/github.go @@ -20,6 +20,7 @@ import ( type GitHub struct { Repositories []string `toml:"repositories"` AccessToken string `toml:"access_token"` + AdditionalFields []string `toml:"additional_fields"` EnterpriseBaseURL string `toml:"enterprise_base_url"` HTTPTimeout internal.Duration `toml:"http_timeout"` githubClient *github.Client @@ -46,6 +47,14 @@ const sampleConfig = ` ## Timeout for HTTP requests. # http_timeout = "5s" + + ## List of additional fields to query. + ## NOTE: Getting those fields might involve issuing additional API-calls, so please + ## make sure you do not exceed the rate-limit of GitHub. + ## + ## Available fields are: + ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) + # additional_fields = [] ` // SampleConfig returns sample configuration for this plugin. @@ -97,7 +106,6 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { if g.githubClient == nil { githubClient, err := g.createGitHubClient(ctx) - if err != nil { return err } @@ -127,23 +135,35 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { } repositoryInfo, response, err := g.githubClient.Repositories.Get(ctx, owner, repository) - - if _, ok := err.(*github.RateLimitError); ok { - g.RateLimitErrors.Incr(1) - } - + g.handleRateLimit(response, err) if err != nil { acc.AddError(err) return } - g.RateLimit.Set(int64(response.Rate.Limit)) - g.RateRemaining.Set(int64(response.Rate.Remaining)) - now := time.Now() tags := getTags(repositoryInfo) fields := getFields(repositoryInfo) + for _, field := range g.AdditionalFields { + addFields := make(map[string]interface{}) + switch field { + case "pull-requests": + // Pull request properties + addFields, err = g.getPullRequestFields(ctx, owner, repository) + if err != nil { + acc.AddError(err) + continue + } + default: + acc.AddError(fmt.Errorf("unknown additional field %q", field)) + continue + } + for k, v := range addFields { + fields[k] = v + } + } + acc.AddFields("github_repository", fields, tags, now) }(repository, acc) } @@ -152,6 +172,15 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { return nil } +func (g *GitHub) handleRateLimit(response *github.Response, err error) { + if err == nil { + g.RateLimit.Set(int64(response.Rate.Limit)) + g.RateRemaining.Set(int64(response.Rate.Remaining)) + } else if _, ok := err.(*github.RateLimitError); ok { + g.RateLimitErrors.Incr(1) + } +} + func splitRepositoryName(repositoryName string) (string, string, error) { splits := strings.SplitN(repositoryName, "/", 2) @@ -191,6 +220,32 @@ func getFields(repositoryInfo *github.Repository) map[string]interface{} { } } +func (g *GitHub) getPullRequestFields(ctx context.Context, owner, repo string) (map[string]interface{}, error) { + options := github.SearchOptions{ + TextMatch: false, + ListOptions: github.ListOptions{ + PerPage: 100, + Page: 1, + }, + } + + classes := []string{"open", "closed"} + fields := make(map[string]interface{}) + for _, class := range classes { + q := fmt.Sprintf("repo:%s/%s is:pr is:%s", owner, repo, class) + searchResult, response, err := g.githubClient.Search.Issues(ctx, q, &options) + g.handleRateLimit(response, err) + if err != nil { + return fields, err + } + + f := fmt.Sprintf("%s_pull_requests", class) + fields[f] = searchResult.GetTotal() + } + + return fields, nil +} + func init() { inputs.Add("github", func() telegraf.Input { return &GitHub{ From 7d3b7fc2f9d379aa21d09eb3e905e644e876be6f Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 10 Dec 2020 17:21:18 -0500 Subject: [PATCH 119/761] Update changelog (cherry picked from commit f88373afa465bd7e4e4cd9030115238582166b80) --- CHANGELOG.md | 57 +++++++ build_version.txt | 2 +- etc/telegraf.conf | 408 ++++++++++++++++++++++++++++++++++++++++------ 3 files changed, 414 insertions(+), 53 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f7552b5aef103..9111d2f9cb59c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,59 @@ +## v1.17.0-rc0 [2020-12-10] + +#### Release Notes + +- Starlark plugins can now store state between runs using a global state variable +- + +#### Bugfixes + + - [#8505](https://github.com/influxdata/telegraf/pull/8505) `inputs.vsphere` Fixed misspelled check for datacenter + - [#8499](https://github.com/influxdata/telegraf/pull/8499) `processors.execd` Adding support for new lines in influx line protocol fields. + - [#8254](https://github.com/influxdata/telegraf/pull/8254) `serializers.carbon2` Fix carbon2 tests + - [#8498](https://github.com/influxdata/telegraf/pull/8498) `inputs.http_response` fixed network test + - [#8276](https://github.com/influxdata/telegraf/pull/8276) `parsers.grok` Update grok package to support for field names containing '-' and '.' + - [#8414](https://github.com/influxdata/telegraf/pull/8414) `inputs.bcache` Fix tests for Windows - part 1 + +#### Features + + - [#8038](https://github.com/influxdata/telegraf/pull/8038) `inputs.jenkins` feat: add build number field to jenkins_job measurement + - [#7345](https://github.com/influxdata/telegraf/pull/7345) `inputs.ping` Add percentiles to the ping plugin + - [#8369](https://github.com/influxdata/telegraf/pull/8369) `inputs.sqlserver` Added tags for monitoring readable secondaries for Azure SQL MI + - [#8379](https://github.com/influxdata/telegraf/pull/8379) `inputs.sqlserver` SQL Server HA/DR Availability Group queries + - [#8520](https://github.com/influxdata/telegraf/pull/8520) Add initialization example to mock-plugin. + - [#8426](https://github.com/influxdata/telegraf/pull/8426) `inputs.snmp` Add support to convert snmp hex strings to integers + - [#8509](https://github.com/influxdata/telegraf/pull/8509) `inputs.statsd` Add configurable Max TTL duration for statsd input plugin entries + - [#8508](https://github.com/influxdata/telegraf/pull/8508) `inputs.bind` Add configurable timeout to bind input plugin http call + - [#8368](https://github.com/influxdata/telegraf/pull/8368) `inputs.sqlserver` Added is_primary_replica for monitoring readable secondaries for Azure SQL DB + - [#8462](https://github.com/influxdata/telegraf/pull/8462) `inputs.sqlserver` sqlAzureMIRequests - remove duplicate column [session_db_name] + - [#8464](https://github.com/influxdata/telegraf/pull/8464) `inputs.sqlserver` Add column measurement_db_type to output of all queries if not empty + - [#8389](https://github.com/influxdata/telegraf/pull/8389) `inputs.opcua` Add node groups to opcua input plugin + - [#8432](https://github.com/influxdata/telegraf/pull/8432) add support for linux/ppc64le + - [#8474](https://github.com/influxdata/telegraf/pull/8474) `inputs.modbus` Add FLOAT64-IEEE support to inputs.modbus (#8361) (by @Nemecsek) + - [#8447](https://github.com/influxdata/telegraf/pull/8447) `processors.starlark` Add the shared state to the global scope to get previous data + - [#8383](https://github.com/influxdata/telegraf/pull/8383) `inputs.zfs` Add dataset metrics to zfs input + - [#8429](https://github.com/influxdata/telegraf/pull/8429) `outputs.nats` Added "name" parameter to NATS output plugin + - [#8477](https://github.com/influxdata/telegraf/pull/8477) `inputs.http` proxy support for http input + - [#8466](https://github.com/influxdata/telegraf/pull/8466) `inputs.snmp` Translate snmp field values + - [#8435](https://github.com/influxdata/telegraf/pull/8435) `common.kafka` Enable kafka zstd compression and idempotent writes + - [#8056](https://github.com/influxdata/telegraf/pull/8056) `inputs.monit` Add response_time to monit plugin + - [#8446](https://github.com/influxdata/telegraf/pull/8446) update to go 1.15.5 + - [#8428](https://github.com/influxdata/telegraf/pull/8428) `aggregators.basicstats` Add rate and interval to the basicstats aggregator plugin + +#### New Parser Plugins + + - [#7778](https://github.com/influxdata/telegraf/pull/7778) `parsers.prometheus` Add a parser plugin for prometheus + +#### New Input Plugins + + - [#8163](https://github.com/influxdata/telegraf/pull/8163) `inputs.riemann` Support Riemann-Protobuff Listener + +#### New Output Plugins + + - [#8296](https://github.com/influxdata/telegraf/pull/8296) `outputs.yandex_cloud_monitoring` #8295 Initial Yandex.Cloud monitoring + - [#8202](https://github.com/influxdata/telegraf/pull/8202) `outputs.all` A new Logz.io output plugin + + ## v1.16.3 [2020-12-01] #### Bugfixes @@ -18,6 +74,7 @@ - [#8404](https://github.com/influxdata/telegraf/pull/8404) `outputs.wavefront` Wavefront output should distinguish between retryable and non-retryable errors - [#8401](https://github.com/influxdata/telegraf/pull/8401) `processors.starlark` Allow to catch errors that occur in the apply function + ## v1.16.2 [2020-11-13] #### Bugfixes diff --git a/build_version.txt b/build_version.txt index c807441cfed77..092afa15df4df 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.16.3 +1.17.0 diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 50224cb008dd0..508f2fb3a7712 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -171,7 +171,7 @@ ## HTTP Content-Encoding for write request body, can be set to "gzip" to ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" + # content_encoding = "gzip" ## When true, Telegraf will output unsigned integers as unsigned values, ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned @@ -883,14 +883,19 @@ # ## routing_key = "telegraf" # # routing_key = "" # -# ## CompressionCodec represents the various compression codecs recognized by +# ## Compression codec represents the various compression codecs recognized by # ## Kafka in messages. -# ## 0 : No compression -# ## 1 : Gzip compression -# ## 2 : Snappy compression -# ## 3 : LZ4 compression +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD # # compression_codec = 0 # +# ## Idempotent Writes +# ## If enabled, exactly one copy of each message is written. +# # idempotent_writes = false +# # ## RequiredAcks is used in Produce Requests to tell the broker how many # ## replica acknowledgements it must see before responding # ## 0 : the producer never waits for an acknowledgement from the broker. @@ -916,7 +921,6 @@ # # max_message_bytes = 1000000 # # ## Optional TLS Config -# # enable_tls = true # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" @@ -927,6 +931,23 @@ # # sasl_username = "kafka" # # sasl_password = "secret" # +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# # ## SASL protocol version. When connecting to Azure EventHub set to 0. # # sasl_version = 1 # @@ -1023,6 +1044,23 @@ # +# # Send aggregate metrics to Logz.io +# [[outputs.logzio]] +# ## Connection timeout, defaults to "5s" if not set. +# timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Logz.io account token +# token = "your logz.io token" # required +# +# ## Use your listener URL for your Logz.io account region. +# # url = "https://listener.logz.io:8071" + + # # Configuration for MQTT server to send metrics to # [[outputs.mqtt]] # servers = ["localhost:1883"] # required. @@ -1075,6 +1113,9 @@ # ## URLs of NATS servers # servers = ["nats://localhost:4222"] # +# ## Optional client name +# # name = "" +# # ## Optional credentials # # username = "" # # password = "" @@ -1435,6 +1476,118 @@ # # default_appname = "Telegraf" +# # Configuration for Amazon Timestream output. +# [[outputs.timestream]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order: +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) Explicit credentials from 'access_key' and 'secret_key' +# ## 3) Shared profile from 'profile' +# ## 4) Environment variables +# ## 5) Shared credentials file +# ## 6) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Timestream database where the metrics will be inserted. +# ## The database must exist prior to starting Telegraf. +# database_name = "yourDatabaseNameHere" +# +# ## Specifies if the plugin should describe the Timestream database upon starting +# ## to validate if it has access necessary permissions, connection, etc., as a safety check. +# ## If the describe operation fails, the plugin will not start +# ## and therefore the Telegraf agent will not start. +# describe_database_on_start = false +# +# ## The mapping mode specifies how Telegraf records are represented in Timestream. +# ## Valid values are: single-table, multi-table. +# ## For example, consider the following data in line protocol format: +# ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200 +# ## airquality,location=us-west no2=5,pm25=16 1465839830100400200 +# ## where weather and airquality are the measurement names, location and season are tags, +# ## and temperature, humidity, no2, pm25 are fields. +# ## In multi-table mode: +# ## - first line will be ingested to table named weather +# ## - second line will be ingested to table named airquality +# ## - the tags will be represented as dimensions +# ## - first table (weather) will have two records: +# ## one with measurement name equals to temperature, +# ## another with measurement name equals to humidity +# ## - second table (airquality) will have two records: +# ## one with measurement name equals to no2, +# ## another with measurement name equals to pm25 +# ## - the Timestream tables from the example will look like this: +# ## TABLE "weather": +# ## time | location | season | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82 +# ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71 +# ## TABLE "airquality": +# ## time | location | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-west | no2 | 5 +# ## 2016-06-13 17:43:50 | us-west | pm25 | 16 +# ## In single-table mode: +# ## - the data will be ingested to a single table, which name will be valueOf(single_table_name) +# ## - measurement name will stored in dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name) +# ## - location and season will be represented as dimensions +# ## - temperature, humidity, no2, pm25 will be represented as measurement name +# ## - the Timestream table from the example will look like this: +# ## Assuming: +# ## - single_table_name = "my_readings" +# ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace" +# ## TABLE "my_readings": +# ## time | location | season | namespace | measure_name | measure_value::bigint +# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82 +# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71 +# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5 +# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16 +# ## In most cases, using multi-table mapping mode is recommended. +# ## However, you can consider using single-table in situations when you have thousands of measurement names. +# mapping_mode = "multi-table" +# +# ## Only valid and required for mapping_mode = "single-table" +# ## Specifies the Timestream table where the metrics will be uploaded. +# # single_table_name = "yourTableNameHere" +# +# ## Only valid and required for mapping_mode = "single-table" +# ## Describes what will be the Timestream dimension name for the Telegraf +# ## measurement name. +# # single_table_dimension_name_for_telegraf_measurement_name = "namespace" +# +# ## Specifies if the plugin should create the table, if the table do not exist. +# ## The plugin writes the data without prior checking if the table exists. +# ## When the table does not exist, the error returned from Timestream will cause +# ## the plugin to create the table, if this parameter is set to true. +# create_table_if_not_exists = true +# +# ## Only valid and required if create_table_if_not_exists = true +# ## Specifies the Timestream table magnetic store retention period in days. +# ## Check Timestream documentation for more details. +# create_table_magnetic_store_retention_period_in_days = 365 +# +# ## Only valid and required if create_table_if_not_exists = true +# ## Specifies the Timestream table memory store retention period in hours. +# ## Check Timestream documentation for more details. +# create_table_memory_store_retention_period_in_hours = 24 +# +# ## Only valid and optional if create_table_if_not_exists = true +# ## Specifies the Timestream table tags. +# ## Check Timestream documentation for more details +# # create_table_tags = { "foo" = "bar", "environment" = "dev"} + + # # Write metrics to Warp 10 # [[outputs.warp10]] # # Prefix to add to the measurement. @@ -1509,6 +1662,12 @@ # ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. # #truncate_tags = false # +# ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics +# ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending +# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in +# ## Telegraf. +# #immediate_flush = true +# # ## Define a mapping, namespaced by metric prefix, from string values to numeric values # ## deprecated in 1.9; use the enum processor plugin # #[[outputs.wavefront.string_to_number.elasticsearch]] @@ -1517,6 +1676,18 @@ # # red = 0.0 +# # Send aggregated metrics to Yandex.Cloud Monitoring +# [[outputs.yandex_cloud_monitoring]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Yandex.Cloud monitoring API endpoint. Normally should not be changed +# # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" +# +# ## All user metrics should be sent with "custom" service specified. Normally should not be changed +# # service = "custom" + + ############################################################################### # PROCESSOR PLUGINS # ############################################################################### @@ -1779,17 +1950,25 @@ # value_key = "value" -# # Given a tag of a TCP or UDP port number, add a tag of the service name looked up in the system services file +# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file # [[processors.port_name]] # [[processors.port_name]] # ## Name of tag holding the port number # # tag = "port" +# ## Or name of the field holding the port number +# # field = "port" # -# ## Name of output tag where service name will be added +# ## Name of output tag or field (depending on the source) where service name will be added # # dest = "service" # # ## Default tcp or udp # # default_protocol = "tcp" +# +# ## Tag containing the protocol (tcp or udp, case-insensitive) +# # protocol_tag = "proto" +# +# ## Field containing the protocol (tcp or udp, case-insensitive) +# # protocol_field = "proto" # # Print all metrics that pass through this filter. @@ -2362,7 +2541,7 @@ # ## If not specified, then default is: # bcachePath = "/sys/fs/bcache" # -# ## By default, telegraf gather stats for all bcache devices +# ## By default, Telegraf gather stats for all bcache devices # ## Setting devices will restrict the stats to the specified # ## bcache devices. # bcacheDevs = ["bcache0"] @@ -2385,6 +2564,9 @@ # # urls = ["http://localhost:8053/xml/v3"] # # gather_memory_contexts = false # # gather_views = false +# +# ## Timeout for http requests made by bind nameserver +# # timeout = "4s" # # Collect bond interface status, slaves statuses and failures count @@ -3188,6 +3370,9 @@ # ## compress body or "identity" to apply no encoding. # # content_encoding = "identity" # +# ## HTTP Proxy support +# # http_proxy_url = "" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -3256,6 +3441,12 @@ # # response_string_match = "ok" # # response_string_match = "\".*_status\".?:.?\"up\"" # +# ## Expected response status code. +# ## The status code of the response is compared to this value. If they match, the field +# ## "response_status_code_match" will be 1, otherwise it will be 0. If the +# ## expected status code is 0, the check is disabled and the field won't be added. +# # response_status_code = 0 +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -3985,7 +4176,8 @@ # ## |---BA, DCBA - Little Endian # ## |---BADC - Mid-Big Endian # ## |---CDAB - Mid-Little Endian -# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation) +# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, +# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) # ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) # ## scale - the final numeric variable representation # ## address - variable address @@ -4414,9 +4606,8 @@ # # Retrieve data from OPCUA devices # [[inputs.opcua]] -# [[inputs.opcua]] -# ## Device name -# # name = "localhost" +# ## Metric name +# # name = "opcua" # # # ## OPC UA Endpoint URL # # endpoint = "opc.tcp://localhost:4840" @@ -4453,18 +4644,41 @@ # # password = "" # # # ## Node ID configuration -# ## name - the variable name -# ## namespace - integer value 0 thru 3 -# ## identifier_type - s=string, i=numeric, g=guid, b=opaque -# ## identifier - tag as shown in opcua browser -# ## data_type - boolean, byte, short, int, uint, uint16, int16, -# ## uint32, int32, float, double, string, datetime, number +# ## name - field name to use in the output +# ## namespace - OPC UA namespace of the node (integer value 0 thru 3) +# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) +# ## identifier - OPC UA ID (tag as shown in opcua browser) # ## Example: -# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"} -# nodes = [ -# {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, -# {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, -# ] +# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262"} +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier=""}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# #] +# # +# ## Node Group +# ## Sets defaults for OPC UA namespace and ID type so they aren't required in +# ## every node. A group can also have a metric name that overrides the main +# ## plugin metric name. +# ## +# ## Multiple node groups are allowed +# #[[inputs.opcua.group]] +# ## Group Metric name. Overrides the top level name. If unset, the +# ## top level name is used. +# # name = +# # +# ## Group default namespace. If a node in the group doesn't set its +# ## namespace, this is used. +# # namespace = +# # +# ## Group default identifier type. If a node in the group doesn't set its +# ## namespace, this is used. +# # identifier_type = +# # +# ## Node ID Configuration. Array of nodes with the same settings as above. +# # nodes = [ +# # {name="", namespace="", identifier_type="", identifier=""}, +# # {name="", namespace="", identifier_type="", identifier=""}, +# #] # # OpenLDAP cn=Monitor plugin @@ -4638,6 +4852,9 @@ # ## option of the ping command. # # interface = "" # +# ## Percentiles to calculate. This only works with the native method. +# # percentiles = [50, 95, 99] +# # ## Specify the ping executable binary. # # binary = "ping" # @@ -4724,6 +4941,8 @@ # ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. # base_url = "https://localhost:8006/api2/json" # api_token = "USER@REALM!TOKENID=UUID" +# ## Node name, defaults to OS hostname +# # node_name = "" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" @@ -5135,53 +5354,53 @@ # servers = [ # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", # ] - +# # ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 # ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. # ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" - -# ## Queries enabled by default for database_type = "AzureSQLDB" are - -# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +# +# ## Queries enabled by default for database_type = "AzureSQLDB" are - +# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, # ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers - +# # # database_type = "AzureSQLDB" - +# # ## A list of queries to include. If not specified, all the above listed queries are used. # # include_query = [] - +# # ## A list of queries to explicitly ignore. # # exclude_query = [] - -# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - -# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# +# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, # ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers - +# # # database_type = "AzureSQLManagedInstance" - +# # # include_query = [] - +# # # exclude_query = [] - -# ## Queries enabled by default for database_type = "SQLServer" are - -# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, +# +# ## Queries enabled by default for database_type = "SQLServer" are - +# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, # ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu - +# # database_type = "SQLServer" - +# # include_query = [] - +# # ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default # exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] - -# ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use +# +# ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use # ## the new mechanism of identifying the database_type there by use it's corresponding queries - +# # ## Optional parameter, setting this to 2 will use a new version # ## of the collection queries that break compatibility with the original # ## dashboards. # ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB # # query_version = 2 - +# # ## If you are using AzureDB, setting this to true will gather resource utilization metrics # # azuredb = false @@ -5499,7 +5718,7 @@ # # tls_key = "/etc/telegraf/key.pem" -# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets # [[inputs.zfs]] # ## ZFS kstat path. Ignored on FreeBSD # ## If not specified, then default is: @@ -5513,6 +5732,8 @@ # # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] # ## By default, don't gather zpool stats # # poolMetrics = false +# ## By default, don't gather zdataset stats +# # datasetMetrics = false # # Reads 'mntr' stats from one or many zookeeper servers @@ -6040,7 +6261,7 @@ # username = "cisco" # password = "cisco" # -# ## gNMI encoding requested (one of: "proto", "json", "json_ietf") +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") # # encoding = "proto" # # ## redial in case of failures after @@ -6243,6 +6464,32 @@ # # token = "some-long-shared-secret-token" +# # Intel Resource Director Technology plugin +# [[inputs.intel_rdt]] +# ## Optionally set sampling interval to Nx100ms. +# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. +# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. +# # sampling_interval = "10" +# +# ## Optionally specify the path to pqos executable. +# ## If not provided, auto discovery will be performed. +# # pqos_path = "/usr/local/bin/pqos" +# +# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. +# ## If not provided, default value is false. +# # shortened_metrics = false +# +# ## Specify the list of groups of CPU core(s) to be provided as pqos input. +# ## Mandatory if processes aren't set and forbidden if processes are specified. +# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] +# # cores = ["0-3"] +# +# ## Specify the list of processes for which Metrics will be collected. +# ## Mandatory if cores aren't set and forbidden if cores are specified. +# ## e.g. ["qemu", "pmd"] +# # processes = ["process"] + + # # Read JTI OpenConfig Telemetry from listed sensors # [[inputs.jti_openconfig_telemetry]] # ## List of device addresses to collect telemetry from @@ -6314,7 +6561,6 @@ # # version = "" # # ## Optional TLS Config -# # enable_tls = true # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" @@ -6322,16 +6568,42 @@ # # insecure_skip_verify = false # # ## SASL authentication credentials. These settings should typically be used -# ## with TLS encryption enabled using the "enable_tls" option. +# ## with TLS encryption enabled # # sasl_username = "kafka" # # sasl_password = "secret" # +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# # ## SASL protocol version. When connecting to Azure EventHub set to 0. # # sasl_version = 1 # # ## Name of the consumer group. # # consumer_group = "telegraf_metrics_consumers" # +# ## Compression codec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD +# # compression_codec = 0 +# # ## Initial offset position; one of "oldest" or "newest". # # offset = "oldest" # @@ -6831,6 +7103,35 @@ # # insecure_skip_verify = false +# # Riemann protobuff listener. +# [[inputs.riemann_listener]] +# ## URL to listen on. +# ## Default is "tcp://:5555" +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# +# ## Maximum number of concurrent connections. +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# ## Read timeout. +# ## 0 (default) is unlimited. +# # read_timeout = "30s" +# ## Optional TLS configuration. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Enables client authentication if set. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Maximum socket buffer size (in bytes when no unit specified). +# # read_buffer_size = "64KiB" +# ## Period between keep alive probes. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" + + # # SFlow V5 Protocol Listener # [[inputs.sflow]] # ## Address to listen for sFlow packets. @@ -6993,6 +7294,9 @@ # ## calculation of percentiles. Raising this limit increases the accuracy # ## of percentiles but also increases the memory usage and cpu time. # percentile_limit = 1000 +# +# ## Max duration (TTL) for each metric to stay cached/reported without being updated. +# #max_ttl = "1000h" # # Suricata stats plugin From a063f9d7f7b97995ed34cb41de86849ff0ecf692 Mon Sep 17 00:00:00 2001 From: Joshua Gross <820727+grossjo@users.noreply.github.com> Date: Thu, 10 Dec 2020 17:38:21 -0500 Subject: [PATCH 120/761] fix edge case in aerospike plugin where an expected hex string was converted to integer if all digits (#8542) --- plugins/inputs/aerospike/aerospike.go | 29 +++++++++++++++------- plugins/inputs/aerospike/aerospike_test.go | 24 ++++++++++++------ 2 files changed, 36 insertions(+), 17 deletions(-) diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index 7ab15d18168f7..e470b58a40f25 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -81,6 +81,14 @@ var sampleConfig = ` # num_histogram_buckets = 100 # default: 10 ` +// On the random chance a hex value is all digits +// these are fields that can contain hex and should always be strings +var protectedHexFields = map[string]bool{ + "node_name": true, + "cluster_key": true, + "paxos_principal": true, +} + func (a *Aerospike) SampleConfig() string { return sampleConfig } @@ -238,8 +246,9 @@ func (a *Aerospike) parseNodeInfo(stats map[string]string, hostPort string, node fields := make(map[string]interface{}) for k, v := range stats { - val := parseValue(v) - fields[strings.Replace(k, "-", "_", -1)] = val + key := strings.Replace(k, "-", "_", -1) + fields[key] = parseAerospikeValue(key, v) + } acc.AddFields("aerospike_node", fields, tags, time.Now()) @@ -284,8 +293,8 @@ func (a *Aerospike) parseNamespaceInfo(stats map[string]string, hostPort string, if len(parts) < 2 { continue } - val := parseValue(parts[1]) - nFields[strings.Replace(parts[0], "-", "_", -1)] = val + key := strings.Replace(parts[0], "-", "_", -1) + nFields[key] = parseAerospikeValue(key, parts[1]) } acc.AddFields("aerospike_namespace", nFields, nTags, time.Now()) @@ -355,8 +364,8 @@ func (a *Aerospike) parseSetInfo(stats map[string]string, hostPort string, names continue } - val := parseValue(pieces[1]) - nFields[strings.Replace(pieces[0], "-", "_", -1)] = val + key := strings.Replace(pieces[0], "-", "_", -1) + nFields[key] = parseAerospikeValue(key, pieces[1]) } acc.AddFields("aerospike_set", nFields, nTags, time.Now()) @@ -436,7 +445,7 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam for i, bucket := range buckets { // Sum records and increment bucket collection counter if bucketCount < numRecordsPerBucket { - bucketSum = bucketSum + parseValue(bucket).(int64) + bucketSum = bucketSum + parseAerospikeValue("", bucket).(int64) bucketCount++ } @@ -469,8 +478,10 @@ func splitNamespaceSet(namespaceSet string) (string, string) { return split[0], split[1] } -func parseValue(v string) interface{} { - if parsed, err := strconv.ParseInt(v, 10, 64); err == nil { +func parseAerospikeValue(key string, v string) interface{} { + if protectedHexFields[key] { + return v + } else if parsed, err := strconv.ParseInt(v, 10, 64); err == nil { return parsed } else if parsed, err := strconv.ParseUint(v, 10, 64); err == nil { return parsed diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index ee69f0049f401..e88c078b7ae5e 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -454,19 +454,27 @@ func TestParseHistogramNamespace(t *testing.T) { } func TestAerospikeParseValue(t *testing.T) { // uint64 with value bigger than int64 max - val := parseValue("18446744041841121751") - require.Equal(t, uint64(18446744041841121751), val) + val := parseAerospikeValue("", "18446744041841121751") + require.Equal(t, val, uint64(18446744041841121751)) - val = parseValue("true") - require.Equal(t, true, val) + val = parseAerospikeValue("", "true") + require.Equal(t, val, true) // int values - val = parseValue("42") - require.Equal(t, val, int64(42), "must be parsed as int") + val = parseAerospikeValue("", "42") + require.Equal(t, int64(42), val, "must be parsed as an int64") // string values - val = parseValue("BB977942A2CA502") - require.Equal(t, val, `BB977942A2CA502`, "must be left as string") + val = parseAerospikeValue("", "BB977942A2CA502") + require.Equal(t, `BB977942A2CA502`, val, "must be left as a string") + + // all digit hex values, unprotected + val = parseAerospikeValue("", "1992929191") + require.Equal(t, int64(1992929191), val, "must be parsed as an int64") + + // all digit hex values, protected + val = parseAerospikeValue("node_name", "1992929191") + require.Equal(t, `1992929191`, val, "must be left as a string") } func FindTagValue(acc *testutil.Accumulator, measurement string, key string, value string) bool { From 0fe238649478343a695e1436efe298096758c6d9 Mon Sep 17 00:00:00 2001 From: Vyacheslav Stepanov Date: Fri, 11 Dec 2020 16:08:30 +0200 Subject: [PATCH 121/761] Fixing issue with missing metrics when pod has only pending containers (#8472) Also added Pod Phase and Pod Reason fields fixes #8347 Co-authored-by: Vyacheslav-Stepanov --- plugins/inputs/kube_inventory/README.md | 4 +- plugins/inputs/kube_inventory/pod.go | 56 ++++-- plugins/inputs/kube_inventory/pod_test.go | 220 ++++++++++++++++++++++ 3 files changed, 261 insertions(+), 19 deletions(-) diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index 276a90110bc8f..06c84a92ef89d 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -224,12 +224,14 @@ subjects: - node_name - pod_name - node_selector (\*varies) + - phase - state - readiness - fields: - restarts_total - state_code - state_reason + - phase_reason - terminated_reason (string, deprecated in 1.15: use `state_reason` instead) - resource_requests_millicpu_units - resource_requests_memory_bytes @@ -301,7 +303,7 @@ kubernetes_persistentvolume,phase=Released,pv_name=pvc-aaaaaaaa-bbbb-cccc-1111-2 kubernetes_persistentvolumeclaim,namespace=default,phase=Bound,pvc_name=data-etcd-0,selector_select1=s1,storageclass=ebs-1-retain phase_type=0i 1547597615000000000 kubernetes_pod,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1 last_transition_time=1547578322000000000i,ready="false" 1547597616000000000 kubernetes_service,cluster_ip=172.29.61.80,namespace=redis-cache-0001,port_name=redis,port_protocol=TCP,selector_app=myapp,selector_io.kompose.service=redis,selector_role=slave,service_name=redis-slave created=1588690034000000000i,generation=0i,port=6379i,target_port=0i 1547597616000000000 -kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,node_selector_node-role.kubernetes.io/compute=true,pod_name=tick1,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",resource_requests_memory_bytes=524288000 1547597616000000000 +kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,node_selector_node-role.kubernetes.io/compute=true,pod_name=tick1,phase=Running,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",phase_reason="",resource_requests_memory_bytes=524288000 1547597616000000000 kubernetes_statefulset,namespace=default,selector_select1=s1,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000 ``` diff --git a/plugins/inputs/kube_inventory/pod.go b/plugins/inputs/kube_inventory/pod.go index 2f17f690d08c5..c75f133ba1ffe 100644 --- a/plugins/inputs/kube_inventory/pod.go +++ b/plugins/inputs/kube_inventory/pod.go @@ -27,8 +27,16 @@ func (ki *KubernetesInventory) gatherPod(p v1.Pod, acc telegraf.Accumulator) err return nil } - for i, cs := range p.Status.ContainerStatuses { - c := p.Spec.Containers[i] + containerList := map[string]*v1.ContainerStatus{} + for _, v := range p.Status.ContainerStatuses { + containerList[*v.Name] = v + } + + for _, c := range p.Spec.Containers { + cs, ok := containerList[*c.Name] + if !ok { + cs = &v1.ContainerStatus{} + } gatherPodContainer(*p.Spec.NodeName, ki, p, *cs, *c, acc) } @@ -39,41 +47,53 @@ func gatherPodContainer(nodeName string, ki *KubernetesInventory, p v1.Pod, cs v stateCode := 3 stateReason := "" state := "unknown" + readiness := "unready" - switch { - case cs.State.Running != nil: - stateCode = 0 - state = "running" - case cs.State.Terminated != nil: - stateCode = 1 - state = "terminated" - stateReason = cs.State.Terminated.GetReason() - case cs.State.Waiting != nil: - stateCode = 2 - state = "waiting" - stateReason = cs.State.Waiting.GetReason() + if cs.State != nil { + switch { + case cs.State.Running != nil: + stateCode = 0 + state = "running" + case cs.State.Terminated != nil: + stateCode = 1 + state = "terminated" + stateReason = cs.State.Terminated.GetReason() + case cs.State.Waiting != nil: + stateCode = 2 + state = "waiting" + stateReason = cs.State.Waiting.GetReason() + } } - readiness := "unready" if cs.GetReady() { readiness = "ready" } fields := map[string]interface{}{ - "restarts_total": cs.GetRestartCount(), - "state_code": stateCode, - "terminated_reason": cs.State.Terminated.GetReason(), + "restarts_total": cs.GetRestartCount(), + "state_code": stateCode, + } + + // deprecated in 1.15: use `state_reason` instead + if state == "terminated" { + fields["terminated_reason"] = stateReason } if stateReason != "" { fields["state_reason"] = stateReason } + phaseReason := p.Status.GetReason() + if phaseReason != "" { + fields["phase_reason"] = phaseReason + } + tags := map[string]string{ "container_name": *c.Name, "namespace": *p.Metadata.Namespace, "node_name": *p.Spec.NodeName, "pod_name": *p.Metadata.Name, + "phase": *p.Status.Phase, "state": state, "readiness": readiness, } diff --git a/plugins/inputs/kube_inventory/pod_test.go b/plugins/inputs/kube_inventory/pod_test.go index d9b3221655027..230fbbef99dab 100644 --- a/plugins/inputs/kube_inventory/pod_test.go +++ b/plugins/inputs/kube_inventory/pod_test.go @@ -225,6 +225,7 @@ func TestPod(t *testing.T) { "container_name": "running", "node_name": "node1", "pod_name": "pod1", + "phase": "Running", "state": "running", "readiness": "ready", "node_selector_select1": "s1", @@ -245,6 +246,7 @@ func TestPod(t *testing.T) { "container_name": "completed", "node_name": "node1", "pod_name": "pod1", + "phase": "Running", "state": "terminated", "readiness": "unready", }, @@ -263,6 +265,7 @@ func TestPod(t *testing.T) { "container_name": "waiting", "node_name": "node1", "pod_name": "pod1", + "phase": "Running", "state": "waiting", "readiness": "unready", }, @@ -551,3 +554,220 @@ func TestPodSelectorFilter(t *testing.T) { } } } + +func TestPodPendingContainers(t *testing.T) { + cli := &client{} + selectInclude := []string{} + selectExclude := []string{} + now := time.Now() + started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location()) + created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location()) + cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location()) + cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "collect pods", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/pods/": &v1.PodList{ + Items: []*v1.Pod{ + { + Spec: &v1.PodSpec{ + NodeName: toStrPtr("node1"), + Containers: []*v1.Container{ + { + Name: toStrPtr("waiting"), + Image: toStrPtr("image1"), + Ports: []*v1.ContainerPort{ + { + ContainerPort: toInt32Ptr(8080), + Protocol: toStrPtr("TCP"), + }, + }, + Resources: &v1.ResourceRequirements{ + Limits: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("100m")}, + }, + Requests: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("100m")}, + }, + }, + }, + { + Name: toStrPtr("terminated"), + Image: toStrPtr("image1"), + Ports: []*v1.ContainerPort{ + { + ContainerPort: toInt32Ptr(8080), + Protocol: toStrPtr("TCP"), + }, + }, + Resources: &v1.ResourceRequirements{ + Limits: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("100m")}, + }, + Requests: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("100m")}, + }, + }, + }, + }, + Volumes: []*v1.Volume{ + { + Name: toStrPtr("vol1"), + VolumeSource: &v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: toStrPtr("pc1"), + ReadOnly: toBoolPtr(true), + }, + }, + }, + { + Name: toStrPtr("vol2"), + }, + }, + NodeSelector: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + Status: &v1.PodStatus{ + Phase: toStrPtr("Pending"), + Reason: toStrPtr("NetworkNotReady"), + HostIP: toStrPtr("180.12.10.18"), + PodIP: toStrPtr("10.244.2.15"), + StartTime: &metav1.Time{Seconds: toInt64Ptr(started.Unix())}, + Conditions: []*v1.PodCondition{ + { + Type: toStrPtr("Initialized"), + Status: toStrPtr("True"), + LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + }, + { + Type: toStrPtr("Ready"), + Status: toStrPtr("True"), + LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + }, + { + Type: toStrPtr("Scheduled"), + Status: toStrPtr("True"), + LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + }, + }, + ContainerStatuses: []*v1.ContainerStatus{}, + }, + Metadata: &metav1.ObjectMeta{ + OwnerReferences: []*metav1.OwnerReference{ + { + ApiVersion: toStrPtr("apps/v1"), + Kind: toStrPtr("DaemonSet"), + Name: toStrPtr("forwarder"), + Controller: toBoolPtr(true), + }, + }, + Generation: toInt64Ptr(11232), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("pod1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Measurement: podContainerMeasurement, + Fields: map[string]interface{}{ + "phase_reason": "NetworkNotReady", + "restarts_total": int32(0), + "state_code": 3, + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), + }, + Tags: map[string]string{ + "namespace": "ns1", + "container_name": "waiting", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Pending", + "state": "unknown", + "readiness": "unready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", + }, + }, + { + Measurement: podContainerMeasurement, + Fields: map[string]interface{}{ + "phase_reason": "NetworkNotReady", + "restarts_total": int32(0), + "state_code": 3, + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), + }, + Tags: map[string]string{ + "namespace": "ns1", + "container_name": "terminated", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Pending", + "state": "unknown", + "readiness": "unready", + }, + }, + }, + }, + hasError: false, + }, + } + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + SelectorInclude: selectInclude, + SelectorExclude: selectExclude, + } + ks.createSelectorFilters() + acc := new(testutil.Accumulator) + for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items { + err := ks.gatherPod(*pod, acc) + if err != nil { + t.Errorf("Failed to gather pod - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s, i %d\n", v.name, k, m, acc.Metrics[i].Tags[k], i) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T), i %d\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k], i) + } + } + } + } + } +} From ca7e3ff322a1426786bf5ea0094804a467700fed Mon Sep 17 00:00:00 2001 From: Harsh Seth Date: Tue, 15 Dec 2020 04:06:05 +0530 Subject: [PATCH 122/761] Fix typo in CONTRIBUTING.md (#8528) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cc89134c8eebb..0b2ad0ede3f28 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,7 +14,7 @@ 1. Open a new [pull request][]. #### Contributing an External Plugin *(new)* -Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](plugins/inputs/execd), [Execd Output](/plugins/inputs/execd), and [Execd Processor](plugins/processors/execd) Plugins without having to change the plugin code. +Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](plugins/inputs/execd), [Execd Output](/plugins/outputs/execd), and [Execd Processor](plugins/processors/execd) Plugins without having to change the plugin code. Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. Check out our [guidelines](docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`. From 21253ecae15ac14e28c2afb5b62d60ee5279c6f2 Mon Sep 17 00:00:00 2001 From: Oleksandr Date: Wed, 16 Dec 2020 02:59:52 +1100 Subject: [PATCH 123/761] Add per user metrics to mysql input (#6132) --- plugins/inputs/mysql/README.md | 9 ++ plugins/inputs/mysql/mysql.go | 185 +++++++++++++++++++++++++++++++++ 2 files changed, 194 insertions(+) diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index 644d4cf8d7887..43a6515b04e2c 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -88,6 +88,15 @@ This plugin gathers the statistic data from MySQL server # gather_file_events_stats = false ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST + # gather_perf_events_statements = false + # + ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME + # gather_perf_sum_per_acc_per_event = false + # + ## list of events to be gathered for gather_perf_sum_per_acc_per_event + ## in case of empty list all events will be gathered + # perf_summary_events = [] + # # gather_perf_events_statements = false ## the limits for metrics form perf_events_statements diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 7ce9bd1666173..89bce5c3519c4 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -37,6 +37,8 @@ type Mysql struct { GatherFileEventsStats bool `toml:"gather_file_events_stats"` GatherPerfEventsStatements bool `toml:"gather_perf_events_statements"` GatherGlobalVars bool `toml:"gather_global_variables"` + GatherPerfSummaryPerAccountPerEvent bool `toml:"gather_perf_sum_per_acc_per_event"` + PerfSummaryEvents []string `toml:"perf_summary_events"` IntervalSlow string `toml:"interval_slow"` MetricVersion int `toml:"metric_version"` @@ -121,6 +123,13 @@ const sampleConfig = ` # perf_events_statements_limit = 250 # perf_events_statements_time_limit = 86400 + ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME + # gather_perf_sum_per_acc_per_event = false + + ## list of events to be gathered for gather_perf_sum_per_acc_per_event + ## in case of empty list all events will be gathered + # perf_summary_events = [] + ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) ## example: interval_slow = "30m" # interval_slow = "" @@ -416,6 +425,38 @@ const ( FROM information_schema.tables WHERE table_schema = 'performance_schema' AND table_name = ? ` + + perfSummaryPerAccountPerEvent = ` + SELECT + coalesce(user, "unknown"), + coalesce(host, "unknown"), + coalesce(event_name, "unknown"), + count_star, + sum_timer_wait, + min_timer_wait, + avg_timer_wait, + max_timer_wait, + sum_lock_time, + sum_errors, + sum_warnings, + sum_rows_affected, + sum_rows_sent, + sum_rows_examined, + sum_created_tmp_disk_tables, + sum_created_tmp_tables, + sum_select_full_join, + sum_select_full_range_join, + sum_select_range, + sum_select_range_check, + sum_select_scan, + sum_sort_merge_passes, + sum_sort_range, + sum_sort_rows, + sum_sort_scan, + sum_no_index_used, + sum_no_good_index_used + FROM performance_schema.events_statements_summary_by_account_by_event_name + ` ) func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { @@ -491,6 +532,13 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { } } + if m.GatherPerfSummaryPerAccountPerEvent { + err = m.gatherPerfSummaryPerAccountPerEvent(db, serv, acc) + if err != nil { + return err + } + } + if m.GatherTableIOWaits { err = m.gatherPerfTableIOWaits(db, serv, acc) if err != nil { @@ -1262,6 +1310,143 @@ func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, serv string, acc telegraf.Accumu return nil } +// gatherPerfSummaryPerAccountPerEvent can be used to fetch enabled metrics from +// performance_schema.events_statements_summary_by_account_by_event_name +func (m *Mysql) gatherPerfSummaryPerAccountPerEvent(db *sql.DB, serv string, acc telegraf.Accumulator) error { + sqlQuery := perfSummaryPerAccountPerEvent + + var rows *sql.Rows + var err error + + var ( + srcUser string + srcHost string + eventName string + countStar float64 + sumTimerWait float64 + minTimerWait float64 + avgTimerWait float64 + maxTimerWait float64 + sumLockTime float64 + sumErrors float64 + sumWarnings float64 + sumRowsAffected float64 + sumRowsSent float64 + sumRowsExamined float64 + sumCreatedTmpDiskTables float64 + sumCreatedTmpTables float64 + sumSelectFullJoin float64 + sumSelectFullRangeJoin float64 + sumSelectRange float64 + sumSelectRangeCheck float64 + sumSelectScan float64 + sumSortMergePasses float64 + sumSortRange float64 + sumSortRows float64 + sumSortScan float64 + sumNoIndexUsed float64 + sumNoGoodIndexUsed float64 + ) + + var events []interface{} + // if we have perf_summary_events set - select only listed events (adding filter criteria for rows) + if len(m.PerfSummaryEvents) > 0 { + sqlQuery += " WHERE EVENT_NAME IN (" + for i, eventName := range m.PerfSummaryEvents { + if i > 0 { + sqlQuery += ", " + } + sqlQuery += "?" + events = append(events, eventName) + } + sqlQuery += ")" + + rows, err = db.Query(sqlQuery, events...) + } else { + // otherwise no filter, hence, select all rows + rows, err = db.Query(perfSummaryPerAccountPerEvent) + } + + if err != nil { + return err + } + defer rows.Close() + + // parse DSN and save server tag + servtag := getDSNTag(serv) + tags := map[string]string{"server": servtag} + for rows.Next() { + if err := rows.Scan( + &srcUser, + &srcHost, + &eventName, + &countStar, + &sumTimerWait, + &minTimerWait, + &avgTimerWait, + &maxTimerWait, + &sumLockTime, + &sumErrors, + &sumWarnings, + &sumRowsAffected, + &sumRowsSent, + &sumRowsExamined, + &sumCreatedTmpDiskTables, + &sumCreatedTmpTables, + &sumSelectFullJoin, + &sumSelectFullRangeJoin, + &sumSelectRange, + &sumSelectRangeCheck, + &sumSelectScan, + &sumSortMergePasses, + &sumSortRange, + &sumSortRows, + &sumSortScan, + &sumNoIndexUsed, + &sumNoGoodIndexUsed, + ); err != nil { + return err + } + srcUser = strings.ToLower(srcUser) + srcHost = strings.ToLower(srcHost) + + sqlLWTags := copyTags(tags) + sqlLWTags["src_user"] = srcUser + sqlLWTags["src_host"] = srcHost + sqlLWTags["event"] = eventName + sqlLWFields := map[string]interface{}{ + "count_star": countStar, + "sum_timer_wait": sumTimerWait, + "min_timer_wait": minTimerWait, + "avg_timer_wait": avgTimerWait, + "max_timer_wait": maxTimerWait, + "sum_lock_time": sumLockTime, + "sum_errors": sumErrors, + "sum_warnings": sumWarnings, + "sum_rows_affected": sumRowsAffected, + "sum_rows_sent": sumRowsSent, + "sum_rows_examined": sumRowsExamined, + "sum_created_tmp_disk_tables": sumCreatedTmpDiskTables, + "sum_created_tmp_tables": sumCreatedTmpTables, + "sum_select_full_join": sumSelectFullJoin, + "sum_select_full_range_join": sumSelectFullRangeJoin, + "sum_select_range": sumSelectRange, + "sum_select_range_check": sumSelectRangeCheck, + "sum_select_scan": sumSelectScan, + "sum_sort_merge_passes": sumSortMergePasses, + "sum_sort_range": sumSortRange, + "sum_sort_rows": sumSortRows, + "sum_sort_scan": sumSortScan, + "sum_no_index_used": sumNoIndexUsed, + "sum_no_good_index_used": sumNoGoodIndexUsed, + } + acc.AddFields("mysql_perf_acc_event", sqlLWFields, sqlLWTags) + + } + + return nil +} + // gatherPerfTableLockWaits can be used to get // the total number and time for SQL and external lock wait events // for each table and operation From d9d6a194b1f9d0df5d30440cacc19bd350dda951 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 15 Dec 2020 09:09:11 -0800 Subject: [PATCH 124/761] Update json parser readme (#8532) --- plugins/parsers/json/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index d39a9d6bf77d9..01ddf673eec4d 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -3,8 +3,8 @@ The JSON data format parses a [JSON][json] object or an array of objects into metric fields. -**NOTE:** All JSON numbers are converted to float fields. JSON String are -ignored unless specified in the `tag_key` or `json_string_fields` options. +**NOTE:** All JSON numbers are converted to float fields. JSON strings and booleans are +ignored unless specified in the `tag_key` or `json_string_fields` options. ### Configuration @@ -36,7 +36,7 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. "my_tag_2" ] - ## Array of glob pattern strings keys that should be added as string fields. + ## Array of glob pattern strings or booleans keys that should be added as string fields. json_string_fields = [] ## Name key is the key to use as the measurement name. From f6e2d451cd71cf9e2c0ac7ac28c0903190a203d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=ADtalo=20Silva?= Date: Tue, 15 Dec 2020 15:41:51 -0300 Subject: [PATCH 125/761] Graphite tags parser (#8564) --- plugins/parsers/graphite/README.md | 2 +- plugins/parsers/graphite/parser.go | 24 +++++++- plugins/parsers/graphite/parser_test.go | 75 +++++++++++++++++++++++++ 3 files changed, 98 insertions(+), 3 deletions(-) diff --git a/plugins/parsers/graphite/README.md b/plugins/parsers/graphite/README.md index b0b1127aa4ce0..63d7c936ae819 100644 --- a/plugins/parsers/graphite/README.md +++ b/plugins/parsers/graphite/README.md @@ -1,7 +1,7 @@ # Graphite The Graphite data format translates graphite *dot* buckets directly into -telegraf measurement names, with a single value field, and without any tags. +telegraf measurement names, with a single value field, and optional tags. By default, the separator is left as `.`, but this can be changed using the `separator` argument. For more advanced options, Telegraf supports specifying [templates](#templates) to translate graphite buckets into Telegraf metrics. diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go index f50217711c15c..528bc4f2072e6 100644 --- a/plugins/parsers/graphite/parser.go +++ b/plugins/parsers/graphite/parser.go @@ -103,15 +103,17 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) { return nil, fmt.Errorf("received %q which doesn't have required fields", line) } + parts := strings.Split(fields[0], ";") + // decode the name and tags - measurement, tags, field, err := p.templateEngine.Apply(fields[0]) + measurement, tags, field, err := p.templateEngine.Apply(parts[0]) if err != nil { return nil, err } // Could not extract measurement, use the raw value if measurement == "" { - measurement = fields[0] + measurement = parts[0] } // Parse value. @@ -147,6 +149,24 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) { } } } + + // Split name and tags + if len(parts) >= 2 { + for _, tag := range parts[1:] { + tagValue := strings.Split(tag, "=") + if len(tagValue) != 2 || len(tagValue[0]) == 0 || len(tagValue[1]) == 0 { + continue + } + if strings.IndexAny(tagValue[0], "!^") != -1 { + continue + } + if strings.Index(tagValue[1], "~") == 0 { + continue + } + tags[tagValue[0]] = tagValue[1] + } + } + // Set the default tags on the point if they are not already set for k, v := range p.DefaultTags { if _, ok := tags[k]; !ok { diff --git a/plugins/parsers/graphite/parser_test.go b/plugins/parsers/graphite/parser_test.go index 9254574b604e6..1bc4f6363c3e4 100644 --- a/plugins/parsers/graphite/parser_test.go +++ b/plugins/parsers/graphite/parser_test.go @@ -178,6 +178,67 @@ func TestParseLine(t *testing.T) { value: 50, time: testTime, }, + { + test: "normal case with tag", + input: `cpu.foo.bar;tag1=value1 50 ` + strTime, + template: "measurement.foo.bar", + measurement: "cpu", + tags: map[string]string{ + "foo": "foo", + "bar": "bar", + "tag1": "value1", + }, + value: 50, + time: testTime, + }, + { + test: "wrong tag names", + input: `cpu.foo.bar;tag!1=value1;tag^2=value2 50 ` + strTime, + template: "measurement.foo.bar", + measurement: "cpu", + tags: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + value: 50, + time: testTime, + }, + { + test: "empty tag name", + input: `cpu.foo.bar;=value1 50 ` + strTime, + template: "measurement.foo.bar", + measurement: "cpu", + tags: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + value: 50, + time: testTime, + }, + { + test: "wrong tag value", + input: `cpu.foo.bar;tag1=~value1 50 ` + strTime, + template: "measurement.foo.bar", + measurement: "cpu", + tags: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + value: 50, + time: testTime, + }, + { + test: "empty tag value", + input: `cpu.foo.bar;tag1= 50 ` + strTime, + template: "measurement.foo.bar", + measurement: "cpu", + tags: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + value: 50, + time: testTime, + }, { test: "metric only with float value", input: `cpu 50.554 ` + strTime, @@ -279,6 +340,20 @@ func TestParse(t *testing.T) { value: 50, time: testTime, }, + { + test: "normal case with tag", + input: []byte(`cpu.foo.bar;tag1=value1 50 ` + strTime), + template: "measurement.foo.bar", + measurement: "cpu", + tags: map[string]string{ + "foo": "foo", + "bar": "bar", + "tag1": "value1", + }, + value: 50, + time: testTime, + }, + { test: "metric only with float value", input: []byte(`cpu 50.554 ` + strTime), From 8e08da18893264615a88b3f10469a190de3c4e07 Mon Sep 17 00:00:00 2001 From: vhqtvn Date: Wed, 16 Dec 2020 01:52:48 +0700 Subject: [PATCH 126/761] fix crash when socket_listener receiving invalid data (#8551) Co-authored-by: Hoa Nguyen Van --- plugins/inputs/socket_listener/socket_listener.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index e412996f38e6e..15c6f18e1e1dc 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -114,6 +114,7 @@ func (ssl *streamSocketListener) read(c net.Conn) { decoder, err := internal.NewStreamContentDecoder(ssl.ContentEncoding, c) if err != nil { ssl.Log.Error("Read error: %v", err) + return } scnr := bufio.NewScanner(decoder) From b64c38bb00516282103f62194777b1aa321ccf09 Mon Sep 17 00:00:00 2001 From: Vyacheslav Stepanov Date: Wed, 16 Dec 2020 20:23:31 +0200 Subject: [PATCH 127/761] Remove duplicated field "revision" from ecs_task because it's already defined as a tag there (#8574) --- plugins/inputs/ecs/README.md | 3 +-- plugins/inputs/ecs/ecs.go | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/plugins/inputs/ecs/README.md b/plugins/inputs/ecs/README.md index 9e3188eec30bf..0bf8b983cd219 100644 --- a/plugins/inputs/ecs/README.md +++ b/plugins/inputs/ecs/README.md @@ -87,7 +87,6 @@ present in the metadata/stats endpoints. - id - name - fields: - - revision (string) - desired_status (string) - known_status (string) - limit_cpu (float) @@ -226,7 +225,7 @@ present in the metadata/stats endpoints. ### Example Output ``` -ecs_task,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a revision="2",desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000 +ecs_task,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000 ecs_container_mem,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a active_anon=40960i,active_file=8192i,cache=790528i,pgpgin=1243i,total_pgfault=1298i,total_rss=40960i,limit=1033658368i,max_usage=4825088i,hierarchical_memory_limit=536870912i,rss=40960i,total_active_file=8192i,total_mapped_file=618496i,usage_percent=0.05349543109392212,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",pgfault=1298i,pgmajfault=6i,pgpgout=1040i,total_active_anon=40960i,total_inactive_file=782336i,total_pgpgin=1243i,usage=552960i,inactive_file=782336i,mapped_file=618496i,total_cache=790528i,total_pgpgout=1040i 1542642001000000000 ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu-total,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a usage_in_kernelmode=0i,throttling_throttled_periods=0i,throttling_periods=0i,throttling_throttled_time=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_percent=0,usage_total=26426156i,usage_in_usermode=20000000i,usage_system=2336100000000i 1542642001000000000 ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu0,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_total=26426156i 1542642001000000000 diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go index 5fa53d4fd58bc..2194257f1913d 100644 --- a/plugins/inputs/ecs/ecs.go +++ b/plugins/inputs/ecs/ecs.go @@ -166,7 +166,6 @@ func resolveEndpoint(ecs *Ecs) { func (ecs *Ecs) accTask(task *Task, tags map[string]string, acc telegraf.Accumulator) { taskFields := map[string]interface{}{ - "revision": task.Revision, "desired_status": task.DesiredStatus, "known_status": task.KnownStatus, "limit_cpu": task.Limits["CPU"], From 945b556330588f0eb175e391f08a3da43c96d1b0 Mon Sep 17 00:00:00 2001 From: Kush <3647166+kushsharma@users.noreply.github.com> Date: Wed, 16 Dec 2020 23:53:50 +0530 Subject: [PATCH 128/761] Typo in INPUTS.md (#8573) --- docs/INPUTS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/INPUTS.md b/docs/INPUTS.md index 179b674442d6d..6a10cf949829c 100644 --- a/docs/INPUTS.md +++ b/docs/INPUTS.md @@ -83,7 +83,7 @@ func init() { ### Typed Metrics -In addition the the `AddFields` function, the accumulator also supports +In addition to the `AddFields` function, the accumulator also supports functions to add typed metrics: `AddGauge`, `AddCounter`, etc. Metric types are ignored by the InfluxDB output, but can be used for other outputs, such as [prometheus][prom metric types]. From 045c3c18b87329eee88c7b2548e17b9fc805170d Mon Sep 17 00:00:00 2001 From: Anthony Arnaud Date: Wed, 16 Dec 2020 14:11:05 -0500 Subject: [PATCH 129/761] Add prometheus remote write serializer (#8360) --- docs/LICENSE_OF_DEPENDENCIES.md | 2 + go.mod | 7 +- go.sum | 19 +- .../prometheusremotewrite/README.md | 38 + .../prometheusremotewrite.go | 341 +++++++++ .../prometheusremotewrite_test.go | 674 ++++++++++++++++++ plugins/serializers/registry.go | 20 + 7 files changed, 1095 insertions(+), 6 deletions(-) create mode 100644 plugins/serializers/prometheusremotewrite/README.md create mode 100644 plugins/serializers/prometheusremotewrite/prometheusremotewrite.go create mode 100644 plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 971a95584f678..2ae86bb537eaf 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -71,6 +71,7 @@ following works: - github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) - github.com/gopcua/opcua [MIT License](https://github.com/gopcua/opcua/blob/master/LICENSE) - github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE) +- github.com/grpc-ecosystem/grpc-gateway [BSD 3-Clause "New" or "Revised" License](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/LICENSE.txt) - github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) - github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE) - github.com/hashicorp/consul [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) @@ -122,6 +123,7 @@ following works: - github.com/prometheus/client_model [Apache License 2.0](https://github.com/prometheus/client_model/blob/master/LICENSE) - github.com/prometheus/common [Apache License 2.0](https://github.com/prometheus/common/blob/master/LICENSE) - github.com/prometheus/procfs [Apache License 2.0](https://github.com/prometheus/procfs/blob/master/LICENSE) +- github.com/prometheus/prometheus [Apache License 2.0](https://github.com/prometheus/prometheus/blob/master/LICENSE) - github.com/rcrowley/go-metrics [MIT License](https://github.com/rcrowley/go-metrics/blob/master/LICENSE) - github.com/riemann/riemann-go-client [MIT License](https://github.com/riemann/riemann-go-client/blob/master/LICENSE) - github.com/safchain/ethtool [Apache License 2.0](https://github.com/safchain/ethtool/blob/master/LICENSE) diff --git a/go.mod b/go.mod index f2d7a3ec6a95e..d222ff34e3e90 100644 --- a/go.mod +++ b/go.mod @@ -61,10 +61,12 @@ require ( github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/protobuf v1.3.5 + github.com/golang/snappy v0.0.1 github.com/google/go-cmp v0.5.2 github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.12 github.com/gorilla/mux v1.6.2 + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 github.com/hashicorp/consul v1.2.1 @@ -108,6 +110,7 @@ require ( github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.9.1 github.com/prometheus/procfs v0.0.8 + github.com/prometheus/prometheus v2.5.0+incompatible github.com/riemann/riemann-go-client v0.5.0 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect @@ -142,8 +145,8 @@ require ( golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 gonum.org/v1/gonum v0.6.2 // indirect google.golang.org/api v0.20.0 - google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 - google.golang.org/grpc v1.28.0 + google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 + google.golang.org/grpc v1.33.1 gopkg.in/fatih/pool.v2 v2.0.0 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/ldap.v3 v3.1.0 diff --git a/go.sum b/go.sum index ce815505833e4..4aaf5103cf7af 100644 --- a/go.sum +++ b/go.sum @@ -103,6 +103,7 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1C github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= @@ -204,6 +205,7 @@ github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 h1:YpooqMW354GG47PXNBiaCv6yCQizyP3MXD9NUPrCEQ8= @@ -300,6 +302,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= @@ -311,6 +315,8 @@ github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ= @@ -534,6 +540,8 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= +github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= @@ -542,6 +550,7 @@ github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6O github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/riemann/riemann-go-client v0.5.0 h1:yPP7tz1vSYJkSZvZFCsMiDsHHXX57x8/fEX3qyEXuAA= github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2eCbV1o7yqVwOBLbShQ= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= @@ -698,6 +707,7 @@ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -856,8 +866,8 @@ google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 h1:IGPykv426z7LZSVPlaPufOyphngM4at5uZ7x5alaFvE= -google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -868,8 +878,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.33.1 h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -914,6 +924,7 @@ gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= diff --git a/plugins/serializers/prometheusremotewrite/README.md b/plugins/serializers/prometheusremotewrite/README.md new file mode 100644 index 0000000000000..8bad919b2e923 --- /dev/null +++ b/plugins/serializers/prometheusremotewrite/README.md @@ -0,0 +1,38 @@ +# Prometheus + +The `prometheusremotewrite` data format converts metrics into the Prometheus protobuf +exposition format. + +**Warning**: When generating histogram and summary types, output may +not be correct if the metric spans multiple batches. This issue can be +somewhat, but not fully, mitigated by using outputs that support writing in +"batch format". When using histogram and summary types, it is recommended to +use only the `prometheus_client` output. + +### Configuration + +```toml +[[outputs.http]] + url = "https://cortex/api/prom/push" + data_format = "prometheusremotewrite" + tls_ca = "/etc/telegraf/ca.pem" + tls_cert = "/etc/telegraf/cert.pem" + tls_key = "/etc/telegraf/key.pem" + [outputs.http.headers] + Content-Type = "application/x-protobuf" + Content-Encoding = "snappy" + X-Prometheus-Remote-Write-Version = "0.1.0" +``` + +### Metrics + +A Prometheus metric is created for each integer, float, boolean or unsigned +field. Boolean values are converted to *1.0* for true and *0.0* for false. + +The Prometheus metric names are produced by joining the measurement name with +the field key. In the special case where the measurement name is `prometheus` +it is not included in the final metric name. + +Prometheus labels are produced for each tag. + +**Note:** String fields are ignored and do not produce Prometheus metrics. diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go new file mode 100644 index 0000000000000..aca801d561425 --- /dev/null +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go @@ -0,0 +1,341 @@ +package prometheusremotewrite + +import ( + "bytes" + "fmt" + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + "github.com/influxdata/telegraf/plugins/serializers/prometheus" + "hash/fnv" + "sort" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/prometheus/prometheus/prompb" +) + +type MetricKey uint64 + +// MetricSortOrder controls if the output is sorted. +type MetricSortOrder int + +const ( + NoSortMetrics MetricSortOrder = iota + SortMetrics +) + +// StringHandling defines how to process string fields. +type StringHandling int + +const ( + DiscardStrings StringHandling = iota + StringAsLabel +) + +type FormatConfig struct { + MetricSortOrder MetricSortOrder + StringHandling StringHandling +} + +type Serializer struct { + config FormatConfig +} + +func NewSerializer(config FormatConfig) (*Serializer, error) { + s := &Serializer{config: config} + return s, nil +} + +func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) { + return s.SerializeBatch([]telegraf.Metric{metric}) +} + +func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + var buf bytes.Buffer + var entries = make(map[MetricKey]*prompb.TimeSeries) + for _, metric := range metrics { + commonLabels := s.createLabels(metric) + var metrickey MetricKey + var promts *prompb.TimeSeries + for _, field := range metric.FieldList() { + metricName := prometheus.MetricName(metric.Name(), field.Key, metric.Type()) + metricName, ok := prometheus.SanitizeMetricName(metricName) + if !ok { + continue + } + switch metric.Type() { + case telegraf.Counter: + fallthrough + case telegraf.Gauge: + fallthrough + case telegraf.Untyped: + value, ok := prometheus.SampleValue(field.Value) + if !ok { + continue + } + metrickey, promts = getPromTS(metricName, commonLabels, value, metric.Time()) + case telegraf.Histogram: + switch { + case strings.HasSuffix(field.Key, "_bucket"): + // if bucket only, init sum, count, inf + metrickeysum, promtssum := getPromTS(fmt.Sprintf("%s_sum", metricName), commonLabels, float64(0), metric.Time()) + if _, ok = entries[metrickeysum]; !ok { + entries[metrickeysum] = promtssum + } + metrickeycount, promtscount := getPromTS(fmt.Sprintf("%s_count", metricName), commonLabels, float64(0), metric.Time()) + if _, ok = entries[metrickeycount]; !ok { + entries[metrickeycount] = promtscount + } + labels := make([]*prompb.Label, len(commonLabels), len(commonLabels)+1) + copy(labels, commonLabels) + labels = append(labels, &prompb.Label{ + Name: "le", + Value: "+Inf", + }) + metrickeyinf, promtsinf := getPromTS(fmt.Sprintf("%s_bucket", metricName), labels, float64(0), metric.Time()) + if _, ok = entries[metrickeyinf]; !ok { + entries[metrickeyinf] = promtsinf + } + + le, ok := metric.GetTag("le") + if !ok { + continue + } + bound, err := strconv.ParseFloat(le, 64) + if err != nil { + continue + } + count, ok := prometheus.SampleCount(field.Value) + if !ok { + continue + } + + labels = make([]*prompb.Label, len(commonLabels), len(commonLabels)+1) + copy(labels, commonLabels) + labels = append(labels, &prompb.Label{ + Name: "le", + Value: fmt.Sprint(bound), + }) + metrickey, promts = getPromTS(fmt.Sprintf("%s_bucket", metricName), labels, float64(count), metric.Time()) + case strings.HasSuffix(field.Key, "_sum"): + sum, ok := prometheus.SampleSum(field.Value) + if !ok { + continue + } + + metrickey, promts = getPromTS(fmt.Sprintf("%s_sum", metricName), commonLabels, sum, metric.Time()) + case strings.HasSuffix(field.Key, "_count"): + count, ok := prometheus.SampleCount(field.Value) + if !ok { + continue + } + + // if no bucket generate +Inf entry + labels := make([]*prompb.Label, len(commonLabels), len(commonLabels)+1) + copy(labels, commonLabels) + labels = append(labels, &prompb.Label{ + Name: "le", + Value: "+Inf", + }) + metrickeyinf, promtsinf := getPromTS(fmt.Sprintf("%s_bucket", metricName), labels, float64(count), metric.Time()) + if minf, ok := entries[metrickeyinf]; !ok || minf.Samples[0].Value == 0 { + entries[metrickeyinf] = promtsinf + } + + metrickey, promts = getPromTS(fmt.Sprintf("%s_count", metricName), commonLabels, float64(count), metric.Time()) + default: + continue + } + case telegraf.Summary: + switch { + case strings.HasSuffix(field.Key, "_sum"): + sum, ok := prometheus.SampleSum(field.Value) + if !ok { + continue + } + + metrickey, promts = getPromTS(fmt.Sprintf("%s_sum", metricName), commonLabels, sum, metric.Time()) + case strings.HasSuffix(field.Key, "_count"): + count, ok := prometheus.SampleCount(field.Value) + if !ok { + continue + } + + metrickey, promts = getPromTS(fmt.Sprintf("%s_count", metricName), commonLabels, float64(count), metric.Time()) + default: + quantileTag, ok := metric.GetTag("quantile") + if !ok { + continue + } + quantile, err := strconv.ParseFloat(quantileTag, 64) + if err != nil { + continue + } + value, ok := prometheus.SampleValue(field.Value) + if !ok { + continue + } + + labels := make([]*prompb.Label, len(commonLabels), len(commonLabels)+1) + copy(labels, commonLabels) + labels = append(labels, &prompb.Label{ + Name: "quantile", + Value: fmt.Sprint(quantile), + }) + metrickey, promts = getPromTS(metricName, labels, value, metric.Time()) + } + default: + return nil, fmt.Errorf("Unknown type %v", metric.Type()) + } + + // A batch of metrics can contain multiple values for a single + // Prometheus sample. If this metric is older than the existing + // sample then we can skip over it. + m, ok := entries[metrickey] + if ok { + if metric.Time().Before(time.Unix(m.Samples[0].Timestamp, 0)) { + continue + } + } + entries[metrickey] = promts + } + + } + + var promTS = make([]*prompb.TimeSeries, len(entries)) + var i int64 = 0 + for _, promts := range entries { + promTS[i] = promts + i++ + } + + switch s.config.MetricSortOrder { + case SortMetrics: + sort.Slice(promTS, func(i, j int) bool { + lhs := promTS[i].Labels + rhs := promTS[j].Labels + if len(lhs) != len(rhs) { + return len(lhs) < len(rhs) + } + + for index := range lhs { + l := lhs[index] + r := rhs[index] + + if l.Name != r.Name { + return l.Name < r.Name + } + + if l.Value != r.Value { + return l.Value < r.Value + } + } + + return false + }) + + } + data, err := proto.Marshal(&prompb.WriteRequest{Timeseries: promTS}) + if err != nil { + return nil, fmt.Errorf("unable to marshal protobuf: %v", err) + } + encoded := snappy.Encode(nil, data) + buf.Write(encoded) + return buf.Bytes(), nil +} + +func hasLabel(name string, labels []*prompb.Label) bool { + for _, label := range labels { + if name == label.Name { + return true + } + } + return false +} + +func (s *Serializer) createLabels(metric telegraf.Metric) []*prompb.Label { + labels := make([]*prompb.Label, 0, len(metric.TagList())) + for _, tag := range metric.TagList() { + // Ignore special tags for histogram and summary types. + switch metric.Type() { + case telegraf.Histogram: + if tag.Key == "le" { + continue + } + case telegraf.Summary: + if tag.Key == "quantile" { + continue + } + } + + name, ok := prometheus.SanitizeLabelName(tag.Key) + if !ok { + continue + } + + labels = append(labels, &prompb.Label{Name: name, Value: tag.Value}) + } + + if s.config.StringHandling != StringAsLabel { + return labels + } + + addedFieldLabel := false + for _, field := range metric.FieldList() { + value, ok := field.Value.(string) + if !ok { + continue + } + + name, ok := prometheus.SanitizeLabelName(field.Key) + if !ok { + continue + } + + // If there is a tag with the same name as the string field, discard + // the field and use the tag instead. + if hasLabel(name, labels) { + continue + } + + labels = append(labels, &prompb.Label{Name: name, Value: value}) + addedFieldLabel = true + + } + + if addedFieldLabel { + sort.Slice(labels, func(i, j int) bool { + return labels[i].Name < labels[j].Name + }) + } + + return labels +} + +func MakeMetricKey(labels []*prompb.Label) MetricKey { + h := fnv.New64a() + for _, label := range labels { + h.Write([]byte(label.Name)) + h.Write([]byte("\x00")) + h.Write([]byte(label.Value)) + h.Write([]byte("\x00")) + } + return MetricKey(h.Sum64()) +} + +func getPromTS(name string, labels []*prompb.Label, value float64, ts time.Time) (MetricKey, *prompb.TimeSeries) { + sample := []prompb.Sample{{ + // Timestamp is int milliseconds for remote write. + Timestamp: ts.UnixNano() / int64(time.Millisecond), + Value: value, + }} + labelscopy := make([]*prompb.Label, len(labels), len(labels)+1) + copy(labelscopy, labels) + labels = append(labelscopy, &prompb.Label{ + Name: "__name__", + Value: name, + }) + return MakeMetricKey(labels), &prompb.TimeSeries{Labels: labels, Samples: sample} +} diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go new file mode 100644 index 0000000000000..8aecd8ebca9bf --- /dev/null +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go @@ -0,0 +1,674 @@ +package prometheusremotewrite + +import ( + "bytes" + "fmt" + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/prompb" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestRemoteWriteSerialize(t *testing.T) { + tests := []struct { + name string + config FormatConfig + metric telegraf.Metric + expected []byte + }{ + { + name: "simple", + metric: testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + expected: []byte(` +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "prometheus input untyped", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "code": "400", + "method": "post", + }, + map[string]interface{}{ + "http_requests_total": 3.0, + }, + time.Unix(0, 0), + telegraf.Untyped, + ), + expected: []byte(` +http_requests_total{code="400", method="post"} 3 +`), + }, + { + name: "prometheus input counter", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "code": "400", + "method": "post", + }, + map[string]interface{}{ + "http_requests_total": 3.0, + }, + time.Unix(0, 0), + telegraf.Counter, + ), + expected: []byte(` +http_requests_total{code="400", method="post"} 3 +`), + }, + { + name: "prometheus input gauge", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "code": "400", + "method": "post", + }, + map[string]interface{}{ + "http_requests_total": 3.0, + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + expected: []byte(` +http_requests_total{code="400", method="post"} 3 +`), + }, + { + name: "prometheus input histogram no buckets", + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 53423, + "http_request_duration_seconds_count": 144320, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + expected: []byte(` +http_request_duration_seconds_count 144320 +http_request_duration_seconds_sum 53423 +http_request_duration_seconds_bucket{le="+Inf"} 144320 +`), + }, + { + name: "prometheus input histogram only bucket", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "le": "0.5", + }, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 129389.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + expected: []byte(` +http_request_duration_seconds_count 0 +http_request_duration_seconds_sum 0 +http_request_duration_seconds_bucket{le="+Inf"} 0 +http_request_duration_seconds_bucket{le="0.5"} 129389 +`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, err := NewSerializer(FormatConfig{ + MetricSortOrder: SortMetrics, + StringHandling: tt.config.StringHandling, + }) + require.NoError(t, err) + data, err := s.Serialize(tt.metric) + actual, err := prompbToText(data) + require.NoError(t, err) + + require.Equal(t, strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(actual))) + }) + } +} + +func TestRemoteWriteSerializeBatch(t *testing.T) { + tests := []struct { + name string + config FormatConfig + metrics []telegraf.Metric + expected []byte + }{ + { + name: "simple", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "one.example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "two.example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle{host="one.example.org"} 42 +cpu_time_idle{host="two.example.org"} 42 +`), + }, + { + name: "multiple metric families", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "one.example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + "time_guest": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_guest{host="one.example.org"} 42 +cpu_time_idle{host="one.example.org"} 42 +`), + }, + { + name: "histogram", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 53423, + "http_request_duration_seconds_count": 144320, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 24054.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.1"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 33444.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.2"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 100392.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.5"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 129389.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "1.0"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 133988.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 144320.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + }, + expected: []byte(` +http_request_duration_seconds_count 144320 +http_request_duration_seconds_sum 53423 +http_request_duration_seconds_bucket{le="+Inf"} 144320 +http_request_duration_seconds_bucket{le="0.05"} 24054 +http_request_duration_seconds_bucket{le="0.1"} 33444 +http_request_duration_seconds_bucket{le="0.2"} 100392 +http_request_duration_seconds_bucket{le="0.5"} 129389 +http_request_duration_seconds_bucket{le="1"} 133988 +`), + }, + { + name: "summary with quantile", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.7560473e+07, + "rpc_duration_seconds_count": 2693, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 3102.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.05"}, + map[string]interface{}{ + "rpc_duration_seconds": 3272.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.5"}, + map[string]interface{}{ + "rpc_duration_seconds": 4773.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.9"}, + map[string]interface{}{ + "rpc_duration_seconds": 9001.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.99"}, + map[string]interface{}{ + "rpc_duration_seconds": 76656.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + }, + expected: []byte(` +rpc_duration_seconds_count 2693 +rpc_duration_seconds_sum 17560473 +rpc_duration_seconds{quantile="0.01"} 3102 +rpc_duration_seconds{quantile="0.05"} 3272 +rpc_duration_seconds{quantile="0.5"} 4773 +rpc_duration_seconds{quantile="0.9"} 9001 +rpc_duration_seconds{quantile="0.99"} 76656 +`), + }, + { + name: "newer sample", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 43.0, + }, + time.Unix(1, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle 43 +`), + }, + { + name: "colons are not replaced in metric name from measurement", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu::xyzzy", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu::xyzzy_time_idle 42 +`), + }, + { + name: "colons are not replaced in metric name from field", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time:idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time:idle 42 +`), + }, + { + name: "invalid label", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host-name": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle{host_name="example.org"} 42 +`), + }, + { + name: "colons are replaced in label name", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host:name": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle{host_name="example.org"} 42 +`), + }, + { + name: "discard strings", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + "cpu": "cpu0", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle 42 +`), + }, + { + name: "string as label", + config: FormatConfig{ + MetricSortOrder: SortMetrics, + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + "cpu": "cpu0", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle{cpu="cpu0"} 42 +`), + }, + { + name: "string as label duplicate tag", + config: FormatConfig{ + MetricSortOrder: SortMetrics, + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42.0, + "cpu": "cpu1", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_idle{cpu="cpu0"} 42 +`), + }, + { + name: "replace characters when using string as label", + config: FormatConfig{ + MetricSortOrder: SortMetrics, + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "host:name": "example.org", + "time_idle": 42.0, + }, + time.Unix(1574279268, 0), + ), + }, + expected: []byte(` +cpu_time_idle{host_name="example.org"} 42 +`), + }, + { + name: "multiple fields grouping", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 8106.04, + "time_system": 26271.4, + "time_user": 92904.33, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + }, + map[string]interface{}{ + "time_guest": 8181.63, + "time_system": 25351.49, + "time_user": 96912.57, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu2", + }, + map[string]interface{}{ + "time_guest": 7470.04, + "time_system": 24998.43, + "time_user": 96034.08, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu3", + }, + map[string]interface{}{ + "time_guest": 7517.95, + "time_system": 24970.82, + "time_user": 94148, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +cpu_time_guest{cpu="cpu0"} 8106.04 +cpu_time_system{cpu="cpu0"} 26271.4 +cpu_time_user{cpu="cpu0"} 92904.33 +cpu_time_guest{cpu="cpu1"} 8181.63 +cpu_time_system{cpu="cpu1"} 25351.49 +cpu_time_user{cpu="cpu1"} 96912.57 +cpu_time_guest{cpu="cpu2"} 7470.04 +cpu_time_system{cpu="cpu2"} 24998.43 +cpu_time_user{cpu="cpu2"} 96034.08 +cpu_time_guest{cpu="cpu3"} 7517.95 +cpu_time_system{cpu="cpu3"} 24970.82 +cpu_time_user{cpu="cpu3"} 94148 +`), + }, + { + name: "summary with no quantile", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.7560473e+07, + "rpc_duration_seconds_count": 2693, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + }, + expected: []byte(` +rpc_duration_seconds_count 2693 +rpc_duration_seconds_sum 17560473 +`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, err := NewSerializer(FormatConfig{ + MetricSortOrder: SortMetrics, + StringHandling: tt.config.StringHandling, + }) + require.NoError(t, err) + data, err := s.SerializeBatch(tt.metrics) + require.NoError(t, err) + actual, err := prompbToText(data) + require.NoError(t, err) + + require.Equal(t, + strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(actual))) + }) + } +} + +func prompbToText(data []byte) ([]byte, error) { + var buf = bytes.Buffer{} + protobuff, err := snappy.Decode(nil, data) + if err != nil { + return nil, err + } + var req prompb.WriteRequest + err = proto.Unmarshal(protobuff, &req) + if err != nil { + return nil, err + } + samples := protoToSamples(&req) + for _, sample := range samples { + buf.Write([]byte(fmt.Sprintf("%s %s\n", sample.Metric.String(), sample.Value.String()))) + } + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func protoToSamples(req *prompb.WriteRequest) model.Samples { + var samples model.Samples + for _, ts := range req.Timeseries { + metric := make(model.Metric, len(ts.Labels)) + for _, l := range ts.Labels { + metric[model.LabelName(l.Name)] = model.LabelValue(l.Value) + } + + for _, s := range ts.Samples { + samples = append(samples, &model.Sample{ + Metric: metric, + Value: model.SampleValue(s.Value), + Timestamp: model.Time(s.Timestamp), + }) + } + } + return samples +} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index b12ef7660b981..32bc034e0b0b0 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -2,6 +2,7 @@ package serializers import ( "fmt" + "github.com/influxdata/telegraf/plugins/serializers/prometheusremotewrite" "time" "github.com/influxdata/telegraf" @@ -126,12 +127,31 @@ func NewSerializer(config *Config) (Serializer, error) { serializer, err = NewWavefrontSerializer(config.Prefix, config.WavefrontUseStrict, config.WavefrontSourceOverride) case "prometheus": serializer, err = NewPrometheusSerializer(config) + case "prometheusremotewrite": + serializer, err = NewPrometheusRemoteWriteSerializer(config) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } return serializer, err } +func NewPrometheusRemoteWriteSerializer(config *Config) (Serializer, error) { + sortMetrics := prometheusremotewrite.NoSortMetrics + if config.PrometheusExportTimestamp { + sortMetrics = prometheusremotewrite.SortMetrics + } + + stringAsLabels := prometheusremotewrite.DiscardStrings + if config.PrometheusStringAsLabel { + stringAsLabels = prometheusremotewrite.StringAsLabel + } + + return prometheusremotewrite.NewSerializer(prometheusremotewrite.FormatConfig{ + MetricSortOrder: sortMetrics, + StringHandling: stringAsLabels, + }) +} + func NewPrometheusSerializer(config *Config) (Serializer, error) { exportTimestamp := prometheus.NoExportTimestamp if config.PrometheusExportTimestamp { From b858eb962af294790463c7c0d3ff15fab80e942b Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 16 Dec 2020 14:32:29 -0600 Subject: [PATCH 130/761] Added Glob pattern matching for "Windows Services" plugin (#8575) * Allow glob patterns in config * Update README * Move creating filter to init * Need to explictly call init Co-authored-by: Bas <3441183+BattleBas@users.noreply.github.com> --- plugins/inputs/win_services/README.md | 3 +- plugins/inputs/win_services/win_services.go | 36 ++++++++++++----- .../win_services_integration_test.go | 13 +++++- .../inputs/win_services/win_services_test.go | 40 +++++++++++++++---- 4 files changed, 72 insertions(+), 20 deletions(-) diff --git a/plugins/inputs/win_services/README.md b/plugins/inputs/win_services/README.md index eef641718b965..1d7aa63568949 100644 --- a/plugins/inputs/win_services/README.md +++ b/plugins/inputs/win_services/README.md @@ -8,10 +8,11 @@ Monitoring some services may require running Telegraf with administrator privile ```toml [[inputs.win_services]] - ## Names of the services to monitor. Leave empty to monitor all the available services on the host + ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. service_names = [ "LanmanServer", "TermService", + "Win*", ] ``` diff --git a/plugins/inputs/win_services/win_services.go b/plugins/inputs/win_services/win_services.go index 6ac1bde68ca20..185e9b6b67de4 100644 --- a/plugins/inputs/win_services/win_services.go +++ b/plugins/inputs/win_services/win_services.go @@ -7,6 +7,7 @@ import ( "os" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/mgr" @@ -78,10 +79,11 @@ func (rmr *MgProvider) Connect() (WinServiceManager, error) { } var sampleConfig = ` - ## Names of the services to monitor. Leave empty to monitor all the available services on the host + ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. service_names = [ "LanmanServer", - "TermService", + "TermService", + "Win*", ] ` @@ -93,6 +95,8 @@ type WinServices struct { ServiceNames []string `toml:"service_names"` mgrProvider ManagerProvider + + servicesFilter filter.Filter } type ServiceInfo struct { @@ -102,6 +106,16 @@ type ServiceInfo struct { StartUpMode int } +func (m *WinServices) Init() error { + var err error + m.servicesFilter, err = filter.NewIncludeExcludeFilter(m.ServiceNames, nil) + if err != nil { + return err + } + + return nil +} + func (m *WinServices) Description() string { return description } @@ -117,7 +131,7 @@ func (m *WinServices) Gather(acc telegraf.Accumulator) error { } defer scmgr.Disconnect() - serviceNames, err := listServices(scmgr, m.ServiceNames) + serviceNames, err := m.listServices(scmgr) if err != nil { return err } @@ -152,16 +166,20 @@ func (m *WinServices) Gather(acc telegraf.Accumulator) error { } // listServices returns a list of services to gather. -func listServices(scmgr WinServiceManager, userServices []string) ([]string, error) { - if len(userServices) != 0 { - return userServices, nil - } - +func (m *WinServices) listServices(scmgr WinServiceManager) ([]string, error) { names, err := scmgr.ListServices() if err != nil { return nil, fmt.Errorf("Could not list services: %s", err) } - return names, nil + + var services []string + for _, n := range names { + if m.servicesFilter.Match(n) { + services = append(services, n) + } + } + + return services, nil } // collectServiceInfo gathers info about a service. diff --git a/plugins/inputs/win_services/win_services_integration_test.go b/plugins/inputs/win_services/win_services_integration_test.go index 0c375c3dd2e65..028954f13a609 100644 --- a/plugins/inputs/win_services/win_services_integration_test.go +++ b/plugins/inputs/win_services/win_services_integration_test.go @@ -22,7 +22,11 @@ func TestList(t *testing.T) { require.NoError(t, err) defer scmgr.Disconnect() - services, err := listServices(scmgr, KnownServices) + winServices := &WinServices{ + ServiceNames: KnownServices, + } + winServices.Init() + services, err := winServices.listServices(scmgr) require.NoError(t, err) require.Len(t, services, 2, "Different number of services") require.Equal(t, services[0], KnownServices[0]) @@ -38,7 +42,11 @@ func TestEmptyList(t *testing.T) { require.NoError(t, err) defer scmgr.Disconnect() - services, err := listServices(scmgr, []string{}) + winServices := &WinServices{ + ServiceNames: []string{}, + } + winServices.Init() + services, err := winServices.listServices(scmgr) require.NoError(t, err) require.Condition(t, func() bool { return len(services) > 20 }, "Too few service") } @@ -52,6 +60,7 @@ func TestGatherErrors(t *testing.T) { ServiceNames: InvalidServices, mgrProvider: &MgProvider{}, } + ws.Init() require.Len(t, ws.ServiceNames, 3, "Different number of services") var acc testutil.Accumulator require.NoError(t, ws.Gather(&acc)) diff --git a/plugins/inputs/win_services/win_services_test.go b/plugins/inputs/win_services/win_services_test.go index e33ab2ddce622..7d1672e8f6515 100644 --- a/plugins/inputs/win_services/win_services_test.go +++ b/plugins/inputs/win_services/win_services_test.go @@ -123,35 +123,50 @@ var testErrors = []testData{ {nil, errors.New("Fake srv query error"), nil, "Fake service 2", "", 0, 0}, {nil, nil, errors.New("Fake srv config error"), "Fake service 3", "", 0, 0}, }}, - {nil, nil, nil, []serviceTestInfo{ + {[]string{"Fake service 1"}, nil, nil, []serviceTestInfo{ {errors.New("Fake srv open error"), nil, nil, "Fake service 1", "", 0, 0}, }}, } func TestBasicInfo(t *testing.T) { - winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[0]}} + winServices := &WinServices{ + Log: testutil.Logger{}, + mgrProvider: &FakeMgProvider{testErrors[0]}, + } + winServices.Init() assert.NotEmpty(t, winServices.SampleConfig()) assert.NotEmpty(t, winServices.Description()) } func TestMgrErrors(t *testing.T) { //mgr.connect error - winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[0]}} + winServices := &WinServices{ + Log: testutil.Logger{}, + mgrProvider: &FakeMgProvider{testErrors[0]}, + } var acc1 testutil.Accumulator err := winServices.Gather(&acc1) require.Error(t, err) assert.Contains(t, err.Error(), testErrors[0].mgrConnectError.Error()) ////mgr.listServices error - winServices = &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[1]}} + winServices = &WinServices{ + Log: testutil.Logger{}, + mgrProvider: &FakeMgProvider{testErrors[1]}, + } var acc2 testutil.Accumulator err = winServices.Gather(&acc2) require.Error(t, err) assert.Contains(t, err.Error(), testErrors[1].mgrListServicesError.Error()) ////mgr.listServices error 2 - winServices = &WinServices{testutil.Logger{}, []string{"Fake service 1"}, &FakeMgProvider{testErrors[3]}} + winServices = &WinServices{ + Log: testutil.Logger{}, + ServiceNames: []string{"Fake service 1"}, + mgrProvider: &FakeMgProvider{testErrors[3]}, + } + winServices.Init() var acc3 testutil.Accumulator buf := &bytes.Buffer{} @@ -162,7 +177,11 @@ func TestMgrErrors(t *testing.T) { } func TestServiceErrors(t *testing.T) { - winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[2]}} + winServices := &WinServices{ + Log: testutil.Logger{}, + mgrProvider: &FakeMgProvider{testErrors[2]}, + } + winServices.Init() var acc1 testutil.Accumulator buf := &bytes.Buffer{} @@ -184,8 +203,13 @@ var testSimpleData = []testData{ }}, } -func TestGather2(t *testing.T) { - winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testSimpleData[0]}} +func TestGatherContainsTag(t *testing.T) { + winServices := &WinServices{ + Log: testutil.Logger{}, + ServiceNames: []string{"Service*"}, + mgrProvider: &FakeMgProvider{testSimpleData[0]}, + } + winServices.Init() var acc1 testutil.Accumulator require.NoError(t, winServices.Gather(&acc1)) assert.Len(t, acc1.Errors, 0, "There should be no errors after gather") From e39208d60a99a44273c19253e94bd869d3e00402 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 16 Dec 2020 15:38:33 -0500 Subject: [PATCH 131/761] fix issue with mqtt concurrent map write (#8562) --- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 10 +++++++++- plugins/inputs/mqtt_consumer/mqtt_consumer_test.go | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 26122b8e86b88..73d41a32f0f9e 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -5,9 +5,10 @@ import ( "errors" "fmt" "strings" + "sync" "time" - "github.com/eclipse/paho.mqtt.golang" + mqtt "github.com/eclipse/paho.mqtt.golang" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" @@ -69,6 +70,7 @@ type MQTTConsumer struct { state ConnectionState sem semaphore messages map[telegraf.TrackingID]bool + messagesMutex sync.Mutex topicTag string ctx context.Context @@ -219,7 +221,9 @@ func (m *MQTTConsumer) connect() error { m.Log.Infof("Connected %v", m.Servers) m.state = Connected + m.messagesMutex.Lock() m.messages = make(map[telegraf.TrackingID]bool) + m.messagesMutex.Unlock() // Persistent sessions should skip subscription if a session is present, as // the subscriptions are stored by the server. @@ -258,6 +262,7 @@ func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) { select { case track := <-m.acc.Delivered(): <-m.sem + m.messagesMutex.Lock() _, ok := m.messages[track.ID()] if !ok { // Added by a previous connection @@ -265,6 +270,7 @@ func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) { } // No ack, MQTT does not support durable handling delete(m.messages, track.ID()) + m.messagesMutex.Unlock() case m.sem <- empty{}: err := m.onMessage(m.acc, msg) if err != nil { @@ -290,7 +296,9 @@ func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Mess } id := acc.AddTrackingMetricGroup(metrics) + m.messagesMutex.Lock() m.messages[id] = true + m.messagesMutex.Unlock() return nil } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index 4884fc0508107..2d9db2c23872a 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/eclipse/paho.mqtt.golang" + mqtt "github.com/eclipse/paho.mqtt.golang" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" From a27ded6d957f0de5b250d8861f3f308a08700220 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 16 Dec 2020 15:39:12 -0500 Subject: [PATCH 132/761] fix potential issue with race condition (#8577) --- plugins/inputs/ping/ping.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index da9ab8698e83b..87f7af8e7489f 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -12,6 +12,7 @@ import ( "sort" "strings" "sync" + "sync/atomic" "time" "github.com/glinton/ping" @@ -289,7 +290,7 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { c := ping.Client{} var doErr error - var packetsSent int + var packetsSent int32 type sentReq struct { err error @@ -304,7 +305,7 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { doErr = sent.err } if sent.sent { - packetsSent++ + atomic.AddInt32(&packetsSent, 1) } } r.Done() @@ -387,7 +388,7 @@ func percentile(values durationSlice, perc int) time.Duration { } } -func onFin(packetsSent int, resps []*ping.Response, err error, destination string, percentiles []int) (map[string]string, map[string]interface{}) { +func onFin(packetsSent int32, resps []*ping.Response, err error, destination string, percentiles []int) (map[string]string, map[string]interface{}) { packetsRcvd := len(resps) tags := map[string]string{"url": destination} @@ -412,7 +413,7 @@ func onFin(packetsSent int, resps []*ping.Response, err error, destination strin return tags, fields } - fields["percent_packet_loss"] = float64(packetsSent-packetsRcvd) / float64(packetsSent) * 100 + fields["percent_packet_loss"] = float64(int(packetsSent)-packetsRcvd) / float64(packetsSent) * 100 ttl := resps[0].TTL var min, max, avg, total time.Duration From 73f7e7da186fbd11f9e703b2fed363ce7c86677b Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 16 Dec 2020 16:20:49 -0500 Subject: [PATCH 133/761] Update changelog (cherry picked from commit d7cc715c15f2421eba8b56b0fec4c6d930cd250c) --- CHANGELOG.md | 21 ++++++++++++++++++--- etc/telegraf.conf | 15 +++++++++++++++ 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9111d2f9cb59c..921ccf977bdcf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,8 @@ -## v1.17.0-rc0 [2020-12-10] +## v1.17.0-rc1 [2020-12-16] #### Release Notes - Starlark plugins can now store state between runs using a global state variable -- #### Bugfixes @@ -13,6 +12,14 @@ - [#8498](https://github.com/influxdata/telegraf/pull/8498) `inputs.http_response` fixed network test - [#8276](https://github.com/influxdata/telegraf/pull/8276) `parsers.grok` Update grok package to support for field names containing '-' and '.' - [#8414](https://github.com/influxdata/telegraf/pull/8414) `inputs.bcache` Fix tests for Windows - part 1 + - [#8577](https://github.com/influxdata/telegraf/pull/8577) `inputs.ping` fix potential issue with race condition + - [#8562](https://github.com/influxdata/telegraf/pull/8562) `inputs.mqtt_consumer` fix issue with mqtt concurrent map write + - [#8574](https://github.com/influxdata/telegraf/pull/8574) `inputs.ecs` Remove duplicated field "revision" from ecs_task because it's already defined as a tag there + - [#8551](https://github.com/influxdata/telegraf/pull/8551) `inputs.socket_listener` fix crash when socket_listener receiving invalid data + - [#8564](https://github.com/influxdata/telegraf/pull/8564) `parsers.graphite` Graphite tags parser + - [#8472](https://github.com/influxdata/telegraf/pull/8472) `inputs.kube_inventory` Fixing issue with missing metrics when pod has only pending containers + - [#8542](https://github.com/influxdata/telegraf/pull/8542) `inputs.aerospike` fix edge case in aerospike plugin where an expected hex string was converted to integer if all digits + - [#8512](https://github.com/influxdata/telegraf/pull/8512) `inputs.kube_inventory` Update string parsing of allocatable cpu cores in kube_inventory #### Features @@ -39,19 +46,27 @@ - [#8056](https://github.com/influxdata/telegraf/pull/8056) `inputs.monit` Add response_time to monit plugin - [#8446](https://github.com/influxdata/telegraf/pull/8446) update to go 1.15.5 - [#8428](https://github.com/influxdata/telegraf/pull/8428) `aggregators.basicstats` Add rate and interval to the basicstats aggregator plugin + - [#8575](https://github.com/influxdata/telegraf/pull/8575) `inputs.win_services` Added Glob pattern matching for "Windows Services" plugin + - [#6132](https://github.com/influxdata/telegraf/pull/6132) `inputs.mysql` Add per user metrics to mysql input + - [#8500](https://github.com/influxdata/telegraf/pull/8500) `inputs.github` [inputs.github] Add query of pull-request statistics #### New Parser Plugins - [#7778](https://github.com/influxdata/telegraf/pull/7778) `parsers.prometheus` Add a parser plugin for prometheus +#### New Serializer Plugins + + - [#8360](https://github.com/influxdata/telegraf/pull/8360) `serializers.prometheusremotewrite` Add prometheus remote write serializer + #### New Input Plugins - [#8163](https://github.com/influxdata/telegraf/pull/8163) `inputs.riemann` Support Riemann-Protobuff Listener + - [#8488](https://github.com/influxdata/telegraf/pull/8488) `inputs.intel_powerstat` New Intel PowerStat input plugin #### New Output Plugins - [#8296](https://github.com/influxdata/telegraf/pull/8296) `outputs.yandex_cloud_monitoring` #8295 Initial Yandex.Cloud monitoring - - [#8202](https://github.com/influxdata/telegraf/pull/8202) `outputs.all` A new Logz.io output plugin + - [#8202](https://github.com/influxdata/telegraf/pull/8202) `outputs.logzio` A new Logz.io output plugin ## v1.16.3 [2020-12-01] diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 508f2fb3a7712..dc74540e4c6e1 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -3258,6 +3258,14 @@ # # ## Timeout for HTTP requests. # # http_timeout = "5s" +# +# ## List of additional fields to query. +# ## NOTE: Getting those fields might involve issuing additional API-calls, so please +# ## make sure you do not exceed the rate-limit of GitHub. +# ## +# ## Available fields are: +# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) +# # additional_fields = [] # # Read flattened metrics from one or more GrayLog HTTP endpoints @@ -4349,6 +4357,13 @@ # # perf_events_statements_limit = 250 # # perf_events_statements_time_limit = 86400 # +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME +# # gather_perf_sum_per_acc_per_event = false +# +# ## list of events to be gathered for gather_perf_sum_per_acc_per_event +# ## in case of empty list all events will be gathered +# # perf_summary_events = [] +# # ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) # ## example: interval_slow = "30m" # # interval_slow = "" From 717d329af8f4ad55fc7f0a8bbb917e6c50d26323 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 16 Dec 2020 17:04:42 -0500 Subject: [PATCH 134/761] Revert "disable flakey grok test for now" This reverts commit a96c8b49e4f7050bcf17e1948da47dee42ca5b91. (cherry picked from commit 115e25ea36b854d4e5f941596dfb965d103c1376) --- plugins/parsers/grok/parser_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index c918969d6fc60..5aaa0c967c1ce 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -1117,15 +1117,14 @@ func TestTrimRegression(t *testing.T) { } func TestAdvanceFieldName(t *testing.T) { - t.Skip("waiting for grok package fix") p := &Parser{ Patterns: []string{`rts=%{NUMBER:response-time.s} local=%{IP:local-ip} remote=%{IP:remote.ip}`}, } assert.NoError(t, p.Compile()) metricA, err := p.ParseLine(`rts=1.283 local=127.0.0.1 remote=10.0.0.1`) - assert.NoError(t, err) require.NotNil(t, metricA) + assert.NoError(t, err) assert.Equal(t, map[string]interface{}{ "response-time.s": "1.283", From 7ff96a7424b5589775cfa2f681b180da91ad1e1b Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 16 Dec 2020 17:04:54 -0500 Subject: [PATCH 135/761] Revert "Update grok package to support for field names containing '-' and '.' (#8276)" This reverts commit a5f3121f6d07977d0ea47cae360c0a989c9142ab. (cherry picked from commit 5c826e8a3254dc509012d018638a2e5f41a110f2) --- CHANGELOG.md | 1 - go.mod | 2 +- go.sum | 4 ++-- plugins/parsers/grok/parser_test.go | 19 ------------------- 4 files changed, 3 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 921ccf977bdcf..6bcf0a36a6b9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,6 @@ - [#8499](https://github.com/influxdata/telegraf/pull/8499) `processors.execd` Adding support for new lines in influx line protocol fields. - [#8254](https://github.com/influxdata/telegraf/pull/8254) `serializers.carbon2` Fix carbon2 tests - [#8498](https://github.com/influxdata/telegraf/pull/8498) `inputs.http_response` fixed network test - - [#8276](https://github.com/influxdata/telegraf/pull/8276) `parsers.grok` Update grok package to support for field names containing '-' and '.' - [#8414](https://github.com/influxdata/telegraf/pull/8414) `inputs.bcache` Fix tests for Windows - part 1 - [#8577](https://github.com/influxdata/telegraf/pull/8577) `inputs.ping` fix potential issue with race condition - [#8562](https://github.com/influxdata/telegraf/pull/8562) `inputs.mqtt_consumer` fix issue with mqtt concurrent map write diff --git a/go.mod b/go.mod index d222ff34e3e90..d0da1c1516390 100644 --- a/go.mod +++ b/go.mod @@ -126,7 +126,7 @@ require ( github.com/tidwall/gjson v1.6.0 github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect - github.com/vjeantet/grok v1.0.1-0.20180213041522-5a86c829f3c3 + github.com/vjeantet/grok v1.0.0 github.com/vmware/govmomi v0.19.0 github.com/wavefronthq/wavefront-sdk-go v0.9.2 github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf diff --git a/go.sum b/go.sum index 4aaf5103cf7af..a0d9277dc9574 100644 --- a/go.sum +++ b/go.sum @@ -597,8 +597,8 @@ github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Su github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vjeantet/grok v1.0.1-0.20180213041522-5a86c829f3c3 h1:T3ATR84Xk4b9g0QbGgLJVpRYWm/jvixqLTWRsR108sI= -github.com/vjeantet/grok v1.0.1-0.20180213041522-5a86c829f3c3/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= +github.com/vjeantet/grok v1.0.0 h1:uxMqatJP6MOFXsj6C1tZBnqqAThQEeqnizUZ48gSJQQ= +github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY= github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk= diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 5aaa0c967c1ce..1c409e8a542b6 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -1115,22 +1115,3 @@ func TestTrimRegression(t *testing.T) { ) require.Equal(t, expected, actual) } - -func TestAdvanceFieldName(t *testing.T) { - p := &Parser{ - Patterns: []string{`rts=%{NUMBER:response-time.s} local=%{IP:local-ip} remote=%{IP:remote.ip}`}, - } - assert.NoError(t, p.Compile()) - - metricA, err := p.ParseLine(`rts=1.283 local=127.0.0.1 remote=10.0.0.1`) - require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, - map[string]interface{}{ - "response-time.s": "1.283", - "local-ip": "127.0.0.1", - "remote.ip": "10.0.0.1", - }, - metricA.Fields()) - assert.Equal(t, map[string]string{}, metricA.Tags()) -} From ee91b4856b53dd37d7a9b8eb86556fbf8f22489b Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 17 Dec 2020 08:21:45 -0800 Subject: [PATCH 136/761] update inputs.influxdb readme (#8569) * update inputs.influxdb readme * remove duplicate --- plugins/inputs/influxdb/README.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/influxdb/README.md b/plugins/inputs/influxdb/README.md index e17bd7072438b..9a2db484601fd 100644 --- a/plugins/inputs/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -1,6 +1,8 @@ # InfluxDB Input Plugin -The InfluxDB plugin will collect metrics on the given InfluxDB servers. +The InfluxDB plugin will collect metrics on the given InfluxDB servers. Read our +[documentation](https://docs.influxdata.com/platform/monitoring/influxdata-platform/tools/measurements-internal/) +for detailed information about `influxdb` metrics. This plugin can also gather metrics from endpoints that expose InfluxDB-formatted endpoints. See below for more information. @@ -41,7 +43,13 @@ InfluxDB-formatted endpoints. See below for more information. - influxdb - n_shards: The total number of shards in the specified database. +- influxdb_ae _(Enterprise Only)_ : Statistics related to the Anti-Entropy (AE) engine in InfluxDB Enterprise clusters. +- influxdb_cluster _(Enterprise Only)_ : Statistics related to the clustering features of the data nodes in InfluxDB Enterprise clusters. +- influxdb_cq: The metrics related to continuous queries (CQs). - influxdb_database: The database metrics are being collected from. +- influxdb_hh _(Enterprise Only)_ : Events resulting in new hinted handoff (HH) processors in InfluxDB Enterprise clusters. +- influxdb_hh_database _(Enterprise Only)_ : Aggregates all hinted handoff queues for a single database and node. +- influxdb_hh_processor _(Enterprise Only)_ : Statistics stored for a single queue (shard). - influxdb_httpd: The URL to listen for network requests. By default, `http://localhost:8086/debug/var`. - influxdb_measurement: The measurement that metrics are collected from. - influxdb_memstats: Statistics about the memory allocator in the specified database. @@ -71,9 +79,14 @@ InfluxDB-formatted endpoints. See below for more information. - mspan_sys: The bytes of memory obtained from the OS for mspan. - mcache_inuse: The bytes of allocated mcache structures. - last_gc: Time the last garbage collection finished, as nanoseconds since 1970 (the UNIX epoch). +- influxdb_queryExecutor: Query Executor metrics of the InfluxDB engine. +- influxdb_rpc _(Enterprise Only)_ : Statistics are related to the use of RPC calls within InfluxDB Enterprise clusters. +- influxdb_runtime: The shard metrics are collected from. - influxdb_shard: The shard metrics are collected from. - influxdb_subscriber: The InfluxDB subscription that metrics are collected from. - influxdb_tsm1_cache: The TSM cache that metrics are collected from. +- influxdb_tsm1_engine: The TSM storage engine that metrics are collected from. +- influxdb_tsm1_filestore: The TSM file store that metrics are collected from. - influxdb_tsm1_wal: The TSM Write Ahead Log (WAL) that metrics are collected from. - influxdb_write: The total writes to the specified database. From 94eb8f2e429c66c78c3bbef425e0fb7e13a2ff96 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Thu, 17 Dec 2020 19:32:25 -0500 Subject: [PATCH 137/761] Add wildcard tags json parser support (#8579) --- plugins/parsers/json/README.md | 6 +- plugins/parsers/json/parser.go | 44 +-- plugins/parsers/json/parser_test.go | 419 +++++++++++++++++++++++++++- 3 files changed, 446 insertions(+), 23 deletions(-) diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index 01ddf673eec4d..682a0c62b56cb 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -30,10 +30,12 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. json_query = "" ## Tag keys is an array of keys that should be added as tags. Matching keys - ## are no longer saved as fields. + ## are no longer saved as fields. Supports wildcard glob matching. tag_keys = [ "my_tag_1", - "my_tag_2" + "my_tag_2", + "tags_*", + "tag*" ] ## Array of glob pattern strings or booleans keys that should be added as string fields. diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index bd9dee869170f..e8a748e7052db 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -36,7 +36,7 @@ type Config struct { type Parser struct { metricName string - tagKeys []string + tagKeys filter.Filter stringFields filter.Filter nameKey string query string @@ -53,9 +53,14 @@ func New(config *Config) (*Parser, error) { return nil, err } + tagKeyFilter, err := filter.Compile(config.TagKeys) + if err != nil { + return nil, err + } + return &Parser{ metricName: config.MetricName, - tagKeys: config.TagKeys, + tagKeys: tagKeyFilter, nameKey: config.NameKey, stringFields: stringFilter, query: config.Query, @@ -104,7 +109,7 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ( name := p.metricName - //checks if json_name_key is set + // checks if json_name_key is set if p.nameKey != "" { switch field := f.Fields[p.nameKey].(type) { case string: @@ -112,7 +117,7 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ( } } - //if time key is specified, set timestamp to it + // if time key is specified, set timestamp to it if p.timeKey != "" { if p.timeFormat == "" { err := fmt.Errorf("use of 'json_time_key' requires 'json_time_format'") @@ -131,7 +136,7 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ( delete(f.Fields, p.timeKey) - //if the year is 0, set to current year + // if the year is 0, set to current year if timestamp.Year() == 0 { timestamp = timestamp.AddDate(time.Now().Year(), 0, 0) } @@ -145,32 +150,37 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ( return []telegraf.Metric{metric}, nil } -//will take in field map with strings and bools, -//search for TagKeys that match fieldnames and add them to tags -//will delete any strings/bools that shouldn't be fields -//assumes that any non-numeric values in TagKeys should be displayed as tags +// will take in field map with strings and bools, +// search for TagKeys that match fieldnames and add them to tags +// will delete any strings/bools that shouldn't be fields +// assumes that any non-numeric values in TagKeys should be displayed as tags func (p *Parser) switchFieldToTag(tags map[string]string, fields map[string]interface{}) (map[string]string, map[string]interface{}) { - for _, name := range p.tagKeys { - //switch any fields in tagkeys into tags - if fields[name] == nil { + + for name, value := range fields { + if p.tagKeys == nil { + continue + } + // skip switch statement if tagkey doesn't match fieldname + if !p.tagKeys.Match(name) { continue } - switch value := fields[name].(type) { + // switch any fields in tagkeys into tags + switch t := value.(type) { case string: - tags[name] = value + tags[name] = t delete(fields, name) case bool: - tags[name] = strconv.FormatBool(value) + tags[name] = strconv.FormatBool(t) delete(fields, name) case float64: - tags[name] = strconv.FormatFloat(value, 'f', -1, 64) + tags[name] = strconv.FormatFloat(t, 'f', -1, 64) delete(fields, name) default: log.Printf("E! [parsers.json] Unrecognized type %T", value) } } - //remove any additional string/bool values from fields + // remove any additional string/bool values from fields for fk := range fields { switch fields[fk].(type) { case string, bool: diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index 31c507e7517f7..525c8fd2804c2 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -24,10 +24,14 @@ const validJSONTags = ` { "a": 5, "b": { - "c": 6 + "c": 6 }, "mytag": "foobar", - "othertag": "baz" + "othertag": "baz", + "tags_object": { + "mytag": "foobar", + "othertag": "baz" + } } ` @@ -39,7 +43,16 @@ const validJSONArrayTags = ` "c": 6 }, "mytag": "foo", - "othertag": "baz" + "othertag": "baz", + "tags_array": [ + { + "mytag": "foo" + }, + { + "othertag": "baz" + } + ], + "anothert": "foo" }, { "a": 7, @@ -47,8 +60,17 @@ const validJSONArrayTags = ` "c": 8 }, "mytag": "bar", + "othertag": "baz", + "tags_array": [ + { + "mytag": "bar" + }, + { "othertag": "baz" -} + } + ], + "anothert": "bar" + } ] ` @@ -948,3 +970,392 @@ func TestParse(t *testing.T) { }) } } + +func TestParseWithWildcardTagKeys(t *testing.T) { + var tests = []struct { + name string + config *Config + input []byte + expected []telegraf.Metric + }{ + { + name: "wildcard matching with tags nested within object", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"tags_object_*"}, + }, + input: []byte(validJSONTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_test", + map[string]string{ + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "wildcard matching with keys containing tag", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"*tag"}, + }, + input: []byte(validJSONTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_test", + map[string]string{ + "mytag": "foobar", + "othertag": "baz", + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "strings not matching tag keys are still also ignored", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"wrongtagkey", "tags_object_*"}, + }, + input: []byte(validJSONTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_test", + map[string]string{ + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "single tag key is also found and applied", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"mytag", "tags_object_*"}, + }, + input: []byte(validJSONTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_test", + map[string]string{ + "mytag": "foobar", + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser, err := New(tt.config) + require.NoError(t, err) + + actual, err := parser.Parse(tt.input) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) + }) + } +} + +func TestParseLineWithWildcardTagKeys(t *testing.T) { + var tests = []struct { + name string + config *Config + input string + expected telegraf.Metric + }{ + { + name: "wildcard matching with tags nested within object", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"tags_object_*"}, + }, + input: validJSONTags, + expected: testutil.MustMetric( + "json_test", + map[string]string{ + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + { + name: "wildcard matching with keys containing tag", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"*tag"}, + }, + input: validJSONTags, + expected: testutil.MustMetric( + "json_test", + map[string]string{ + "mytag": "foobar", + "othertag": "baz", + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + { + name: "strings not matching tag keys are ignored", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"wrongtagkey", "tags_object_*"}, + }, + input: validJSONTags, + expected: testutil.MustMetric( + "json_test", + map[string]string{ + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + { + name: "single tag key is also found and applied", + config: &Config{ + MetricName: "json_test", + TagKeys: []string{"mytag", "tags_object_*"}, + }, + input: validJSONTags, + expected: testutil.MustMetric( + "json_test", + map[string]string{ + "mytag": "foobar", + "tags_object_mytag": "foobar", + "tags_object_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser, err := New(tt.config) + require.NoError(t, err) + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual, testutil.IgnoreTime()) + }) + } +} + +func TestParseArrayWithWildcardTagKeys(t *testing.T) { + var tests = []struct { + name string + config *Config + input []byte + expected []telegraf.Metric + }{ + { + name: "wildcard matching with keys containing tag within array works", + config: &Config{ + MetricName: "json_array_test", + TagKeys: []string{"*tag"}, + }, + input: []byte(validJSONArrayTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_array_test", + map[string]string{ + "mytag": "foo", + "othertag": "baz", + "tags_array_0_mytag": "foo", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "json_array_test", + map[string]string{ + "mytag": "bar", + "othertag": "baz", + "tags_array_0_mytag": "bar", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(7), + "b_c": float64(8), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: " wildcard matching with tags nested array within object works", + config: &Config{ + MetricName: "json_array_test", + TagKeys: []string{"tags_array_*"}, + }, + input: []byte(validJSONArrayTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_array_test", + map[string]string{ + "tags_array_0_mytag": "foo", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "json_array_test", + map[string]string{ + "tags_array_0_mytag": "bar", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(7), + "b_c": float64(8), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "strings not matching tag keys are still also ignored", + config: &Config{ + MetricName: "json_array_test", + TagKeys: []string{"mytag", "*tag"}, + }, + input: []byte(validJSONArrayTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_array_test", + map[string]string{ + "mytag": "foo", + "othertag": "baz", + "tags_array_0_mytag": "foo", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "json_array_test", + map[string]string{ + "mytag": "bar", + "othertag": "baz", + "tags_array_0_mytag": "bar", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(7), + "b_c": float64(8), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "single tag key is also found and applied", + config: &Config{ + MetricName: "json_array_test", + TagKeys: []string{"anothert", "*tag"}, + }, + input: []byte(validJSONArrayTags), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json_array_test", + map[string]string{ + "anothert": "foo", + "mytag": "foo", + "othertag": "baz", + "tags_array_0_mytag": "foo", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(5), + "b_c": float64(6), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "json_array_test", + map[string]string{ + "anothert": "bar", + "mytag": "bar", + "othertag": "baz", + "tags_array_0_mytag": "bar", + "tags_array_1_othertag": "baz", + }, + map[string]interface{}{ + "a": float64(7), + "b_c": float64(8), + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser, err := New(tt.config) + require.NoError(t, err) + + actual, err := parser.Parse(tt.input) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) + }) + } + +} From 04e62e3d2348c649413fde3a1606ded5e9841037 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Thu, 17 Dec 2020 19:32:49 -0500 Subject: [PATCH 138/761] changed TestThreadStats acc to wait for 2 and added warning comment (#8592) --- plugins/inputs/suricata/suricata_test.go | 2 +- testutil/accumulator.go | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index 9c9c2ddc3694c..3308f28b5715f 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -119,7 +119,7 @@ func TestThreadStats(t *testing.T) { c.Write([]byte(ex3)) c.Write([]byte("\n")) c.Close() - acc.Wait(1) + acc.Wait(2) expected := []telegraf.Metric{ testutil.MustMetric( diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 6e754da45a220..c02f5092c9dd8 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -54,6 +54,8 @@ func (a *Accumulator) NMetrics() uint64 { return atomic.LoadUint64(&a.nMetrics) } +// GetTelegrafMetrics returns all the metrics collected by the accumulator +// If you are getting race conditions here then you are not waiting for all of your metrics to arrive: see Wait() func (a *Accumulator) GetTelegrafMetrics() []telegraf.Metric { metrics := []telegraf.Metric{} for _, m := range a.Metrics { From 7bf8cdb8e3a5df374d4bc46357ca00eb12c46099 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Fri, 18 Dec 2020 18:04:02 +0100 Subject: [PATCH 139/761] [ethtool] interface_up field added (#8590) Co-authored-by: Pawel Zak Co-authored-by: Sven Rebhan <36194019+srebhan@users.noreply.github.com> --- plugins/inputs/ethtool/README.md | 12 ++++----- plugins/inputs/ethtool/ethtool.go | 7 +++--- plugins/inputs/ethtool/ethtool_linux.go | 11 ++++----- plugins/inputs/ethtool/ethtool_test.go | 33 +++++++++++++------------ 4 files changed, 32 insertions(+), 31 deletions(-) diff --git a/plugins/inputs/ethtool/README.md b/plugins/inputs/ethtool/README.md index 3f397cdfbe36f..1b36001d9b74c 100644 --- a/plugins/inputs/ethtool/README.md +++ b/plugins/inputs/ethtool/README.md @@ -1,6 +1,6 @@ # Ethtool Input Plugin -The ethtool input plugin pulls ethernet device stats. Fields pulled will depend on the network device and driver +The ethtool input plugin pulls ethernet device stats. Fields pulled will depend on the network device and driver. ### Configuration: @@ -14,20 +14,20 @@ The ethtool input plugin pulls ethernet device stats. Fields pulled will depend # interface_exclude = ["eth1"] ``` -Interfaces can be included or ignored using +Interfaces can be included or ignored using: - `interface_include` - `interface_exclude` -Note that loopback interfaces will be automatically ignored +Note that loopback interfaces will be automatically ignored. ### Metrics: -Metrics are dependant on the network device and driver +Metrics are dependent on the network device and driver. ### Example Output: ``` -ethtool,driver=igb,host=test01,interface=mgmt0 tx_queue_1_packets=280782i,rx_queue_5_csum_err=0i,tx_queue_4_restart=0i,tx_multicast=7i,tx_queue_1_bytes=39674885i,rx_queue_2_alloc_failed=0i,tx_queue_5_packets=173970i,tx_single_coll_ok=0i,rx_queue_1_drops=0i,tx_queue_2_restart=0i,tx_aborted_errors=0i,rx_queue_6_csum_err=0i,tx_queue_5_restart=0i,tx_queue_4_bytes=64810835i,tx_abort_late_coll=0i,tx_queue_4_packets=109102i,os2bmc_tx_by_bmc=0i,tx_bytes=427527435i,tx_queue_7_packets=66665i,dropped_smbus=0i,rx_queue_0_csum_err=0i,tx_flow_control_xoff=0i,rx_packets=25926536i,rx_queue_7_csum_err=0i,rx_queue_3_bytes=84326060i,rx_multicast=83771i,rx_queue_4_alloc_failed=0i,rx_queue_3_drops=0i,rx_queue_3_csum_err=0i,rx_errors=0i,tx_errors=0i,tx_queue_6_packets=183236i,rx_broadcast=24378893i,rx_queue_7_packets=88680i,tx_dropped=0i,rx_frame_errors=0i,tx_queue_3_packets=161045i,tx_packets=1257017i,rx_queue_1_csum_err=0i,tx_window_errors=0i,tx_dma_out_of_sync=0i,rx_length_errors=0i,rx_queue_5_drops=0i,tx_timeout_count=0i,rx_queue_4_csum_err=0i,rx_flow_control_xon=0i,tx_heartbeat_errors=0i,tx_flow_control_xon=0i,collisions=0i,tx_queue_0_bytes=29465801i,rx_queue_6_drops=0i,rx_queue_0_alloc_failed=0i,tx_queue_1_restart=0i,rx_queue_0_drops=0i,tx_broadcast=9i,tx_carrier_errors=0i,tx_queue_7_bytes=13777515i,tx_queue_7_restart=0i,rx_queue_5_bytes=50732006i,rx_queue_7_bytes=35744457i,tx_deferred_ok=0i,tx_multi_coll_ok=0i,rx_crc_errors=0i,rx_fifo_errors=0i,rx_queue_6_alloc_failed=0i,tx_queue_2_packets=175206i,tx_queue_0_packets=107011i,rx_queue_4_bytes=201364548i,rx_queue_6_packets=372573i,os2bmc_rx_by_host=0i,multicast=83771i,rx_queue_4_drops=0i,rx_queue_5_packets=130535i,rx_queue_6_bytes=139488035i,tx_fifo_errors=0i,tx_queue_5_bytes=84899130i,rx_queue_0_packets=24529563i,rx_queue_3_alloc_failed=0i,rx_queue_7_drops=0i,tx_queue_6_bytes=96288614i,tx_queue_2_bytes=22132949i,tx_tcp_seg_failed=0i,rx_queue_1_bytes=246703840i,rx_queue_0_bytes=1506870738i,tx_queue_0_restart=0i,rx_queue_2_bytes=111344804i,tx_tcp_seg_good=0i,tx_queue_3_restart=0i,rx_no_buffer_count=0i,rx_smbus=0i,rx_queue_1_packets=273865i,rx_over_errors=0i,os2bmc_tx_by_host=0i,rx_queue_1_alloc_failed=0i,rx_queue_7_alloc_failed=0i,rx_short_length_errors=0i,tx_hwtstamp_timeouts=0i,tx_queue_6_restart=0i,rx_queue_2_packets=207136i,tx_queue_3_bytes=70391970i,rx_queue_3_packets=112007i,rx_queue_4_packets=212177i,tx_smbus=0i,rx_long_byte_count=2480280632i,rx_queue_2_csum_err=0i,rx_missed_errors=0i,rx_bytes=2480280632i,rx_queue_5_alloc_failed=0i,rx_queue_2_drops=0i,os2bmc_rx_by_bmc=0i,rx_align_errors=0i,rx_long_length_errors=0i,rx_hwtstamp_cleared=0i,rx_flow_control_xoff=0i 1564658080000000000 -ethtool,driver=igb,host=test02,interface=mgmt0 rx_queue_2_bytes=111344804i,tx_queue_3_bytes=70439858i,multicast=83771i,rx_broadcast=24378975i,tx_queue_0_packets=107011i,rx_queue_6_alloc_failed=0i,rx_queue_6_drops=0i,rx_hwtstamp_cleared=0i,tx_window_errors=0i,tx_tcp_seg_good=0i,rx_queue_1_drops=0i,tx_queue_1_restart=0i,rx_queue_7_csum_err=0i,rx_no_buffer_count=0i,tx_queue_1_bytes=39675245i,tx_queue_5_bytes=84899130i,tx_broadcast=9i,rx_queue_1_csum_err=0i,tx_flow_control_xoff=0i,rx_queue_6_csum_err=0i,tx_timeout_count=0i,os2bmc_tx_by_bmc=0i,rx_queue_6_packets=372577i,rx_queue_0_alloc_failed=0i,tx_flow_control_xon=0i,rx_queue_2_drops=0i,tx_queue_2_packets=175206i,rx_queue_3_csum_err=0i,tx_abort_late_coll=0i,tx_queue_5_restart=0i,tx_dropped=0i,rx_queue_2_alloc_failed=0i,tx_multi_coll_ok=0i,rx_queue_1_packets=273865i,rx_flow_control_xon=0i,tx_single_coll_ok=0i,rx_length_errors=0i,rx_queue_7_bytes=35744457i,rx_queue_4_alloc_failed=0i,rx_queue_6_bytes=139488395i,rx_queue_2_csum_err=0i,rx_long_byte_count=2480288216i,rx_queue_1_alloc_failed=0i,tx_queue_0_restart=0i,rx_queue_0_csum_err=0i,tx_queue_2_bytes=22132949i,rx_queue_5_drops=0i,tx_dma_out_of_sync=0i,rx_queue_3_drops=0i,rx_queue_4_packets=212177i,tx_queue_6_restart=0i,rx_packets=25926650i,rx_queue_7_packets=88680i,rx_frame_errors=0i,rx_queue_3_bytes=84326060i,rx_short_length_errors=0i,tx_queue_7_bytes=13777515i,rx_queue_3_alloc_failed=0i,tx_queue_6_packets=183236i,rx_queue_0_drops=0i,rx_multicast=83771i,rx_queue_2_packets=207136i,rx_queue_5_csum_err=0i,rx_queue_5_packets=130535i,rx_queue_7_alloc_failed=0i,tx_smbus=0i,tx_queue_3_packets=161081i,rx_queue_7_drops=0i,tx_queue_2_restart=0i,tx_multicast=7i,tx_fifo_errors=0i,tx_queue_3_restart=0i,rx_long_length_errors=0i,tx_queue_6_bytes=96288614i,tx_queue_1_packets=280786i,tx_tcp_seg_failed=0i,rx_align_errors=0i,tx_errors=0i,rx_crc_errors=0i,rx_queue_0_packets=24529673i,rx_flow_control_xoff=0i,tx_queue_0_bytes=29465801i,rx_over_errors=0i,rx_queue_4_drops=0i,os2bmc_rx_by_bmc=0i,rx_smbus=0i,dropped_smbus=0i,tx_hwtstamp_timeouts=0i,rx_errors=0i,tx_queue_4_packets=109102i,tx_carrier_errors=0i,tx_queue_4_bytes=64810835i,tx_queue_4_restart=0i,rx_queue_4_csum_err=0i,tx_queue_7_packets=66665i,tx_aborted_errors=0i,rx_missed_errors=0i,tx_bytes=427575843i,collisions=0i,rx_queue_1_bytes=246703840i,rx_queue_5_bytes=50732006i,rx_bytes=2480288216i,os2bmc_rx_by_host=0i,rx_queue_5_alloc_failed=0i,rx_queue_3_packets=112007i,tx_deferred_ok=0i,os2bmc_tx_by_host=0i,tx_heartbeat_errors=0i,rx_queue_0_bytes=1506877506i,tx_queue_7_restart=0i,tx_packets=1257057i,rx_queue_4_bytes=201364548i,rx_fifo_errors=0i,tx_queue_5_packets=173970i 1564658090000000000 +ethtool,driver=igb,host=test01,interface=mgmt0 tx_queue_1_packets=280782i,rx_queue_5_csum_err=0i,tx_queue_4_restart=0i,tx_multicast=7i,tx_queue_1_bytes=39674885i,rx_queue_2_alloc_failed=0i,tx_queue_5_packets=173970i,tx_single_coll_ok=0i,rx_queue_1_drops=0i,tx_queue_2_restart=0i,tx_aborted_errors=0i,rx_queue_6_csum_err=0i,tx_queue_5_restart=0i,tx_queue_4_bytes=64810835i,tx_abort_late_coll=0i,tx_queue_4_packets=109102i,os2bmc_tx_by_bmc=0i,tx_bytes=427527435i,tx_queue_7_packets=66665i,dropped_smbus=0i,rx_queue_0_csum_err=0i,tx_flow_control_xoff=0i,rx_packets=25926536i,rx_queue_7_csum_err=0i,rx_queue_3_bytes=84326060i,rx_multicast=83771i,rx_queue_4_alloc_failed=0i,rx_queue_3_drops=0i,rx_queue_3_csum_err=0i,rx_errors=0i,tx_errors=0i,tx_queue_6_packets=183236i,rx_broadcast=24378893i,rx_queue_7_packets=88680i,tx_dropped=0i,rx_frame_errors=0i,tx_queue_3_packets=161045i,tx_packets=1257017i,rx_queue_1_csum_err=0i,tx_window_errors=0i,tx_dma_out_of_sync=0i,rx_length_errors=0i,rx_queue_5_drops=0i,tx_timeout_count=0i,rx_queue_4_csum_err=0i,rx_flow_control_xon=0i,tx_heartbeat_errors=0i,tx_flow_control_xon=0i,collisions=0i,tx_queue_0_bytes=29465801i,rx_queue_6_drops=0i,rx_queue_0_alloc_failed=0i,tx_queue_1_restart=0i,rx_queue_0_drops=0i,tx_broadcast=9i,tx_carrier_errors=0i,tx_queue_7_bytes=13777515i,tx_queue_7_restart=0i,rx_queue_5_bytes=50732006i,rx_queue_7_bytes=35744457i,tx_deferred_ok=0i,tx_multi_coll_ok=0i,rx_crc_errors=0i,rx_fifo_errors=0i,rx_queue_6_alloc_failed=0i,tx_queue_2_packets=175206i,tx_queue_0_packets=107011i,rx_queue_4_bytes=201364548i,rx_queue_6_packets=372573i,os2bmc_rx_by_host=0i,multicast=83771i,rx_queue_4_drops=0i,rx_queue_5_packets=130535i,rx_queue_6_bytes=139488035i,tx_fifo_errors=0i,tx_queue_5_bytes=84899130i,rx_queue_0_packets=24529563i,rx_queue_3_alloc_failed=0i,rx_queue_7_drops=0i,tx_queue_6_bytes=96288614i,tx_queue_2_bytes=22132949i,tx_tcp_seg_failed=0i,rx_queue_1_bytes=246703840i,rx_queue_0_bytes=1506870738i,tx_queue_0_restart=0i,rx_queue_2_bytes=111344804i,tx_tcp_seg_good=0i,tx_queue_3_restart=0i,rx_no_buffer_count=0i,rx_smbus=0i,rx_queue_1_packets=273865i,rx_over_errors=0i,os2bmc_tx_by_host=0i,rx_queue_1_alloc_failed=0i,rx_queue_7_alloc_failed=0i,rx_short_length_errors=0i,tx_hwtstamp_timeouts=0i,tx_queue_6_restart=0i,rx_queue_2_packets=207136i,tx_queue_3_bytes=70391970i,rx_queue_3_packets=112007i,rx_queue_4_packets=212177i,tx_smbus=0i,rx_long_byte_count=2480280632i,rx_queue_2_csum_err=0i,rx_missed_errors=0i,rx_bytes=2480280632i,rx_queue_5_alloc_failed=0i,rx_queue_2_drops=0i,os2bmc_rx_by_bmc=0i,rx_align_errors=0i,rx_long_length_errors=0i,interface_up=1i,rx_hwtstamp_cleared=0i,rx_flow_control_xoff=0i 1564658080000000000 +ethtool,driver=igb,host=test02,interface=mgmt0 rx_queue_2_bytes=111344804i,tx_queue_3_bytes=70439858i,multicast=83771i,rx_broadcast=24378975i,tx_queue_0_packets=107011i,rx_queue_6_alloc_failed=0i,rx_queue_6_drops=0i,rx_hwtstamp_cleared=0i,tx_window_errors=0i,tx_tcp_seg_good=0i,rx_queue_1_drops=0i,tx_queue_1_restart=0i,rx_queue_7_csum_err=0i,rx_no_buffer_count=0i,tx_queue_1_bytes=39675245i,tx_queue_5_bytes=84899130i,tx_broadcast=9i,rx_queue_1_csum_err=0i,tx_flow_control_xoff=0i,rx_queue_6_csum_err=0i,tx_timeout_count=0i,os2bmc_tx_by_bmc=0i,rx_queue_6_packets=372577i,rx_queue_0_alloc_failed=0i,tx_flow_control_xon=0i,rx_queue_2_drops=0i,tx_queue_2_packets=175206i,rx_queue_3_csum_err=0i,tx_abort_late_coll=0i,tx_queue_5_restart=0i,tx_dropped=0i,rx_queue_2_alloc_failed=0i,tx_multi_coll_ok=0i,rx_queue_1_packets=273865i,rx_flow_control_xon=0i,tx_single_coll_ok=0i,rx_length_errors=0i,rx_queue_7_bytes=35744457i,rx_queue_4_alloc_failed=0i,rx_queue_6_bytes=139488395i,rx_queue_2_csum_err=0i,rx_long_byte_count=2480288216i,rx_queue_1_alloc_failed=0i,tx_queue_0_restart=0i,rx_queue_0_csum_err=0i,tx_queue_2_bytes=22132949i,rx_queue_5_drops=0i,tx_dma_out_of_sync=0i,rx_queue_3_drops=0i,rx_queue_4_packets=212177i,tx_queue_6_restart=0i,rx_packets=25926650i,rx_queue_7_packets=88680i,rx_frame_errors=0i,rx_queue_3_bytes=84326060i,rx_short_length_errors=0i,tx_queue_7_bytes=13777515i,rx_queue_3_alloc_failed=0i,tx_queue_6_packets=183236i,rx_queue_0_drops=0i,rx_multicast=83771i,rx_queue_2_packets=207136i,rx_queue_5_csum_err=0i,rx_queue_5_packets=130535i,rx_queue_7_alloc_failed=0i,tx_smbus=0i,tx_queue_3_packets=161081i,rx_queue_7_drops=0i,tx_queue_2_restart=0i,tx_multicast=7i,tx_fifo_errors=0i,tx_queue_3_restart=0i,rx_long_length_errors=0i,tx_queue_6_bytes=96288614i,tx_queue_1_packets=280786i,tx_tcp_seg_failed=0i,rx_align_errors=0i,tx_errors=0i,rx_crc_errors=0i,rx_queue_0_packets=24529673i,rx_flow_control_xoff=0i,tx_queue_0_bytes=29465801i,rx_over_errors=0i,rx_queue_4_drops=0i,os2bmc_rx_by_bmc=0i,rx_smbus=0i,dropped_smbus=0i,tx_hwtstamp_timeouts=0i,rx_errors=0i,tx_queue_4_packets=109102i,tx_carrier_errors=0i,tx_queue_4_bytes=64810835i,tx_queue_4_restart=0i,rx_queue_4_csum_err=0i,tx_queue_7_packets=66665i,tx_aborted_errors=0i,rx_missed_errors=0i,tx_bytes=427575843i,collisions=0i,rx_queue_1_bytes=246703840i,rx_queue_5_bytes=50732006i,rx_bytes=2480288216i,os2bmc_rx_by_host=0i,rx_queue_5_alloc_failed=0i,rx_queue_3_packets=112007i,tx_deferred_ok=0i,os2bmc_tx_by_host=0i,tx_heartbeat_errors=0i,rx_queue_0_bytes=1506877506i,tx_queue_7_restart=0i,tx_packets=1257057i,rx_queue_4_bytes=201364548i,interface_up=0i,rx_fifo_errors=0i,tx_queue_5_packets=173970i 1564658090000000000 ``` diff --git a/plugins/inputs/ethtool/ethtool.go b/plugins/inputs/ethtool/ethtool.go index 3f8f8e15618a2..0978bef837383 100644 --- a/plugins/inputs/ethtool/ethtool.go +++ b/plugins/inputs/ethtool/ethtool.go @@ -27,9 +27,10 @@ type Ethtool struct { } const ( - pluginName = "ethtool" - tagInterface = "interface" - tagDriverName = "driver" + pluginName = "ethtool" + tagInterface = "interface" + tagDriverName = "driver" + fieldInterfaceUp = "interface_up" sampleConfig = ` ## List of interfaces to pull metrics for diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go index b8c9312cbe309..13dabd2f8a6b6 100644 --- a/plugins/inputs/ethtool/ethtool_linux.go +++ b/plugins/inputs/ethtool/ethtool_linux.go @@ -18,7 +18,6 @@ type CommandEthtool struct { } func (e *Ethtool) Gather(acc telegraf.Accumulator) error { - // Get the list of interfaces interfaces, err := e.command.Interfaces() if err != nil { @@ -35,7 +34,6 @@ func (e *Ethtool) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup for _, iface := range interfaces { - // Check this isn't a loop back and that its matched by the filter if (iface.Flags&net.FlagLoopback == 0) && interfaceFilter.Match(iface.Name) { wg.Add(1) @@ -59,7 +57,6 @@ func (e *Ethtool) Init() error { // Gather the stats for the interface. func (e *Ethtool) gatherEthtoolStats(iface net.Interface, acc telegraf.Accumulator) { - tags := make(map[string]string) tags[tagInterface] = iface.Name @@ -80,6 +77,7 @@ func (e *Ethtool) gatherEthtoolStats(iface net.Interface, acc telegraf.Accumulat return } + fields[fieldInterfaceUp] = e.interfaceUp(iface) for k, v := range stats { fields[k] = v } @@ -87,12 +85,15 @@ func (e *Ethtool) gatherEthtoolStats(iface net.Interface, acc telegraf.Accumulat acc.AddFields(pluginName, fields, tags) } +func (e *Ethtool) interfaceUp(iface net.Interface) bool { + return (iface.Flags & net.FlagUp) != 0 +} + func NewCommandEthtool() *CommandEthtool { return &CommandEthtool{} } func (c *CommandEthtool) Init() error { - if c.ethtool != nil { return nil } @@ -114,7 +115,6 @@ func (c *CommandEthtool) Stats(intf string) (map[string]uint64, error) { } func (c *CommandEthtool) Interfaces() ([]net.Interface, error) { - // Get the list of interfaces interfaces, err := net.Interfaces() if err != nil { @@ -125,7 +125,6 @@ func (c *CommandEthtool) Interfaces() ([]net.Interface, error) { } func init() { - inputs.Add(pluginName, func() telegraf.Input { return &Ethtool{ InterfaceInclude: []string{}, diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go index d281644a51ed0..ac5527733ce73 100644 --- a/plugins/inputs/ethtool/ethtool_test.go +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -15,10 +15,11 @@ var command *Ethtool var interfaceMap map[string]*InterfaceMock type InterfaceMock struct { - Name string - DriverName string - Stat map[string]uint64 - LoopBack bool + Name string + DriverName string + Stat map[string]uint64 + LoopBack bool + InterfaceUp bool } type CommandEthtoolMock struct { @@ -42,11 +43,14 @@ func (c *CommandEthtoolMock) DriverName(intf string) (driverName string, err err func (c *CommandEthtoolMock) Interfaces() ([]net.Interface, error) { interfaceNames := make([]net.Interface, 0) for k, v := range c.InterfaceMap { - - // Whether to set the flag to loopback - flag := net.FlagUp + var flag net.Flags + // When interface is up + if v.InterfaceUp { + flag |= net.FlagUp + } + // For loopback interface if v.LoopBack { - flag = net.FlagLoopback + flag |= net.FlagLoopback } // Create a dummy interface @@ -72,10 +76,10 @@ func (c *CommandEthtoolMock) Stats(intf string) (stat map[string]uint64, err err } func setup() { - interfaceMap = make(map[string]*InterfaceMock) eth1Stat := map[string]uint64{ + "interface_up": 1, "port_rx_1024_to_15xx": 25167245, "port_rx_128_to_255": 1573526387, "port_rx_15xx_to_jumbo": 137819058, @@ -173,10 +177,11 @@ func setup() { "tx_tso_fallbacks": 0, "tx_tso_long_headers": 0, } - eth1 := &InterfaceMock{"eth1", "driver1", eth1Stat, false} + eth1 := &InterfaceMock{"eth1", "driver1", eth1Stat, false, true} interfaceMap[eth1.Name] = eth1 eth2Stat := map[string]uint64{ + "interface_up": 0, "port_rx_1024_to_15xx": 11529312, "port_rx_128_to_255": 1868952037, "port_rx_15xx_to_jumbo": 130339387, @@ -274,14 +279,14 @@ func setup() { "tx_tso_fallbacks": 0, "tx_tso_long_headers": 0, } - eth2 := &InterfaceMock{"eth2", "driver1", eth2Stat, false} + eth2 := &InterfaceMock{"eth2", "driver1", eth2Stat, false, false} interfaceMap[eth2.Name] = eth2 // dummy loopback including dummy stat to ensure that the ignore feature is working lo0Stat := map[string]uint64{ "dummy": 0, } - lo0 := &InterfaceMock{"lo0", "", lo0Stat, true} + lo0 := &InterfaceMock{"lo0", "", lo0Stat, true, true} interfaceMap[lo0.Name] = lo0 c := &CommandEthtoolMock{interfaceMap} @@ -301,7 +306,6 @@ func toStringMapInterface(in map[string]uint64) map[string]interface{} { } func TestGather(t *testing.T) { - setup() var acc testutil.Accumulator @@ -324,7 +328,6 @@ func TestGather(t *testing.T) { } func TestGatherIncludeInterfaces(t *testing.T) { - setup() var acc testutil.Accumulator @@ -352,7 +355,6 @@ func TestGatherIncludeInterfaces(t *testing.T) { } func TestGatherIgnoreInterfaces(t *testing.T) { - setup() var acc testutil.Accumulator @@ -377,5 +379,4 @@ func TestGatherIgnoreInterfaces(t *testing.T) { "driver": "driver1", } acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2) - } From 50265d9023e3104c3656ec9412f93b800fb9c8c4 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Fri, 18 Dec 2020 15:41:39 -0600 Subject: [PATCH 140/761] Allow globs (wildcards) in config for tags/fields in enum processor (#8598) * Allow glob in enum processor config * change assert to require --- plugins/processors/enum/README.md | 4 +- plugins/processors/enum/enum.go | 98 ++++++++++++++++++++-------- plugins/processors/enum/enum_test.go | 61 +++++++++++++---- 3 files changed, 122 insertions(+), 41 deletions(-) diff --git a/plugins/processors/enum/README.md b/plugins/processors/enum/README.md index 72a0556252902..651e58e7d2fce 100644 --- a/plugins/processors/enum/README.md +++ b/plugins/processors/enum/README.md @@ -14,10 +14,10 @@ source tag or field is overwritten. ```toml [[processors.enum]] [[processors.enum.mapping]] - ## Name of the field to map + ## Name of the field to map. Globs accepted. field = "status" - ## Name of the tag to map + ## Name of the tag to map. Globs accepted. # tag = "status" ## Destination tag or field to be used for the mapped value. By default the diff --git a/plugins/processors/enum/enum.go b/plugins/processors/enum/enum.go index a96e7d5095bcf..60a4264528844 100644 --- a/plugins/processors/enum/enum.go +++ b/plugins/processors/enum/enum.go @@ -5,15 +5,16 @@ import ( "strconv" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/processors" ) var sampleConfig = ` [[processors.enum.mapping]] - ## Name of the field to map + ## Name of the field to map. Globs accepted. field = "status" - ## Name of the tag to map + ## Name of the tag to map. Globs accepted. # tag = "status" ## Destination tag or field to be used for the mapped value. By default the @@ -34,6 +35,9 @@ var sampleConfig = ` type EnumMapper struct { Mappings []Mapping `toml:"mapping"` + + FieldFilters map[string]filter.Filter + TagFilters map[string]filter.Filter } type Mapping struct { @@ -44,6 +48,29 @@ type Mapping struct { ValueMappings map[string]interface{} } +func (mapper *EnumMapper) Init() error { + mapper.FieldFilters = make(map[string]filter.Filter) + mapper.TagFilters = make(map[string]filter.Filter) + for _, mapping := range mapper.Mappings { + if mapping.Field != "" { + fieldFilter, err := filter.NewIncludeExcludeFilter([]string{mapping.Field}, nil) + if err != nil { + return fmt.Errorf("Failed to create new field filter: %w", err) + } + mapper.FieldFilters[mapping.Field] = fieldFilter + } + if mapping.Tag != "" { + tagFilter, err := filter.NewIncludeExcludeFilter([]string{mapping.Tag}, nil) + if err != nil { + return fmt.Errorf("Failed to create new tag filter: %s", err) + } + mapper.TagFilters[mapping.Tag] = tagFilter + } + } + + return nil +} + func (mapper *EnumMapper) SampleConfig() string { return sampleConfig } @@ -60,30 +87,56 @@ func (mapper *EnumMapper) Apply(in ...telegraf.Metric) []telegraf.Metric { } func (mapper *EnumMapper) applyMappings(metric telegraf.Metric) telegraf.Metric { + newFields := make(map[string]interface{}) + newTags := make(map[string]string) + for _, mapping := range mapper.Mappings { if mapping.Field != "" { - if originalValue, isPresent := metric.GetField(mapping.Field); isPresent { - if adjustedValue, isString := adjustValue(originalValue).(string); isString { - if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent { - writeField(metric, mapping.getDestination(), mappedValue) - } + mapper.fieldMapping(metric, mapping, newFields) + } + if mapping.Tag != "" { + mapper.tagMapping(metric, mapping, newTags) + } + } + + for k, v := range newFields { + writeField(metric, k, v) + } + + for k, v := range newTags { + writeTag(metric, k, v) + } + + return metric +} + +func (mapper *EnumMapper) fieldMapping(metric telegraf.Metric, mapping Mapping, newFields map[string]interface{}) { + fields := metric.FieldList() + for _, f := range fields { + if mapper.FieldFilters[mapping.Field].Match(f.Key) { + if adjustedValue, isString := adjustValue(f.Value).(string); isString { + if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent { + newFields[mapping.getDestination(f.Key)] = mappedValue } } } - if mapping.Tag != "" { - if originalValue, isPresent := metric.GetTag(mapping.Tag); isPresent { - if mappedValue, isMappedValuePresent := mapping.mapValue(originalValue); isMappedValuePresent { - switch val := mappedValue.(type) { - case string: - writeTag(metric, mapping.getDestinationTag(), val) - default: - writeTag(metric, mapping.getDestinationTag(), fmt.Sprintf("%v", val)) - } + } +} + +func (mapper *EnumMapper) tagMapping(metric telegraf.Metric, mapping Mapping, newTags map[string]string) { + tags := metric.TagList() + for _, t := range tags { + if mapper.TagFilters[mapping.Tag].Match(t.Key) { + if mappedValue, isMappedValuePresent := mapping.mapValue(t.Value); isMappedValuePresent { + switch val := mappedValue.(type) { + case string: + newTags[mapping.getDestination(t.Key)] = val + default: + newTags[mapping.getDestination(t.Key)] = fmt.Sprintf("%v", val) } } } } - return metric } func adjustValue(in interface{}) interface{} { @@ -109,18 +162,11 @@ func (mapping *Mapping) mapValue(original string) (interface{}, bool) { return original, false } -func (mapping *Mapping) getDestination() string { - if mapping.Dest != "" { - return mapping.Dest - } - return mapping.Field -} - -func (mapping *Mapping) getDestinationTag() string { +func (mapping *Mapping) getDestination(defaultDest string) string { if mapping.Dest != "" { return mapping.Dest } - return mapping.Tag + return defaultDest } func writeField(metric telegraf.Metric, name string, value interface{}) { diff --git a/plugins/processors/enum/enum_test.go b/plugins/processors/enum/enum_test.go index de13aad156f5c..21b89d241a2a2 100644 --- a/plugins/processors/enum/enum_test.go +++ b/plugins/processors/enum/enum_test.go @@ -7,17 +7,22 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func createTestMetric() telegraf.Metric { metric, _ := metric.New("m1", - map[string]string{"tag": "tag_value"}, + map[string]string{ + "tag": "tag_value", + "duplicate_tag": "tag_value", + }, map[string]interface{}{ - "string_value": "test", - "int_value": int(200), - "uint_value": uint(500), - "float_value": float64(3.14), - "true_value": true, + "string_value": "test", + "duplicate_string_value": "test", + "int_value": int(200), + "uint_value": uint(500), + "float_value": float64(3.14), + "true_value": true, }, time.Now(), ) @@ -48,6 +53,8 @@ func assertTagValue(t *testing.T, expected interface{}, tag string, tags map[str func TestRetainsMetric(t *testing.T) { mapper := EnumMapper{} + err := mapper.Init() + require.Nil(t, err) source := createTestMetric() target := mapper.Apply(source)[0] @@ -64,7 +71,8 @@ func TestRetainsMetric(t *testing.T) { func TestMapsSingleStringValueTag(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Tag: "tag", ValueMappings: map[string]interface{}{"tag_value": "valuable"}}}} - + err := mapper.Init() + require.Nil(t, err) tags := calculateProcessedTags(mapper, createTestMetric()) assertTagValue(t, "valuable", "tag", tags) @@ -72,7 +80,8 @@ func TestMapsSingleStringValueTag(t *testing.T) { func TestNoFailureOnMappingsOnNonSupportedValuedFields(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Field: "float_value", ValueMappings: map[string]interface{}{"3.14": "pi"}}}} - + err := mapper.Init() + require.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, float64(3.14), "float_value", fields) @@ -110,6 +119,8 @@ func TestMappings(t *testing.T) { field_name := mapping["field_name"][0].(string) for index := range mapping["target_value"] { mapper := EnumMapper{Mappings: []Mapping{{Field: field_name, ValueMappings: map[string]interface{}{mapping["target_value"][index].(string): mapping["mapped_value"][index]}}}} + err := mapper.Init() + assert.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, mapping["expected_value"][index], field_name, fields) } @@ -118,7 +129,8 @@ func TestMappings(t *testing.T) { func TestMapsToDefaultValueOnUnknownSourceValue(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"other": int64(1)}}}} - + err := mapper.Init() + require.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, 42, "string_value", fields) @@ -126,7 +138,8 @@ func TestMapsToDefaultValueOnUnknownSourceValue(t *testing.T) { func TestDoNotMapToDefaultValueKnownSourceValue(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"test": int64(1)}}}} - + err := mapper.Init() + require.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, 1, "string_value", fields) @@ -134,7 +147,8 @@ func TestDoNotMapToDefaultValueKnownSourceValue(t *testing.T) { func TestNoMappingWithoutDefaultOrDefinedMappingValue(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", ValueMappings: map[string]interface{}{"other": int64(1)}}}} - + err := mapper.Init() + require.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, "test", "string_value", fields) @@ -142,7 +156,8 @@ func TestNoMappingWithoutDefaultOrDefinedMappingValue(t *testing.T) { func TestWritesToDestination(t *testing.T) { mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Dest: "string_code", ValueMappings: map[string]interface{}{"test": int64(1)}}}} - + err := mapper.Init() + require.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, "test", "string_value", fields) @@ -152,10 +167,30 @@ func TestWritesToDestination(t *testing.T) { func TestDoNotWriteToDestinationWithoutDefaultOrDefinedMapping(t *testing.T) { field := "string_code" mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Dest: field, ValueMappings: map[string]interface{}{"other": int64(1)}}}} - + err := mapper.Init() + require.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, "test", "string_value", fields) _, present := fields[field] assert.False(t, present, "value of field '"+field+"' was present") } + +func TestFieldGlobMatching(t *testing.T) { + mapper := EnumMapper{Mappings: []Mapping{{Field: "*", ValueMappings: map[string]interface{}{"test": "glob"}}}} + err := mapper.Init() + require.Nil(t, err) + fields := calculateProcessedValues(mapper, createTestMetric()) + + assertFieldValue(t, "glob", "string_value", fields) + assertFieldValue(t, "glob", "duplicate_string_value", fields) +} + +func TestTagGlobMatching(t *testing.T) { + mapper := EnumMapper{Mappings: []Mapping{{Tag: "*", ValueMappings: map[string]interface{}{"tag_value": "glob"}}}} + err := mapper.Init() + require.Nil(t, err) + tags := calculateProcessedTags(mapper, createTestMetric()) + + assertTagValue(t, "glob", "tag", tags) +} From 828fbbc4dcfd7101e65476201233ca2241d08cbf Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 18 Dec 2020 17:39:35 -0500 Subject: [PATCH 141/761] Update changelog (cherry picked from commit 7de2ff7fff6c09d36f3b9e86092beed4634ea611) --- CHANGELOG.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6bcf0a36a6b9e..0e33f1c6cc430 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,12 @@ -## v1.17.0-rc1 [2020-12-16] +## v1.17.0 [2020-12-18] #### Release Notes -- Starlark plugins can now store state between runs using a global state variable + - Starlark plugins can now store state between runs using a global state variable. This lets you make custom aggregators as well as custom processors that are state-aware. + - New input plugins: Riemann-Protobuff Listener, Intel PowerStat + - New output plugins: Yandex.Cloud monitoring, Logz.io + - New parser plugin: Prometheus + - New serializer: Prometheus remote write #### Bugfixes @@ -48,6 +52,10 @@ - [#8575](https://github.com/influxdata/telegraf/pull/8575) `inputs.win_services` Added Glob pattern matching for "Windows Services" plugin - [#6132](https://github.com/influxdata/telegraf/pull/6132) `inputs.mysql` Add per user metrics to mysql input - [#8500](https://github.com/influxdata/telegraf/pull/8500) `inputs.github` [inputs.github] Add query of pull-request statistics + - [#8598](https://github.com/influxdata/telegraf/pull/8598) `processors.enum` Allow globs (wildcards) in config for tags/fields in enum processor + - [#8590](https://github.com/influxdata/telegraf/pull/8590) `inputs.ethtool` [ethtool] interface_up field added + - [#8579](https://github.com/influxdata/telegraf/pull/8579) `parsers.json` Add wildcard tags json parser support + #### New Parser Plugins From f067b4b75abb6410d9b1b0f2e8270d1a9b6da27a Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 18 Dec 2020 18:32:33 -0500 Subject: [PATCH 142/761] Update build version to 1.18.0 --- build_version.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_version.txt b/build_version.txt index 092afa15df4df..84cc529467b05 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.17.0 +1.18.0 From a7dff56ddea2acf9fc4ec9bf6e2314cb49bea05d Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Mon, 21 Dec 2020 09:14:10 -0700 Subject: [PATCH 143/761] Add more verbose errors to influxdb output (#6061) Looks like ear/959 has already been resolved, but these additional information for the errors still seems useful. I just re-based the change and merging. --- plugins/outputs/influxdb/http.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 57e3e918b8202..87c5a89b014cf 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -316,7 +316,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []telegraf.Metric) error { loc, err := makeWriteURL(c.config.URL, db, rp, c.config.Consistency) if err != nil { - return err + return fmt.Errorf("failed making write url: %s", err.Error()) } reader, err := c.requestBodyReader(metrics) @@ -327,13 +327,13 @@ func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []te req, err := c.makeWriteRequest(loc, reader) if err != nil { - return err + return fmt.Errorf("failed making write req: %s", err.Error()) } resp, err := c.client.Do(req.WithContext(ctx)) if err != nil { internal.OnClientError(c.client, err) - return err + return fmt.Errorf("failed doing req: %s", err.Error()) } defer resp.Body.Close() @@ -426,7 +426,7 @@ func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request req, err := http.NewRequest("POST", url, body) if err != nil { - return nil, err + return nil, fmt.Errorf("failed creating new request: %s", err.Error()) } req.Header.Set("Content-Type", "text/plain; charset=utf-8") From c47fcf662646dcbc26ac8a78e6e128d7d97ee902 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Mon, 21 Dec 2020 11:45:19 -0500 Subject: [PATCH 144/761] Add configurable number of 'most recent' date-stamped indices to gather in Elasticsearch input (#8543) Add configurable number of 'most recent' date-stamped indices to gather in the Elasticsearch input plugin, and allow wildcards to account for date-suffixed index names. Configuring '3' for num_most_recent_indices will only gather the 3 latest indices, based on the date or number they end with. Finding the date or number is dependent on the targeted indices being configured with wildcards at the end of their 'base' names. --- plugins/inputs/elasticsearch/README.md | 5 + plugins/inputs/elasticsearch/elasticsearch.go | 203 +- .../elasticsearch/elasticsearch_test.go | 43 + plugins/inputs/elasticsearch/testdata_test.go | 2002 +++++++++++++++++ 4 files changed, 2206 insertions(+), 47 deletions(-) diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index 54285c3b9e8e3..0afb0e325dbdd 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -53,6 +53,7 @@ Note that specific statistics information can change between Elasticsearch versi cluster_stats_only_from_master = true ## Indices to collect; can be one or more indices names or _all + ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date. indices_include = ["_all"] ## One of "shards", "cluster", "indices" @@ -74,6 +75,10 @@ Note that specific statistics information can change between Elasticsearch versi # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix. + ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and ## sort them by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most ## recent indices. + # num_most_recent_indices = 0 ``` ### Metrics diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index b6dfd2a81b11f..75a04e49c28bb 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -12,6 +12,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -115,6 +116,7 @@ const sampleConfig = ` cluster_stats_only_from_master = true ## Indices to collect; can be one or more indices names or _all + ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date. indices_include = ["_all"] ## One of "shards", "cluster", "indices" @@ -135,6 +137,11 @@ const sampleConfig = ` # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix. + ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and sort them + ## by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most recent indices. + # num_most_recent_indices = 0 ` // Elasticsearch is a plugin to read stats from one or many Elasticsearch @@ -152,11 +159,14 @@ type Elasticsearch struct { NodeStats []string `toml:"node_stats"` Username string `toml:"username"` Password string `toml:"password"` + NumMostRecentIndices int `toml:"num_most_recent_indices"` + tls.ClientConfig client *http.Client serverInfo map[string]serverInfo serverInfoMutex sync.Mutex + indexMatchers map[string]filter.Filter } type serverInfo struct { nodeID string @@ -214,6 +224,19 @@ func (e *Elasticsearch) Description() string { return "Read stats from one or more Elasticsearch servers or clusters" } +// Init the plugin. +func (e *Elasticsearch) Init() error { + // Compile the configured indexes to match for sorting. + indexMatchers, err := e.compileIndexMatchers() + if err != nil { + return err + } + + e.indexMatchers = indexMatchers + + return nil +} + // Gather reads the stats from Elasticsearch and writes it to the // Accumulator. func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { @@ -527,66 +550,135 @@ func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator) acc.AddFields("elasticsearch_indices_stats_"+m, jsonParser.Fields, map[string]string{"index_name": "_all"}, now) } - // Individual Indices stats - for id, index := range indicesStats.Indices { - indexTag := map[string]string{"index_name": id} - stats := map[string]interface{}{ - "primaries": index.Primaries, - "total": index.Total, + // Gather stats for each index. + err := e.gatherIndividualIndicesStats(indicesStats.Indices, now, acc) + + return err +} + +// gatherSortedIndicesStats gathers stats for all indices in no particular order. +func (e *Elasticsearch) gatherIndividualIndicesStats(indices map[string]indexStat, now time.Time, acc telegraf.Accumulator) error { + // Sort indices into buckets based on their configured prefix, if any matches. + categorizedIndexNames, err := e.categorizeIndices(indices) + if err != nil { + return err + } + + for _, matchingIndices := range categorizedIndexNames { + // Establish the number of each category of indices to use. User can configure to use only the latest 'X' amount. + indicesCount := len(matchingIndices) + indicesToTrackCount := indicesCount + + // Sort the indices if configured to do so. + if e.NumMostRecentIndices > 0 { + if e.NumMostRecentIndices < indicesToTrackCount { + indicesToTrackCount = e.NumMostRecentIndices + } + sort.Strings(matchingIndices) } - for m, s := range stats { - f := jsonparser.JSONFlattener{} - // parse Json, getting strings and bools - err := f.FullFlattenJSON("", s, true, true) + + // Gather only the number of indexes that have been configured, in descending order (most recent, if date-stamped). + for i := indicesCount - 1; i >= indicesCount-indicesToTrackCount; i-- { + indexName := matchingIndices[i] + + err := e.gatherSingleIndexStats(indexName, indices[indexName], now, acc) if err != nil { return err } - acc.AddFields("elasticsearch_indices_stats_"+m, f.Fields, indexTag, now) } + } - if e.IndicesLevel == "shards" { - for shardNumber, shards := range index.Shards { - for _, shard := range shards { + return nil +} - // Get Shard Stats - flattened := jsonparser.JSONFlattener{} - err := flattened.FullFlattenJSON("", shard, true, true) - if err != nil { - return err - } +func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) (map[string][]string, error) { + categorizedIndexNames := map[string][]string{} - // determine shard tag and primary/replica designation - shardType := "replica" - if flattened.Fields["routing_primary"] == true { - shardType = "primary" - } - delete(flattened.Fields, "routing_primary") + // If all indices are configured to be gathered, bucket them all together. + if len(e.IndicesInclude) == 0 || e.IndicesInclude[0] == "_all" { + for indexName := range indices { + categorizedIndexNames["_all"] = append(categorizedIndexNames["_all"], indexName) + } - routingState, ok := flattened.Fields["routing_state"].(string) - if ok { - flattened.Fields["routing_state"] = mapShardStatusToCode(routingState) - } + return categorizedIndexNames, nil + } - routingNode, _ := flattened.Fields["routing_node"].(string) - shardTags := map[string]string{ - "index_name": id, - "node_id": routingNode, - "shard_name": string(shardNumber), - "type": shardType, - } + // Bucket each returned index with its associated configured index (if any match). + for indexName := range indices { + match := indexName + for name, matcher := range e.indexMatchers { + // If a configured index matches one of the returned indexes, mark it as a match. + if matcher.Match(match) { + match = name + break + } + } - for key, field := range flattened.Fields { - switch field.(type) { - case string, bool: - delete(flattened.Fields, key) - } - } + // Bucket all matching indices together for sorting. + categorizedIndexNames[match] = append(categorizedIndexNames[match], indexName) + } + + return categorizedIndexNames, nil +} + +func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now time.Time, acc telegraf.Accumulator) error { + indexTag := map[string]string{"index_name": name} + stats := map[string]interface{}{ + "primaries": index.Primaries, + "total": index.Total, + } + for m, s := range stats { + f := jsonparser.JSONFlattener{} + // parse Json, getting strings and bools + err := f.FullFlattenJSON("", s, true, true) + if err != nil { + return err + } + acc.AddFields("elasticsearch_indices_stats_"+m, f.Fields, indexTag, now) + } + + if e.IndicesLevel == "shards" { + for shardNumber, shards := range index.Shards { + for _, shard := range shards { + + // Get Shard Stats + flattened := jsonparser.JSONFlattener{} + err := flattened.FullFlattenJSON("", shard, true, true) + if err != nil { + return err + } - acc.AddFields("elasticsearch_indices_stats_shards", - flattened.Fields, - shardTags, - now) + // determine shard tag and primary/replica designation + shardType := "replica" + if flattened.Fields["routing_primary"] == true { + shardType = "primary" } + delete(flattened.Fields, "routing_primary") + + routingState, ok := flattened.Fields["routing_state"].(string) + if ok { + flattened.Fields["routing_state"] = mapShardStatusToCode(routingState) + } + + routingNode, _ := flattened.Fields["routing_node"].(string) + shardTags := map[string]string{ + "index_name": name, + "node_id": routingNode, + "shard_name": string(shardNumber), + "type": shardType, + } + + for key, field := range flattened.Fields { + switch field.(type) { + case string, bool: + delete(flattened.Fields, key) + } + } + + acc.AddFields("elasticsearch_indices_stats_shards", + flattened.Fields, + shardTags, + now) } } } @@ -656,6 +748,23 @@ func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error { return nil } +func (e *Elasticsearch) compileIndexMatchers() (map[string]filter.Filter, error) { + indexMatchers := map[string]filter.Filter{} + var err error + + // Compile each configured index into a glob matcher. + for _, configuredIndex := range e.IndicesInclude { + if _, exists := indexMatchers[configuredIndex]; !exists { + indexMatchers[configuredIndex], err = filter.Compile([]string{configuredIndex}) + if err != nil { + return nil, err + } + } + } + + return indexMatchers, nil +} + func init() { inputs.Add("elasticsearch", func() telegraf.Input { return NewElasticsearch() diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index ad91c898a1a5c..0700c7833dc15 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -310,6 +310,49 @@ func TestGatherClusterIndicesStats(t *testing.T) { map[string]string{"index_name": "twitter"}) } +func TestGatherDateStampedIndicesStats(t *testing.T) { + es := newElasticsearchWithClient() + es.IndicesInclude = []string{"twitter*", "influx*", "penguins"} + es.NumMostRecentIndices = 2 + es.Servers = []string{"http://example.com:9200"} + es.client.Transport = newTransportMock(http.StatusOK, dateStampedIndicesResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() + es.Init() + + var acc testutil.Accumulator + if err := es.gatherIndicesStats(es.Servers[0]+"/"+strings.Join(es.IndicesInclude, ",")+"/_stats", &acc); err != nil { + t.Fatal(err) + } + + // includes 2 most recent indices for "twitter", only expect the most recent two. + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter_2020_08_02"}) + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter_2020_08_01"}) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter_2020_07_31"}) + + // includes 2 most recent indices for "influx", only expect the most recent two. + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "influx2021.01.02"}) + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "influx2021.01.01"}) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "influx2020.12.31"}) + + // not configured to sort the 'penguins' index, but ensure it is also included. + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "penguins"}) +} + func TestGatherClusterIndiceShardsStats(t *testing.T) { es := newElasticsearchWithClient() es.IndicesLevel = "shards" diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go index a04fe1521e999..1006e4848bb65 100644 --- a/plugins/inputs/elasticsearch/testdata_test.go +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -2089,6 +2089,2008 @@ const clusterIndicesResponse = ` } }` +const dateStampedIndicesResponse = ` +{ + "_shards": { + "total": 9, + "successful": 6, + "failed": 0 + }, + "_all": { + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "indices": { + "twitter_2020_08_02": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "twitter_2020_08_01": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "twitter_2020_07_31": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "influx2021.01.02": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "influx2020.12.31": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "influx2021.01.01": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "penguins": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + } + } +}` + var clusterIndicesExpected = map[string]interface{}{ "completion_size_in_bytes": float64(0), "docs_count": float64(999), From dd09f46863357bf53934268a6bc07c048757f092 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Mon, 21 Dec 2020 11:45:58 -0500 Subject: [PATCH 145/761] Add hex_key parameter for IPMI input plugin connection (#8524) --- plugins/inputs/ipmi_sensor/README.md | 8 ++++ plugins/inputs/ipmi_sensor/connection.go | 12 ++++-- plugins/inputs/ipmi_sensor/connection_test.go | 40 ++++++++++++++++++- plugins/inputs/ipmi_sensor/ipmi.go | 6 ++- plugins/inputs/ipmi_sensor/ipmi_test.go | 20 ++++++---- 5 files changed, 72 insertions(+), 14 deletions(-) diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index 0f9faa97f1f3d..f620b93cb659e 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -19,6 +19,11 @@ When one or more servers are specified, the plugin will use the following comman ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ``` +Any of the following parameters will be added to the aformentioned query if they're configured: +``` +-y hex_key -L privilege +``` + ### Configuration ```toml @@ -53,6 +58,9 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ## Schema Version: (Optional, defaults to version 1) metric_version = 2 + + ## Optionally provide the hex key for the IMPI connection. + # hex_key = "" ``` ### Measurements diff --git a/plugins/inputs/ipmi_sensor/connection.go b/plugins/inputs/ipmi_sensor/connection.go index 7f6a4c3594f61..69ae04b78cf9f 100644 --- a/plugins/inputs/ipmi_sensor/connection.go +++ b/plugins/inputs/ipmi_sensor/connection.go @@ -15,11 +15,14 @@ type Connection struct { Port int Interface string Privilege string + HexKey string } -func NewConnection(server string, privilege string) *Connection { - conn := &Connection{} - conn.Privilege = privilege +func NewConnection(server, privilege, hexKey string) *Connection { + conn := &Connection{ + Privilege: privilege, + HexKey: hexKey, + } inx1 := strings.LastIndex(server, "@") inx2 := strings.Index(server, "(") @@ -57,6 +60,9 @@ func (t *Connection) options() []string { "-I", intf, } + if t.HexKey != "" { + options = append(options, "-y", t.HexKey) + } if t.Port != 0 { options = append(options, "-p", strconv.Itoa(t.Port)) } diff --git a/plugins/inputs/ipmi_sensor/connection_test.go b/plugins/inputs/ipmi_sensor/connection_test.go index 74944890f7a0c..21d1957c95126 100644 --- a/plugins/inputs/ipmi_sensor/connection_test.go +++ b/plugins/inputs/ipmi_sensor/connection_test.go @@ -3,7 +3,7 @@ package ipmi_sensor import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type conTest struct { @@ -24,6 +24,7 @@ func TestNewConnection(t *testing.T) { Password: "PASSW0RD", Interface: "lan", Privilege: "USER", + HexKey: "0001", }, }, { @@ -34,11 +35,46 @@ func TestNewConnection(t *testing.T) { Password: "PASS:!@#$%^&*(234)_+W0RD", Interface: "lan", Privilege: "USER", + HexKey: "0001", }, }, } for _, v := range testData { - assert.Equal(t, v.con, NewConnection(v.addr, "USER")) + require.EqualValues(t, v.con, NewConnection(v.addr, "USER", "0001")) + } +} + +func TestGetCommandOptions(t *testing.T) { + testData := []struct { + connection *Connection + options []string + }{ + { + &Connection{ + Hostname: "192.168.1.1", + Username: "user", + Password: "password", + Interface: "lan", + Privilege: "USER", + HexKey: "0001", + }, + []string{"-H", "192.168.1.1", "-U", "user", "-P", "password", "-I", "lan", "-y", "0001", "-L", "USER"}, + }, + { + &Connection{ + Hostname: "192.168.1.1", + Username: "user", + Password: "password", + Interface: "lan", + Privilege: "USER", + HexKey: "", + }, + []string{"-H", "192.168.1.1", "-U", "user", "-P", "password", "-I", "lan", "-L", "USER"}, + }, + } + + for _, data := range testData { + require.EqualValues(t, data.options, data.connection.options()) } } diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index fb53e1bc746fe..5572a195b2c29 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -29,6 +29,7 @@ var ( type Ipmi struct { Path string Privilege string + HexKey string `toml:"hex_key"` Servers []string Timeout internal.Duration MetricVersion int @@ -65,6 +66,9 @@ var sampleConfig = ` ## Schema Version: (Optional, defaults to version 1) metric_version = 2 + + ## Optionally provide the hex key for the IMPI connection. + # hex_key = "" ` // SampleConfig returns the documentation about the sample configuration @@ -110,7 +114,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { opts := make([]string, 0) hostname := "" if server != "" { - conn := NewConnection(server, m.Privilege) + conn := NewConnection(server, m.Privilege, m.HexKey) hostname = conn.Hostname opts = conn.options() } diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index bd5e02c196e76..81139ef40ee94 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -10,7 +10,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -20,7 +19,9 @@ func TestGather(t *testing.T) { Path: "ipmitool", Privilege: "USER", Timeout: internal.Duration{Duration: time.Second * 5}, + HexKey: "1234567F", } + // overwriting exec commands with mock commands execCommand = fakeExecCommand var acc testutil.Accumulator @@ -29,11 +30,12 @@ func TestGather(t *testing.T) { require.NoError(t, err) - assert.Equal(t, acc.NFields(), 262, "non-numeric measurements should be ignored") + require.EqualValues(t, acc.NFields(), 262, "non-numeric measurements should be ignored") - conn := NewConnection(i.Servers[0], i.Privilege) - assert.Equal(t, "USERID", conn.Username) - assert.Equal(t, "lan", conn.Interface) + conn := NewConnection(i.Servers[0], i.Privilege, i.HexKey) + require.EqualValues(t, "USERID", conn.Username) + require.EqualValues(t, "lan", conn.Interface) + require.EqualValues(t, "1234567F", conn.HexKey) var testsWithServer = []struct { fields map[string]interface{} @@ -388,6 +390,7 @@ func TestGatherV2(t *testing.T) { Privilege: "USER", Timeout: internal.Duration{Duration: time.Second * 5}, MetricVersion: 2, + HexKey: "0000000F", } // overwriting exec commands with mock commands execCommand = fakeExecCommandV2 @@ -397,9 +400,10 @@ func TestGatherV2(t *testing.T) { require.NoError(t, err) - conn := NewConnection(i.Servers[0], i.Privilege) - assert.Equal(t, "USERID", conn.Username) - assert.Equal(t, "lan", conn.Interface) + conn := NewConnection(i.Servers[0], i.Privilege, i.HexKey) + require.EqualValues(t, "USERID", conn.Username) + require.EqualValues(t, "lan", conn.Interface) + require.EqualValues(t, "0000000F", conn.HexKey) var testsWithServer = []struct { fields map[string]interface{} From 9f31184e209ad8d917a4c8bac51fac654a500f64 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Mon, 21 Dec 2020 14:37:50 -0500 Subject: [PATCH 146/761] Fix readme link for line protocol in influx parser (#8610) --- plugins/parsers/influx/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/parsers/influx/README.md b/plugins/parsers/influx/README.md index 51c0106e623f3..874bb279d5a77 100644 --- a/plugins/parsers/influx/README.md +++ b/plugins/parsers/influx/README.md @@ -3,7 +3,7 @@ There are no additional configuration options for InfluxDB [line protocol][]. The metrics are parsed directly into Telegraf metrics. -[line protocol]: https://docs.influxdata.com/influxdb/latest/write_protocols/line/ +[line protocol]: https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/ ### Configuration From 0731585d614e3463f30fd217978e543704eeae07 Mon Sep 17 00:00:00 2001 From: Aleksei Magusev <248290+lexmag@users.noreply.github.com> Date: Mon, 21 Dec 2020 21:44:51 +0100 Subject: [PATCH 147/761] Unify comments style in the CPU input (#8605) --- plugins/inputs/cpu/cpu.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/cpu/cpu.go b/plugins/inputs/cpu/cpu.go index e073309e47e3b..3b6c1b8816b38 100644 --- a/plugins/inputs/cpu/cpu.go +++ b/plugins/inputs/cpu/cpu.go @@ -37,9 +37,9 @@ var sampleConfig = ` percpu = true ## Whether to report total system cpu stats or not totalcpu = true - ## If true, collect raw CPU time metrics. + ## If true, collect raw CPU time metrics collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. + ## If true, compute and report the sum of all non-idle CPU states report_active = false ` From 35a221018bfb3c6b927b0066dda407214211be83 Mon Sep 17 00:00:00 2001 From: Sam Lai <70988+slai@users.noreply.github.com> Date: Wed, 23 Dec 2020 15:09:42 +0000 Subject: [PATCH 148/761] [http_listener_v2] Stop() succeeds even if fails to start (#8502) * [http_listener_v2] Stop() succeeds even if fails to start In cases where the http_listener_v2 plugin config is invalid, when the agent attempts to cleanup by stopping all the inputs, the Stop method here panics as it tries to call listener.Stop() when no listener has been set. This also masks the error message returned from the Start method. ``` > telegraf --test 2020-10-27T12:21:45Z I! Starting Telegraf 1.16.0 2020-10-27T12:21:45Z I! Using config file: /etc/telegraf/telegraf.conf ... panic: runtime error: invalid memory address or nil pointer dereference [signal SIGSEGV: segmentation violation code=0x1 addr=0x28 pc=0x1245130] goroutine 45 [running]: github.com/influxdata/telegraf/plugins/inputs/http_listener_v2.(*HTTPListenerV2).Stop(0xc00043e000) /go/src/github.com/influxdata/telegraf/plugins/inputs/http_listener_v2/http_listener_v2.go:178 +0x30 github.com/influxdata/telegraf/agent.stopServiceInputs(0xc00045e480, 0x5, 0x8) /go/src/github.com/influxdata/telegraf/agent/agent.go:445 +0x82 github.com/influxdata/telegraf/agent.(*Agent).testRunInputs(0xc000288080, 0x32be8c0, 0xc0000f1f00, 0x0, 0xc00000f480, 0x0, 0x0) /go/src/github.com/influxdata/telegraf/agent/agent.go:434 +0x1b7 github.com/influxdata/telegraf/agent.(*Agent).test.func4(0xc000057b70, 0xc000288080, 0x32be8c0, 0xc0000f1f00, 0x0, 0xc00000f480) /go/src/github.com/influxdata/telegraf/agent/agent.go:977 +0x8b created by github.com/influxdata/telegraf/agent.(*Agent).test /go/src/github.com/influxdata/telegraf/agent/agent.go:975 +0x352 ``` This fixes this issue by checking if the listener has been set before calling listener.Stop. ``` > ./telegraf --config test.conf --test 2020-10-27T12:43:25Z I! Starting Telegraf 2020-10-27T12:43:25Z E! [agent] Starting input inputs.http_listener_v2: listen tcp: address address_without_port: missing port in address ``` * retry CI --- .../http_listener_v2/http_listener_v2.go | 4 +++- .../http_listener_v2/http_listener_v2_test.go | 21 +++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 1023c0d10bcf5..41ce35df504e4 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -175,7 +175,9 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { // Stop cleans up all resources func (h *HTTPListenerV2) Stop() { - h.listener.Close() + if h.listener != nil { + h.listener.Close() + } h.wg.Wait() } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index 4457fcacda79d..1f3b629d09e4c 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -103,6 +103,27 @@ func createURL(listener *HTTPListenerV2, scheme string, path string, rawquery st return u.String() } +func TestInvalidListenerConfig(t *testing.T) { + parser, _ := parsers.NewInfluxParser() + + listener := &HTTPListenerV2{ + Log: testutil.Logger{}, + ServiceAddress: "address_without_port", + Path: "/write", + Methods: []string{"POST"}, + Parser: parser, + TimeFunc: time.Now, + MaxBodySize: internal.Size{Size: 70000}, + DataSource: "body", + } + + acc := &testutil.Accumulator{} + require.Error(t, listener.Start(acc)) + + // Stop is called when any ServiceInput fails to start; it must succeed regardless of state + listener.Stop() +} + func TestWriteHTTPSNoClientAuth(t *testing.T) { listener := newTestHTTPSListenerV2() listener.TLSAllowedCACerts = nil From 2c61fad895db596a0628c00e80b5387c24fc00ed Mon Sep 17 00:00:00 2001 From: Peng Xiao Date: Wed, 23 Dec 2020 16:12:41 +0100 Subject: [PATCH 149/761] improve the error log message for snmp trap (#8552) add OID value and source information into error log and make it easier for troubleshooting --- plugins/inputs/snmp_trap/snmp_trap.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index d380d582bad66..e14d129e84f1b 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -284,7 +284,7 @@ func makeTrapHandler(s *SnmpTrap) handler { if trapOid != "" { e, err := s.lookup(trapOid) if err != nil { - s.Log.Errorf("Error resolving V1 OID: %v", err) + s.Log.Errorf("Error resolving V1 OID, oid=%s, source=%s: %v", trapOid, tags["source"], err) return } setTrapOid(tags, trapOid, e) @@ -322,7 +322,7 @@ func makeTrapHandler(s *SnmpTrap) handler { var err error e, err = s.lookup(val) if nil != err { - s.Log.Errorf("Error resolving value OID: %v", err) + s.Log.Errorf("Error resolving value OID, oid=%s, source=%s: %v", val, tags["source"], err) return } @@ -340,7 +340,7 @@ func makeTrapHandler(s *SnmpTrap) handler { e, err := s.lookup(v.Name) if nil != err { - s.Log.Errorf("Error resolving OID: %v", err) + s.Log.Errorf("Error resolving OID oid=%s, source=%s: %v", v.Name, tags["source"], err) return } From ed72aac0be3bec74ce03238538a3afe712b837a8 Mon Sep 17 00:00:00 2001 From: JS1010111 <10931029+JS1010111@users.noreply.github.com> Date: Wed, 23 Dec 2020 12:21:38 -0300 Subject: [PATCH 150/761] Add support for an inclusive job list in Jenkins plugin (#8287) * Add support for an inclusive job list * Update jenkins plugin tests * Update jenkins plugin docs * Update jenkins plugin docs --- plugins/inputs/jenkins/README.md | 9 +++++--- plugins/inputs/jenkins/jenkins.go | 30 ++++++++++++++++++++------ plugins/inputs/jenkins/jenkins_test.go | 4 ++++ 3 files changed, 33 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index dc9889fe628fc..891e2fc0587d7 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -39,11 +39,14 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API ## empty will use default value 10 # max_subjob_per_layer = 10 - ## Jobs to exclude from gathering - # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] + ## Jobs to include or exclude from gathering + ## When using both lists, job_exclude has priority. + ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"] + # job_include = [ "*" ] + # job_exclude = [ ] ## Nodes to exclude from gathering - # node_exclude = [ "node1", "node2" ] + # node_exclude = [ ] ## Worker pool for jenkins plugin only ## Empty this field will use default value 5 diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index 46637836b2cb2..78820da55f6ad 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -37,7 +37,9 @@ type Jenkins struct { MaxSubJobDepth int `toml:"max_subjob_depth"` MaxSubJobPerLayer int `toml:"max_subjob_per_layer"` JobExclude []string `toml:"job_exclude"` - jobFilter filter.Filter + JobInclude []string `toml:"job_include"` + jobFilterExclude filter.Filter + jobFilterInclude filter.Filter NodeExclude []string `toml:"node_exclude"` nodeFilter filter.Filter @@ -77,11 +79,14 @@ const sampleConfig = ` ## empty will use default value 10 # max_subjob_per_layer = 10 - ## Jobs to exclude from gathering - # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] + ## Jobs to include or exclude from gathering + ## When using both lists, job_exclude has priority. + ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"] + # job_include = [ "*" ] + # job_exclude = [ ] ## Nodes to exclude from gathering - # node_exclude = [ "node1", "node2" ] + # node_exclude = [ ] ## Worker pool for jenkins plugin only ## Empty this field will use default value 5 @@ -157,8 +162,13 @@ func (j *Jenkins) initialize(client *http.Client) error { } j.Source = u.Hostname() - // init job filter - j.jobFilter, err = filter.Compile(j.JobExclude) + // init job filters + j.jobFilterExclude, err = filter.Compile(j.JobExclude) + if err != nil { + return fmt.Errorf("error compile job filters[%s]: %v", j.URL, err) + } + + j.jobFilterInclude, err = filter.Compile(j.JobInclude) if err != nil { return fmt.Errorf("error compile job filters[%s]: %v", j.URL, err) } @@ -303,8 +313,14 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { if j.MaxSubJobDepth > 0 && jr.layer == j.MaxSubJobDepth { return nil } + + // filter out not included job. + if j.jobFilterInclude != nil && j.jobFilterInclude.Match(jr.hierarchyName()) == false { + return nil + } + // filter out excluded job. - if j.jobFilter != nil && j.jobFilter.Match(jr.hierarchyName()) { + if j.jobFilterExclude != nil && j.jobFilterExclude.Match(jr.hierarchyName()) { return nil } diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index f09f5f9a936bf..ffac5d8305647 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -368,6 +368,7 @@ func TestInitialize(t *testing.T) { Log: testutil.Logger{}, URL: ts.URL, ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + JobInclude: []string{"jobA", "jobB"}, JobExclude: []string{"job1", "job2"}, NodeExclude: []string{"node1", "node2"}, }, @@ -806,6 +807,9 @@ func TestGatherJobs(t *testing.T) { URL: ts.URL, MaxBuildAge: internal.Duration{Duration: time.Hour}, ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + JobInclude: []string{ + "*", + }, JobExclude: []string{ "ignore-1", "apps/ignore-all/*", From 7c17055178947497917b87ef6acc62931f94c41c Mon Sep 17 00:00:00 2001 From: Greg <2653109+glinton@users.noreply.github.com> Date: Wed, 23 Dec 2020 08:30:47 -0700 Subject: [PATCH 151/761] Provide method to include core count when reporting cpu_usage in procstat input (#6165) * Provide a non-irix reporting of cpu_usage in procstat input * Update sample config to include cpu gathering mode * cleanup readme from merge --- plugins/inputs/procstat/README.md | 3 +++ plugins/inputs/procstat/procstat.go | 22 +++++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 73c40ef79213e..8d43d86eaf568 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -44,6 +44,9 @@ Processes can be selected for monitoring using one of several methods: ## When true add the full cmdline as a tag. # cmdline_tag = false + ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. + # mode = "irix" + ## Add the PID as a tag instead of as a field. When collecting multiple ## processes with otherwise matching tags this setting should be enabled to ## ensure each process has a unique identity. diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 1d6af5df42246..aa654da560c10 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -6,7 +6,9 @@ import ( "io/ioutil" "os/exec" "path/filepath" + "runtime" "strconv" + "strings" "time" "github.com/influxdata/telegraf" @@ -34,6 +36,9 @@ type Procstat struct { CGroup string `toml:"cgroup"` PidTag bool WinService string `toml:"win_service"` + Mode string + + solarisMode bool finder PIDFinder @@ -69,6 +74,9 @@ var sampleConfig = ` ## When true add the full cmdline as a tag. # cmdline_tag = false + ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. + # mode = "irix" + ## Add the PID as a tag instead of as a field. When collecting multiple ## processes with otherwise matching tags this setting should be enabled to ## ensure each process has a unique identity. @@ -240,7 +248,11 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { cpu_perc, err := proc.Percent(time.Duration(0)) if err == nil { - fields[prefix+"cpu_usage"] = cpu_perc + if p.solarisMode { + fields[prefix+"cpu_usage"] = cpu_perc / float64(runtime.NumCPU()) + } else { + fields[prefix+"cpu_usage"] = cpu_perc + } } mem, err := proc.MemoryInfo() @@ -461,6 +473,14 @@ func (p *Procstat) winServicePIDs() ([]PID, error) { return pids, nil } +func (p *Procstat) Init() error { + if strings.ToLower(p.Mode) == "solaris" { + p.solarisMode = true + } + + return nil +} + func init() { inputs.Add("procstat", func() telegraf.Input { return &Procstat{} From ea4feb1a07edfea3243cc328f1994f63852e9f96 Mon Sep 17 00:00:00 2001 From: David Pryor Date: Wed, 23 Dec 2020 11:19:53 -0500 Subject: [PATCH 152/761] fixed formatting (+1 squashed commit) (#8541) Squashed commits: [c4e2bee2] Closes #8530: Extended the internal snmp wrapper to support AES192, AES192C, AES256, and AES256C. Updated the example configuration with the new privProtocols. Added the warning that those protocols are only supported if you have the appropriate tooling on your system. Added test to ensure all 4 new privProtocols could be selected and properly encrypt the priv password. --- internal/snmp/wrapper.go | 8 +++ plugins/inputs/snmp/README.md | 4 +- plugins/inputs/snmp/snmp_test.go | 119 +++++++++++++++++++++++++++++++ 3 files changed, 130 insertions(+), 1 deletion(-) diff --git a/internal/snmp/wrapper.go b/internal/snmp/wrapper.go index 23a15594ed6f7..db0f225c9f793 100644 --- a/internal/snmp/wrapper.go +++ b/internal/snmp/wrapper.go @@ -125,6 +125,14 @@ func NewWrapper(s ClientConfig) (GosnmpWrapper, error) { sp.PrivacyProtocol = gosnmp.DES case "aes": sp.PrivacyProtocol = gosnmp.AES + case "aes192": + sp.PrivacyProtocol = gosnmp.AES192 + case "aes192c": + sp.PrivacyProtocol = gosnmp.AES192C + case "aes256": + sp.PrivacyProtocol = gosnmp.AES256 + case "aes256c": + sp.PrivacyProtocol = gosnmp.AES256C case "": sp.PrivacyProtocol = gosnmp.NoPriv default: diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index 0eb0ac31a0c97..c4aa3367f787c 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -56,7 +56,9 @@ information. # sec_level = "authNoPriv" ## Context Name. # context_name = "" - ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". + ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "". + ### Protocols "AES192", "AES192", "AES256", and "AES256C" require the underlying net-snmp tools + ### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html) # priv_protocol = "" ## Privacy password used for encrypted messages. # priv_password = "" diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 199fbe83c156f..a5cda3d4ca491 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -345,6 +345,125 @@ func TestGetSNMPConnection_v3(t *testing.T) { assert.EqualValues(t, 2, sp.AuthoritativeEngineTime) } +func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { + testCases := []struct { + Name string + Algorithm gosnmp.SnmpV3PrivProtocol + Config *Snmp + }{ + { + Name: "AES192", + Algorithm: gosnmp.AES192, + Config: &Snmp{ + Agents: []string{"1.2.3.4"}, + ClientConfig: config.ClientConfig{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "AES192", + PrivPassword: "password123", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + }, + }, + }, + { + Name: "AES192C", + Algorithm: gosnmp.AES192C, + Config: &Snmp{ + Agents: []string{"1.2.3.4"}, + ClientConfig: config.ClientConfig{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "AES192C", + PrivPassword: "password123", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + }, + }, + }, + { + Name: "AES256", + Algorithm: gosnmp.AES256, + Config: &Snmp{ + Agents: []string{"1.2.3.4"}, + ClientConfig: config.ClientConfig{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "AES256", + PrivPassword: "password123", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + }, + }, + }, + { + Name: "AES256C", + Algorithm: gosnmp.AES256C, + Config: &Snmp{ + Agents: []string{"1.2.3.4"}, + ClientConfig: config.ClientConfig{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "AES256C", + PrivPassword: "password123", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + s := tc.Config + err := s.init() + require.NoError(t, err) + + gsc, err := s.getConnection(0) + require.NoError(t, err) + gs := gsc.(snmp.GosnmpWrapper) + assert.Equal(t, gs.Version, gosnmp.Version3) + sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters) + assert.Equal(t, "1.2.3.4", gsc.Host()) + assert.EqualValues(t, 20, gs.MaxRepetitions) + assert.Equal(t, "mycontext", gs.ContextName) + assert.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) + assert.Equal(t, "myuser", sp.UserName) + assert.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol) + assert.Equal(t, "password123", sp.AuthenticationPassphrase) + assert.Equal(t, tc.Algorithm, sp.PrivacyProtocol) + assert.Equal(t, "password123", sp.PrivacyPassphrase) + assert.Equal(t, "myengineid", sp.AuthoritativeEngineID) + assert.EqualValues(t, 1, sp.AuthoritativeEngineBoots) + assert.EqualValues(t, 2, sp.AuthoritativeEngineTime) + }) + } +} + func TestGetSNMPConnection_caching(t *testing.T) { s := &Snmp{ Agents: []string{"1.2.3.4", "1.2.3.5", "1.2.3.5"}, From 841e971acebf562c7d5e2c32ea673b65b4a049f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Jaber?= Date: Wed, 23 Dec 2020 10:43:21 -0600 Subject: [PATCH 153/761] Fix spelling and clarify docs (#8164) --- plugins/processors/topk/README.md | 56 ++++++++++++++----------------- 1 file changed, 26 insertions(+), 30 deletions(-) diff --git a/plugins/processors/topk/README.md b/plugins/processors/topk/README.md index 308d4f9f85f05..cfcb0b2176d38 100644 --- a/plugins/processors/topk/README.md +++ b/plugins/processors/topk/README.md @@ -1,16 +1,18 @@ # TopK Processor Plugin -The TopK processor plugin is a filter designed to get the top series over a period of time. It can be tweaked to do its top k computation over a period of time, so spikes can be smoothed out. +The TopK processor plugin is a filter designed to get the top series over a period of time. It can be tweaked to calculate the top metrics via different aggregation functions. This processor goes through these steps when processing a batch of metrics: - 1. Groups metrics in buckets using their tags and name as key - 2. Aggregates each of the selected fields for each bucket by the selected aggregation function (sum, mean, etc) - 3. Orders the buckets by one of the generated aggregations, returns all metrics in the top `K` buckets, then reorders the buckets by the next of the generated aggregations, returns all metrics in the top `K` buckets, etc, etc, etc, until it runs out of fields. + 1. Groups measurements in buckets based on their tags and name + 2. Every N seconds, for each bucket, for each selected field: aggregate all the measurements using a given aggregation function (min, sum, mean, etc) and the field. + 3. For each computed aggregation: order the buckets by the aggregation, then returns all measurements in the top `K` buckets -The plugin makes sure not to duplicate metrics - -Note that depending on the amount of metrics on each computed bucket, more than `K` metrics may be returned +Notes: + * The deduplicates metrics + * The name of the measurement is always used when grouping it + * Depending on the amount of metrics on each bucket, more than `K` series may be returned + * If a measurement does not have one of the selected fields, it is dropped from the aggregation ### Configuration: @@ -19,46 +21,40 @@ Note that depending on the amount of metrics on each computed bucket, more than ## How many seconds between aggregations # period = 10 - ## How many top metrics to return + ## How many top buckets to return # k = 10 - ## Over which tags should the aggregation be done. Globs can be specified, in - ## which case any tag matching the glob will aggregated over. If set to an - ## empty list is no aggregation over tags is done + ## Based on which tags should the buckets be computed. Globs can be specified. + ## If set to an empty list tags are not considered when creating the buckets # group_by = ['*'] - ## Over which fields are the top k are calculated + ## Over which fields is the aggregation done # fields = ["value"] - ## What aggregation to use. Options: sum, mean, min, max + ## What aggregation function to use. Options: sum, mean, min, max # aggregation = "mean" - ## Instead of the top k largest metrics, return the bottom k lowest metrics + ## Instead of the top k buckets, return the bottom k buckets # bottomk = false - ## The plugin assigns each metric a GroupBy tag generated from its name and - ## tags. If this setting is different than "" the plugin will add a - ## tag (which name will be the value of this setting) to each metric with - ## the value of the calculated GroupBy tag. Useful for debugging + ## This setting provides a way to know wich metrics where group together. + ## Add a tag (which name will be the value of this setting) to each metric. + ## The value will be the tags used to pick its bucket. # add_groupby_tag = "" - ## These settings provide a way to know the position of each metric in - ## the top k. The 'add_rank_field' setting allows to specify for which - ## fields the position is required. If the list is non empty, then a field - ## will be added to each and every metric for each string present in this - ## setting. This field will contain the ranking of the group that - ## the metric belonged to when aggregated over that field. + ## This setting provides a way to know the position of each metric's bucket in the top k + ## If the list is non empty, a field will be added to each and every metric + ## for each string present in this setting. This field will contain the ranking + ## of the bucket that the metric belonged to when aggregated over that field. ## The name of the field will be set to the name of the aggregation field, ## suffixed with the string '_topk_rank' # add_rank_fields = [] ## These settings provide a way to know what values the plugin is generating - ## when aggregating metrics. The 'add_aggregate_field' setting allows to - ## specify for which fields the final aggregation value is required. If the - ## list is non empty, then a field will be added to each every metric for - ## each field present in this setting. This field will contain - ## the computed aggregation for the group that the metric belonged to when - ## aggregated over that field. + ## when aggregating metrics. If the list is non empty, then a field will be + ## added to each every metric for each field present in this setting. + ## This field will contain the computed aggregation for the bucket that the + ## metric belonged to when aggregated over that field. ## The name of the field will be set to the name of the aggregation field, ## suffixed with the string '_topk_aggregate' # add_aggregate_fields = [] From 3c9c013f4bf562d43074beafac50f373d6682609 Mon Sep 17 00:00:00 2001 From: Andreas Fuchs Date: Wed, 23 Dec 2020 14:39:43 -0500 Subject: [PATCH 154/761] common/tls: Allow specifying SNI hostnames (#7897) * tls_config: Allow specifying SNI hostnames Add a new configration field `tls_server_name` that allows specifying the server name that'll be sent in the ClientHello when telegraf makes a request to TLS servers. This allows checking against load balancers responding to specific hostnames that otherwise wouldn't resolve to their addresses. Add the setting to the documentation of common TLS options, as well as to the http_response plugin. Fixes #7598. * Adjust the x509_cert to allow usage of tls_server_name This plugin has been using ServerName previously, and will have to deal with the new setting, too: Extract the server-name choosing into a method & add a test to ensure we choose the right value (and error under the right circumstances). Also document that the two settings are mutually exclusive. * Improve documentation on what we try to accomplish in the nil return Also get rid of the TODO, as I am fairly certain this behavior is the correct one. * Remove unused struct field in tests --- docs/TLS.md | 2 + plugins/common/tls/config.go | 18 ++++++--- plugins/common/tls/config_test.go | 8 ++++ plugins/inputs/http_response/README.md | 4 +- plugins/inputs/http_response/http_response.go | 4 +- .../http_response/http_response_test.go | 38 +++++++++++++++++++ plugins/inputs/x509_cert/README.md | 5 ++- plugins/inputs/x509_cert/x509_cert.go | 28 ++++++++++---- plugins/inputs/x509_cert/x509_cert_test.go | 38 +++++++++++++++++++ 9 files changed, 128 insertions(+), 17 deletions(-) diff --git a/docs/TLS.md b/docs/TLS.md index 3cd6a1025fc4b..355da32bb98be 100644 --- a/docs/TLS.md +++ b/docs/TLS.md @@ -18,6 +18,8 @@ For client TLS support we have the following options: # tls_key = "/etc/telegraf/key.pem" ## Skip TLS verification. # insecure_skip_verify = false +## Send the specified TLS server name via SNI. +# tls_server_name = "foo.example.com" ``` ### Server Configuration diff --git a/plugins/common/tls/config.go b/plugins/common/tls/config.go index 59fbc49526745..9a752fbce5714 100644 --- a/plugins/common/tls/config.go +++ b/plugins/common/tls/config.go @@ -14,6 +14,7 @@ type ClientConfig struct { TLSCert string `toml:"tls_cert"` TLSKey string `toml:"tls_key"` InsecureSkipVerify bool `toml:"insecure_skip_verify"` + ServerName string `toml:"tls_server_name"` // Deprecated in 1.7; use TLS variables above SSLCA string `toml:"ssl_ca"` @@ -45,11 +46,14 @@ func (c *ClientConfig) TLSConfig() (*tls.Config, error) { c.TLSKey = c.SSLKey } - // TODO: return default tls.Config; plugins should not call if they don't - // want TLS, this will require using another option to determine. In the - // case of an HTTP plugin, you could use `https`. Other plugins may need - // the dedicated option `TLSEnable`. - if c.TLSCA == "" && c.TLSKey == "" && c.TLSCert == "" && !c.InsecureSkipVerify { + // This check returns a nil (aka, "use the default") + // tls.Config if no field is set that would have an effect on + // a TLS connection. That is, any of: + // * client certificate settings, + // * peer certificate authorities, + // * disabled security, or + // * an SNI server name. + if c.TLSCA == "" && c.TLSKey == "" && c.TLSCert == "" && !c.InsecureSkipVerify && c.ServerName == "" { return nil, nil } @@ -73,6 +77,10 @@ func (c *ClientConfig) TLSConfig() (*tls.Config, error) { } } + if c.ServerName != "" { + tlsConfig.ServerName = c.ServerName + } + return tlsConfig, nil } diff --git a/plugins/common/tls/config_test.go b/plugins/common/tls/config_test.go index 93656087dfd55..2784ace6920e3 100644 --- a/plugins/common/tls/config_test.go +++ b/plugins/common/tls/config_test.go @@ -86,6 +86,14 @@ func TestClientConfig(t *testing.T) { SSLKey: pki.ClientKeyPath(), }, }, + { + name: "set SNI server name", + client: tls.ClientConfig{ + ServerName: "foo.example.com", + }, + expNil: false, + expErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 4e01bc0bbdfaf..81b512e80743f 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -63,6 +63,8 @@ This input plugin checks HTTP/HTTPS connections. # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Use the given name as the SNI server name on each URL + # tls_server_name = "" ## HTTP Request Headers (all values must be strings) # [inputs.http_response.headers] @@ -91,7 +93,7 @@ This input plugin checks HTTP/HTTPS connections. - response_string_match (int, 0 = mismatch / body read error, 1 = match) - response_status_code_match (int, 0 = mismatch, 1 = match) - http_response_code (int, response status code) - - result_type (string, deprecated in 1.6: use `result` tag and `result_code` field) + - result_type (string, deprecated in 1.6: use `result` tag and `result_code` field) - result_code (int, [see below](#result--result_code)) #### `result` / `result_code` diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 434dccca8d9c6..01ce81401a745 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -97,8 +97,8 @@ var sampleConfig = ` # {'fake':'data'} # ''' - ## Optional name of the field that will contain the body of the response. - ## By default it is set to an empty String indicating that the body's content won't be added + ## Optional name of the field that will contain the body of the response. + ## By default it is set to an empty String indicating that the body's content won't be added # response_body_field = '' ## Maximum allowed HTTP response body size in bytes. diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 7b25b4be57220..24ded226346b6 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -17,6 +17,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -1266,3 +1267,40 @@ func TestStatusCodeAndStringMatchFail(t *testing.T) { } checkOutput(t, &acc, expectedFields, expectedTags, nil, nil) } + +func TestSNI(t *testing.T) { + ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "super-special-hostname.example.com", r.TLS.ServerName) + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + URLs: []string{ts.URL + "/good"}, + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + ServerName: "super-special-hostname.example.com", + }, + } + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusOK, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + } + absentFields := []string{"response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) +} diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md index 760813ecc7adb..42adc39217358 100644 --- a/plugins/inputs/x509_cert/README.md +++ b/plugins/inputs/x509_cert/README.md @@ -15,7 +15,9 @@ file or network connection. ## Timeout for SSL connection # timeout = "5s" - ## Pass a different name into the TLS request (Server Name Indication) + ## Pass a different name into the TLS request (Server Name Indication). + ## This is synonymous with tls_server_name, and only one of the two + ## options may be specified at one time. ## example: server_name = "myhost.example.org" # server_name = "myhost.example.org" @@ -23,6 +25,7 @@ file or network connection. # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" + # tls_server_name = "myhost.example.org" ``` diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index e5c2f835baacf..b4a8a0716ffb5 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -73,6 +73,19 @@ func (c *X509Cert) locationToURL(location string) (*url.URL, error) { return u, nil } +func (c *X509Cert) serverName(u *url.URL) (string, error) { + if c.tlsCfg.ServerName != "" { + if c.ServerName != "" { + return "", fmt.Errorf("both server_name (%q) and tls_server_name (%q) are set, but they are mutually exclusive", c.ServerName, c.tlsCfg.ServerName) + } + return c.tlsCfg.ServerName, nil + } + if c.ServerName != "" { + return c.ServerName, nil + } + return u.Hostname(), nil +} + func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certificate, error) { switch u.Scheme { case "https": @@ -87,11 +100,11 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica } defer ipConn.Close() - if c.ServerName == "" { - c.tlsCfg.ServerName = u.Hostname() - } else { - c.tlsCfg.ServerName = c.ServerName + serverName, err := c.serverName(u) + if err != nil { + return nil, err } + c.tlsCfg.ServerName = serverName c.tlsCfg.InsecureSkipVerify = true conn := tls.Client(ipConn, c.tlsCfg) @@ -218,10 +231,9 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, } if i == 0 { - if c.ServerName == "" { - opts.DNSName = u.Hostname() - } else { - opts.DNSName = c.ServerName + opts.DNSName, err = c.serverName(u) + if err != nil { + return err } for j, cert := range certs { if j != 0 { diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 041d20db787ea..58f86a65473f9 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -6,6 +6,7 @@ import ( "fmt" "io/ioutil" "math/big" + "net/url" "os" "path/filepath" "runtime" @@ -17,6 +18,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + _tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" ) @@ -347,3 +349,39 @@ func TestGatherCert(t *testing.T) { assert.True(t, acc.HasMeasurement("x509_cert")) } + +func TestServerName(t *testing.T) { + tests := []struct { + name string + fromTLS string + fromCfg string + url string + expected string + err bool + }{ + {name: "in cfg", fromCfg: "example.com", url: "https://other.example.com", expected: "example.com"}, + {name: "in tls", fromTLS: "example.com", url: "https://other.example.com", expected: "example.com"}, + {name: "from URL", url: "https://other.example.com", expected: "other.example.com"}, + {name: "errors", fromCfg: "otherex.com", fromTLS: "example.com", url: "https://other.example.com", err: true}, + } + + for _, elt := range tests { + test := elt + t.Run(test.name, func(t *testing.T) { + sc := &X509Cert{ + ServerName: test.fromCfg, + ClientConfig: _tls.ClientConfig{ServerName: test.fromTLS}, + } + sc.Init() + u, err := url.Parse(test.url) + require.NoError(t, err) + actual, err := sc.serverName(u) + if test.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, test.expected, actual) + }) + } +} From 3d30fd85c6cf9dd7a9384abe4c86e9b050690a49 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Wed, 23 Dec 2020 13:41:59 -0800 Subject: [PATCH 155/761] update intel powerstat readme (#8600) --- plugins/inputs/intel_powerstat/README.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/intel_powerstat/README.md b/plugins/inputs/intel_powerstat/README.md index 9efb4176d525b..009c8cafc1cfb 100644 --- a/plugins/inputs/intel_powerstat/README.md +++ b/plugins/inputs/intel_powerstat/README.md @@ -1,11 +1,9 @@ # Intel PowerStat Input Plugin +This input plugin monitors power statistics on Intel-based platforms and assumes presence of Linux based OS. -Telemetry frameworks allow users to monitor critical platform level metrics. +Main use cases are power saving and workload migration. Telemetry frameworks allow users to monitor critical platform level metrics. Key source of platform telemetry is power domain that is beneficial for MANO/Monitoring&Analytics systems -to take preventive/corrective actions based on platform busyness, CPU temperature, actual CPU utilization -and power statistics. Main use cases are power saving and workload migration. - -Intel PowerStat plugin supports Intel based platforms and assumes presence of Linux based OS. +to take preventive/corrective actions based on platform busyness, CPU temperature, actual CPU utilization and power statistics. ### Configuration: ```toml From b75221e6c625f9fd92d5b8d834c11a657185bb2d Mon Sep 17 00:00:00 2001 From: Ted Timmons Date: Tue, 29 Dec 2020 11:37:19 -0800 Subject: [PATCH 156/761] update influxdb_v2 config documentation in main (#8618) [Per the v2 output readme](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb_v2/README.md) and trial/error the fields aren't arrays, and "org" is actually "organization". --- docs/CONFIGURATION.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 9b8b07263b700..d97c86ba082d3 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -87,16 +87,16 @@ INFLUX_BUCKET="replace_with_your_bucket_name" # For InfluxDB OSS 2: [[outputs.influxdb_v2]] urls = ["${INFLUX_HOST}"] - token = ["${INFLUX_TOKEN}"] - org = ["${INFLUX_ORG}"] - bucket = ["${INFLUX_BUCKET}"] + token = "${INFLUX_TOKEN}" + organization = "${INFLUX_ORG}" + bucket = "${INFLUX_BUCKET}" # For InfluxDB Cloud 2: [[outputs.influxdb_v2]] urls = ["${INFLUX_HOST}"] - token = ["${INFLUX_TOKEN}"] - org = ["${INFLUX_ORG}"] - bucket = ["${INFLUX_BUCKET}"] + token = "${INFLUX_TOKEN}" + organization = "${INFLUX_ORG}" + bucket = "${INFLUX_BUCKET}" ``` The above files will produce the following effective configuration file to be @@ -117,7 +117,7 @@ parsed: [[outputs.influxdb_v2]] urls = ["http://127.0.0.1:8086"] # double check the port. could be 9999 if using OSS Beta token = "replace_with_your_token" - org = "your_username" + organization = "your_username" bucket = "replace_with_your_bucket_name" # For InfluxDB Cloud 2: @@ -126,7 +126,7 @@ parsed: INFLUX_HOST="https://us-west-2-1.aws.cloud2.influxdata.com" # Other Cloud URLs at https://v2.docs.influxdata.com/v2.0/reference/urls/#influxdb-cloud-urls token = "replace_with_your_token" - org = "yourname@yourcompany.com" + organization = "yourname@yourcompany.com" bucket = "replace_with_your_bucket_name" ``` From 33d5ba49dcb7c9f37b85346abbd9a28e4c04e28a Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 29 Dec 2020 14:58:16 -0600 Subject: [PATCH 157/761] Lgtm config (#8625) * Hide python * new line --- .lgtm.yml | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .lgtm.yml diff --git a/.lgtm.yml b/.lgtm.yml new file mode 100644 index 0000000000000..dbbf05d492ef9 --- /dev/null +++ b/.lgtm.yml @@ -0,0 +1,2 @@ +queries: + - exclude: py/* From c319e63a5a5474babeedb073d7e16a0e3b27eb8d Mon Sep 17 00:00:00 2001 From: Yuri Grigorov Date: Wed, 30 Dec 2020 21:59:58 +0300 Subject: [PATCH 158/761] outputs/http: add option to control idle connection timeout (#8055) Co-authored-by: Yuri Grigorov --- plugins/outputs/http/README.md | 7 ++++++- plugins/outputs/http/http.go | 7 +++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/http/README.md b/plugins/outputs/http/README.md index 0229c0e6ada7f..27de975c0761a 100644 --- a/plugins/outputs/http/README.md +++ b/plugins/outputs/http/README.md @@ -1,7 +1,7 @@ # HTTP Output Plugin This plugin sends metrics in a HTTP message encoded using one of the output -data formats. For data_formats that support batching, metrics are sent in batch format. +data formats. For data_formats that support batching, metrics are sent in batch format. ### Configuration: @@ -48,4 +48,9 @@ data formats. For data_formats that support batching, metrics are sent in batch # [outputs.http.headers] # # Should be set manually to "application/json" for json data_format # Content-Type = "text/plain; charset=utf-8" + + ## Idle (keep-alive) connection timeout. + ## Maximum amount of time before idle connection is closed. + ## Zero means no limit. + # idle_conn_timeout = 0 ``` diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index d75d5ef5a4df2..95d3fcf71a096 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -64,6 +64,11 @@ var sampleConfig = ` # [outputs.http.headers] # # Should be set manually to "application/json" for json data_format # Content-Type = "text/plain; charset=utf-8" + + ## Idle (keep-alive) connection timeout. + ## Maximum amount of time before idle connection is closed. + ## Zero means no limit. + # idle_conn_timeout = 0 ` const ( @@ -84,6 +89,7 @@ type HTTP struct { TokenURL string `toml:"token_url"` Scopes []string `toml:"scopes"` ContentEncoding string `toml:"content_encoding"` + IdleConnTimeout internal.Duration `toml:"idle_conn_timeout"` tls.ClientConfig client *http.Client @@ -104,6 +110,7 @@ func (h *HTTP) createClient(ctx context.Context) (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, Proxy: http.ProxyFromEnvironment, + IdleConnTimeout: h.IdleConnTimeout.Duration, }, Timeout: h.Timeout.Duration, } From 61c64cba1effd8006cedaaedb62683591eebd478 Mon Sep 17 00:00:00 2001 From: Mariusz Brzeski Date: Tue, 5 Jan 2021 17:57:22 +0100 Subject: [PATCH 159/761] Open Hardware Monitor (#8646) Co-authored-by: Mariano --- EXTERNAL_PLUGINS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index bdbd244ca3ec3..c78d7d19dd61a 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -13,6 +13,7 @@ Pull requests welcome. - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. +- [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open hardware Monitor](http://openhardwaremonitor.org) ## Outputs - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. From e545167ba890b1e770c92daa8b22d6987d91bca2 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 5 Jan 2021 11:02:32 -0800 Subject: [PATCH 160/761] alphabetize external plugins list (#8647) * alphabetize --- EXTERNAL_PLUGINS.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index c78d7d19dd61a..a529122dcc94c 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -5,15 +5,14 @@ Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for mor Pull requests welcome. - ## Inputs -- [rand](https://github.com/ssoroka/rand) - Generate random numbers -- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts -- [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. +- [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open Hardware Monitor](http://openhardwaremonitor.org) +- [rand](https://github.com/ssoroka/rand) - Generate random numbers - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. -- [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open hardware Monitor](http://openhardwaremonitor.org) +- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts +- [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels ## Outputs - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. From 4608620924fd3f07c0b93091e794d73fe528f3b6 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 5 Jan 2021 14:47:13 -0500 Subject: [PATCH 161/761] remove redundant reference to docs in data formats docs (#8652) --- docs/DATA_FORMATS_INPUT.md | 3 --- docs/DATA_FORMATS_OUTPUT.md | 3 --- 2 files changed, 6 deletions(-) diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index ff660ab204f8f..dbcb283a10cc8 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -30,9 +30,6 @@ desired parser: name_suffix = "_mycollector" ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" ``` diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index 2b3e953601218..12a301bc5f54f 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -22,8 +22,5 @@ You will be able to identify the plugins with support by the presence of a files = ["stdout"] ## Data format to output. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ``` From c8584a7b3f4383e49e7aa9adbbe4b4663b5a27c4 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 6 Jan 2021 16:23:29 -0600 Subject: [PATCH 162/761] Upgrade circle-ci config to v2.1 (#8621) * Upgrade to 2.1 circle-ci config * new line --- .circleci/config.yml | 186 +++++++++++++------------ .gitignore | 1 + plugins/outputs/sumologic/sumologic.go | 2 +- scripts/local_circleci.sh | 6 + 4 files changed, 107 insertions(+), 88 deletions(-) create mode 100755 scripts/local_circleci.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 2b55149d0abc2..5e44cfe59f4c2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,16 +1,19 @@ ---- -defaults: - defaults: &defaults +version: 2.1 + +executors: + go-1_14: working_directory: '/go/src/github.com/influxdata/telegraf' - environment: - GOFLAGS: -p=8 - go-1_14: &go-1_14 docker: - image: 'quay.io/influxdb/telegraf-ci:1.14.9' - go-1_15: &go-1_15 + environment: + GOFLAGS: -p=8 + go-1_15: + working_directory: '/go/src/github.com/influxdata/telegraf' docker: - image: 'quay.io/influxdb/telegraf-ci:1.15.5' - mac: &mac + environment: + GOFLAGS: -p=8 + mac: macos: xcode: 12.1.0 working_directory: '~/go/src/github.com/influxdata/telegraf' @@ -18,10 +21,52 @@ defaults: HOMEBREW_NO_AUTO_UPDATE: 1 GOFLAGS: -p=8 -version: 2 +commands: + test-go: + parameters: + goarch: + type: string + default: "amd64" + steps: + - checkout + - attach_workspace: + at: '/go' + - run: 'GOARCH=<< parameters.goarch >> make' + - run: 'GOARCH=<< parameters.goarch >> make check' + - run: 'GOARCH=<< parameters.goarch >> make check-deps' + - run: 'GOARCH=<< parameters.goarch >> make test' + test-go-mac: + steps: + - checkout + - attach_workspace: + at: '/' + - run: 'make' + - run: 'make check' + - run: 'make test' + package: + parameters: + nightly: + type: boolean + default: false + steps: + - checkout + - attach_workspace: + at: '/go' + - when: + condition: << parameters.nightly >> + steps: + - run: 'NIGHTLY=1 make package' + - run: 'make upload-nightly' + - unless: + condition: << parameters.nightly >> + steps: + - run: 'make package' + - store_artifacts: + path: './build/dist' + destination: 'build/dist' jobs: deps: - <<: [ *defaults, *go-1_15 ] + executor: go-1_15 steps: - checkout - restore_cache: @@ -38,7 +83,7 @@ jobs: paths: - '*' macdeps: - <<: [ *mac ] + executor: mac steps: - checkout - restore_cache: @@ -62,75 +107,42 @@ jobs: - 'usr/local/bin/gofmt' - 'Users/distiller/go' - test-go-1.14: - <<: [ *defaults, *go-1_14 ] + test-go-1_14: + executor: go-1_14 steps: - - attach_workspace: - at: '/go' - - run: 'make' - - run: 'make test' - test-go-1.14-386: - <<: [ *defaults, *go-1_14 ] + - test-go + test-go-1_14-386: + executor: go-1_14 steps: - - attach_workspace: - at: '/go' - - run: 'GOARCH=386 make' - - run: 'GOARCH=386 make test' - test-go-1.15: - <<: [ *defaults, *go-1_15 ] + - test-go: + goarch: "386" + test-go-1_15: + executor: go-1_15 steps: - - attach_workspace: - at: '/go' - - run: 'make' - - run: 'make check' - - run: 'make check-deps' - - run: 'make test' - test-go-1.15-386: - <<: [ *defaults, *go-1_15 ] + - test-go + test-go-1_15-386: + executor: go-1_15 steps: - - attach_workspace: - at: '/go' - - run: 'GOARCH=386 make' - - run: 'GOARCH=386 make check' - - run: 'GOARCH=386 make test' + - test-go: + goarch: "386" test-go-darwin: - <<: [ *mac ] + executor: mac steps: - - attach_workspace: - at: '/' - - run: 'make' - - run: 'make check' - - run: 'make test' + - test-go-mac package: - <<: [ *defaults, *go-1_15 ] + executor: go-1_15 steps: - - attach_workspace: - at: '/go' - - run: 'make package' - - store_artifacts: - path: './build/dist' - destination: 'build/dist' - + - package release: - <<: [ *defaults, *go-1_15 ] + executor: go-1_15 steps: - - attach_workspace: - at: '/go' - - run: 'make package' - - store_artifacts: - path: './build/dist' - destination: 'build/dist' + - package nightly: - <<: [ *defaults, *go-1_15 ] + executor: go-1_15 steps: - - attach_workspace: - at: '/go' - - run: 'NIGHTLY=1 make package' - - run: 'make upload-nightly' - - store_artifacts: - path: './build/dist' - destination: 'build/dist' + - package: + nightly: true workflows: version: 2 @@ -144,25 +156,25 @@ workflows: filters: tags: only: /.*/ - - 'test-go-1.14': + - 'test-go-1_14': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1.14-386': + - 'test-go-1_14-386': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1.15': + - 'test-go-1_15': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1.15-386': + - 'test-go-1_15-386': requires: - 'deps' filters: @@ -177,17 +189,17 @@ workflows: - 'package': requires: - 'test-go-darwin' - - 'test-go-1.14' - - 'test-go-1.14-386' - - 'test-go-1.15' - - 'test-go-1.15-386' + - 'test-go-1_14' + - 'test-go-1_14-386' + - 'test-go-1_15' + - 'test-go-1_15-386' - 'release': requires: - 'test-go-darwin' - - 'test-go-1.14' - - 'test-go-1.14-386' - - 'test-go-1.15' - - 'test-go-1.15-386' + - 'test-go-1_14' + - 'test-go-1_14-386' + - 'test-go-1_15' + - 'test-go-1_15-386' filters: tags: only: /.*/ @@ -197,16 +209,16 @@ workflows: jobs: - 'deps' - 'macdeps' - - 'test-go-1.14': + - 'test-go-1_14': requires: - 'deps' - - 'test-go-1.14-386': + - 'test-go-1_14-386': requires: - 'deps' - - 'test-go-1.15': + - 'test-go-1_15': requires: - 'deps' - - 'test-go-1.15-386': + - 'test-go-1_15-386': requires: - 'deps' - 'test-go-darwin': @@ -215,10 +227,10 @@ workflows: - 'nightly': requires: - 'test-go-darwin' - - 'test-go-1.14' - - 'test-go-1.14-386' - - 'test-go-1.15' - - 'test-go-1.15-386' + - 'test-go-1_14' + - 'test-go-1_14-386' + - 'test-go-1_15' + - 'test-go-1_15-386' triggers: - schedule: cron: "0 7 * * *" diff --git a/.gitignore b/.gitignore index df2b3d06643c5..c733e317ce1a7 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ /telegraf.gz /vendor .DS_Store +process.yml diff --git a/plugins/outputs/sumologic/sumologic.go b/plugins/outputs/sumologic/sumologic.go index fd9fe908ba099..da86eadfae585 100644 --- a/plugins/outputs/sumologic/sumologic.go +++ b/plugins/outputs/sumologic/sumologic.go @@ -75,7 +75,7 @@ const ( defaultClientTimeout = 5 * time.Second defaultMethod = http.MethodPost - defaultMaxRequestBodySize = 1_000_000 + defaultMaxRequestBodySize = 1000000 contentTypeHeader = "Content-Type" carbon2ContentType = "application/vnd.sumologic.carbon2" diff --git a/scripts/local_circleci.sh b/scripts/local_circleci.sh new file mode 100755 index 0000000000000..87623713d605e --- /dev/null +++ b/scripts/local_circleci.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +jobName=$1 + +circleci config process .circleci/config.yml > process.yml +circleci local execute -c process.yml --job $jobName From ee76535a12c32a98f5261ad857cbbbf2f32cca72 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 6 Jan 2021 16:23:40 -0600 Subject: [PATCH 163/761] Provide test environment for kibana input plugin (#8629) --- plugins/inputs/kibana/README.md | 15 ++++ .../basic_kibana_telegraf.conf | 75 +++++++++++++++++++ .../test_environment/docker-compose.yml | 48 ++++++++++++ .../kibana/test_environment/run_test_env.sh | 3 + 4 files changed, 141 insertions(+) create mode 100644 plugins/inputs/kibana/test_environment/basic_kibana_telegraf.conf create mode 100644 plugins/inputs/kibana/test_environment/docker-compose.yml create mode 100755 plugins/inputs/kibana/test_environment/run_test_env.sh diff --git a/plugins/inputs/kibana/README.md b/plugins/inputs/kibana/README.md index 73bf4a2981d63..a5002d5f21204 100644 --- a/plugins/inputs/kibana/README.md +++ b/plugins/inputs/kibana/README.md @@ -53,3 +53,18 @@ The `kibana` plugin queries the [Kibana][] API to obtain the service status. ``` kibana,host=myhost,name=my-kibana,source=localhost:5601,status=green,version=6.5.4 concurrent_connections=8i,heap_max_bytes=447778816i,heap_total_bytes=447778816i,heap_used_bytes=380603352i,requests_per_sec=1,response_time_avg_ms=57.6,response_time_max_ms=220i,status_code=1i,uptime_ms=6717489805i 1534864502000000000 ``` + +## Run example environment + +Requires the following tools: + +* [Docker](https://docs.docker.com/get-docker/) +* [Docker Compose](https://docs.docker.com/compose/install/) + +From the root of this project execute the following script: `./plugins/inputs/kibana/test_environment/run_test_env.sh` + +This will build the latest Telegraf and then start up Kibana and Elasticsearch, Telegraf will begin monitoring Kibana's status and write its results to the file `/tmp/metrics.out` in the Telegraf container. + +Then you can attach to the telegraf container to inspect the file `/tmp/metrics.out` to see if the status is being reported. + +The Visual Studio Code [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension provides an easy user interface to attach to the running container. \ No newline at end of file diff --git a/plugins/inputs/kibana/test_environment/basic_kibana_telegraf.conf b/plugins/inputs/kibana/test_environment/basic_kibana_telegraf.conf new file mode 100644 index 0000000000000..c67f346b5c170 --- /dev/null +++ b/plugins/inputs/kibana/test_environment/basic_kibana_telegraf.conf @@ -0,0 +1,75 @@ +# Telegraf Configuration for basic Kibana example + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "10s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Override default hostname, if empty use os.Hostname() + hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + +# Send telegraf metrics to file(s) +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" + +############################################################################### +# INPUT PLUGINS # +############################################################################### + +# Read status information from one or more Kibana servers +[[inputs.kibana]] + ## Specify a list of one or more Kibana servers + servers = ["http://kib01:5601"] + + ## Timeout for HTTP requests + timeout = "5s" diff --git a/plugins/inputs/kibana/test_environment/docker-compose.yml b/plugins/inputs/kibana/test_environment/docker-compose.yml new file mode 100644 index 0000000000000..8aa6db00df009 --- /dev/null +++ b/plugins/inputs/kibana/test_environment/docker-compose.yml @@ -0,0 +1,48 @@ +## Reference: https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-cli-run-dev-mode +version: '2.2' +services: + es01: + image: docker.elastic.co/elasticsearch/elasticsearch:7.10.1 + container_name: es01 + environment: + - node.name=es01 + - cluster.name=es-docker-cluster + - cluster.initial_master_nodes=es01 + - bootstrap.memory_lock=true + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + ulimits: + memlock: + soft: -1 + hard: -1 + volumes: + - data01:/usr/share/elasticsearch/data + ports: + - 9200:9200 + networks: + - elastic + + kib01: + image: docker.elastic.co/kibana/kibana:7.10.1 + container_name: kib01 + ports: + - 5601:5601 + environment: + ELASTICSEARCH_URL: http://es01:9200 + ELASTICSEARCH_HOSTS: http://es01:9200 + networks: + - elastic + + telegraf: + image: local_telegraf + volumes: + - ./basic_kibana_telegraf.conf:/etc/telegraf/telegraf.conf:ro + networks: + - elastic + +volumes: + data01: + driver: local + +networks: + elastic: + driver: bridge diff --git a/plugins/inputs/kibana/test_environment/run_test_env.sh b/plugins/inputs/kibana/test_environment/run_test_env.sh new file mode 100755 index 0000000000000..8ea741ac3f98e --- /dev/null +++ b/plugins/inputs/kibana/test_environment/run_test_env.sh @@ -0,0 +1,3 @@ +docker build -t local_telegraf -f scripts/alpine.docker . + +docker-compose -f plugins/inputs/kibana/test_environment/docker-compose.yml up From 9ee6e034fb6149d78515511ba4b841db8ac9bdc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Du=C5=A1an=20Pajin?= <1251740+dpajin@users.noreply.github.com> Date: Thu, 7 Jan 2021 16:44:33 +0100 Subject: [PATCH 164/761] Input SNMP plugin - upgrade gosnmp library to version 1.29.0 (#8588) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 1 + go.sum | 3 +++ internal/snmp/wrapper.go | 2 +- plugins/inputs/snmp/snmp.go | 2 +- plugins/inputs/snmp/snmp_test.go | 2 +- 6 files changed, 8 insertions(+), 3 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 2ae86bb537eaf..642c79673b18c 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -71,6 +71,7 @@ following works: - github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) - github.com/gopcua/opcua [MIT License](https://github.com/gopcua/opcua/blob/master/LICENSE) - github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE) +- github.com/gosnmp/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/gosnmp/gosnmp/blob/master/LICENSE) - github.com/grpc-ecosystem/grpc-gateway [BSD 3-Clause "New" or "Revised" License](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/LICENSE.txt) - github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) - github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE) diff --git a/go.mod b/go.mod index d0da1c1516390..eba35c8b51dc9 100644 --- a/go.mod +++ b/go.mod @@ -66,6 +66,7 @@ require ( github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.12 github.com/gorilla/mux v1.6.2 + github.com/gosnmp/gosnmp v1.29.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 diff --git a/go.sum b/go.sum index a0d9277dc9574..f51b9c9b8c2ec 100644 --- a/go.sum +++ b/go.sum @@ -258,6 +258,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -315,6 +316,8 @@ github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gosnmp/gosnmp v1.29.0 h1:fEkud7oiYVzR64L+/BQA7uvp+7COI9+XkrUQi8JunYM= +github.com/gosnmp/gosnmp v1.29.0/go.mod h1:Ux0YzU4nV5yDET7dNIijd0VST0BCy8ijBf+gTVFQeaM= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= diff --git a/internal/snmp/wrapper.go b/internal/snmp/wrapper.go index db0f225c9f793..0655285060d37 100644 --- a/internal/snmp/wrapper.go +++ b/internal/snmp/wrapper.go @@ -6,7 +6,7 @@ import ( "strconv" "strings" - "github.com/soniah/gosnmp" + "github.com/gosnmp/gosnmp" ) // GosnmpWrapper wraps a *gosnmp.GoSNMP object so we can use it as a snmpConnection. diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index f8fa500043e9b..9aac89b8d70e9 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -14,12 +14,12 @@ import ( "sync" "time" + "github.com/gosnmp/gosnmp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/wlog" - "github.com/soniah/gosnmp" ) const description = `Retrieves SNMP values from remote agents` diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index a5cda3d4ca491..e14305d087144 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -9,13 +9,13 @@ import ( "testing" "time" + "github.com/gosnmp/gosnmp" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/snmp" config "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" - "github.com/soniah/gosnmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) From 4b7d11385c97240cf7dee30636bcd3950cd0ba25 Mon Sep 17 00:00:00 2001 From: Aladex Date: Thu, 7 Jan 2021 19:21:09 +0300 Subject: [PATCH 165/761] Using mime-type in prometheus parser to handle protocol-buffer responses (#8545) --- plugins/inputs/prometheus/prometheus.go | 2 +- plugins/parsers/prometheus/parser.go | 27 +++++- plugins/parsers/prometheus/parser_test.go | 105 ++++++++++++++++++++++ 3 files changed, 130 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 5a7891ceb60ef..8ec316bb8aaf6 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -330,7 +330,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error } if p.MetricVersion == 2 { - parser := parser_v2.Parser{} + parser := parser_v2.Parser{Header: resp.Header} metrics, err = parser.Parse(body) } else { metrics, err = Parse(body, resp.Header) diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go index c5355ffe07a8f..e512d1c9934d5 100644 --- a/plugins/parsers/prometheus/parser.go +++ b/plugins/parsers/prometheus/parser.go @@ -4,7 +4,11 @@ import ( "bufio" "bytes" "fmt" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "io" "math" + "mime" + "net/http" "time" "github.com/influxdata/telegraf" @@ -17,6 +21,7 @@ import ( type Parser struct { DefaultTags map[string]string + Header http.Header } func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { @@ -31,9 +36,25 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { // Prepare output metricFamilies := make(map[string]*dto.MetricFamily) - metricFamilies, err = parser.TextToMetricFamilies(reader) - if err != nil { - return nil, fmt.Errorf("reading text format failed: %s", err) + mediatype, params, err := mime.ParseMediaType(p.Header.Get("Content-Type")) + if err == nil && mediatype == "application/vnd.google.protobuf" && + params["encoding"] == "delimited" && + params["proto"] == "io.prometheus.client.MetricFamily" { + for { + mf := &dto.MetricFamily{} + if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil { + if ierr == io.EOF { + break + } + return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr) + } + metricFamilies[mf.GetName()] = mf + } + } else { + metricFamilies, err = parser.TextToMetricFamilies(reader) + if err != nil { + return nil, fmt.Errorf("reading text format failed: %s", err) + } } now := time.Now() diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go index 74530ef1b9233..8b8a4ad2ff7b0 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -2,6 +2,9 @@ package prometheus import ( "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" "testing" "time" @@ -344,3 +347,105 @@ func parse(buf []byte) ([]telegraf.Metric, error) { parser := Parser{} return parser.Parse(buf) } + +func TestParserProtobufHeader(t *testing.T) { + var uClient = &http.Client{ + Transport: &http.Transport{ + DisableKeepAlives: true, + }, + } + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "omsk", + }, + map[string]interface{}{ + "swap_free": 9.77911808e+08, + }, + time.Unix(0, 0), + 2, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "omsk", + }, + map[string]interface{}{ + "swap_in": 2.031616e+06, + }, + time.Unix(0, 0), + 1, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "omsk", + }, + map[string]interface{}{ + "swap_out": 1.579008e+07, + }, + time.Unix(0, 0), + 1, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "omsk", + }, + map[string]interface{}{ + "swap_total": 9.93185792e+08, + }, + time.Unix(0, 0), + 2, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "omsk", + }, + map[string]interface{}{ + "swap_used": 1.5273984e+07, + }, + time.Unix(0, 0), + 2, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "omsk", + }, + map[string]interface{}{ + "swap_used_percent": 1.5378778193395661, + }, + time.Unix(0, 0), + 2, + ), + } + sampleProtoBufData := []uint8{67, 10, 9, 115, 119, 97, 112, 95, 102, 114, 101, 101, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 0, 0, 0, 0, 224, 36, 205, 65, 65, 10, 7, 115, 119, 97, 112, 95, 105, 110, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 0, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 26, 9, 9, 0, 0, 0, 0, 0, 0, 63, 65, 66, 10, 8, 115, 119, 97, 112, 95, 111, 117, 116, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 0, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 26, 9, 9, 0, 0, 0, 0, 0, 30, 110, 65, 68, 10, 10, 115, 119, 97, 112, 95, 116, 111, 116, 97, 108, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 0, 0, 0, 0, 104, 153, 205, 65, 67, 10, 9, 115, 119, 97, 112, 95, 117, 115, 101, 100, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 0, 0, 0, 0, 0, 34, 109, 65, 75, 10, 17, 115, 119, 97, 112, 95, 117, 115, 101, 100, 95, 112, 101, 114, 99, 101, 110, 116, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 109, 234, 180, 197, 37, 155, 248, 63} + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited") + w.Write(sampleProtoBufData) + })) + defer ts.Close() + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatalf("unable to create new request '%s': %s", ts.URL, err) + } + var resp *http.Response + resp, err = uClient.Do(req) + if err != nil { + t.Fatalf("error making HTTP request to %s: %s", ts.URL, err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading body: %s", err) + } + parser := Parser{Header: resp.Header} + metrics, err := parser.Parse(body) + if err != nil { + t.Fatalf("error reading metrics for %s: %s", ts.URL, err) + } + testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) +} From 910b7268763c74849f1574cbc305c7a53b358b49 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Thu, 7 Jan 2021 11:39:12 -0500 Subject: [PATCH 166/761] Optimize SeriesGrouper & aggregators.merge (#8391) The previous implementation of SeriesGrouper required breaking a metric object apart into its constituents, converting tags and keys into unoptimized maps, only to have it put them back together into another metric object. This resulted in a significant performance overhead. This overhead was further compounded when the number of fields was large. This change adds a new AddMetric method to SeriesGrouper which preserves the metric object and removes the back-and-forth conversion. Additionlly the method used for calculating the metric's hash was switched to use maphash, which is optimized for this case. ---- Benchmarks Before: BenchmarkMergeOne-16 106012 11790 ns/op BenchmarkMergeTwo-16 48529 24819 ns/op BenchmarkGroupID-16 780018 1608 ns/op After: BenchmarkMergeOne-16 907093 1173 ns/op BenchmarkMergeTwo-16 508321 2168 ns/op BenchmarkGroupID-16 11217788 99.4 ns/op --- metric/series_grouper.go | 71 +++++++++++++++++-------- metric/series_grouper_test.go | 37 +++++++++++++ plugins/aggregators/merge/merge.go | 8 +-- plugins/aggregators/merge/merge_test.go | 69 +++++++++++++++++++++++- 4 files changed, 154 insertions(+), 31 deletions(-) create mode 100644 metric/series_grouper_test.go diff --git a/metric/series_grouper.go b/metric/series_grouper.go index 5dc66e11b8e00..c6ba23793d478 100644 --- a/metric/series_grouper.go +++ b/metric/series_grouper.go @@ -1,10 +1,9 @@ package metric import ( - "hash/fnv" - "io" + "encoding/binary" + "hash/maphash" "sort" - "strconv" "time" "github.com/influxdata/telegraf" @@ -23,14 +22,17 @@ import ( // + cpu,host=localhost idle_time=42,usage_time=42 func NewSeriesGrouper() *SeriesGrouper { return &SeriesGrouper{ - metrics: make(map[uint64]telegraf.Metric), - ordered: []telegraf.Metric{}, + metrics: make(map[uint64]telegraf.Metric), + ordered: []telegraf.Metric{}, + hashSeed: maphash.MakeSeed(), } } type SeriesGrouper struct { metrics map[uint64]telegraf.Metric ordered []telegraf.Metric + + hashSeed maphash.Seed } // Add adds a field key and value to the series. @@ -41,8 +43,15 @@ func (g *SeriesGrouper) Add( field string, fieldValue interface{}, ) error { + taglist := make([]*telegraf.Tag, 0, len(tags)) + for k, v := range tags { + taglist = append(taglist, + &telegraf.Tag{Key: k, Value: v}) + } + sort.Slice(taglist, func(i, j int) bool { return taglist[i].Key < taglist[j].Key }) + var err error - id := groupID(measurement, tags, tm) + id := groupID(g.hashSeed, measurement, taglist, tm) metric := g.metrics[id] if metric == nil { metric, err = New(measurement, tags, map[string]interface{}{field: fieldValue}, tm) @@ -57,30 +66,46 @@ func (g *SeriesGrouper) Add( return nil } +// AddMetric adds a metric to the series, merging with any previous matching metrics. +func (g *SeriesGrouper) AddMetric( + metric telegraf.Metric, +) { + id := groupID(g.hashSeed, metric.Name(), metric.TagList(), metric.Time()) + m := g.metrics[id] + if m == nil { + m = metric.Copy() + g.metrics[id] = m + g.ordered = append(g.ordered, m) + } else { + for _, f := range metric.FieldList() { + m.AddField(f.Key, f.Value) + } + } +} + // Metrics returns the metrics grouped by series and time. func (g *SeriesGrouper) Metrics() []telegraf.Metric { return g.ordered } -func groupID(measurement string, tags map[string]string, tm time.Time) uint64 { - h := fnv.New64a() - h.Write([]byte(measurement)) - h.Write([]byte("\n")) +func groupID(seed maphash.Seed, measurement string, taglist []*telegraf.Tag, tm time.Time) uint64 { + var mh maphash.Hash + mh.SetSeed(seed) + + mh.WriteString(measurement) + mh.WriteByte(0) - taglist := make([]*telegraf.Tag, 0, len(tags)) - for k, v := range tags { - taglist = append(taglist, - &telegraf.Tag{Key: k, Value: v}) - } - sort.Slice(taglist, func(i, j int) bool { return taglist[i].Key < taglist[j].Key }) for _, tag := range taglist { - h.Write([]byte(tag.Key)) - h.Write([]byte("\n")) - h.Write([]byte(tag.Value)) - h.Write([]byte("\n")) + mh.WriteString(tag.Key) + mh.WriteByte(0) + mh.WriteString(tag.Value) + mh.WriteByte(0) } - h.Write([]byte("\n")) + mh.WriteByte(0) + + var tsBuf [8]byte + binary.BigEndian.PutUint64(tsBuf[:], uint64(tm.UnixNano())) + mh.Write(tsBuf[:]) - io.WriteString(h, strconv.FormatInt(tm.UnixNano(), 10)) - return h.Sum64() + return mh.Sum64() } diff --git a/metric/series_grouper_test.go b/metric/series_grouper_test.go new file mode 100644 index 0000000000000..32fbecb6e41b2 --- /dev/null +++ b/metric/series_grouper_test.go @@ -0,0 +1,37 @@ +package metric + +import ( + "hash/maphash" + "testing" + "time" +) + +var m, _ = New( + "mymetric", + map[string]string{ + "host": "host.example.com", + "mykey": "myvalue", + "another key": "another value", + }, + map[string]interface{}{ + "f1": 1, + "f2": 2, + "f3": 3, + "f4": 4, + "f5": 5, + "f6": 6, + "f7": 7, + "f8": 8, + }, + time.Now(), +) + +var result uint64 + +var hashSeed = maphash.MakeSeed() + +func BenchmarkGroupID(b *testing.B) { + for n := 0; n < b.N; n++ { + result = groupID(hashSeed, m.Name(), m.TagList(), m.Time()) + } +} diff --git a/plugins/aggregators/merge/merge.go b/plugins/aggregators/merge/merge.go index 083c8fd3e6b0a..35be286d3bc01 100644 --- a/plugins/aggregators/merge/merge.go +++ b/plugins/aggregators/merge/merge.go @@ -36,13 +36,7 @@ func (a *Merge) SampleConfig() string { } func (a *Merge) Add(m telegraf.Metric) { - tags := m.Tags() - for _, field := range m.FieldList() { - err := a.grouper.Add(m.Name(), tags, m.Time(), field.Key, field.Value) - if err != nil { - a.log.Errorf("Error adding metric: %v", err) - } - } + a.grouper.AddMetric(m) } func (a *Merge) Push(acc telegraf.Accumulator) { diff --git a/plugins/aggregators/merge/merge_test.go b/plugins/aggregators/merge/merge_test.go index 2f2703c8f4b7c..552c8618e3482 100644 --- a/plugins/aggregators/merge/merge_test.go +++ b/plugins/aggregators/merge/merge_test.go @@ -4,9 +4,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestSimple(t *testing.T) { @@ -184,3 +186,68 @@ func TestReset(t *testing.T) { testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) } + +var m1, _ = metric.New( + "mymetric", + map[string]string{ + "host": "host.example.com", + "mykey": "myvalue", + "another key": "another value", + }, + map[string]interface{}{ + "f1": 1, + "f2": 2, + "f3": 3, + "f4": 4, + "f5": 5, + "f6": 6, + "f7": 7, + "f8": 8, + }, + time.Now(), +) +var m2, _ = metric.New( + "mymetric", + map[string]string{ + "host": "host.example.com", + "mykey": "myvalue", + "another key": "another value", + }, + map[string]interface{}{ + "f8": 8, + "f9": 9, + "f10": 10, + "f11": 11, + "f12": 12, + "f13": 13, + "f14": 14, + "f15": 15, + "f16": 16, + }, + m1.Time(), +) + +func BenchmarkMergeOne(b *testing.B) { + var merger Merge + merger.Init() + var acc testutil.NopAccumulator + + for n := 0; n < b.N; n++ { + merger.Reset() + merger.Add(m1) + merger.Push(&acc) + } +} + +func BenchmarkMergeTwo(b *testing.B) { + var merger Merge + merger.Init() + var acc testutil.NopAccumulator + + for n := 0; n < b.N; n++ { + merger.Reset() + merger.Add(m1) + merger.Add(m2) + merger.Push(&acc) + } +} From 8b4fb2b75ec58b6cdeba97aaa73d4e7373052334 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 8 Jan 2021 00:07:24 -0500 Subject: [PATCH 167/761] add todo note about wavefront dependencies --- plugins/serializers/wavefront/wavefront.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/serializers/wavefront/wavefront.go b/plugins/serializers/wavefront/wavefront.go index 67fa1ae3a6834..2538d402298de 100755 --- a/plugins/serializers/wavefront/wavefront.go +++ b/plugins/serializers/wavefront/wavefront.go @@ -7,7 +7,7 @@ import ( "sync" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs/wavefront" + "github.com/influxdata/telegraf/plugins/outputs/wavefront" // TODO: this dependency is going the wrong way: Move MetricPoint into the serializer. ) // WavefrontSerializer : WavefrontSerializer struct From f31203b4b211203f0baf3634852f87ad28d4e612 Mon Sep 17 00:00:00 2001 From: Nathan Ferch Date: Fri, 8 Jan 2021 13:06:59 -0500 Subject: [PATCH 168/761] Add Beat input plugin (#6653) This plugin is known to work with Kafkabeat and Filebeat, and will likely work with other Beat instances that have a similar HTTP API. It is based on work done by @dmitryilyin. Co-authored-by: Dmitry Ilyin --- plugins/inputs/all/all.go | 1 + plugins/inputs/beat/README.md | 143 ++++++++++++++++ plugins/inputs/beat/beat.go | 234 +++++++++++++++++++++++++++ plugins/inputs/beat/beat6_info.json | 7 + plugins/inputs/beat/beat6_stats.json | 137 ++++++++++++++++ plugins/inputs/beat/beat_test.go | 228 ++++++++++++++++++++++++++ 6 files changed, 750 insertions(+) create mode 100644 plugins/inputs/beat/README.md create mode 100644 plugins/inputs/beat/beat.go create mode 100644 plugins/inputs/beat/beat6_info.json create mode 100644 plugins/inputs/beat/beat6_stats.json create mode 100644 plugins/inputs/beat/beat_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 6ad302d668e47..e732f2871f0ee 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -10,6 +10,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/azure_storage_queue" _ "github.com/influxdata/telegraf/plugins/inputs/bcache" _ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd" + _ "github.com/influxdata/telegraf/plugins/inputs/beat" _ "github.com/influxdata/telegraf/plugins/inputs/bind" _ "github.com/influxdata/telegraf/plugins/inputs/bond" _ "github.com/influxdata/telegraf/plugins/inputs/burrow" diff --git a/plugins/inputs/beat/README.md b/plugins/inputs/beat/README.md new file mode 100644 index 0000000000000..113187acda585 --- /dev/null +++ b/plugins/inputs/beat/README.md @@ -0,0 +1,143 @@ +# Beat Plugin +The Beat plugin will collect metrics from the given Beat instances. It is +known to work with Filebeat and Kafkabeat. +### Configuration: +```toml + ## An URL from which to read beat-formatted JSON + ## Default is "http://127.0.0.1:5066". + url = "http://127.0.0.1:5066" + + ## Enable collection of the listed stats + ## An empty list means collect all. Available options are currently + ## "beat", "libbeat", "system" and "filebeat". + # include = ["beat", "libbeat", "filebeat"] + + ## HTTP method + # method = "GET" + + ## Optional HTTP headers + # headers = {"X-Special-Header" = "Special-Value"} + + ## Override HTTP "Host" header + # host_header = "logstash.example.com" + + ## Timeout for HTTP requests + # timeout = "5s" + + ## Optional HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` +### Measurements & Fields +- **beat** + * Fields: + - cpu_system_ticks + - cpu_system_time_ms + - cpu_total_ticks + - cpu_total_time_ms + - cpu_total_value + - cpu_user_ticks + - cpu_user_time_ms + - info_uptime_ms + - memstats_gc_next + - memstats_memory_alloc + - memstats_memory_total + - memstats_rss + * Tags: + - beat_beat + - beat_host + - beat_id + - beat_name + - beat_version + +- **beat_filebeat** + * Fields: + - events_active + - events_added + - events_done + - harvester_closed + - harvester_open_files + - harvester_running + - harvester_skipped + - harvester_started + - input_log_files_renamed + - input_log_files_truncated + * Tags: + - beat_beat + - beat_host + - beat_id + - beat_name + - beat_version + +- **beat_libbeat** + * Fields: + - config_module_running + - config_module_starts + - config_module_stops + - config_reloads + - output_events_acked + - output_events_active + - output_events_batches + - output_events_dropped + - output_events_duplicates + - output_events_failed + - output_events_total + - output_type + - output_read_bytes + - output_read_errors + - output_write_bytes + - output_write_errors + - outputs_kafka_bytes_read + - outputs_kafka_bytes_write + - pipeline_clients + - pipeline_events_active + - pipeline_events_dropped + - pipeline_events_failed + - pipeline_events_filtered + - pipeline_events_published + - pipeline_events_retry + - pipeline_events_total + - pipeline_queue_acked + * Tags: + - beat_beat + - beat_host + - beat_id + - beat_name + - beat_version + +- **beat_system** + * Field: + - cpu_cores + - load_1 + - load_15 + - load_5 + - load_norm_1 + - load_norm_15 + - load_norm_5 + * Tags: + - beat_beat + - beat_host + - beat_id + - beat_name + - beat_version + +### Example Output: +``` +$ telegraf --input-filter beat --test + +> beat,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 + cpu_system_ticks=656750,cpu_system_time_ms=656750,cpu_total_ticks=5461190,cpu_total_time_ms=5461198,cpu_total_value=5461190,cpu_user_ticks=4804440,cpu_user_time_ms=4804448,info_uptime_ms=342634196,memstats_gc_next=20199584,memstats_memory_alloc=12547424,memstats_memory_total=486296424792,memstats_rss=72552448 1540316047000000000 +> beat_libbeat,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 + config_module_running=0,config_module_starts=0,config_module_stops=0,config_reloads=0,output_events_acked=192404,output_events_active=0,output_events_batches=1607,output_events_dropped=0,output_events_duplicates=0,output_events_failed=0,output_events_total=192404,output_read_bytes=0,output_read_errors=0,output_write_bytes=0,output_write_errors=0,outputs_kafka_bytes_read=1118528,outputs_kafka_bytes_write=48002014,pipeline_clients=1,pipeline_events_active=0,pipeline_events_dropped=0,pipeline_events_failed=0,pipeline_events_filtered=11496,pipeline_events_published=192404,pipeline_events_retry=14,pipeline_events_total=203900,pipeline_queue_acked=192404 1540316047000000000 +> beat_system,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 + cpu_cores=32,load_1=46.08,load_15=49.82,load_5=47.88,load_norm_1=1.44,load_norm_15=1.5569,load_norm_5=1.4963 1540316047000000000 +> beat_filebeat,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 + events_active=0,events_added=3223,events_done=3223,harvester_closed=0,harvester_open_files=0,harvester_running=0,harvester_skipped=0,harvester_started=0,input_log_files_renamed=0,input_log_files_truncated=0 1540320286000000000 +``` diff --git a/plugins/inputs/beat/beat.go b/plugins/inputs/beat/beat.go new file mode 100644 index 0000000000000..017b4c27e340a --- /dev/null +++ b/plugins/inputs/beat/beat.go @@ -0,0 +1,234 @@ +package beat + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" + + jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" +) + +const sampleConfig = ` + ## An URL from which to read Beat-formatted JSON + ## Default is "http://127.0.0.1:5066". + url = "http://127.0.0.1:5066" + + ## Enable collection of the listed stats + ## An empty list means collect all. Available options are currently + ## "beat", "libbeat", "system" and "filebeat". + # include = ["beat", "libbeat", "filebeat"] + + ## HTTP method + # method = "GET" + + ## Optional HTTP headers + # headers = {"X-Special-Header" = "Special-Value"} + + ## Override HTTP "Host" header + # host_header = "logstash.example.com" + + ## Timeout for HTTP requests + # timeout = "5s" + + ## Optional HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +const description = "Read metrics exposed by Beat" + +const suffixInfo = "/" +const suffixStats = "/stats" + +type BeatInfo struct { + Beat string `json:"beat"` + Hostname string `json:"hostname"` + Name string `json:"name"` + UUID string `json:"uuid"` + Version string `json:"version"` +} + +type BeatStats struct { + Beat map[string]interface{} `json:"beat"` + FileBeat interface{} `json:"filebeat"` + Libbeat interface{} `json:"libbeat"` + System interface{} `json:"system"` +} + +type Beat struct { + URL string `toml:"url"` + + Includes []string `toml:"include"` + + Username string `toml:"username"` + Password string `toml:"password"` + Method string `toml:"method"` + Headers map[string]string `toml:"headers"` + HostHeader string `toml:"host_header"` + Timeout internal.Duration `toml:"timeout"` + + tls.ClientConfig + client *http.Client +} + +func NewBeat() *Beat { + return &Beat{ + URL: "http://127.0.0.1:5066", + Includes: []string{"beat", "libbeat", "filebeat"}, + Method: "GET", + Headers: make(map[string]string), + Timeout: internal.Duration{Duration: time.Second * 5}, + } +} + +func (beat *Beat) Init() error { + availableStats := []string{"beat", "libbeat", "system", "filebeat"} + + var err error + beat.client, err = beat.createHTTPClient() + + if err != nil { + return err + } + + err = choice.CheckSlice(beat.Includes, availableStats) + if err != nil { + return err + } + + return nil +} + +func (beat *Beat) Description() string { + return description +} + +func (beat *Beat) SampleConfig() string { + return sampleConfig +} + +// createHTTPClient create a clients to access API +func (beat *Beat) createHTTPClient() (*http.Client, error) { + tlsConfig, err := beat.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + Timeout: beat.Timeout.Duration, + } + + return client, nil +} + +// gatherJSONData query the data source and parse the response JSON +func (beat *Beat) gatherJSONData(url string, value interface{}) error { + request, err := http.NewRequest(beat.Method, url, nil) + if err != nil { + return err + } + + if beat.Username != "" { + request.SetBasicAuth(beat.Username, beat.Password) + } + for k, v := range beat.Headers { + request.Header.Add(k, v) + } + if beat.HostHeader != "" { + request.Host = beat.HostHeader + } + + response, err := beat.client.Do(request) + if err != nil { + return err + } + + defer response.Body.Close() + + return json.NewDecoder(response.Body).Decode(value) +} + +func (beat *Beat) Gather(accumulator telegraf.Accumulator) error { + beatStats := &BeatStats{} + beatInfo := &BeatInfo{} + + infoUrl, err := url.Parse(beat.URL + suffixInfo) + if err != nil { + return err + } + statsUrl, err := url.Parse(beat.URL + suffixStats) + if err != nil { + return err + } + + err = beat.gatherJSONData(infoUrl.String(), beatInfo) + if err != nil { + return err + } + tags := map[string]string{ + "beat_beat": beatInfo.Beat, + "beat_id": beatInfo.UUID, + "beat_name": beatInfo.Name, + "beat_host": beatInfo.Hostname, + "beat_version": beatInfo.Version, + } + + err = beat.gatherJSONData(statsUrl.String(), beatStats) + if err != nil { + return err + } + + for _, name := range beat.Includes { + var stats interface{} + var metric string + + switch name { + case "beat": + stats = beatStats.Beat + metric = "beat" + case "filebeat": + stats = beatStats.FileBeat + metric = "beat_filebeat" + case "system": + stats = beatStats.System + metric = "beat_system" + case "libbeat": + stats = beatStats.Libbeat + metric = "beat_libbeat" + default: + return fmt.Errorf("unknown stats-type %q", name) + } + flattener := jsonparser.JSONFlattener{} + err := flattener.FullFlattenJSON("", stats, true, true) + if err != nil { + return err + } + accumulator.AddFields(metric, flattener.Fields, tags) + } + + return nil +} + +func init() { + inputs.Add("beat", func() telegraf.Input { + return NewBeat() + }) +} diff --git a/plugins/inputs/beat/beat6_info.json b/plugins/inputs/beat/beat6_info.json new file mode 100644 index 0000000000000..3cc318c330447 --- /dev/null +++ b/plugins/inputs/beat/beat6_info.json @@ -0,0 +1,7 @@ +{ + "beat": "filebeat", + "hostname": "node-6", + "name": "node-6-test", + "uuid": "9c1c8697-acb4-4df0-987d-28197814f785", + "version": "6.4.2" +} diff --git a/plugins/inputs/beat/beat6_stats.json b/plugins/inputs/beat/beat6_stats.json new file mode 100644 index 0000000000000..f34b9d1f06d1e --- /dev/null +++ b/plugins/inputs/beat/beat6_stats.json @@ -0,0 +1,137 @@ +{ + "beat": { + "cpu": { + "system": { + "ticks": 626970, + "time": { + "ms": 626972 + } + }, + "total": { + "ticks": 5215010, + "time": { + "ms": 5215018 + }, + "value": 5215010 + }, + "user": { + "ticks": 4588040, + "time": { + "ms": 4588046 + } + } + }, + "info": { + "ephemeral_id": "809e3b63-4fa0-4f74-822a-8e3c08298336", + "uptime": { + "ms": 327248661 + } + }, + "memstats": { + "gc_next": 20611808, + "memory_alloc": 12692544, + "memory_total": 462910102088, + "rss": 80273408 + } + }, + "filebeat": { + "events": { + "active": 0, + "added": 182990, + "done": 182990 + }, + "harvester": { + "closed": 2222, + "open_files": 4, + "running": 4, + "skipped": 0, + "started": 2226 + }, + "input": { + "log": { + "files": { + "renamed": 0, + "truncated": 0 + } + } + } + }, + "libbeat": { + "config": { + "module": { + "running": 0, + "starts": 0, + "stops": 0 + }, + "reloads": 0 + }, + "output": { + "events": { + "acked": 172067, + "active": 0, + "batches": 1490, + "dropped": 0, + "duplicates": 0, + "failed": 0, + "total": 172067 + }, + "read": { + "bytes": 0, + "errors": 0 + }, + "type": "kafka", + "write": { + "bytes": 0, + "errors": 0 + } + }, + "outputs": { + "kafka": { + "bytes_read": 1048670, + "bytes_write": 43136887 + } + }, + "pipeline": { + "clients": 1, + "events": { + "active": 0, + "dropped": 0, + "failed": 0, + "filtered": 10923, + "published": 172067, + "retry": 14, + "total": 182990 + }, + "queue": { + "acked": 172067 + } + } + }, + "registrar": { + "states": { + "cleanup": 3446, + "current": 16409, + "update": 182990 + }, + "writes": { + "fail": 0, + "success": 11718, + "total": 11718 + } + }, + "system": { + "cpu": { + "cores": 32 + }, + "load": { + "1": 32.49, + "15": 41.9, + "5": 40.16, + "norm": { + "1": 1.0153, + "15": 1.3094, + "5": 1.255 + } + } + } +} diff --git a/plugins/inputs/beat/beat_test.go b/plugins/inputs/beat/beat_test.go new file mode 100644 index 0000000000000..30dd48569f3a6 --- /dev/null +++ b/plugins/inputs/beat/beat_test.go @@ -0,0 +1,228 @@ +package beat + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func Test_BeatStats(test *testing.T) { + var beat6StatsAccumulator testutil.Accumulator + var beatTest = NewBeat() + // System stats are disabled by default + beatTest.Includes = []string{"beat", "libbeat", "system", "filebeat"} + err := beatTest.Init() + if err != nil { + panic(fmt.Sprintf("could not init beat: %s", err)) + } + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, request *http.Request) { + var jsonFilePath string + + switch request.URL.Path { + case suffixInfo: + jsonFilePath = "beat6_info.json" + case suffixStats: + jsonFilePath = "beat6_stats.json" + default: + panic("Cannot handle request") + } + + data, err := ioutil.ReadFile(jsonFilePath) + + if err != nil { + panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) + } + w.Write(data) + })) + requestURL, err := url.Parse(beatTest.URL) + if err != nil { + test.Logf("Can't parse URL %s", beatTest.URL) + } + fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + if err != nil { + test.Logf("Can't listen for %s: %v", requestURL, err) + } + + fakeServer.Start() + defer fakeServer.Close() + + err = beatTest.Gather(&beat6StatsAccumulator) + if err != nil { + test.Logf("Can't gather stats") + } + + beat6StatsAccumulator.AssertContainsTaggedFields( + test, + "beat", + map[string]interface{}{ + "cpu_system_ticks": float64(626970), + "cpu_system_time_ms": float64(626972), + "cpu_total_ticks": float64(5215010), + "cpu_total_time_ms": float64(5215018), + "cpu_total_value": float64(5215010), + "cpu_user_ticks": float64(4588040), + "cpu_user_time_ms": float64(4588046), + "info_uptime_ms": float64(327248661), + "info_ephemeral_id": "809e3b63-4fa0-4f74-822a-8e3c08298336", + "memstats_gc_next": float64(20611808), + "memstats_memory_alloc": float64(12692544), + "memstats_memory_total": float64(462910102088), + "memstats_rss": float64(80273408), + }, + map[string]string{ + "beat_beat": string("filebeat"), + "beat_host": string("node-6"), + "beat_id": string("9c1c8697-acb4-4df0-987d-28197814f785"), + "beat_name": string("node-6-test"), + "beat_version": string("6.4.2"), + }, + ) + beat6StatsAccumulator.AssertContainsTaggedFields( + test, + "beat_filebeat", + map[string]interface{}{ + "events_active": float64(0), + "events_added": float64(182990), + "events_done": float64(182990), + "harvester_closed": float64(2222), + "harvester_open_files": float64(4), + "harvester_running": float64(4), + "harvester_skipped": float64(0), + "harvester_started": float64(2226), + "input_log_files_renamed": float64(0), + "input_log_files_truncated": float64(0), + }, + map[string]string{ + "beat_beat": string("filebeat"), + "beat_host": string("node-6"), + "beat_id": string("9c1c8697-acb4-4df0-987d-28197814f785"), + "beat_name": string("node-6-test"), + "beat_version": string("6.4.2"), + }, + ) + beat6StatsAccumulator.AssertContainsTaggedFields( + test, + "beat_libbeat", + map[string]interface{}{ + "config_module_running": float64(0), + "config_module_starts": float64(0), + "config_module_stops": float64(0), + "config_reloads": float64(0), + "output_type": "kafka", + "output_events_acked": float64(172067), + "output_events_active": float64(0), + "output_events_batches": float64(1490), + "output_events_dropped": float64(0), + "output_events_duplicates": float64(0), + "output_events_failed": float64(0), + "output_events_total": float64(172067), + "output_read_bytes": float64(0), + "output_read_errors": float64(0), + "output_write_bytes": float64(0), + "output_write_errors": float64(0), + "outputs_kafka_bytes_read": float64(1048670), + "outputs_kafka_bytes_write": float64(43136887), + "pipeline_clients": float64(1), + "pipeline_events_active": float64(0), + "pipeline_events_dropped": float64(0), + "pipeline_events_failed": float64(0), + "pipeline_events_filtered": float64(10923), + "pipeline_events_published": float64(172067), + "pipeline_events_retry": float64(14), + "pipeline_events_total": float64(182990), + "pipeline_queue_acked": float64(172067), + }, + map[string]string{ + "beat_beat": string("filebeat"), + "beat_host": string("node-6"), + "beat_id": string("9c1c8697-acb4-4df0-987d-28197814f785"), + "beat_name": string("node-6-test"), + "beat_version": string("6.4.2"), + }, + ) + beat6StatsAccumulator.AssertContainsTaggedFields( + test, + "beat_system", + map[string]interface{}{ + "cpu_cores": float64(32), + "load_1": float64(32.49), + "load_15": float64(41.9), + "load_5": float64(40.16), + "load_norm_1": float64(1.0153), + "load_norm_15": float64(1.3094), + "load_norm_5": float64(1.255), + }, + map[string]string{ + "beat_beat": string("filebeat"), + "beat_host": string("node-6"), + "beat_id": string("9c1c8697-acb4-4df0-987d-28197814f785"), + "beat_name": string("node-6-test"), + "beat_version": string("6.4.2"), + }, + ) +} + +func Test_BeatRequest(test *testing.T) { + var beat6StatsAccumulator testutil.Accumulator + beatTest := NewBeat() + // System stats are disabled by default + beatTest.Includes = []string{"beat", "libbeat", "system", "filebeat"} + err := beatTest.Init() + if err != nil { + panic(fmt.Sprintf("could not init beat: %s", err)) + } + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, request *http.Request) { + var jsonFilePath string + + switch request.URL.Path { + case suffixInfo: + jsonFilePath = "beat6_info.json" + case suffixStats: + jsonFilePath = "beat6_stats.json" + default: + panic("Cannot handle request") + } + + data, err := ioutil.ReadFile(jsonFilePath) + + if err != nil { + panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) + } + assert.Equal(test, request.Host, "beat.test.local") + assert.Equal(test, request.Method, "POST") + assert.Equal(test, request.Header.Get("Authorization"), "Basic YWRtaW46UFdE") + assert.Equal(test, request.Header.Get("X-Test"), "test-value") + + w.Write(data) + })) + + requestURL, err := url.Parse(beatTest.URL) + if err != nil { + test.Logf("Can't parse URL %s", beatTest.URL) + } + fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + if err != nil { + test.Logf("Can't listen for %s: %v", requestURL, err) + } + fakeServer.Start() + defer fakeServer.Close() + + beatTest.Headers["X-Test"] = "test-value" + beatTest.HostHeader = "beat.test.local" + beatTest.Method = "POST" + beatTest.Username = "admin" + beatTest.Password = "PWD" + + err = beatTest.Gather(&beat6StatsAccumulator) + if err != nil { + test.Logf("Can't gather stats") + } + +} From 9814d3d84d30bdd43207cbdb41a06e66bfcd9093 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 8 Jan 2021 17:30:43 -0500 Subject: [PATCH 169/761] fix some annoying tests due to ports in use --- plugins/inputs/redfish/redfish_test.go | 18 --------------- plugins/inputs/statsd/statsd_test.go | 4 ++-- plugins/outputs/graphite/graphite_test.go | 28 +++++++++++++++-------- 3 files changed, 20 insertions(+), 30 deletions(-) diff --git a/plugins/inputs/redfish/redfish_test.go b/plugins/inputs/redfish/redfish_test.go index c3c6f0d104719..6bd28214840b8 100644 --- a/plugins/inputs/redfish/redfish_test.go +++ b/plugins/inputs/redfish/redfish_test.go @@ -8,7 +8,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -644,23 +643,6 @@ func checkAuth(r *http.Request, username, password string) bool { return user == username && pass == password } -func TestConnection(t *testing.T) { - r := &Redfish{ - Address: "http://127.0.0.1", - Username: "test", - Password: "test", - ComputerSystemId: "System.Embedded.1", - } - - var acc testutil.Accumulator - r.Init() - err := r.Gather(&acc) - if assert.Error(t, err) { - _, ok := err.(*url.Error) - assert.True(t, ok) - } -} - func TestInvalidUsernameorPassword(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index fd3b49b9203f0..4a129266deebc 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -1766,14 +1766,14 @@ func TestUdp(t *testing.T) { statsd := Statsd{ Log: testutil.Logger{}, Protocol: "udp", - ServiceAddress: "localhost:8125", + ServiceAddress: "localhost:14223", AllowedPendingMessages: 250000, } var acc testutil.Accumulator require.NoError(t, statsd.Start(&acc)) defer statsd.Stop() - conn, err := net.Dial("udp", "127.0.0.1:8125") + conn, err := net.Dial("udp", "127.0.0.1:14223") _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) require.NoError(t, err) err = conn.Close() diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go index 82aad0d7d6ee6..025ee23ec1679 100644 --- a/plugins/outputs/graphite/graphite_test.go +++ b/plugins/outputs/graphite/graphite_test.go @@ -18,7 +18,7 @@ import ( func TestGraphiteError(t *testing.T) { // Init plugin g := Graphite{ - Servers: []string{"127.0.0.1:2003", "127.0.0.1:12003"}, + Servers: []string{"127.0.0.1:12004", "127.0.0.1:12003"}, Prefix: "my.prefix", } // Init metrics @@ -48,7 +48,8 @@ func TestGraphiteOK(t *testing.T) { // Init plugin g := Graphite{ - Prefix: "my.prefix", + Prefix: "my.prefix", + Servers: []string{"localhost:12003"}, } // Init metrics @@ -109,6 +110,7 @@ func TestGraphiteOkWithSeparatorDot(t *testing.T) { g := Graphite{ Prefix: "my.prefix", GraphiteSeparator: ".", + Servers: []string{"localhost:12003"}, } // Init metrics @@ -169,6 +171,7 @@ func TestGraphiteOkWithSeparatorUnderscore(t *testing.T) { g := Graphite{ Prefix: "my.prefix", GraphiteSeparator: "_", + Servers: []string{"localhost:12003"}, } // Init metrics @@ -233,6 +236,7 @@ func TestGraphiteOKWithMultipleTemplates(t *testing.T) { "my_* host.measurement.tags.field", "measurement.tags.host.field", }, + Servers: []string{"localhost:12003"}, } // Init metrics @@ -293,6 +297,7 @@ func TestGraphiteOkWithTags(t *testing.T) { g := Graphite{ Prefix: "my.prefix", GraphiteTagSupport: true, + Servers: []string{"localhost:12003"}, } // Init metrics @@ -354,6 +359,7 @@ func TestGraphiteOkWithTagsAndSeparatorDot(t *testing.T) { Prefix: "my.prefix", GraphiteTagSupport: true, GraphiteSeparator: ".", + Servers: []string{"localhost:12003"}, } // Init metrics @@ -415,6 +421,7 @@ func TestGraphiteOkWithTagsAndSeparatorUnderscore(t *testing.T) { Prefix: "my_prefix", GraphiteTagSupport: true, GraphiteSeparator: "_", + Servers: []string{"localhost:12003"}, } // Init metrics @@ -465,7 +472,8 @@ func TestGraphiteOkWithTagsAndSeparatorUnderscore(t *testing.T) { } func TCPServer1(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, err := net.Listen("tcp", "127.0.0.1:12003") + require.NoError(t, err) go func() { defer wg.Done() conn, _ := (tcpServer).Accept() @@ -479,7 +487,7 @@ func TCPServer1(t *testing.T, wg *sync.WaitGroup) { } func TCPServer2(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn2, _ := (tcpServer).Accept() @@ -495,7 +503,7 @@ func TCPServer2(t *testing.T, wg *sync.WaitGroup) { } func TCPServer1WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn, _ := (tcpServer).Accept() @@ -509,7 +517,7 @@ func TCPServer1WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { } func TCPServer2WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn2, _ := (tcpServer).Accept() @@ -525,7 +533,7 @@ func TCPServer2WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { } func TCPServer1WithTags(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn, _ := (tcpServer).Accept() @@ -539,7 +547,7 @@ func TCPServer1WithTags(t *testing.T, wg *sync.WaitGroup) { } func TCPServer2WithTags(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn2, _ := (tcpServer).Accept() @@ -555,7 +563,7 @@ func TCPServer2WithTags(t *testing.T, wg *sync.WaitGroup) { } func TCPServer1WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn, _ := (tcpServer).Accept() @@ -569,7 +577,7 @@ func TCPServer1WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) { } func TCPServer2WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) { - tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + tcpServer, _ := net.Listen("tcp", "127.0.0.1:12003") go func() { defer wg.Done() conn2, _ := (tcpServer).Accept() From 3531e9ddc6e00f8b4fc8ee7cc6ef12159728a991 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 11 Jan 2021 10:53:17 -0600 Subject: [PATCH 170/761] Removing .lgtm.yml (#8664) * Removed LGTM * Empty build.py to remove false python alerts --- .lgtm.yml | 2 -- scripts/build.py | 0 2 files changed, 2 deletions(-) delete mode 100644 .lgtm.yml create mode 100644 scripts/build.py diff --git a/.lgtm.yml b/.lgtm.yml deleted file mode 100644 index dbbf05d492ef9..0000000000000 --- a/.lgtm.yml +++ /dev/null @@ -1,2 +0,0 @@ -queries: - - exclude: py/* diff --git a/scripts/build.py b/scripts/build.py new file mode 100644 index 0000000000000..e69de29bb2d1d From 3b87438deae5f42342a725b8e7232c2e6dcd8569 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Mon, 11 Jan 2021 13:53:06 -0500 Subject: [PATCH 171/761] Added ability to define skip values in csv parser (#8627) --- config/config.go | 3 +- plugins/parsers/csv/README.md | 4 +++ plugins/parsers/csv/parser.go | 8 +++++ plugins/parsers/csv/parser_test.go | 54 ++++++++++++++++++++++++++++++ plugins/parsers/registry.go | 2 ++ 5 files changed, 70 insertions(+), 1 deletion(-) diff --git a/config/config.go b/config/config.go index f86692835afb4..b395f7df1c725 100644 --- a/config/config.go +++ b/config/config.go @@ -1320,6 +1320,7 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, c.getFieldInt(tbl, "csv_skip_rows", &pc.CSVSkipRows) c.getFieldInt(tbl, "csv_skip_columns", &pc.CSVSkipColumns) c.getFieldBool(tbl, "csv_trim_space", &pc.CSVTrimSpace) + c.getFieldStringSlice(tbl, "csv_skip_values", &pc.CSVSkipValues) c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys) @@ -1413,7 +1414,7 @@ func (c *Config) missingTomlField(typ reflect.Type, key string) error { "collectd_security_level", "collectd_typesdb", "collection_jitter", "csv_column_names", "csv_column_types", "csv_comment", "csv_delimiter", "csv_header_row_count", "csv_measurement_column", "csv_skip_columns", "csv_skip_rows", "csv_tag_columns", - "csv_timestamp_column", "csv_timestamp_format", "csv_timezone", "csv_trim_space", + "csv_timestamp_column", "csv_timestamp_format", "csv_timezone", "csv_trim_space", "csv_skip_values", "data_format", "data_type", "delay", "drop", "drop_original", "dropwizard_metric_registry_path", "dropwizard_tag_paths", "dropwizard_tags_path", "dropwizard_time_format", "dropwizard_time_path", "fielddrop", "fieldpass", "flush_interval", "flush_jitter", "form_urlencoded_tag_keys", diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index b44d2fc2d2576..220ac60686636 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -73,6 +73,10 @@ values. ## in case of there is no timezone information. ## It follows the IANA Time Zone database. csv_timezone = "" + + ## Indicates values to skip, such as an empty string value "". + ## The field will be skipped entirely where it matches any values inserted here. + csv_skip_values = [] ``` #### csv_timestamp_column, csv_timestamp_format diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 1c3d511ef43eb..3f370b507dc4f 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -31,6 +31,7 @@ type Config struct { TimestampFormat string `toml:"csv_timestamp_format"` Timezone string `toml:"csv_timezone"` TrimSpace bool `toml:"csv_trim_space"` + SkipValues []string `toml:"csv_skip_values"` gotColumnNames bool @@ -197,6 +198,13 @@ outer: value = strings.Trim(value, " ") } + // don't record fields where the value matches a skip value + for _, s := range p.SkipValues { + if value == s { + continue outer + } + } + for _, tagName := range p.TagColumns { if tagName == fieldName { tags[tagName] = value diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index 31fd4b02a0966..f942eb0716346 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -613,3 +613,57 @@ func TestStaticMeasurementName(t *testing.T) { } testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime()) } + +func TestSkipEmptyStringValue(t *testing.T) { + p, err := NewParser( + &Config{ + MetricName: "csv", + HeaderRowCount: 1, + ColumnNames: []string{"a", "b"}, + SkipValues: []string{""}, + }, + ) + require.NoError(t, err) + testCSV := `a,b +1,""` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric("csv", + map[string]string{}, + map[string]interface{}{ + "a": 1, + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime()) +} + +func TestSkipSpecifiedStringValue(t *testing.T) { + p, err := NewParser( + &Config{ + MetricName: "csv", + HeaderRowCount: 1, + ColumnNames: []string{"a", "b"}, + SkipValues: []string{"MM"}, + }, + ) + require.NoError(t, err) + testCSV := `a,b +1,MM` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric("csv", + map[string]string{}, + map[string]interface{}{ + "a": 1, + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime()) +} diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index ac31a374dd75d..54edf3300b612 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -146,6 +146,7 @@ type Config struct { CSVTimestampFormat string `toml:"csv_timestamp_format"` CSVTimezone string `toml:"csv_timezone"` CSVTrimSpace bool `toml:"csv_trim_space"` + CSVSkipValues []string `toml:"csv_skip_values"` // FormData configuration FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys"` @@ -222,6 +223,7 @@ func NewParser(config *Config) (Parser, error) { TimestampFormat: config.CSVTimestampFormat, Timezone: config.CSVTimezone, DefaultTags: config.DefaultTags, + SkipValues: config.CSVSkipValues, } return csv.NewParser(config) From 0c99ae9e1d5c32cb2da7baba87e35a5da34c02b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=B6ldi=20Tam=C3=A1s?= Date: Mon, 11 Jan 2021 19:53:44 +0100 Subject: [PATCH 172/761] Add timestamp column support to postgresql_extensible (#8602) --- .../inputs/postgresql_extensible/README.md | 5 ++++ .../postgresql_extensible.go | 30 ++++++++++++++++++- .../postgresql_extensible_test.go | 9 ++++++ 3 files changed, 43 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md index abbdd07f43d1b..70464140aedf4 100644 --- a/plugins/inputs/postgresql_extensible/README.md +++ b/plugins/inputs/postgresql_extensible/README.md @@ -52,12 +52,17 @@ The example below has two queries are specified, with the following parameters: # defined tags. The values in these columns must be of a string-type, # a number-type or a blob-type. # + # The timestamp field is used to override the data points timestamp value. By + # default, all rows inserted with current time. By setting a timestamp column, + # the row will be inserted with that column's value. + # # Structure : # [[inputs.postgresql_extensible.query]] # sqlquery string # version string # withdbname boolean # tagvalue string (coma separated) + # timestamp string [[inputs.postgresql_extensible.query]] sqlquery="SELECT * FROM pg_stat_database where datname" version=901 diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index f91feaf407d49..044ba1fc4a8ca 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "os" "strings" + "time" _ "github.com/jackc/pgx/stdlib" @@ -19,6 +20,7 @@ type Postgresql struct { postgresql.Service Databases []string AdditionalTags []string + Timestamp string Query query Debug bool @@ -32,6 +34,7 @@ type query []struct { Withdbname bool Tagvalue string Measurement string + Timestamp string } var ignoredColumns = map[string]bool{"stats_reset": true} @@ -82,6 +85,15 @@ var sampleConfig = ` ## The script option can be used to specify the .sql file path. ## If script and sqlquery options specified at same time, sqlquery will be used ## + ## the tagvalue field is used to define custom tags (separated by comas). + ## the query is expected to return columns which match the names of the + ## defined tags. The values in these columns must be of a string-type, + ## a number-type or a blob-type. + ## + ## The timestamp field is used to override the data points timestamp value. By + ## default, all rows inserted with current time. By setting a timestamp column, + ## the row will be inserted with that column's value. + ## ## Structure : ## [[inputs.postgresql_extensible.query]] ## sqlquery string @@ -89,6 +101,7 @@ var sampleConfig = ` ## withdbname boolean ## tagvalue string (comma separated) ## measurement string + ## timestamp string [[inputs.postgresql_extensible.query]] sqlquery="SELECT * FROM pg_stat_database" version=901 @@ -150,6 +163,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { query string tag_value string meas_name string + timestamp string columns []string ) @@ -164,6 +178,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { for i := range p.Query { sql_query = p.Query[i].Sqlquery tag_value = p.Query[i].Tagvalue + timestamp = p.Query[i].Timestamp if p.Query[i].Measurement != "" { meas_name = p.Query[i].Measurement @@ -206,6 +221,8 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { } } + p.Timestamp = timestamp + for rows.Next() { err = p.accRow(meas_name, rows, acc, columns) if err != nil { @@ -228,6 +245,7 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula columnVars []interface{} dbname bytes.Buffer tagAddress string + timestamp time.Time ) // this is where we'll store the column name with its *interface{} @@ -269,6 +287,9 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula "db": dbname.String(), } + // set default timestamp to Now + timestamp = time.Now() + fields := make(map[string]interface{}) COLUMN: for col, val := range columnMap { @@ -278,6 +299,13 @@ COLUMN: continue } + if col == p.Timestamp { + if v, ok := (*val).(time.Time); ok { + timestamp = v + } + continue + } + for _, tag := range p.AdditionalTags { if col != tag { continue @@ -301,7 +329,7 @@ COLUMN: fields[col] = *val } } - acc.AddFields(meas_name, fields, tags) + acc.AddFields(meas_name, fields, tags, timestamp) return nil } diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index bca009f167cf7..ac0ad05c8bd88 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "testing" + "time" "github.com/influxdata/telegraf/plugins/inputs/postgresql" "github.com/influxdata/telegraf/testutil" @@ -126,6 +127,13 @@ func TestPostgresqlQueryOutputTests(t *testing.T) { assert.True(t, found) assert.Equal(t, true, v) }, + "SELECT timestamp'1980-07-23' as ts, true AS myvalue": func(acc *testutil.Accumulator) { + expectedTime := time.Date(1980, 7, 23, 0, 0, 0, 0, time.UTC) + v, found := acc.BoolField(measurement, "myvalue") + assert.True(t, found) + assert.Equal(t, true, v) + assert.True(t, acc.HasTimestamp(measurement, expectedTime)) + }, } for q, assertions := range examples { @@ -134,6 +142,7 @@ func TestPostgresqlQueryOutputTests(t *testing.T) { Version: 901, Withdbname: false, Tagvalue: "", + Timestamp: "ts", }}) assertions(acc) } From baa658a4bbb76dbf2586f87ac2cc687cff48be02 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 12 Jan 2021 09:44:59 -0800 Subject: [PATCH 173/761] update data formats output docs (#8674) --- docs/DATA_FORMATS_OUTPUT.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index 12a301bc5f54f..0d0bdfff4bb27 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -9,9 +9,10 @@ plugins. 1. [Graphite](/plugins/serializers/graphite) 1. [JSON](/plugins/serializers/json) 1. [Prometheus](/plugins/serializers/prometheus) +1. [Prometheus Remote Write](/plugins/serializers/prometheusremotewrite) +1. [ServiceNow Metrics](/plugins/serializers/nowmetric) 1. [SplunkMetric](/plugins/serializers/splunkmetric) 1. [Wavefront](/plugins/serializers/wavefront) -1. [ServiceNow Metrics](/plugins/serializers/nowmetric) You will be able to identify the plugins with support by the presence of a `data_format` config option, for example, in the `file` output plugin: From 70d2b1f790ba9a692b872fa086a9d4c0463b2b26 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Tue, 12 Jan 2021 17:59:13 -0500 Subject: [PATCH 174/761] Procstat input plugin should use the same timestamp in all metrics in the same Gather() cycle. (#8658) --- plugins/inputs/procstat/procstat.go | 12 +++++++----- plugins/inputs/procstat/procstat_test.go | 17 +++++++++++++++++ 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index aa654da560c10..35f60342270dd 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -118,6 +118,8 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { } pids, tags, err := p.findPids(acc) + now := time.Now() + if err != nil { fields := map[string]interface{}{ "pid_count": 0, @@ -128,7 +130,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { "pid_finder": p.PidFinder, "result": "lookup_error", } - acc.AddFields("procstat_lookup", fields, tags) + acc.AddFields("procstat_lookup", fields, tags, now) return err } @@ -140,7 +142,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { p.procs = procs for _, proc := range p.procs { - p.addMetric(proc, acc) + p.addMetric(proc, acc, now) } fields := map[string]interface{}{ @@ -150,13 +152,13 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { } tags["pid_finder"] = p.PidFinder tags["result"] = "success" - acc.AddFields("procstat_lookup", fields, tags) + acc.AddFields("procstat_lookup", fields, tags, now) return nil } // Add metrics a single Process -func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { +func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time) { var prefix string if p.Prefix != "" { prefix = p.Prefix + "_" @@ -309,7 +311,7 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { } } - acc.AddFields("procstat", fields, proc.Tags()) + acc.AddFields("procstat", fields, proc.Tags(), t) } // Update monitored Processes diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index e1ee8ab921841..9836feaec8b89 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -402,3 +402,20 @@ func TestProcstatLookupMetric(t *testing.T) { require.NoError(t, err) require.Equal(t, len(p.procs)+1, len(acc.Metrics)) } + +func TestGather_SameTimestamps(t *testing.T) { + var acc testutil.Accumulator + pidfile := "/path/to/pidfile" + + p := Procstat{ + PidFile: pidfile, + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: newTestProc, + } + require.NoError(t, acc.GatherError(p.Gather)) + + procstat, _ := acc.Get("procstat") + procstat_lookup, _ := acc.Get("procstat_lookup") + + require.Equal(t, procstat.Time, procstat_lookup.Time) +} From d9f237759dcd775bcf90c045d910f0ce2d54c6ce Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Tue, 12 Jan 2021 17:59:42 -0500 Subject: [PATCH 175/761] Use the 'measurement' json field from the particle webhook as the measurment name, or if it's blank, use the 'name' field of the event's json. (#8609) --- .../webhooks/particle/particle_webhooks.go | 10 +++- .../particle/particle_webhooks_test.go | 46 ++++++++++++++++++- 2 files changed, 53 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/webhooks/particle/particle_webhooks.go b/plugins/inputs/webhooks/particle/particle_webhooks.go index aa3499935f49a..ad93ea7c56477 100644 --- a/plugins/inputs/webhooks/particle/particle_webhooks.go +++ b/plugins/inputs/webhooks/particle/particle_webhooks.go @@ -14,7 +14,7 @@ type event struct { Data data `json:"data"` TTL int `json:"ttl"` PublishedAt string `json:"published_at"` - Database string `json:"measurement"` + Measurement string `json:"measurement"` } type data struct { @@ -59,6 +59,12 @@ func (rb *ParticleWebhook) eventHandler(w http.ResponseWriter, r *http.Request) pTime = time.Now() } - rb.acc.AddFields(e.Name, e.Data.Fields, e.Data.Tags, pTime) + // Use 'measurement' event field as the measurement, or default to the event name. + measurementName := e.Measurement + if measurementName == "" { + measurementName = e.Name + } + + rb.acc.AddFields(measurementName, e.Data.Fields, e.Data.Tags, pTime) w.WriteHeader(http.StatusOK) } diff --git a/plugins/inputs/webhooks/particle/particle_webhooks_test.go b/plugins/inputs/webhooks/particle/particle_webhooks_test.go index dc6213367dda9..c00d49fbb28a6 100644 --- a/plugins/inputs/webhooks/particle/particle_webhooks_test.go +++ b/plugins/inputs/webhooks/particle/particle_webhooks_test.go @@ -44,7 +44,7 @@ func TestNewItem(t *testing.T) { "location": "TravelingWilbury", } - acc.AssertContainsTaggedFields(t, "temperature", fields, tags) + acc.AssertContainsTaggedFields(t, "mydata", fields, tags) } func TestUnknowItem(t *testing.T) { @@ -57,6 +57,50 @@ func TestUnknowItem(t *testing.T) { } } +func TestDefaultMeasurementName(t *testing.T) { + t.Parallel() + var acc testutil.Accumulator + rb := &ParticleWebhook{Path: "/particle", acc: &acc} + resp := postWebhooks(rb, BlankMeasurementJSON()) + if resp.Code != http.StatusOK { + t.Errorf("POST new_item returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK) + } + + fields := map[string]interface{}{ + "temp_c": 26.680000, + } + + tags := map[string]string{ + "id": "230035001147343438323536", + } + + acc.AssertContainsTaggedFields(t, "eventName", fields, tags) +} + +func BlankMeasurementJSON() string { + return ` + { + "event": "eventName", + "data": { + "tags": { + "id": "230035001147343438323536" + }, + "values": { + "temp_c": 26.680000 + } + }, + "ttl": 60, + "published_at": "2017-09-28T21:54:10.897Z", + "coreid": "123456789938323536", + "userid": "1234ee123ac8e5ec1231a123d", + "version": 10, + "public": false, + "productID": 1234, + "name": "sensor", + "measurement": "" + }` +} + func NewItemJSON() string { return ` { From 6bd5334f8bcdb3f6eea6cddbf1d3318ee3c798e5 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 13 Jan 2021 09:51:07 -0600 Subject: [PATCH 176/761] Update template, remove CLA checkbox (#8680) --- .github/PULL_REQUEST_TEMPLATE.md | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 527555bdfc7a8..4b2eaad4fbab5 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,5 +1,4 @@ ### Required for all PRs: -- [ ] Signed [CLA](https://influxdata.com/community/cla/). - [ ] Associated README.md updated. - [ ] Has appropriate unit tests. From fbd54e84a2c5b4355b92a4ade0fd50805a5bff7e Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Wed, 13 Jan 2021 15:48:21 -0500 Subject: [PATCH 177/761] GNMI plugin should not take off the first character of field keys when no 'alias path' exists. (#8659) * GNMI plugin should not take off the first character of field keys when no 'alias path' exists. * fix test method name * fix test file formatting * fix test file formatting * Remove my unnecessary failing test --- plugins/inputs/gnmi/gnmi.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 694ca7851f2be..5e99092f82927 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -290,11 +290,11 @@ func (c *GNMI) handleSubscribeResponseUpdate(address string, response *gnmi.Subs // Group metrics for k, v := range fields { key := k - if len(aliasPath) < len(key) { + if len(aliasPath) < len(key) && len(aliasPath) != 0 { // This may not be an exact prefix, due to naming style // conversion on the key. key = key[len(aliasPath)+1:] - } else { + } else if len(aliasPath) >= len(key) { // Otherwise use the last path element as the field key. key = path.Base(key) From 76c2201bbe9f098a09e01fa1e39790a973d7da8e Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Thu, 14 Jan 2021 11:47:00 -0500 Subject: [PATCH 178/761] Fix Redis output field type inconsistencies (#8678) --- plugins/inputs/redis/redis.go | 230 +++++++++++++++++++++ plugins/inputs/redis/redis_test.go | 322 +++++++++++++++++++++++------ 2 files changed, 487 insertions(+), 65 deletions(-) diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 3a76a351c05de..72b85dddaa8d3 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "net/url" + "reflect" "regexp" "strconv" "strings" @@ -46,6 +47,117 @@ type RedisClient struct { tags map[string]string } +// RedisFieldTypes defines the types expected for each of the fields redis reports on +type RedisFieldTypes struct { + ActiveDefragHits int64 `json:"active_defrag_hits"` + ActiveDefragKeyHits int64 `json:"active_defrag_key_hits"` + ActiveDefragKeyMisses int64 `json:"active_defrag_key_misses"` + ActiveDefragMisses int64 `json:"active_defrag_misses"` + ActiveDefragRunning int64 `json:"active_defrag_running"` + AllocatorActive int64 `json:"allocator_active"` + AllocatorAllocated int64 `json:"allocator_allocated"` + AllocatorFragBytes float64 `json:"allocator_frag_bytes"` // for historical reasons this was left as float although redis reports it as an int + AllocatorFragRatio float64 `json:"allocator_frag_ratio"` + AllocatorResident int64 `json:"allocator_resident"` + AllocatorRssBytes int64 `json:"allocator_rss_bytes"` + AllocatorRssRatio float64 `json:"allocator_rss_ratio"` + AofCurrentRewriteTimeSec int64 `json:"aof_current_rewrite_time_sec"` + AofEnabled int64 `json:"aof_enabled"` + AofLastBgrewriteStatus string `json:"aof_last_bgrewrite_status"` + AofLastCowSize int64 `json:"aof_last_cow_size"` + AofLastRewriteTimeSec int64 `json:"aof_last_rewrite_time_sec"` + AofLastWriteStatus string `json:"aof_last_write_status"` + AofRewriteInProgress int64 `json:"aof_rewrite_in_progress"` + AofRewriteScheduled int64 `json:"aof_rewrite_scheduled"` + BlockedClients int64 `json:"blocked_clients"` + ClientRecentMaxInputBuffer int64 `json:"client_recent_max_input_buffer"` + ClientRecentMaxOutputBuffer int64 `json:"client_recent_max_output_buffer"` + Clients int64 `json:"clients"` + ClientsInTimeoutTable int64 `json:"clients_in_timeout_table"` + ClusterEnabled int64 `json:"cluster_enabled"` + ConnectedSlaves int64 `json:"connected_slaves"` + EvictedKeys int64 `json:"evicted_keys"` + ExpireCycleCPUMilliseconds int64 `json:"expire_cycle_cpu_milliseconds"` + ExpiredKeys int64 `json:"expired_keys"` + ExpiredStalePerc float64 `json:"expired_stale_perc"` + ExpiredTimeCapReachedCount int64 `json:"expired_time_cap_reached_count"` + InstantaneousInputKbps float64 `json:"instantaneous_input_kbps"` + InstantaneousOpsPerSec int64 `json:"instantaneous_ops_per_sec"` + InstantaneousOutputKbps float64 `json:"instantaneous_output_kbps"` + IoThreadedReadsProcessed int64 `json:"io_threaded_reads_processed"` + IoThreadedWritesProcessed int64 `json:"io_threaded_writes_processed"` + KeyspaceHits int64 `json:"keyspace_hits"` + KeyspaceMisses int64 `json:"keyspace_misses"` + LatestForkUsec int64 `json:"latest_fork_usec"` + LazyfreePendingObjects int64 `json:"lazyfree_pending_objects"` + Loading int64 `json:"loading"` + LruClock int64 `json:"lru_clock"` + MasterReplOffset int64 `json:"master_repl_offset"` + MaxMemory int64 `json:"maxmemory"` + MaxMemoryPolicy string `json:"maxmemory_policy"` + MemAofBuffer int64 `json:"mem_aof_buffer"` + MemClientsNormal int64 `json:"mem_clients_normal"` + MemClientsSlaves int64 `json:"mem_clients_slaves"` + MemFragmentationBytes int64 `json:"mem_fragmentation_bytes"` + MemFragmentationRatio float64 `json:"mem_fragmentation_ratio"` + MemNotCountedForEvict int64 `json:"mem_not_counted_for_evict"` + MemReplicationBacklog int64 `json:"mem_replication_backlog"` + MigrateCachedSockets int64 `json:"migrate_cached_sockets"` + ModuleForkInProgress int64 `json:"module_fork_in_progress"` + ModuleForkLastCowSize int64 `json:"module_fork_last_cow_size"` + NumberOfCachedScripts int64 `json:"number_of_cached_scripts"` + PubsubChannels int64 `json:"pubsub_channels"` + PubsubPatterns int64 `json:"pubsub_patterns"` + RdbBgsaveInProgress int64 `json:"rdb_bgsave_in_progress"` + RdbChangesSinceLastSave int64 `json:"rdb_changes_since_last_save"` + RdbCurrentBgsaveTimeSec int64 `json:"rdb_current_bgsave_time_sec"` + RdbLastBgsaveStatus string `json:"rdb_last_bgsave_status"` + RdbLastBgsaveTimeSec int64 `json:"rdb_last_bgsave_time_sec"` + RdbLastCowSize int64 `json:"rdb_last_cow_size"` + RdbLastSaveTime int64 `json:"rdb_last_save_time"` + RdbLastSaveTimeElapsed int64 `json:"rdb_last_save_time_elapsed"` + RedisVersion string `json:"redis_version"` + RejectedConnections int64 `json:"rejected_connections"` + ReplBacklogActive int64 `json:"repl_backlog_active"` + ReplBacklogFirstByteOffset int64 `json:"repl_backlog_first_byte_offset"` + ReplBacklogHistlen int64 `json:"repl_backlog_histlen"` + ReplBacklogSize int64 `json:"repl_backlog_size"` + RssOverheadBytes int64 `json:"rss_overhead_bytes"` + RssOverheadRatio float64 `json:"rss_overhead_ratio"` + SecondReplOffset int64 `json:"second_repl_offset"` + SlaveExpiresTrackedKeys int64 `json:"slave_expires_tracked_keys"` + SyncFull int64 `json:"sync_full"` + SyncPartialErr int64 `json:"sync_partial_err"` + SyncPartialOk int64 `json:"sync_partial_ok"` + TotalCommandsProcessed int64 `json:"total_commands_processed"` + TotalConnectionsReceived int64 `json:"total_connections_received"` + TotalNetInputBytes int64 `json:"total_net_input_bytes"` + TotalNetOutputBytes int64 `json:"total_net_output_bytes"` + TotalReadsProcessed int64 `json:"total_reads_processed"` + TotalSystemMemory int64 `json:"total_system_memory"` + TotalWritesProcessed int64 `json:"total_writes_processed"` + TrackingClients int64 `json:"tracking_clients"` + TrackingTotalItems int64 `json:"tracking_total_items"` + TrackingTotalKeys int64 `json:"tracking_total_keys"` + TrackingTotalPrefixes int64 `json:"tracking_total_prefixes"` + UnexpectedErrorReplies int64 `json:"unexpected_error_replies"` + Uptime int64 `json:"uptime"` + UsedCPUSys float64 `json:"used_cpu_sys"` + UsedCPUSysChildren float64 `json:"used_cpu_sys_children"` + UsedCPUUser float64 `json:"used_cpu_user"` + UsedCPUUserChildren float64 `json:"used_cpu_user_children"` + UsedMemory int64 `json:"used_memory"` + UsedMemoryDataset int64 `json:"used_memory_dataset"` + UsedMemoryDatasetPerc float64 `json:"used_memory_dataset_perc"` + UsedMemoryLua int64 `json:"used_memory_lua"` + UsedMemoryOverhead int64 `json:"used_memory_overhead"` + UsedMemoryPeak int64 `json:"used_memory_peak"` + UsedMemoryPeakPerc float64 `json:"used_memory_peak_perc"` + UsedMemoryRss int64 `json:"used_memory_rss"` + UsedMemoryScripts int64 `json:"used_memory_scripts"` + UsedMemoryStartup int64 `json:"used_memory_startup"` +} + func (r *RedisClient) Do(returnType string, args ...interface{}) (interface{}, error) { rawVal := r.client.Do(args...) @@ -352,6 +464,12 @@ func gatherInfoOutput( keyspace_hitrate = float64(keyspace_hits) / float64(keyspace_hits+keyspace_misses) } fields["keyspace_hitrate"] = keyspace_hitrate + + o := RedisFieldTypes{} + + setStructFieldsFromObject(fields, &o) + setExistingFieldsFromStruct(fields, &o) + acc.AddFields("redis", fields, tags) return nil } @@ -479,3 +597,115 @@ func init() { return &Redis{} }) } + +func setExistingFieldsFromStruct(fields map[string]interface{}, o *RedisFieldTypes) { + val := reflect.ValueOf(o).Elem() + typ := val.Type() + + for key := range fields { + if _, exists := fields[key]; exists { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + jsonFieldName := f.Tag.Get("json") + if jsonFieldName == key { + fields[key] = val.Field(i).Interface() + break + } + } + } + } +} + +func setStructFieldsFromObject(fields map[string]interface{}, o *RedisFieldTypes) { + val := reflect.ValueOf(o).Elem() + typ := val.Type() + + for key, value := range fields { + if _, exists := fields[key]; exists { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + jsonFieldName := f.Tag.Get("json") + if jsonFieldName == key { + structFieldValue := val.Field(i) + structFieldValue.Set(coerceType(value, structFieldValue.Type())) + break + } + } + } + } +} + +func coerceType(value interface{}, typ reflect.Type) reflect.Value { + switch sourceType := value.(type) { + case bool: + switch typ.Kind() { + case reflect.String: + if sourceType { + value = "true" + } else { + value = "false" + } + case reflect.Int64: + if sourceType { + value = int64(1) + } else { + value = int64(0) + } + case reflect.Float64: + if sourceType { + value = float64(1) + } else { + value = float64(0) + } + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + case int, int8, int16, int32, int64: + switch typ.Kind() { + case reflect.String: + value = fmt.Sprintf("%d", value) + case reflect.Int64: + // types match + case reflect.Float64: + value = float64(reflect.ValueOf(sourceType).Int()) + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + case uint, uint8, uint16, uint32, uint64: + switch typ.Kind() { + case reflect.String: + value = fmt.Sprintf("%d", value) + case reflect.Int64: + // types match + case reflect.Float64: + value = float64(reflect.ValueOf(sourceType).Uint()) + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + case float32, float64: + switch typ.Kind() { + case reflect.String: + value = fmt.Sprintf("%f", value) + case reflect.Int64: + value = int64(reflect.ValueOf(sourceType).Float()) + case reflect.Float64: + // types match + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + case string: + switch typ.Kind() { + case reflect.String: + // types match + case reflect.Int64: + value, _ = strconv.ParseInt(value.(string), 10, 64) + case reflect.Float64: + value, _ = strconv.ParseFloat(value.(string), 64) + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + default: + panic(fmt.Sprintf("unhandled source type %T", sourceType)) + } + return reflect.ValueOf(value) +} diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index d5aaa7a7bfa38..5765b18607e00 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -83,62 +83,115 @@ func TestRedis_ParseMetrics(t *testing.T) { tags = map[string]string{"host": "redis.net", "replication_role": "master"} fields := map[string]interface{}{ - "uptime": int64(238), - "lru_clock": int64(2364819), - "clients": int64(1), - "client_longest_output_list": int64(0), - "client_biggest_input_buf": int64(0), - "blocked_clients": int64(0), - "used_memory": int64(1003936), - "used_memory_rss": int64(811008), - "used_memory_peak": int64(1003936), - "used_memory_lua": int64(33792), - "used_memory_peak_perc": float64(93.58), - "used_memory_dataset_perc": float64(20.27), - "mem_fragmentation_ratio": float64(0.81), - "loading": int64(0), - "rdb_changes_since_last_save": int64(0), - "rdb_bgsave_in_progress": int64(0), - "rdb_last_save_time": int64(1428427941), - "rdb_last_bgsave_status": "ok", - "rdb_last_bgsave_time_sec": int64(-1), - "rdb_current_bgsave_time_sec": int64(-1), - "aof_enabled": int64(0), - "aof_rewrite_in_progress": int64(0), - "aof_rewrite_scheduled": int64(0), - "aof_last_rewrite_time_sec": int64(-1), - "aof_current_rewrite_time_sec": int64(-1), - "aof_last_bgrewrite_status": "ok", - "aof_last_write_status": "ok", - "total_connections_received": int64(2), - "total_commands_processed": int64(1), - "instantaneous_ops_per_sec": int64(0), - "instantaneous_input_kbps": float64(876.16), - "instantaneous_output_kbps": float64(3010.23), - "rejected_connections": int64(0), - "sync_full": int64(0), - "sync_partial_ok": int64(0), - "sync_partial_err": int64(0), - "expired_keys": int64(0), - "evicted_keys": int64(0), - "keyspace_hits": int64(1), - "keyspace_misses": int64(1), - "pubsub_channels": int64(0), - "pubsub_patterns": int64(0), - "latest_fork_usec": int64(0), - "connected_slaves": int64(2), - "master_repl_offset": int64(0), - "repl_backlog_active": int64(0), - "repl_backlog_size": int64(1048576), - "repl_backlog_first_byte_offset": int64(0), - "repl_backlog_histlen": int64(0), - "second_repl_offset": int64(-1), - "used_cpu_sys": float64(0.14), - "used_cpu_user": float64(0.05), - "used_cpu_sys_children": float64(0.00), - "used_cpu_user_children": float64(0.00), - "keyspace_hitrate": float64(0.50), - "redis_version": "2.8.9", + "uptime": int64(238), + "lru_clock": int64(2364819), + "clients": int64(1), + "client_longest_output_list": int64(0), + "client_biggest_input_buf": int64(0), + "blocked_clients": int64(0), + "used_memory": int64(1003936), + "used_memory_rss": int64(811008), + "used_memory_peak": int64(1003936), + "used_memory_lua": int64(33792), + "used_memory_peak_perc": float64(93.58), + "used_memory_dataset_perc": float64(20.27), + "mem_fragmentation_ratio": float64(0.81), + "loading": int64(0), + "rdb_changes_since_last_save": int64(0), + "rdb_bgsave_in_progress": int64(0), + "rdb_last_save_time": int64(1428427941), + "rdb_last_bgsave_status": "ok", + "rdb_last_bgsave_time_sec": int64(-1), + "rdb_current_bgsave_time_sec": int64(-1), + "aof_enabled": int64(0), + "aof_rewrite_in_progress": int64(0), + "aof_rewrite_scheduled": int64(0), + "aof_last_rewrite_time_sec": int64(-1), + "aof_current_rewrite_time_sec": int64(-1), + "aof_last_bgrewrite_status": "ok", + "aof_last_write_status": "ok", + "total_connections_received": int64(2), + "total_commands_processed": int64(1), + "instantaneous_ops_per_sec": int64(0), + "instantaneous_input_kbps": float64(876.16), + "instantaneous_output_kbps": float64(3010.23), + "rejected_connections": int64(0), + "sync_full": int64(0), + "sync_partial_ok": int64(0), + "sync_partial_err": int64(0), + "expired_keys": int64(0), + "evicted_keys": int64(0), + "keyspace_hits": int64(1), + "keyspace_misses": int64(1), + "pubsub_channels": int64(0), + "pubsub_patterns": int64(0), + "latest_fork_usec": int64(0), + "connected_slaves": int64(2), + "master_repl_offset": int64(0), + "repl_backlog_active": int64(0), + "repl_backlog_size": int64(1048576), + "repl_backlog_first_byte_offset": int64(0), + "repl_backlog_histlen": int64(0), + "second_repl_offset": int64(-1), + "used_cpu_sys": float64(0.14), + "used_cpu_user": float64(0.05), + "used_cpu_sys_children": float64(0.00), + "used_cpu_user_children": float64(0.00), + "keyspace_hitrate": float64(0.50), + "redis_version": "6.0.9", + "active_defrag_hits": int64(0), + "active_defrag_key_hits": int64(0), + "active_defrag_key_misses": int64(0), + "active_defrag_misses": int64(0), + "active_defrag_running": int64(0), + "allocator_active": int64(1022976), + "allocator_allocated": int64(1019632), + "allocator_frag_bytes": float64(3344), + "allocator_frag_ratio": float64(1.00), + "allocator_resident": int64(1022976), + "allocator_rss_bytes": int64(0), + "allocator_rss_ratio": float64(1.00), + "aof_last_cow_size": int64(0), + "client_recent_max_input_buffer": int64(16), + "client_recent_max_output_buffer": int64(0), + "clients_in_timeout_table": int64(0), + "cluster_enabled": int64(0), + "expire_cycle_cpu_milliseconds": int64(669), + "expired_stale_perc": float64(0.00), + "expired_time_cap_reached_count": int64(0), + "io_threaded_reads_processed": int64(0), + "io_threaded_writes_processed": int64(0), + "total_reads_processed": int64(31), + "total_writes_processed": int64(17), + "lazyfree_pending_objects": int64(0), + "maxmemory": int64(0), + "maxmemory_policy": string("noeviction"), + "mem_aof_buffer": int64(0), + "mem_clients_normal": int64(17440), + "mem_clients_slaves": int64(0), + "mem_fragmentation_bytes": int64(41232), + "mem_not_counted_for_evict": int64(0), + "mem_replication_backlog": int64(0), + "rss_overhead_bytes": int64(37888), + "rss_overhead_ratio": float64(1.04), + "total_system_memory": int64(17179869184), + "used_memory_dataset": int64(47088), + "used_memory_overhead": int64(1019152), + "used_memory_scripts": int64(0), + "used_memory_startup": int64(1001712), + "migrate_cached_sockets": int64(0), + "module_fork_in_progress": int64(0), + "module_fork_last_cow_size": int64(0), + "number_of_cached_scripts": int64(0), + "rdb_last_cow_size": int64(0), + "slave_expires_tracked_keys": int64(0), + "unexpected_error_replies": int64(0), + "total_net_input_bytes": int64(381), + "total_net_output_bytes": int64(71521), + "tracking_clients": int64(0), + "tracking_total_items": int64(0), + "tracking_total_keys": int64(0), + "tracking_total_prefixes": int64(0), } // We have to test rdb_last_save_time_offset manually because the value is based on the time when gathered @@ -210,26 +263,110 @@ func TestRedis_ParseMetrics(t *testing.T) { acc.AssertContainsTaggedFields(t, "redis_replication", replicationFields, replicationTags) } +func TestRedis_ParseFloatOnInts(t *testing.T) { + var acc testutil.Accumulator + tags := map[string]string{"host": "redis.net"} + rdr := bufio.NewReader(strings.NewReader(strings.Replace(testOutput, "mem_fragmentation_ratio:0.81", "mem_fragmentation_ratio:1", 1))) + err := gatherInfoOutput(rdr, &acc, tags) + require.NoError(t, err) + var m *testutil.Metric + for i := range acc.Metrics { + if _, ok := acc.Metrics[i].Fields["mem_fragmentation_ratio"]; ok { + m = acc.Metrics[i] + break + } + } + require.NotNil(t, m) + fragRatio, ok := m.Fields["mem_fragmentation_ratio"] + require.True(t, ok) + require.IsType(t, float64(0.0), fragRatio) +} + +func TestRedis_ParseIntOnFloats(t *testing.T) { + var acc testutil.Accumulator + tags := map[string]string{"host": "redis.net"} + rdr := bufio.NewReader(strings.NewReader(strings.Replace(testOutput, "clients_in_timeout_table:0", "clients_in_timeout_table:0.0", 1))) + err := gatherInfoOutput(rdr, &acc, tags) + require.NoError(t, err) + var m *testutil.Metric + for i := range acc.Metrics { + if _, ok := acc.Metrics[i].Fields["clients_in_timeout_table"]; ok { + m = acc.Metrics[i] + break + } + } + require.NotNil(t, m) + clientsInTimeout, ok := m.Fields["clients_in_timeout_table"] + require.True(t, ok) + require.IsType(t, int64(0), clientsInTimeout) +} + +func TestRedis_ParseStringOnInts(t *testing.T) { + var acc testutil.Accumulator + tags := map[string]string{"host": "redis.net"} + rdr := bufio.NewReader(strings.NewReader(strings.Replace(testOutput, "maxmemory_policy:no-eviction", "maxmemory_policy:1", 1))) + err := gatherInfoOutput(rdr, &acc, tags) + require.NoError(t, err) + var m *testutil.Metric + for i := range acc.Metrics { + if _, ok := acc.Metrics[i].Fields["maxmemory_policy"]; ok { + m = acc.Metrics[i] + break + } + } + require.NotNil(t, m) + maxmemoryPolicy, ok := m.Fields["maxmemory_policy"] + require.True(t, ok) + require.IsType(t, string(""), maxmemoryPolicy) +} + +func TestRedis_ParseIntOnString(t *testing.T) { + var acc testutil.Accumulator + tags := map[string]string{"host": "redis.net"} + rdr := bufio.NewReader(strings.NewReader(strings.Replace(testOutput, "clients_in_timeout_table:0", `clients_in_timeout_table:""`, 1))) + err := gatherInfoOutput(rdr, &acc, tags) + require.NoError(t, err) + var m *testutil.Metric + for i := range acc.Metrics { + if _, ok := acc.Metrics[i].Fields["clients_in_timeout_table"]; ok { + m = acc.Metrics[i] + break + } + } + require.NotNil(t, m) + clientsInTimeout, ok := m.Fields["clients_in_timeout_table"] + require.True(t, ok) + require.IsType(t, int64(0), clientsInTimeout) +} + const testOutput = `# Server -redis_version:2.8.9 +redis_version:6.0.9 redis_git_sha1:00000000 redis_git_dirty:0 -redis_build_id:9ccc8119ea98f6e1 +redis_build_id:26c3229b35eb3beb redis_mode:standalone -os:Darwin 14.1.0 x86_64 +os:Darwin 19.6.0 x86_64 arch_bits:64 multiplexing_api:kqueue +atomicvar_api:atomic-builtin gcc_version:4.2.1 -process_id:40235 -run_id:37d020620aadf0627282c0f3401405d774a82664 +process_id:46677 +run_id:5d6bf38087b23e48f1a59b7aca52e2b55438b02f tcp_port:6379 uptime_in_seconds:238 uptime_in_days:0 hz:10 +configured_hz:10 lru_clock:2364819 +executable:/usr/local/opt/redis/bin/redis-server config_file:/usr/local/etc/redis.conf +io_threads_active:0 # Clients +client_recent_max_input_buffer:16 +client_recent_max_output_buffer:0 +tracking_clients:0 +clients_in_timeout_table:0 connected_clients:1 client_longest_output_list:0 client_biggest_input_buf:0 @@ -239,13 +376,43 @@ blocked_clients:0 used_memory:1003936 used_memory_human:980.41K used_memory_rss:811008 +used_memory_rss_human:1.01M used_memory_peak:1003936 used_memory_peak_human:980.41K +used_memory_peak_perc:93.58% +used_memory_overhead:1019152 +used_memory_startup:1001712 +used_memory_dataset:47088 +used_memory_dataset_perc:20.27% +allocator_allocated:1019632 +allocator_active:1022976 +allocator_resident:1022976 +total_system_memory:17179869184 +total_system_memory_human:16.00G used_memory_lua:33792 +used_memory_lua_human:37.00K +used_memory_scripts:0 +used_memory_scripts_human:0B +number_of_cached_scripts:0 +maxmemory:0 +maxmemory_human:0B +maxmemory_policy:noeviction +allocator_frag_ratio:1.00 +allocator_frag_bytes:3344 +allocator_rss_ratio:1.00 +allocator_rss_bytes:0 +rss_overhead_ratio:1.04 +rss_overhead_bytes:37888 mem_fragmentation_ratio:0.81 +mem_fragmentation_bytes:41232 +mem_not_counted_for_evict:0 +mem_replication_backlog:0 +mem_clients_slaves:0 +mem_clients_normal:17440 +mem_aof_buffer:0 mem_allocator:libc -used_memory_peak_perc:93.58% -used_memory_dataset_perc:20.27% +active_defrag_running:0 +lazyfree_pending_objects:0 # Persistence loading:0 @@ -255,6 +422,7 @@ rdb_last_save_time:1428427941 rdb_last_bgsave_status:ok rdb_last_bgsave_time_sec:-1 rdb_current_bgsave_time_sec:-1 +rdb_last_cow_size:0 aof_enabled:0 aof_rewrite_in_progress:0 aof_rewrite_scheduled:0 @@ -262,11 +430,16 @@ aof_last_rewrite_time_sec:-1 aof_current_rewrite_time_sec:-1 aof_last_bgrewrite_status:ok aof_last_write_status:ok +aof_last_cow_size:0 +module_fork_in_progress:0 +module_fork_last_cow_size:0 # Stats total_connections_received:2 total_commands_processed:1 instantaneous_ops_per_sec:0 +total_net_input_bytes:381 +total_net_output_bytes:71521 instantaneous_input_kbps:876.16 instantaneous_output_kbps:3010.23 rejected_connections:0 @@ -274,12 +447,29 @@ sync_full:0 sync_partial_ok:0 sync_partial_err:0 expired_keys:0 +expired_stale_perc:0.00 +expired_time_cap_reached_count:0 +expire_cycle_cpu_milliseconds:669 evicted_keys:0 keyspace_hits:1 keyspace_misses:1 pubsub_channels:0 pubsub_patterns:0 latest_fork_usec:0 +migrate_cached_sockets:0 +slave_expires_tracked_keys:0 +active_defrag_hits:0 +active_defrag_misses:0 +active_defrag_key_hits:0 +active_defrag_key_misses:0 +tracking_total_keys:0 +tracking_total_items:0 +tracking_total_prefixes:0 +unexpected_error_replies:0 +total_reads_processed:31 +total_writes_processed:17 +io_threaded_reads_processed:0 +io_threaded_writes_processed:0 # Replication role:master @@ -301,6 +491,9 @@ used_cpu_user:0.05 used_cpu_sys_children:0.00 used_cpu_user_children:0.00 +# Cluster +cluster_enabled:0 + # Commandstats cmdstat_set:calls=261265,usec=1634157,usec_per_call=6.25 cmdstat_command:calls=1,usec=990,usec_per_call=990.00 @@ -308,5 +501,4 @@ cmdstat_command:calls=1,usec=990,usec_per_call=990.00 # Keyspace db0:keys=2,expires=0,avg_ttl=0 -(error) ERR unknown command 'eof' -` +(error) ERR unknown command 'eof'` From 6ed1431348c200760443c2a0422b0d54bea927ec Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Fri, 15 Jan 2021 10:16:37 -0800 Subject: [PATCH 179/761] update readme: prometheus remote write (#8683) --- plugins/serializers/prometheusremotewrite/README.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/plugins/serializers/prometheusremotewrite/README.md b/plugins/serializers/prometheusremotewrite/README.md index 8bad919b2e923..a0dc4a8deb03b 100644 --- a/plugins/serializers/prometheusremotewrite/README.md +++ b/plugins/serializers/prometheusremotewrite/README.md @@ -1,4 +1,4 @@ -# Prometheus +# Prometheus remote write The `prometheusremotewrite` data format converts metrics into the Prometheus protobuf exposition format. @@ -13,11 +13,17 @@ use only the `prometheus_client` output. ```toml [[outputs.http]] + ## URL is the address to send metrics to url = "https://cortex/api/prom/push" - data_format = "prometheusremotewrite" + + ## Optional TLS Config tls_ca = "/etc/telegraf/ca.pem" tls_cert = "/etc/telegraf/cert.pem" tls_key = "/etc/telegraf/key.pem" + + ## Data format to output. + data_format = "prometheusremotewrite" + [outputs.http.headers] Content-Type = "application/x-protobuf" Content-Encoding = "snappy" From 1bf5a19582665d7df40f389aebd9d23450231d65 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Tue, 19 Jan 2021 11:03:19 -0500 Subject: [PATCH 180/761] Add Event Log support for Windows (#8616) * Add event log support for windows when not running as a windows service. * Add error message for initializing event logger. * Add build windows flag. * Only register event logger when running telegraf under windows. * Update logger/event_logger.go Co-authored-by: Steven Soroka * Remove unnecessary 'fmt' import * Remove unnecessary 'fmt' import * Remove unnecessary error check * use constants for eid levels. Co-authored-by: Steven Soroka --- cmd/telegraf/telegraf_windows.go | 10 ++++------ logger/event_logger.go | 33 ++++++++++++++++++++++---------- logger/event_logger_test.go | 13 ++++++------- 3 files changed, 33 insertions(+), 23 deletions(-) diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go index 830e6eaa4f8a0..52b9c43b99a2f 100644 --- a/cmd/telegraf/telegraf_windows.go +++ b/cmd/telegraf/telegraf_windows.go @@ -12,6 +12,9 @@ import ( ) func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { + // Register the eventlog logging target for windows. + logger.RegisterEventLogger(*fServiceName) + if runtime.GOOS == "windows" && windowsRunAsService() { runAsWindowsService( inputFilters, @@ -96,12 +99,7 @@ func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, process } os.Exit(0) } else { - winlogger, err := s.Logger(nil) - if err == nil { - //When in service mode, register eventlog target andd setup default logging to eventlog - logger.RegisterEventLogger(winlogger) - logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog}) - } + logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog}) err = s.Run() if err != nil { diff --git a/logger/event_logger.go b/logger/event_logger.go index 48b645ddedb3c..44d5bce656a04 100644 --- a/logger/event_logger.go +++ b/logger/event_logger.go @@ -1,35 +1,41 @@ +//+build windows + package logger import ( "io" + "log" "strings" "github.com/influxdata/wlog" - "github.com/kardianos/service" + "golang.org/x/sys/windows/svc/eventlog" ) const ( LogTargetEventlog = "eventlog" + eidInfo = 1 + eidWarning = 2 + eidError = 3 ) type eventLogger struct { - logger service.Logger + logger *eventlog.Log } func (t *eventLogger) Write(b []byte) (n int, err error) { loc := prefixRegex.FindIndex(b) n = len(b) if loc == nil { - err = t.logger.Info(b) + err = t.logger.Info(1, string(b)) } else if n > 2 { //skip empty log messages line := strings.Trim(string(b[loc[1]:]), " \t\r\n") switch rune(b[loc[0]]) { case 'I': - err = t.logger.Info(line) + err = t.logger.Info(eidInfo, line) case 'W': - err = t.logger.Warning(line) + err = t.logger.Warning(eidWarning, line) case 'E': - err = t.logger.Error(line) + err = t.logger.Error(eidError, line) } } @@ -37,13 +43,20 @@ func (t *eventLogger) Write(b []byte) (n int, err error) { } type eventLoggerCreator struct { - serviceLogger service.Logger + logger *eventlog.Log } func (e *eventLoggerCreator) CreateLogger(config LogConfig) (io.Writer, error) { - return wlog.NewWriter(&eventLogger{logger: e.serviceLogger}), nil + return wlog.NewWriter(&eventLogger{logger: e.logger}), nil } -func RegisterEventLogger(serviceLogger service.Logger) { - registerLogger(LogTargetEventlog, &eventLoggerCreator{serviceLogger: serviceLogger}) +func RegisterEventLogger(name string) error { + eventLog, err := eventlog.Open(name) + if err != nil { + log.Printf("E! An error occurred while initializing an event logger. %s", err) + return err + } + + registerLogger(LogTargetEventlog, &eventLoggerCreator{logger: eventLog}) + return nil } diff --git a/logger/event_logger_test.go b/logger/event_logger_test.go index f2d4eb4209e89..4dddeb2ec2b85 100644 --- a/logger/event_logger_test.go +++ b/logger/event_logger_test.go @@ -10,9 +10,9 @@ import ( "testing" "time" - "github.com/kardianos/service" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/sys/windows/svc/eventlog" ) type Levels int @@ -30,7 +30,8 @@ type Event struct { func getEventLog(t *testing.T, since time.Time) []Event { timeStr := since.UTC().Format(time.RFC3339) - cmd := exec.Command("wevtutil", "qe", "Application", "/rd:true", "/q:Event[System[TimeCreated[@SystemTime >= '"+timeStr+"'] and Provider[@Name='Telegraf']]]") + timeStr = timeStr[:19] + cmd := exec.Command("wevtutil", "qe", "Application", "/rd:true", "/q:Event[System[TimeCreated[@SystemTime >= '"+timeStr+"'] and Provider[@Name='telegraf']]]") var out bytes.Buffer cmd.Stdout = &out err := cmd.Run() @@ -91,10 +92,8 @@ func TestRestrictedEventLog(t *testing.T) { } func prepareLogger(t *testing.T) { - svc, err := service.New(nil, &service.Config{Name: "Telegraf"}) + eventLog, err := eventlog.Open("telegraf") require.NoError(t, err) - svcLogger, err := svc.SystemLogger(nil) - require.NoError(t, err) - require.NotNil(t, svcLogger) - registerLogger(LogTargetEventlog, &eventLoggerCreator{serviceLogger: svcLogger}) + require.NotNil(t, eventLog) + registerLogger(LogTargetEventlog, &eventLoggerCreator{logger: eventLog}) } From 149c2c2738896e213ad3e63a7a4625006aeb2ee9 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 19 Jan 2021 10:17:36 -0600 Subject: [PATCH 181/761] Create dependabot.yml (#8614) Check weekly for dependency updates, will create a pull request if any are found. --- .github/dependabot.yml | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000000..f1b219b47ce50 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,6 @@ +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "weekly" From 4387b8c2b85fb6022eb15988cd7a8d43ac4f60e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Jan 2021 12:03:06 -0500 Subject: [PATCH 182/761] Bump github.com/newrelic/newrelic-telemetry-sdk-go from 0.2.0 to 0.5.1 (#8712) Bumps [github.com/newrelic/newrelic-telemetry-sdk-go](https://github.com/newrelic/newrelic-telemetry-sdk-go) from 0.2.0 to 0.5.1. - [Release notes](https://github.com/newrelic/newrelic-telemetry-sdk-go/releases) - [Changelog](https://github.com/newrelic/newrelic-telemetry-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/newrelic/newrelic-telemetry-sdk-go/compare/v0.2.0...v0.5.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index eba35c8b51dc9..19c7f581ccc77 100644 --- a/go.mod +++ b/go.mod @@ -98,7 +98,7 @@ require ( github.com/naoina/go-stringutil v0.1.0 // indirect github.com/nats-io/nats-server/v2 v2.1.4 github.com/nats-io/nats.go v1.9.1 - github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 + github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 github.com/nsqio/go-nsq v1.0.7 github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 github.com/opencontainers/go-digest v1.0.0-rc1 // indirect diff --git a/go.sum b/go.sum index f51b9c9b8c2ec..c3e278600f876 100644 --- a/go.sum +++ b/go.sum @@ -473,8 +473,8 @@ github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 h1:W8+lNIfAldCScGiikToSprbf3DCaMXk0VIM9l73BIpY= -github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ= +github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 h1:9YEHXplqlVkOltThchh+RxeODvTb1TBvQ1181aXg3pY= +github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY= From 54422f4f1dd0aa7816fa73b7bdc9aaf1f042e4a2 Mon Sep 17 00:00:00 2001 From: Dae-Ho Kim Date: Wed, 20 Jan 2021 11:45:24 +0900 Subject: [PATCH 183/761] add kafka connect example to jolokia2 input (#8709) * feat: add kafka connect example to jolokia2 input * docs: add kafka connect example link * chore: polishing --- plugins/inputs/jolokia2/README.md | 1 + .../jolokia2/examples/kafka-connect.conf | 90 +++++++++++++++++++ 2 files changed, 91 insertions(+) create mode 100644 plugins/inputs/jolokia2/examples/kafka-connect.conf diff --git a/plugins/inputs/jolokia2/README.md b/plugins/inputs/jolokia2/README.md index 4a7b8f4200a42..a944949dbab7e 100644 --- a/plugins/inputs/jolokia2/README.md +++ b/plugins/inputs/jolokia2/README.md @@ -179,6 +179,7 @@ Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configuration - [Java JVM](/plugins/inputs/jolokia2/examples/java.conf) - [JBoss](/plugins/inputs/jolokia2/examples/jboss.conf) - [Kafka](/plugins/inputs/jolokia2/examples/kafka.conf) +- [Kafka Connect](/plugins/inputs/jolokia2/examples/kafka-connect.conf) - [Tomcat](/plugins/inputs/jolokia2/examples/tomcat.conf) - [Weblogic](/plugins/inputs/jolokia2/examples/weblogic.conf) - [ZooKeeper](/plugins/inputs/jolokia2/examples/zookeeper.conf) diff --git a/plugins/inputs/jolokia2/examples/kafka-connect.conf b/plugins/inputs/jolokia2/examples/kafka-connect.conf new file mode 100644 index 0000000000000..d84f5fd58df2c --- /dev/null +++ b/plugins/inputs/jolokia2/examples/kafka-connect.conf @@ -0,0 +1,90 @@ +[[inputs.jolokia2_agent]] + urls = ["http://localhost:8080/jolokia"] + name_prefix = "kafka.connect." + + [[processors.enum]] + [[processors.enum.mapping]] + field = "status" + + [processors.enum.mapping.value_mappings] + paused = 0 + running = 1 + unassigned = 2 + failed = 3 + destroyed = 4 + + [inputs.jolokia2_agent.tags] + input_type = "kafka-connect" + + # https://kafka.apache.org/documentation/#connect_monitoring + [[inputs.jolokia2_agent.metric]] + name = "connectWorkerMetrics" + mbean = "kafka.connect:type=connect-worker-metrics" + paths = ["connector-count", "connector-startup-attempts-total", "connector-startup-failure-percentage", "connector-startup-failure-total", "connector-startup-success-percentage", "connector-startup-success-total", "task-count", "task-startup-attempts-total", "task-startup-failure-percentage", "task-startup-failure-total", "task-startup-success-percentage", "task-startup-success-total"] + + [[inputs.jolokia2_agent.metric]] + name = "connectWorkerMetrics" + mbean = "kafka.connect:type=connect-worker-metrics,connector=*" + paths = ["connector-destroyed-task-count", "connector-failed-task-count", "connector-paused-task-count", "connector-running-task-count", "connector-total-task-count", "connector-unassigned-task-count"] + tag_keys = ["connector"] + + [[inputs.jolokia2_agent.metric]] + name = "connectWorkerRebalanceMetrics" + mbean = "kafka.connect:type=connect-worker-rebalance-metrics" + paths = ["completed-rebalances-total", "connect-protocol", "epoch", "leader-name", "rebalance-avg-time-ms", "rebalance-max-time-ms", "rebalancing", "time-since-last-rebalance-ms"] + + [[inputs.jolokia2_agent.metric]] + name = "connectorMetrics" + mbean = "kafka.connect:type=connector-metrics,connector=*" + paths = ["connector-class", "connector-version", "connector-type", "status"] + tag_keys = ["connector"] + + [[inputs.jolokia2_agent.metric]] + name = "connectorTaskMetrics" + mbean = "kafka.connect:type=connector-task-metrics,connector=*,task=*" + paths = ["batch-size-avg", "batch-size-max", "offset-commit-avg-time-ms", "offset-commit-failure-percentage", "offset-commit-max-time-ms", "offset-commit-success-percentage", "pause-ratio", "running-ratio", "status"] + tag_keys = ["connector", "task"] + + [[inputs.jolokia2_agent.metric]] + name = "sinkTaskMetrics" + mbean = "kafka.connect:type=sink-task-metrics,connector=*,task=*" + paths = ["offset-commit-completion-rate", "offset-commit-completion-total", "offset-commit-seq-no", "offset-commit-skip-rate", "offset-commit-skip-total", "partition-count", "put-batch-avg-time-ms", "put-batch-max-time-ms", "sink-record-active-count", "sink-record-active-count-avg", "sink-record-active-count-max", "sink-record-lag-max", "sink-record-read-rate", "sink-record-read-total", "sink-record-send-rate", "sink-record-send-total"] + tag_keys = ["connector", "task"] + + [[inputs.jolokia2_agent.metric]] + name = "sourceTaskMetrics" + mbean = "kafka.connect:type=source-task-metrics,connector=*,task=*" + paths = ["poll-batch-avg-time-ms", "poll-batch-max-time-ms", "source-record-active-count", "source-record-active-count-avg", "source-record-active-count-max", "source-record-poll-rate", "source-record-poll-total", "source-record-write-rate", "source-record-write-total"] + tag_keys = ["connector", "task"] + + [[inputs.jolokia2_agent.metric]] + name = "taskErrorMetrics" + mbean = "kafka.connect:type=task-error-metrics,connector=*,task=*" + paths = ["deadletterqueue-produce-failures", "deadletterqueue-produce-requests", "last-error-timestamp", "total-errors-logged", "total-record-errors", "total-record-failures", "total-records-skipped", "total-retries"] + tag_keys = ["connector", "task"] + + # https://kafka.apache.org/documentation/#selector_monitoring + [[inputs.jolokia2_agent.metric]] + name = "connectMetrics" + mbean = "kafka.connect:type=connect-metrics,client-id=*" + paths = ["connection-close-rate", "connection-close-total", "connection-creation-rate", "connection-creation-total", "network-io-rate", "network-io-total", "outgoing-byte-rate", "outgoing-byte-total", "request-rate", "request-total", "request-size-avg", "request-size-max", "incoming-byte-rate", "incoming-byte-rate", "incoming-byte-total", "response-rate", "response-total", "select-rate", "select-total", "io-wait-time-ns-avg", "io-wait-ratio", "io-time-ns-avg", "io-ratio", "connection-count", "successful-authentication-rate", "successful-authentication-total", "failed-authentication-rate", "failed-authentication-total", "successful-reauthentication-rate", "successful-reauthentication-total", "reauthentication-latency-max", "reauthentication-latency-avg", "failed-reauthentication-rate", "failed-reauthentication-total", "successful-authentication-no-reauth-total"] + tag_keys = ["client-id"] + + # https://kafka.apache.org/documentation/#common_node_monitoring + [[inputs.jolokia2_agent.metric]] + name = "connectNodeMetrics" + mbean = "kafka.connect:type=connect-node-metrics,client-id=*,node-id=*" + paths = ["outgoing-byte-rate", "outgoing-byte-total", "request-rate", "request-total", "request-size-avg", "request-size-max", "incoming-byte-rate", "incoming-byte-total", "request-latency-avg", "request-latency-max", "response-rate", "response-total"] + tag_keys = ["client-id", "node-id"] + + [[inputs.jolokia2_agent.metric]] + name = "appInfo" + mbean = "kafka.connect:type=app-info,client-id=*" + paths = ["start-time-ms", "commit-id", "version"] + tag_keys = ["client-id"] + + [[inputs.jolokia2_agent.metric]] + name = "connectCoordinatorMetrics" + mbean = "kafka.connect:type=connect-coordinator-metrics,client-id=*" + paths = ["join-time-max", "failed-rebalance-rate-per-hour", "rebalance-latency-total", "sync-time-avg", "join-rate", "sync-rate", "failed-rebalance-total", "rebalance-total", "last-heartbeat-seconds-ago", "heartbeat-rate", "join-time-avg", "sync-total", "rebalance-latency-max", "sync-time-max", "last-rebalance-seconds-ago", "rebalance-rate-per-hour", "assigned-connectors", "heartbeat-total", "assigned-tasks", "heartbeat-response-time-max", "rebalance-latency-avg", "join-total"] + tag_keys = ["client-id"] \ No newline at end of file From 4462b172f31f2efe0ef194c562b0dcf24952b81e Mon Sep 17 00:00:00 2001 From: Adrian Thurston Date: Wed, 20 Jan 2021 15:33:59 -0800 Subject: [PATCH 184/761] fix: remove ambiguity on '\v' from line-protocol parser (#8720) --- plugins/parsers/influx/machine.go | 31601 ++----------------------- plugins/parsers/influx/machine.go.rl | 6 +- 2 files changed, 1664 insertions(+), 29943 deletions(-) diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go index 332b73592486e..5d715af1c4aaf 100644 --- a/plugins/parsers/influx/machine.go +++ b/plugins/parsers/influx/machine.go @@ -30,14 +30,14 @@ var ( //line plugins/parsers/influx/machine.go:33 -const LineProtocol_start int = 269 -const LineProtocol_first_final int = 269 +const LineProtocol_start int = 46 +const LineProtocol_first_final int = 46 const LineProtocol_error int = 0 -const LineProtocol_en_main int = 269 -const LineProtocol_en_discard_line int = 257 -const LineProtocol_en_align int = 739 -const LineProtocol_en_series int = 260 +const LineProtocol_en_main int = 46 +const LineProtocol_en_discard_line int = 34 +const LineProtocol_en_align int = 85 +const LineProtocol_en_series int = 37 //line plugins/parsers/influx/machine.go.rl:321 @@ -173,8 +173,8 @@ func (m *machine) exec() error { _again: switch ( m.cs) { - case 269: - goto st269 + case 46: + goto st46 case 1: goto st1 case 2: @@ -189,12 +189,12 @@ _again: goto st5 case 6: goto st6 - case 270: - goto st270 - case 271: - goto st271 - case 272: - goto st272 + case 47: + goto st47 + case 48: + goto st48 + case 49: + goto st49 case 7: goto st7 case 8: @@ -203,200 +203,12 @@ _again: goto st9 case 10: goto st10 - case 11: - goto st11 - case 12: - goto st12 - case 13: - goto st13 - case 14: - goto st14 - case 15: - goto st15 - case 16: - goto st16 - case 17: - goto st17 - case 18: - goto st18 - case 19: - goto st19 - case 20: - goto st20 - case 21: - goto st21 - case 22: - goto st22 - case 23: - goto st23 - case 24: - goto st24 - case 25: - goto st25 - case 26: - goto st26 - case 27: - goto st27 - case 28: - goto st28 - case 29: - goto st29 - case 30: - goto st30 - case 31: - goto st31 - case 273: - goto st273 - case 274: - goto st274 - case 32: - goto st32 - case 33: - goto st33 - case 275: - goto st275 - case 276: - goto st276 - case 277: - goto st277 - case 34: - goto st34 - case 278: - goto st278 - case 279: - goto st279 - case 280: - goto st280 - case 281: - goto st281 - case 282: - goto st282 - case 283: - goto st283 - case 284: - goto st284 - case 285: - goto st285 - case 286: - goto st286 - case 287: - goto st287 - case 288: - goto st288 - case 289: - goto st289 - case 290: - goto st290 - case 291: - goto st291 - case 292: - goto st292 - case 293: - goto st293 - case 294: - goto st294 - case 295: - goto st295 - case 35: - goto st35 - case 36: - goto st36 - case 296: - goto st296 - case 297: - goto st297 - case 298: - goto st298 - case 37: - goto st37 - case 38: - goto st38 - case 39: - goto st39 - case 40: - goto st40 - case 41: - goto st41 - case 299: - goto st299 - case 300: - goto st300 - case 301: - goto st301 - case 302: - goto st302 - case 42: - goto st42 - case 303: - goto st303 - case 304: - goto st304 - case 305: - goto st305 - case 306: - goto st306 - case 307: - goto st307 - case 308: - goto st308 - case 309: - goto st309 - case 310: - goto st310 - case 311: - goto st311 - case 312: - goto st312 - case 313: - goto st313 - case 314: - goto st314 - case 315: - goto st315 - case 316: - goto st316 - case 317: - goto st317 - case 318: - goto st318 - case 319: - goto st319 - case 320: - goto st320 - case 321: - goto st321 - case 322: - goto st322 - case 323: - goto st323 - case 324: - goto st324 - case 43: - goto st43 - case 44: - goto st44 - case 45: - goto st45 - case 46: - goto st46 - case 47: - goto st47 - case 48: - goto st48 - case 49: - goto st49 case 50: goto st50 case 51: goto st51 case 52: goto st52 - case 325: - goto st325 - case 326: - goto st326 - case 327: - goto st327 case 53: goto st53 case 54: @@ -409,102 +221,12 @@ _again: goto st57 case 58: goto st58 - case 328: - goto st328 - case 329: - goto st329 case 59: goto st59 - case 330: - goto st330 - case 331: - goto st331 - case 332: - goto st332 - case 333: - goto st333 - case 334: - goto st334 - case 335: - goto st335 - case 336: - goto st336 - case 337: - goto st337 - case 338: - goto st338 - case 339: - goto st339 - case 340: - goto st340 - case 341: - goto st341 - case 342: - goto st342 - case 343: - goto st343 - case 344: - goto st344 - case 345: - goto st345 - case 346: - goto st346 - case 347: - goto st347 - case 348: - goto st348 - case 349: - goto st349 case 60: goto st60 - case 350: - goto st350 - case 351: - goto st351 - case 352: - goto st352 case 61: goto st61 - case 353: - goto st353 - case 354: - goto st354 - case 355: - goto st355 - case 356: - goto st356 - case 357: - goto st357 - case 358: - goto st358 - case 359: - goto st359 - case 360: - goto st360 - case 361: - goto st361 - case 362: - goto st362 - case 363: - goto st363 - case 364: - goto st364 - case 365: - goto st365 - case 366: - goto st366 - case 367: - goto st367 - case 368: - goto st368 - case 369: - goto st369 - case 370: - goto st370 - case 371: - goto st371 - case 372: - goto st372 case 62: goto st62 case 63: @@ -515,78 +237,38 @@ _again: goto st65 case 66: goto st66 - case 373: - goto st373 case 67: goto st67 case 68: goto st68 case 69: goto st69 + case 11: + goto st11 + case 12: + goto st12 + case 13: + goto st13 + case 14: + goto st14 + case 15: + goto st15 case 70: goto st70 + case 16: + goto st16 + case 17: + goto st17 case 71: goto st71 - case 374: - goto st374 - case 375: - goto st375 - case 376: - goto st376 case 72: goto st72 case 73: goto st73 case 74: goto st74 - case 377: - goto st377 - case 378: - goto st378 - case 379: - goto st379 case 75: goto st75 - case 380: - goto st380 - case 381: - goto st381 - case 382: - goto st382 - case 383: - goto st383 - case 384: - goto st384 - case 385: - goto st385 - case 386: - goto st386 - case 387: - goto st387 - case 388: - goto st388 - case 389: - goto st389 - case 390: - goto st390 - case 391: - goto st391 - case 392: - goto st392 - case 393: - goto st393 - case 394: - goto st394 - case 395: - goto st395 - case 396: - goto st396 - case 397: - goto st397 - case 398: - goto st398 - case 399: - goto st399 case 76: goto st76 case 77: @@ -595,1072 +277,82 @@ _again: goto st78 case 79: goto st79 + case 18: + goto st18 + case 19: + goto st19 + case 20: + goto st20 case 80: goto st80 + case 21: + goto st21 + case 22: + goto st22 + case 23: + goto st23 case 81: goto st81 + case 24: + goto st24 + case 25: + goto st25 case 82: goto st82 case 83: goto st83 + case 26: + goto st26 + case 27: + goto st27 + case 28: + goto st28 + case 29: + goto st29 + case 30: + goto st30 + case 31: + goto st31 + case 32: + goto st32 + case 33: + goto st33 + case 34: + goto st34 case 84: goto st84 - case 85: - goto st85 + case 37: + goto st37 case 86: goto st86 case 87: goto st87 + case 38: + goto st38 + case 39: + goto st39 + case 40: + goto st40 + case 41: + goto st41 case 88: goto st88 + case 42: + goto st42 case 89: goto st89 - case 400: - goto st400 - case 401: - goto st401 - case 402: - goto st402 - case 403: - goto st403 - case 90: - goto st90 - case 91: - goto st91 - case 92: - goto st92 - case 93: - goto st93 - case 404: - goto st404 - case 405: - goto st405 - case 94: - goto st94 - case 95: - goto st95 - case 406: - goto st406 - case 96: - goto st96 - case 97: - goto st97 - case 407: - goto st407 - case 408: - goto st408 - case 98: - goto st98 - case 409: - goto st409 - case 410: - goto st410 - case 99: - goto st99 - case 100: - goto st100 - case 411: - goto st411 - case 412: - goto st412 - case 413: - goto st413 - case 414: - goto st414 - case 415: - goto st415 - case 416: - goto st416 - case 417: - goto st417 - case 418: - goto st418 - case 419: - goto st419 - case 420: - goto st420 - case 421: - goto st421 - case 422: - goto st422 - case 423: - goto st423 - case 424: - goto st424 - case 425: - goto st425 - case 426: - goto st426 - case 427: - goto st427 - case 428: - goto st428 - case 101: - goto st101 - case 429: - goto st429 - case 430: - goto st430 - case 431: - goto st431 - case 102: - goto st102 - case 103: - goto st103 - case 432: - goto st432 - case 433: - goto st433 - case 434: - goto st434 - case 104: - goto st104 - case 435: - goto st435 - case 436: - goto st436 - case 437: - goto st437 - case 438: - goto st438 - case 439: - goto st439 - case 440: - goto st440 - case 441: - goto st441 - case 442: - goto st442 - case 443: - goto st443 - case 444: - goto st444 - case 445: - goto st445 - case 446: - goto st446 - case 447: - goto st447 - case 448: - goto st448 - case 449: - goto st449 - case 450: - goto st450 - case 451: - goto st451 - case 452: - goto st452 - case 453: - goto st453 - case 454: - goto st454 - case 105: - goto st105 - case 455: - goto st455 - case 456: - goto st456 - case 457: - goto st457 - case 458: - goto st458 - case 459: - goto st459 - case 460: - goto st460 - case 461: - goto st461 - case 462: - goto st462 - case 463: - goto st463 - case 464: - goto st464 - case 465: - goto st465 - case 466: - goto st466 - case 467: - goto st467 - case 468: - goto st468 - case 469: - goto st469 - case 470: - goto st470 - case 471: - goto st471 - case 472: - goto st472 - case 473: - goto st473 - case 474: - goto st474 - case 475: - goto st475 - case 476: - goto st476 - case 106: - goto st106 - case 107: - goto st107 - case 108: - goto st108 - case 109: - goto st109 - case 110: - goto st110 - case 477: - goto st477 - case 111: - goto st111 - case 478: - goto st478 - case 479: - goto st479 - case 112: - goto st112 - case 480: - goto st480 - case 481: - goto st481 - case 482: - goto st482 - case 483: - goto st483 - case 484: - goto st484 - case 485: - goto st485 - case 486: - goto st486 - case 487: - goto st487 - case 488: - goto st488 - case 113: - goto st113 - case 114: - goto st114 - case 115: - goto st115 - case 489: - goto st489 - case 116: - goto st116 - case 117: - goto st117 - case 118: - goto st118 - case 490: - goto st490 - case 119: - goto st119 - case 120: - goto st120 - case 491: - goto st491 - case 492: - goto st492 - case 121: - goto st121 - case 122: - goto st122 - case 123: - goto st123 - case 124: - goto st124 - case 493: - goto st493 - case 494: - goto st494 - case 495: - goto st495 - case 125: - goto st125 - case 496: - goto st496 - case 497: - goto st497 - case 498: - goto st498 - case 499: - goto st499 - case 500: - goto st500 - case 501: - goto st501 - case 502: - goto st502 - case 503: - goto st503 - case 504: - goto st504 - case 505: - goto st505 - case 506: - goto st506 - case 507: - goto st507 - case 508: - goto st508 - case 509: - goto st509 - case 510: - goto st510 - case 511: - goto st511 - case 512: - goto st512 - case 513: - goto st513 - case 514: - goto st514 - case 515: - goto st515 - case 126: - goto st126 - case 127: - goto st127 - case 516: - goto st516 - case 517: - goto st517 - case 518: - goto st518 - case 519: - goto st519 - case 520: - goto st520 - case 521: - goto st521 - case 522: - goto st522 - case 523: - goto st523 - case 524: - goto st524 - case 128: - goto st128 - case 129: - goto st129 - case 130: - goto st130 - case 525: - goto st525 - case 131: - goto st131 - case 132: - goto st132 - case 133: - goto st133 - case 526: - goto st526 - case 134: - goto st134 - case 135: - goto st135 - case 527: - goto st527 - case 528: - goto st528 - case 136: - goto st136 - case 137: - goto st137 - case 138: - goto st138 - case 529: - goto st529 - case 530: - goto st530 - case 139: - goto st139 - case 531: - goto st531 - case 140: - goto st140 - case 532: - goto st532 - case 533: - goto st533 - case 534: - goto st534 - case 535: - goto st535 - case 536: - goto st536 - case 537: - goto st537 - case 538: - goto st538 - case 539: - goto st539 - case 141: - goto st141 - case 142: - goto st142 - case 143: - goto st143 - case 540: - goto st540 - case 144: - goto st144 - case 145: - goto st145 - case 146: - goto st146 - case 541: - goto st541 - case 147: - goto st147 - case 148: - goto st148 - case 542: - goto st542 - case 543: - goto st543 - case 544: - goto st544 - case 545: - goto st545 - case 546: - goto st546 - case 547: - goto st547 - case 548: - goto st548 - case 549: - goto st549 - case 550: - goto st550 - case 551: - goto st551 - case 552: - goto st552 - case 553: - goto st553 - case 554: - goto st554 - case 555: - goto st555 - case 556: - goto st556 - case 557: - goto st557 - case 558: - goto st558 - case 559: - goto st559 - case 560: - goto st560 - case 561: - goto st561 - case 149: - goto st149 - case 150: - goto st150 - case 562: - goto st562 - case 563: - goto st563 - case 564: - goto st564 - case 151: - goto st151 - case 565: - goto st565 - case 566: - goto st566 - case 152: - goto st152 - case 567: - goto st567 - case 568: - goto st568 - case 569: - goto st569 - case 570: - goto st570 - case 571: - goto st571 - case 572: - goto st572 - case 573: - goto st573 - case 574: - goto st574 - case 575: - goto st575 - case 576: - goto st576 - case 577: - goto st577 - case 578: - goto st578 - case 579: - goto st579 - case 580: - goto st580 - case 581: - goto st581 - case 582: - goto st582 - case 583: - goto st583 - case 584: - goto st584 - case 153: - goto st153 - case 154: - goto st154 - case 585: - goto st585 - case 155: - goto st155 - case 586: - goto st586 - case 587: - goto st587 - case 588: - goto st588 - case 589: - goto st589 - case 590: - goto st590 - case 591: - goto st591 - case 592: - goto st592 - case 593: - goto st593 - case 156: - goto st156 - case 157: - goto st157 - case 158: - goto st158 - case 594: - goto st594 - case 159: - goto st159 - case 160: - goto st160 - case 161: - goto st161 - case 595: - goto st595 - case 162: - goto st162 - case 163: - goto st163 - case 596: - goto st596 - case 597: - goto st597 - case 164: - goto st164 - case 165: - goto st165 - case 166: - goto st166 - case 167: - goto st167 - case 168: - goto st168 - case 169: - goto st169 - case 598: - goto st598 - case 599: - goto st599 - case 600: - goto st600 - case 601: - goto st601 - case 602: - goto st602 - case 603: - goto st603 - case 604: - goto st604 - case 605: - goto st605 - case 606: - goto st606 - case 607: - goto st607 - case 608: - goto st608 - case 609: - goto st609 - case 610: - goto st610 - case 611: - goto st611 - case 612: - goto st612 - case 613: - goto st613 - case 614: - goto st614 - case 615: - goto st615 - case 616: - goto st616 - case 170: - goto st170 - case 171: - goto st171 - case 172: - goto st172 - case 617: - goto st617 - case 618: - goto st618 - case 619: - goto st619 - case 173: - goto st173 - case 620: - goto st620 - case 621: - goto st621 - case 174: - goto st174 - case 622: - goto st622 - case 623: - goto st623 - case 624: - goto st624 - case 625: - goto st625 - case 626: - goto st626 - case 175: - goto st175 - case 176: - goto st176 - case 177: - goto st177 - case 627: - goto st627 - case 178: - goto st178 - case 179: - goto st179 - case 180: - goto st180 - case 628: - goto st628 - case 181: - goto st181 - case 182: - goto st182 - case 629: - goto st629 - case 630: - goto st630 - case 183: - goto st183 - case 631: - goto st631 - case 632: - goto st632 - case 633: - goto st633 - case 184: - goto st184 - case 185: - goto st185 - case 186: - goto st186 - case 634: - goto st634 - case 187: - goto st187 - case 188: - goto st188 - case 189: - goto st189 - case 635: - goto st635 - case 190: - goto st190 - case 191: - goto st191 - case 636: - goto st636 - case 637: - goto st637 - case 192: - goto st192 - case 193: - goto st193 - case 194: - goto st194 - case 638: - goto st638 - case 195: - goto st195 - case 196: - goto st196 - case 639: - goto st639 - case 640: - goto st640 - case 641: - goto st641 - case 642: - goto st642 - case 643: - goto st643 - case 644: - goto st644 - case 645: - goto st645 - case 646: - goto st646 - case 197: - goto st197 - case 198: - goto st198 - case 199: - goto st199 - case 647: - goto st647 - case 200: - goto st200 - case 201: - goto st201 - case 202: - goto st202 - case 648: - goto st648 - case 203: - goto st203 - case 204: - goto st204 - case 649: - goto st649 - case 650: - goto st650 - case 205: - goto st205 - case 206: - goto st206 - case 207: - goto st207 - case 651: - goto st651 - case 652: - goto st652 - case 653: - goto st653 - case 654: - goto st654 - case 655: - goto st655 - case 656: - goto st656 - case 657: - goto st657 - case 658: - goto st658 - case 659: - goto st659 - case 660: - goto st660 - case 661: - goto st661 - case 662: - goto st662 - case 663: - goto st663 - case 664: - goto st664 - case 665: - goto st665 - case 666: - goto st666 - case 667: - goto st667 - case 668: - goto st668 - case 669: - goto st669 - case 208: - goto st208 - case 209: - goto st209 - case 210: - goto st210 - case 211: - goto st211 - case 212: - goto st212 - case 670: - goto st670 - case 213: - goto st213 - case 214: - goto st214 - case 671: - goto st671 - case 672: - goto st672 - case 673: - goto st673 - case 674: - goto st674 - case 675: - goto st675 - case 676: - goto st676 - case 677: - goto st677 - case 678: - goto st678 - case 679: - goto st679 - case 215: - goto st215 - case 216: - goto st216 - case 217: - goto st217 - case 680: - goto st680 - case 218: - goto st218 - case 219: - goto st219 - case 220: - goto st220 - case 681: - goto st681 - case 221: - goto st221 - case 222: - goto st222 - case 682: - goto st682 - case 683: - goto st683 - case 223: - goto st223 - case 224: - goto st224 - case 225: - goto st225 - case 684: - goto st684 - case 226: - goto st226 - case 227: - goto st227 - case 685: - goto st685 - case 686: - goto st686 - case 687: - goto st687 - case 688: - goto st688 - case 689: - goto st689 - case 690: - goto st690 - case 691: - goto st691 - case 692: - goto st692 - case 228: - goto st228 - case 229: - goto st229 - case 230: - goto st230 - case 693: - goto st693 - case 231: - goto st231 - case 232: - goto st232 - case 694: - goto st694 - case 695: - goto st695 - case 696: - goto st696 - case 697: - goto st697 - case 698: - goto st698 - case 699: - goto st699 - case 700: - goto st700 - case 701: - goto st701 - case 233: - goto st233 - case 234: - goto st234 - case 235: - goto st235 - case 702: - goto st702 - case 236: - goto st236 - case 237: - goto st237 - case 238: - goto st238 - case 703: - goto st703 - case 239: - goto st239 - case 240: - goto st240 - case 704: - goto st704 - case 705: - goto st705 - case 241: - goto st241 - case 242: - goto st242 - case 243: - goto st243 - case 706: - goto st706 - case 707: - goto st707 - case 708: - goto st708 - case 709: - goto st709 - case 710: - goto st710 - case 711: - goto st711 - case 712: - goto st712 - case 713: - goto st713 - case 714: - goto st714 - case 715: - goto st715 - case 716: - goto st716 - case 717: - goto st717 - case 718: - goto st718 - case 719: - goto st719 - case 720: - goto st720 - case 721: - goto st721 - case 722: - goto st722 - case 723: - goto st723 - case 724: - goto st724 - case 244: - goto st244 - case 245: - goto st245 - case 725: - goto st725 - case 246: - goto st246 - case 247: - goto st247 - case 726: - goto st726 - case 727: - goto st727 - case 728: - goto st728 - case 729: - goto st729 - case 730: - goto st730 - case 731: - goto st731 - case 732: - goto st732 - case 733: - goto st733 - case 248: - goto st248 - case 249: - goto st249 - case 250: - goto st250 - case 734: - goto st734 - case 251: - goto st251 - case 252: - goto st252 - case 253: - goto st253 - case 735: - goto st735 - case 254: - goto st254 - case 255: - goto st255 - case 736: - goto st736 - case 737: - goto st737 - case 256: - goto st256 - case 257: - goto st257 - case 738: - goto st738 - case 260: - goto st260 - case 740: - goto st740 - case 741: - goto st741 - case 261: - goto st261 - case 262: - goto st262 - case 263: - goto st263 - case 264: - goto st264 - case 742: - goto st742 - case 265: - goto st265 - case 743: - goto st743 - case 266: - goto st266 - case 267: - goto st267 - case 268: - goto st268 - case 739: - goto st739 - case 258: - goto st258 - case 259: - goto st259 + case 43: + goto st43 + case 44: + goto st44 + case 45: + goto st45 + case 85: + goto st85 + case 35: + goto st35 + case 36: + goto st36 } if ( m.p)++; ( m.p) == ( m.pe) { @@ -1668,8 +360,8 @@ _again: } _resume: switch ( m.cs) { - case 269: - goto st_case_269 + case 46: + goto st_case_46 case 1: goto st_case_1 case 2: @@ -1684,12 +376,12 @@ _resume: goto st_case_5 case 6: goto st_case_6 - case 270: - goto st_case_270 - case 271: - goto st_case_271 - case 272: - goto st_case_272 + case 47: + goto st_case_47 + case 48: + goto st_case_48 + case 49: + goto st_case_49 case 7: goto st_case_7 case 8: @@ -1698,6 +390,46 @@ _resume: goto st_case_9 case 10: goto st_case_10 + case 50: + goto st_case_50 + case 51: + goto st_case_51 + case 52: + goto st_case_52 + case 53: + goto st_case_53 + case 54: + goto st_case_54 + case 55: + goto st_case_55 + case 56: + goto st_case_56 + case 57: + goto st_case_57 + case 58: + goto st_case_58 + case 59: + goto st_case_59 + case 60: + goto st_case_60 + case 61: + goto st_case_61 + case 62: + goto st_case_62 + case 63: + goto st_case_63 + case 64: + goto st_case_64 + case 65: + goto st_case_65 + case 66: + goto st_case_66 + case 67: + goto st_case_67 + case 68: + goto st_case_68 + case 69: + goto st_case_69 case 11: goto st_case_11 case 12: @@ -1708,26 +440,54 @@ _resume: goto st_case_14 case 15: goto st_case_15 + case 70: + goto st_case_70 case 16: goto st_case_16 case 17: goto st_case_17 + case 71: + goto st_case_71 + case 72: + goto st_case_72 + case 73: + goto st_case_73 + case 74: + goto st_case_74 + case 75: + goto st_case_75 + case 76: + goto st_case_76 + case 77: + goto st_case_77 + case 78: + goto st_case_78 + case 79: + goto st_case_79 case 18: goto st_case_18 case 19: goto st_case_19 case 20: goto st_case_20 + case 80: + goto st_case_80 case 21: goto st_case_21 case 22: goto st_case_22 case 23: goto st_case_23 + case 81: + goto st_case_81 case 24: goto st_case_24 case 25: goto st_case_25 + case 82: + goto st_case_82 + case 83: + goto st_case_83 case 26: goto st_case_26 case 27: @@ -1740,70 +500,20 @@ _resume: goto st_case_30 case 31: goto st_case_31 - case 273: - goto st_case_273 - case 274: - goto st_case_274 case 32: goto st_case_32 case 33: goto st_case_33 - case 275: - goto st_case_275 - case 276: - goto st_case_276 - case 277: - goto st_case_277 case 34: goto st_case_34 - case 278: - goto st_case_278 - case 279: - goto st_case_279 - case 280: - goto st_case_280 - case 281: - goto st_case_281 - case 282: - goto st_case_282 - case 283: - goto st_case_283 - case 284: - goto st_case_284 - case 285: - goto st_case_285 - case 286: - goto st_case_286 - case 287: - goto st_case_287 - case 288: - goto st_case_288 - case 289: - goto st_case_289 - case 290: - goto st_case_290 - case 291: - goto st_case_291 - case 292: - goto st_case_292 - case 293: - goto st_case_293 - case 294: - goto st_case_294 - case 295: - goto st_case_295 - case 35: - goto st_case_35 - case 36: - goto st_case_36 - case 296: - goto st_case_296 - case 297: - goto st_case_297 - case 298: - goto st_case_298 + case 84: + goto st_case_84 case 37: goto st_case_37 + case 86: + goto st_case_86 + case 87: + goto st_case_87 case 38: goto st_case_38 case 39: @@ -1812,1384 +522,56 @@ _resume: goto st_case_40 case 41: goto st_case_41 - case 299: - goto st_case_299 - case 300: - goto st_case_300 - case 301: - goto st_case_301 - case 302: - goto st_case_302 + case 88: + goto st_case_88 case 42: goto st_case_42 - case 303: - goto st_case_303 - case 304: - goto st_case_304 - case 305: - goto st_case_305 - case 306: - goto st_case_306 - case 307: - goto st_case_307 - case 308: - goto st_case_308 - case 309: - goto st_case_309 - case 310: - goto st_case_310 - case 311: - goto st_case_311 - case 312: - goto st_case_312 - case 313: - goto st_case_313 - case 314: - goto st_case_314 - case 315: - goto st_case_315 - case 316: - goto st_case_316 - case 317: - goto st_case_317 - case 318: - goto st_case_318 - case 319: - goto st_case_319 - case 320: - goto st_case_320 - case 321: - goto st_case_321 - case 322: - goto st_case_322 - case 323: - goto st_case_323 - case 324: - goto st_case_324 + case 89: + goto st_case_89 case 43: goto st_case_43 case 44: goto st_case_44 case 45: goto st_case_45 - case 46: - goto st_case_46 - case 47: - goto st_case_47 - case 48: - goto st_case_48 - case 49: - goto st_case_49 - case 50: - goto st_case_50 - case 51: - goto st_case_51 - case 52: - goto st_case_52 - case 325: - goto st_case_325 - case 326: - goto st_case_326 - case 327: - goto st_case_327 - case 53: - goto st_case_53 - case 54: - goto st_case_54 - case 55: - goto st_case_55 - case 56: - goto st_case_56 - case 57: - goto st_case_57 - case 58: - goto st_case_58 - case 328: - goto st_case_328 - case 329: - goto st_case_329 - case 59: - goto st_case_59 - case 330: - goto st_case_330 - case 331: - goto st_case_331 - case 332: - goto st_case_332 - case 333: - goto st_case_333 - case 334: - goto st_case_334 - case 335: - goto st_case_335 - case 336: - goto st_case_336 - case 337: - goto st_case_337 - case 338: - goto st_case_338 - case 339: - goto st_case_339 - case 340: - goto st_case_340 - case 341: - goto st_case_341 - case 342: - goto st_case_342 - case 343: - goto st_case_343 - case 344: - goto st_case_344 - case 345: - goto st_case_345 - case 346: - goto st_case_346 - case 347: - goto st_case_347 - case 348: - goto st_case_348 - case 349: - goto st_case_349 - case 60: - goto st_case_60 - case 350: - goto st_case_350 - case 351: - goto st_case_351 - case 352: - goto st_case_352 - case 61: - goto st_case_61 - case 353: - goto st_case_353 - case 354: - goto st_case_354 - case 355: - goto st_case_355 - case 356: - goto st_case_356 - case 357: - goto st_case_357 - case 358: - goto st_case_358 - case 359: - goto st_case_359 - case 360: - goto st_case_360 - case 361: - goto st_case_361 - case 362: - goto st_case_362 - case 363: - goto st_case_363 - case 364: - goto st_case_364 - case 365: - goto st_case_365 - case 366: - goto st_case_366 - case 367: - goto st_case_367 - case 368: - goto st_case_368 - case 369: - goto st_case_369 - case 370: - goto st_case_370 - case 371: - goto st_case_371 - case 372: - goto st_case_372 - case 62: - goto st_case_62 - case 63: - goto st_case_63 - case 64: - goto st_case_64 - case 65: - goto st_case_65 - case 66: - goto st_case_66 - case 373: - goto st_case_373 - case 67: - goto st_case_67 - case 68: - goto st_case_68 - case 69: - goto st_case_69 - case 70: - goto st_case_70 - case 71: - goto st_case_71 - case 374: - goto st_case_374 - case 375: - goto st_case_375 - case 376: - goto st_case_376 - case 72: - goto st_case_72 - case 73: - goto st_case_73 - case 74: - goto st_case_74 - case 377: - goto st_case_377 - case 378: - goto st_case_378 - case 379: - goto st_case_379 - case 75: - goto st_case_75 - case 380: - goto st_case_380 - case 381: - goto st_case_381 - case 382: - goto st_case_382 - case 383: - goto st_case_383 - case 384: - goto st_case_384 - case 385: - goto st_case_385 - case 386: - goto st_case_386 - case 387: - goto st_case_387 - case 388: - goto st_case_388 - case 389: - goto st_case_389 - case 390: - goto st_case_390 - case 391: - goto st_case_391 - case 392: - goto st_case_392 - case 393: - goto st_case_393 - case 394: - goto st_case_394 - case 395: - goto st_case_395 - case 396: - goto st_case_396 - case 397: - goto st_case_397 - case 398: - goto st_case_398 - case 399: - goto st_case_399 - case 76: - goto st_case_76 - case 77: - goto st_case_77 - case 78: - goto st_case_78 - case 79: - goto st_case_79 - case 80: - goto st_case_80 - case 81: - goto st_case_81 - case 82: - goto st_case_82 - case 83: - goto st_case_83 - case 84: - goto st_case_84 case 85: goto st_case_85 - case 86: - goto st_case_86 - case 87: - goto st_case_87 - case 88: - goto st_case_88 - case 89: - goto st_case_89 - case 400: - goto st_case_400 - case 401: - goto st_case_401 - case 402: - goto st_case_402 - case 403: - goto st_case_403 - case 90: - goto st_case_90 - case 91: - goto st_case_91 - case 92: - goto st_case_92 - case 93: - goto st_case_93 - case 404: - goto st_case_404 - case 405: - goto st_case_405 - case 94: - goto st_case_94 - case 95: - goto st_case_95 - case 406: - goto st_case_406 - case 96: - goto st_case_96 - case 97: - goto st_case_97 - case 407: - goto st_case_407 - case 408: - goto st_case_408 - case 98: - goto st_case_98 - case 409: - goto st_case_409 - case 410: - goto st_case_410 - case 99: - goto st_case_99 - case 100: - goto st_case_100 - case 411: - goto st_case_411 - case 412: - goto st_case_412 - case 413: - goto st_case_413 - case 414: - goto st_case_414 - case 415: - goto st_case_415 - case 416: - goto st_case_416 - case 417: - goto st_case_417 - case 418: - goto st_case_418 - case 419: - goto st_case_419 - case 420: - goto st_case_420 - case 421: - goto st_case_421 - case 422: - goto st_case_422 - case 423: - goto st_case_423 - case 424: - goto st_case_424 - case 425: - goto st_case_425 - case 426: - goto st_case_426 - case 427: - goto st_case_427 - case 428: - goto st_case_428 - case 101: - goto st_case_101 - case 429: - goto st_case_429 - case 430: - goto st_case_430 - case 431: - goto st_case_431 - case 102: - goto st_case_102 - case 103: - goto st_case_103 - case 432: - goto st_case_432 - case 433: - goto st_case_433 - case 434: - goto st_case_434 - case 104: - goto st_case_104 - case 435: - goto st_case_435 - case 436: - goto st_case_436 - case 437: - goto st_case_437 - case 438: - goto st_case_438 - case 439: - goto st_case_439 - case 440: - goto st_case_440 - case 441: - goto st_case_441 - case 442: - goto st_case_442 - case 443: - goto st_case_443 - case 444: - goto st_case_444 - case 445: - goto st_case_445 - case 446: - goto st_case_446 - case 447: - goto st_case_447 - case 448: - goto st_case_448 - case 449: - goto st_case_449 - case 450: - goto st_case_450 - case 451: - goto st_case_451 - case 452: - goto st_case_452 - case 453: - goto st_case_453 - case 454: - goto st_case_454 - case 105: - goto st_case_105 - case 455: - goto st_case_455 - case 456: - goto st_case_456 - case 457: - goto st_case_457 - case 458: - goto st_case_458 - case 459: - goto st_case_459 - case 460: - goto st_case_460 - case 461: - goto st_case_461 - case 462: - goto st_case_462 - case 463: - goto st_case_463 - case 464: - goto st_case_464 - case 465: - goto st_case_465 - case 466: - goto st_case_466 - case 467: - goto st_case_467 - case 468: - goto st_case_468 - case 469: - goto st_case_469 - case 470: - goto st_case_470 - case 471: - goto st_case_471 - case 472: - goto st_case_472 - case 473: - goto st_case_473 - case 474: - goto st_case_474 - case 475: - goto st_case_475 - case 476: - goto st_case_476 - case 106: - goto st_case_106 - case 107: - goto st_case_107 - case 108: - goto st_case_108 - case 109: - goto st_case_109 - case 110: - goto st_case_110 - case 477: - goto st_case_477 - case 111: - goto st_case_111 - case 478: - goto st_case_478 - case 479: - goto st_case_479 - case 112: - goto st_case_112 - case 480: - goto st_case_480 - case 481: - goto st_case_481 - case 482: - goto st_case_482 - case 483: - goto st_case_483 - case 484: - goto st_case_484 - case 485: - goto st_case_485 - case 486: - goto st_case_486 - case 487: - goto st_case_487 - case 488: - goto st_case_488 - case 113: - goto st_case_113 - case 114: - goto st_case_114 - case 115: - goto st_case_115 - case 489: - goto st_case_489 - case 116: - goto st_case_116 - case 117: - goto st_case_117 - case 118: - goto st_case_118 - case 490: - goto st_case_490 - case 119: - goto st_case_119 - case 120: - goto st_case_120 - case 491: - goto st_case_491 - case 492: - goto st_case_492 - case 121: - goto st_case_121 - case 122: - goto st_case_122 - case 123: - goto st_case_123 - case 124: - goto st_case_124 - case 493: - goto st_case_493 - case 494: - goto st_case_494 - case 495: - goto st_case_495 - case 125: - goto st_case_125 - case 496: - goto st_case_496 - case 497: - goto st_case_497 - case 498: - goto st_case_498 - case 499: - goto st_case_499 - case 500: - goto st_case_500 - case 501: - goto st_case_501 - case 502: - goto st_case_502 - case 503: - goto st_case_503 - case 504: - goto st_case_504 - case 505: - goto st_case_505 - case 506: - goto st_case_506 - case 507: - goto st_case_507 - case 508: - goto st_case_508 - case 509: - goto st_case_509 - case 510: - goto st_case_510 - case 511: - goto st_case_511 - case 512: - goto st_case_512 - case 513: - goto st_case_513 - case 514: - goto st_case_514 - case 515: - goto st_case_515 - case 126: - goto st_case_126 - case 127: - goto st_case_127 - case 516: - goto st_case_516 - case 517: - goto st_case_517 - case 518: - goto st_case_518 - case 519: - goto st_case_519 - case 520: - goto st_case_520 - case 521: - goto st_case_521 - case 522: - goto st_case_522 - case 523: - goto st_case_523 - case 524: - goto st_case_524 - case 128: - goto st_case_128 - case 129: - goto st_case_129 - case 130: - goto st_case_130 - case 525: - goto st_case_525 - case 131: - goto st_case_131 - case 132: - goto st_case_132 - case 133: - goto st_case_133 - case 526: - goto st_case_526 - case 134: - goto st_case_134 - case 135: - goto st_case_135 - case 527: - goto st_case_527 - case 528: - goto st_case_528 - case 136: - goto st_case_136 - case 137: - goto st_case_137 - case 138: - goto st_case_138 - case 529: - goto st_case_529 - case 530: - goto st_case_530 - case 139: - goto st_case_139 - case 531: - goto st_case_531 - case 140: - goto st_case_140 - case 532: - goto st_case_532 - case 533: - goto st_case_533 - case 534: - goto st_case_534 - case 535: - goto st_case_535 - case 536: - goto st_case_536 - case 537: - goto st_case_537 - case 538: - goto st_case_538 - case 539: - goto st_case_539 - case 141: - goto st_case_141 - case 142: - goto st_case_142 - case 143: - goto st_case_143 - case 540: - goto st_case_540 - case 144: - goto st_case_144 - case 145: - goto st_case_145 - case 146: - goto st_case_146 - case 541: - goto st_case_541 - case 147: - goto st_case_147 - case 148: - goto st_case_148 - case 542: - goto st_case_542 - case 543: - goto st_case_543 - case 544: - goto st_case_544 - case 545: - goto st_case_545 - case 546: - goto st_case_546 - case 547: - goto st_case_547 - case 548: - goto st_case_548 - case 549: - goto st_case_549 - case 550: - goto st_case_550 - case 551: - goto st_case_551 - case 552: - goto st_case_552 - case 553: - goto st_case_553 - case 554: - goto st_case_554 - case 555: - goto st_case_555 - case 556: - goto st_case_556 - case 557: - goto st_case_557 - case 558: - goto st_case_558 - case 559: - goto st_case_559 - case 560: - goto st_case_560 - case 561: - goto st_case_561 - case 149: - goto st_case_149 - case 150: - goto st_case_150 - case 562: - goto st_case_562 - case 563: - goto st_case_563 - case 564: - goto st_case_564 - case 151: - goto st_case_151 - case 565: - goto st_case_565 - case 566: - goto st_case_566 - case 152: - goto st_case_152 - case 567: - goto st_case_567 - case 568: - goto st_case_568 - case 569: - goto st_case_569 - case 570: - goto st_case_570 - case 571: - goto st_case_571 - case 572: - goto st_case_572 - case 573: - goto st_case_573 - case 574: - goto st_case_574 - case 575: - goto st_case_575 - case 576: - goto st_case_576 - case 577: - goto st_case_577 - case 578: - goto st_case_578 - case 579: - goto st_case_579 - case 580: - goto st_case_580 - case 581: - goto st_case_581 - case 582: - goto st_case_582 - case 583: - goto st_case_583 - case 584: - goto st_case_584 - case 153: - goto st_case_153 - case 154: - goto st_case_154 - case 585: - goto st_case_585 - case 155: - goto st_case_155 - case 586: - goto st_case_586 - case 587: - goto st_case_587 - case 588: - goto st_case_588 - case 589: - goto st_case_589 - case 590: - goto st_case_590 - case 591: - goto st_case_591 - case 592: - goto st_case_592 - case 593: - goto st_case_593 - case 156: - goto st_case_156 - case 157: - goto st_case_157 - case 158: - goto st_case_158 - case 594: - goto st_case_594 - case 159: - goto st_case_159 - case 160: - goto st_case_160 - case 161: - goto st_case_161 - case 595: - goto st_case_595 - case 162: - goto st_case_162 - case 163: - goto st_case_163 - case 596: - goto st_case_596 - case 597: - goto st_case_597 - case 164: - goto st_case_164 - case 165: - goto st_case_165 - case 166: - goto st_case_166 - case 167: - goto st_case_167 - case 168: - goto st_case_168 - case 169: - goto st_case_169 - case 598: - goto st_case_598 - case 599: - goto st_case_599 - case 600: - goto st_case_600 - case 601: - goto st_case_601 - case 602: - goto st_case_602 - case 603: - goto st_case_603 - case 604: - goto st_case_604 - case 605: - goto st_case_605 - case 606: - goto st_case_606 - case 607: - goto st_case_607 - case 608: - goto st_case_608 - case 609: - goto st_case_609 - case 610: - goto st_case_610 - case 611: - goto st_case_611 - case 612: - goto st_case_612 - case 613: - goto st_case_613 - case 614: - goto st_case_614 - case 615: - goto st_case_615 - case 616: - goto st_case_616 - case 170: - goto st_case_170 - case 171: - goto st_case_171 - case 172: - goto st_case_172 - case 617: - goto st_case_617 - case 618: - goto st_case_618 - case 619: - goto st_case_619 - case 173: - goto st_case_173 - case 620: - goto st_case_620 - case 621: - goto st_case_621 - case 174: - goto st_case_174 - case 622: - goto st_case_622 - case 623: - goto st_case_623 - case 624: - goto st_case_624 - case 625: - goto st_case_625 - case 626: - goto st_case_626 - case 175: - goto st_case_175 - case 176: - goto st_case_176 - case 177: - goto st_case_177 - case 627: - goto st_case_627 - case 178: - goto st_case_178 - case 179: - goto st_case_179 - case 180: - goto st_case_180 - case 628: - goto st_case_628 - case 181: - goto st_case_181 - case 182: - goto st_case_182 - case 629: - goto st_case_629 - case 630: - goto st_case_630 - case 183: - goto st_case_183 - case 631: - goto st_case_631 - case 632: - goto st_case_632 - case 633: - goto st_case_633 - case 184: - goto st_case_184 - case 185: - goto st_case_185 - case 186: - goto st_case_186 - case 634: - goto st_case_634 - case 187: - goto st_case_187 - case 188: - goto st_case_188 - case 189: - goto st_case_189 - case 635: - goto st_case_635 - case 190: - goto st_case_190 - case 191: - goto st_case_191 - case 636: - goto st_case_636 - case 637: - goto st_case_637 - case 192: - goto st_case_192 - case 193: - goto st_case_193 - case 194: - goto st_case_194 - case 638: - goto st_case_638 - case 195: - goto st_case_195 - case 196: - goto st_case_196 - case 639: - goto st_case_639 - case 640: - goto st_case_640 - case 641: - goto st_case_641 - case 642: - goto st_case_642 - case 643: - goto st_case_643 - case 644: - goto st_case_644 - case 645: - goto st_case_645 - case 646: - goto st_case_646 - case 197: - goto st_case_197 - case 198: - goto st_case_198 - case 199: - goto st_case_199 - case 647: - goto st_case_647 - case 200: - goto st_case_200 - case 201: - goto st_case_201 - case 202: - goto st_case_202 - case 648: - goto st_case_648 - case 203: - goto st_case_203 - case 204: - goto st_case_204 - case 649: - goto st_case_649 - case 650: - goto st_case_650 - case 205: - goto st_case_205 - case 206: - goto st_case_206 - case 207: - goto st_case_207 - case 651: - goto st_case_651 - case 652: - goto st_case_652 - case 653: - goto st_case_653 - case 654: - goto st_case_654 - case 655: - goto st_case_655 - case 656: - goto st_case_656 - case 657: - goto st_case_657 - case 658: - goto st_case_658 - case 659: - goto st_case_659 - case 660: - goto st_case_660 - case 661: - goto st_case_661 - case 662: - goto st_case_662 - case 663: - goto st_case_663 - case 664: - goto st_case_664 - case 665: - goto st_case_665 - case 666: - goto st_case_666 - case 667: - goto st_case_667 - case 668: - goto st_case_668 - case 669: - goto st_case_669 - case 208: - goto st_case_208 - case 209: - goto st_case_209 - case 210: - goto st_case_210 - case 211: - goto st_case_211 - case 212: - goto st_case_212 - case 670: - goto st_case_670 - case 213: - goto st_case_213 - case 214: - goto st_case_214 - case 671: - goto st_case_671 - case 672: - goto st_case_672 - case 673: - goto st_case_673 - case 674: - goto st_case_674 - case 675: - goto st_case_675 - case 676: - goto st_case_676 - case 677: - goto st_case_677 - case 678: - goto st_case_678 - case 679: - goto st_case_679 - case 215: - goto st_case_215 - case 216: - goto st_case_216 - case 217: - goto st_case_217 - case 680: - goto st_case_680 - case 218: - goto st_case_218 - case 219: - goto st_case_219 - case 220: - goto st_case_220 - case 681: - goto st_case_681 - case 221: - goto st_case_221 - case 222: - goto st_case_222 - case 682: - goto st_case_682 - case 683: - goto st_case_683 - case 223: - goto st_case_223 - case 224: - goto st_case_224 - case 225: - goto st_case_225 - case 684: - goto st_case_684 - case 226: - goto st_case_226 - case 227: - goto st_case_227 - case 685: - goto st_case_685 - case 686: - goto st_case_686 - case 687: - goto st_case_687 - case 688: - goto st_case_688 - case 689: - goto st_case_689 - case 690: - goto st_case_690 - case 691: - goto st_case_691 - case 692: - goto st_case_692 - case 228: - goto st_case_228 - case 229: - goto st_case_229 - case 230: - goto st_case_230 - case 693: - goto st_case_693 - case 231: - goto st_case_231 - case 232: - goto st_case_232 - case 694: - goto st_case_694 - case 695: - goto st_case_695 - case 696: - goto st_case_696 - case 697: - goto st_case_697 - case 698: - goto st_case_698 - case 699: - goto st_case_699 - case 700: - goto st_case_700 - case 701: - goto st_case_701 - case 233: - goto st_case_233 - case 234: - goto st_case_234 - case 235: - goto st_case_235 - case 702: - goto st_case_702 - case 236: - goto st_case_236 - case 237: - goto st_case_237 - case 238: - goto st_case_238 - case 703: - goto st_case_703 - case 239: - goto st_case_239 - case 240: - goto st_case_240 - case 704: - goto st_case_704 - case 705: - goto st_case_705 - case 241: - goto st_case_241 - case 242: - goto st_case_242 - case 243: - goto st_case_243 - case 706: - goto st_case_706 - case 707: - goto st_case_707 - case 708: - goto st_case_708 - case 709: - goto st_case_709 - case 710: - goto st_case_710 - case 711: - goto st_case_711 - case 712: - goto st_case_712 - case 713: - goto st_case_713 - case 714: - goto st_case_714 - case 715: - goto st_case_715 - case 716: - goto st_case_716 - case 717: - goto st_case_717 - case 718: - goto st_case_718 - case 719: - goto st_case_719 - case 720: - goto st_case_720 - case 721: - goto st_case_721 - case 722: - goto st_case_722 - case 723: - goto st_case_723 - case 724: - goto st_case_724 - case 244: - goto st_case_244 - case 245: - goto st_case_245 - case 725: - goto st_case_725 - case 246: - goto st_case_246 - case 247: - goto st_case_247 - case 726: - goto st_case_726 - case 727: - goto st_case_727 - case 728: - goto st_case_728 - case 729: - goto st_case_729 - case 730: - goto st_case_730 - case 731: - goto st_case_731 - case 732: - goto st_case_732 - case 733: - goto st_case_733 - case 248: - goto st_case_248 - case 249: - goto st_case_249 - case 250: - goto st_case_250 - case 734: - goto st_case_734 - case 251: - goto st_case_251 - case 252: - goto st_case_252 - case 253: - goto st_case_253 - case 735: - goto st_case_735 - case 254: - goto st_case_254 - case 255: - goto st_case_255 - case 736: - goto st_case_736 - case 737: - goto st_case_737 - case 256: - goto st_case_256 - case 257: - goto st_case_257 - case 738: - goto st_case_738 - case 260: - goto st_case_260 - case 740: - goto st_case_740 - case 741: - goto st_case_741 - case 261: - goto st_case_261 - case 262: - goto st_case_262 - case 263: - goto st_case_263 - case 264: - goto st_case_264 - case 742: - goto st_case_742 - case 265: - goto st_case_265 - case 743: - goto st_case_743 - case 266: - goto st_case_266 - case 267: - goto st_case_267 - case 268: - goto st_case_268 - case 739: - goto st_case_739 - case 258: - goto st_case_258 - case 259: - goto st_case_259 + case 35: + goto st_case_35 + case 36: + goto st_case_36 } goto st_out - st269: + st46: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof269 + goto _test_eof46 } - st_case_269: + st_case_46: switch ( m.data)[( m.p)] { case 10: - goto tr33 - case 11: - goto tr457 + goto tr31 case 13: - goto tr33 + goto tr31 case 32: - goto tr456 + goto tr80 case 35: - goto tr33 + goto tr31 case 44: - goto tr33 + goto tr31 case 92: - goto tr458 + goto tr81 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr456 + goto tr80 } - goto tr455 -tr31: + goto tr79 +tr29: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto st1 -tr455: +tr79: //line plugins/parsers/influx/machine.go.rl:82 m.beginMetric = true @@ -3204,20 +586,18 @@ tr455: goto _test_eof1 } st_case_1: -//line plugins/parsers/influx/machine.go:3208 +//line plugins/parsers/influx/machine.go:590 switch ( m.data)[( m.p)] { case 10: goto tr2 - case 11: - goto tr3 case 13: goto tr2 case 32: goto tr1 case 44: - goto tr4 + goto tr3 case 92: - goto st94 + goto st8 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 @@ -3231,12 +611,12 @@ tr1: if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again -tr58: +tr56: ( m.cs) = 2 //line plugins/parsers/influx/machine.go.rl:99 @@ -3244,7 +624,7 @@ tr58: if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } @@ -3254,28 +634,26 @@ tr58: goto _test_eof2 } st_case_2: -//line plugins/parsers/influx/machine.go:3258 +//line plugins/parsers/influx/machine.go:638 switch ( m.data)[( m.p)] { case 10: - goto tr8 - case 11: - goto tr9 + goto tr7 case 13: - goto tr8 + goto tr7 case 32: goto st2 case 44: - goto tr8 + goto tr7 case 61: - goto tr8 + goto tr7 case 92: - goto tr10 + goto tr8 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto st2 } - goto tr6 -tr6: + goto tr5 +tr5: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -3286,24 +664,19 @@ tr6: goto _test_eof3 } st_case_3: -//line plugins/parsers/influx/machine.go:3290 +//line plugins/parsers/influx/machine.go:668 switch ( m.data)[( m.p)] { case 32: - goto tr8 + goto tr7 case 44: - goto tr8 + goto tr7 case 61: - goto tr12 + goto tr10 case 92: - goto st34 + goto st12 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr7 } goto st3 tr2: @@ -3313,206 +686,76 @@ tr2: err = ErrTagParse ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } goto _again -tr8: +tr7: ( m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr33: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:32 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr37: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:32 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } goto _again -tr41: +tr31: ( m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:32 err = ErrNameParse ( m.p)-- - ( m.cs) = 257; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr45: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr103: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - - goto _again -tr130: - ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } goto _again -tr196: +tr35: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - //line plugins/parsers/influx/machine.go.rl:53 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } goto _again -tr421: +tr82: ( m.cs) = 0 -//line plugins/parsers/influx/machine.go.rl:32 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - //line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } - goto _again -tr424: - ( m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:53 err = ErrTimestampParse ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } goto _again -tr1053: +tr135: //line plugins/parsers/influx/machine.go.rl:73 ( m.p)-- - {goto st269 } + {goto st46 } goto st0 -//line plugins/parsers/influx/machine.go:3511 +//line plugins/parsers/influx/machine.go:754 st_case_0: st0: ( m.cs) = 0 goto _out -tr12: +tr10: //line plugins/parsers/influx/machine.go.rl:108 m.key = m.text() @@ -3523,29 +766,29 @@ tr12: goto _test_eof4 } st_case_4: -//line plugins/parsers/influx/machine.go:3527 +//line plugins/parsers/influx/machine.go:770 switch ( m.data)[( m.p)] { case 34: goto st5 case 45: - goto tr15 + goto tr13 case 46: - goto tr16 + goto tr14 case 48: - goto tr17 + goto tr15 case 70: - goto tr19 + goto tr17 case 84: - goto tr20 + goto tr18 case 102: - goto tr21 + goto tr19 case 116: - goto tr22 + goto tr20 } if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr18 + goto tr16 } - goto tr8 + goto tr7 st5: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof5 @@ -3553,20 +796,20 @@ tr12: st_case_5: switch ( m.data)[( m.p)] { case 10: - goto tr24 + goto tr22 case 34: - goto tr25 + goto tr23 case 92: - goto tr26 + goto tr24 } - goto tr23 -tr23: + goto tr21 +tr21: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto st6 -tr24: +tr22: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -3578,7 +821,7 @@ tr24: m.sol++ // next char will be the first column in the line goto st6 -tr28: +tr26: //line plugins/parsers/influx/machine.go.rl:166 m.lineno++ @@ -3591,18 +834,18 @@ tr28: goto _test_eof6 } st_case_6: -//line plugins/parsers/influx/machine.go:3595 +//line plugins/parsers/influx/machine.go:838 switch ( m.data)[( m.p)] { case 10: - goto tr28 + goto tr26 case 34: - goto tr29 + goto tr27 case 92: - goto st73 + goto st13 } goto st6 -tr25: - ( m.cs) = 270 +tr23: + ( m.cs) = 47 //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -3613,138 +856,138 @@ tr25: if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again -tr29: - ( m.cs) = 270 +tr27: + ( m.cs) = 47 //line plugins/parsers/influx/machine.go.rl:148 err = m.handler.AddString(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again - st270: + st47: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof270 + goto _test_eof47 } - st_case_270: -//line plugins/parsers/influx/machine.go:3640 + st_case_47: +//line plugins/parsers/influx/machine.go:883 switch ( m.data)[( m.p)] { case 10: - goto tr101 + goto tr34 case 13: - goto st32 + goto st9 case 32: - goto st271 + goto st48 case 44: - goto st35 + goto st11 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st271 + goto st48 } - goto tr103 -tr921: - ( m.cs) = 271 + goto tr82 +tr110: + ( m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again -tr1041: - ( m.cs) = 271 +tr117: + ( m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again -tr1044: - ( m.cs) = 271 +tr122: + ( m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again -tr1047: - ( m.cs) = 271 +tr127: + ( m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again - st271: + st48: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof271 + goto _test_eof48 } - st_case_271: -//line plugins/parsers/influx/machine.go:3712 + st_case_48: +//line plugins/parsers/influx/machine.go:955 switch ( m.data)[( m.p)] { case 10: - goto tr101 + goto tr34 case 13: - goto st32 + goto st9 case 32: - goto st271 + goto st48 case 45: - goto tr462 + goto tr86 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr463 + goto tr87 } case ( m.data)[( m.p)] >= 9: - goto st271 + goto st48 } - goto tr424 -tr101: + goto tr35 +tr34: //line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line - goto st272 -tr468: - ( m.cs) = 272 + goto st49 +tr89: + ( m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } @@ -3755,15 +998,15 @@ tr468: m.sol++ // next char will be the first column in the line goto _again -tr730: - ( m.cs) = 272 +tr111: + ( m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } @@ -3774,15 +1017,15 @@ tr730: m.sol++ // next char will be the first column in the line goto _again -tr942: - ( m.cs) = 272 +tr118: + ( m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } @@ -3793,15 +1036,15 @@ tr942: m.sol++ // next char will be the first column in the line goto _again -tr948: - ( m.cs) = 272 +tr123: + ( m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } @@ -3812,15 +1055,15 @@ tr948: m.sol++ // next char will be the first column in the line goto _again -tr954: - ( m.cs) = 272 +tr128: + ( m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } @@ -3831,39 +1074,37 @@ tr954: m.sol++ // next char will be the first column in the line goto _again - st272: + st49: //line plugins/parsers/influx/machine.go.rl:172 m.finishMetric = true - ( m.cs) = 739; + ( m.cs) = 85; {( m.p)++; goto _out } if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof272 + goto _test_eof49 } - st_case_272: -//line plugins/parsers/influx/machine.go:3846 + st_case_49: +//line plugins/parsers/influx/machine.go:1089 switch ( m.data)[( m.p)] { case 10: - goto tr33 - case 11: - goto tr34 + goto tr31 case 13: - goto tr33 + goto tr31 case 32: goto st7 case 35: - goto tr33 + goto tr31 case 44: - goto tr33 + goto tr31 case 92: - goto tr35 + goto tr32 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto st7 } - goto tr31 -tr456: + goto tr29 +tr80: //line plugins/parsers/influx/machine.go.rl:82 m.beginMetric = true @@ -3874,34 +1115,32 @@ tr456: goto _test_eof7 } st_case_7: -//line plugins/parsers/influx/machine.go:3878 +//line plugins/parsers/influx/machine.go:1119 switch ( m.data)[( m.p)] { case 10: - goto tr33 - case 11: - goto tr34 + goto tr31 case 13: - goto tr33 + goto tr31 case 32: goto st7 case 35: - goto tr33 + goto tr31 case 44: - goto tr33 + goto tr31 case 92: - goto tr35 + goto tr32 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto st7 } - goto tr31 -tr34: + goto tr29 +tr32: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto st8 -tr457: +tr81: //line plugins/parsers/influx/machine.go.rl:82 m.beginMetric = true @@ -3916,2013 +1155,1528 @@ tr457: goto _test_eof8 } st_case_8: -//line plugins/parsers/influx/machine.go:3920 - switch ( m.data)[( m.p)] { - case 10: - goto tr37 - case 11: - goto tr38 - case 13: - goto tr37 - case 32: - goto tr36 - case 35: - goto st1 - case 44: - goto tr4 - case 92: - goto tr35 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr36 +//line plugins/parsers/influx/machine.go:1159 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st0 } - goto tr31 -tr36: + goto st1 +tr90: ( m.cs) = 9 -//line plugins/parsers/influx/machine.go.rl:86 +//line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetMeasurement(m.text()) + err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again - st9: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof9 - } - st_case_9: -//line plugins/parsers/influx/machine.go:3959 - switch ( m.data)[( m.p)] { - case 10: - goto tr41 - case 11: - goto tr42 - case 13: - goto tr41 - case 32: - goto st9 - case 35: - goto tr6 - case 44: - goto tr41 - case 61: - goto tr31 - case 92: - goto tr43 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st9 - } - goto tr39 -tr39: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st10 - st10: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof10 - } - st_case_10: -//line plugins/parsers/influx/machine.go:3993 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr46 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st10 -tr46: - ( m.cs) = 11 -//line plugins/parsers/influx/machine.go.rl:86 +tr112: + ( m.cs) = 9 +//line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.SetMeasurement(m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again -tr49: - ( m.cs) = 11 -//line plugins/parsers/influx/machine.go.rl:86 +tr119: + ( m.cs) = 9 +//line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.SetMeasurement(m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - goto _again - st11: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof11 - } - st_case_11: -//line plugins/parsers/influx/machine.go:4049 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr49 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto tr43 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto tr39 -tr4: - ( m.cs) = 12 -//line plugins/parsers/influx/machine.go.rl:86 +tr124: + ( m.cs) = 9 +//line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.SetMeasurement(m.text()) + err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again -tr60: - ( m.cs) = 12 -//line plugins/parsers/influx/machine.go.rl:99 +tr129: + ( m.cs) = 9 +//line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddTag(m.key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again - st12: + st9: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof12 - } - st_case_12: -//line plugins/parsers/influx/machine.go:4101 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr2 - case 92: - goto tr51 + goto _test_eof9 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 + st_case_9: +//line plugins/parsers/influx/machine.go:1234 + if ( m.data)[( m.p)] == 10 { + goto tr34 } - goto tr50 -tr50: + goto st0 +tr86: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st13 - st13: + goto st10 + st10: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof13 - } - st_case_13: -//line plugins/parsers/influx/machine.go:4132 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr53 - case 92: - goto st23 + goto _test_eof10 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 + st_case_10: +//line plugins/parsers/influx/machine.go:1250 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st50 } - goto st13 -tr53: -//line plugins/parsers/influx/machine.go.rl:95 + goto tr35 +tr87: +//line plugins/parsers/influx/machine.go.rl:28 - m.key = m.text() + m.pb = m.p - goto st14 - st14: + goto st50 + st50: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof14 + goto _test_eof50 } - st_case_14: -//line plugins/parsers/influx/machine.go:4163 + st_case_50: +//line plugins/parsers/influx/machine.go:1266 switch ( m.data)[( m.p)] { + case 10: + goto tr89 + case 13: + goto tr90 case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr2 - case 92: - goto tr56 + goto tr88 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st52 } case ( m.data)[( m.p)] >= 9: - goto tr2 + goto tr88 } - goto tr55 -tr55: -//line plugins/parsers/influx/machine.go.rl:28 + goto tr35 +tr88: + ( m.cs) = 51 +//line plugins/parsers/influx/machine.go.rl:157 - m.pb = m.p + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- - goto st15 - st15: + ( m.cs) = 34; + {( m.p)++; goto _out } + } + + goto _again + st51: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof15 + goto _test_eof51 } - st_case_15: -//line plugins/parsers/influx/machine.go:4194 + st_case_51: +//line plugins/parsers/influx/machine.go:1302 switch ( m.data)[( m.p)] { case 10: - goto tr2 - case 11: - goto tr59 + goto tr34 case 13: - goto tr2 + goto st9 case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr2 - case 92: - goto st21 + goto st51 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 + goto st51 } - goto st15 -tr59: - ( m.cs) = 16 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st16: + goto st0 + st52: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof16 + goto _test_eof52 } - st_case_16: -//line plugins/parsers/influx/machine.go:4233 + st_case_52: switch ( m.data)[( m.p)] { case 10: - goto tr45 - case 11: - goto tr63 + goto tr89 case 13: - goto tr45 + goto tr90 case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto tr64 + goto tr88 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st53 + } + case ( m.data)[( m.p)] >= 9: + goto tr88 } - goto tr62 -tr62: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st17 - st17: + goto tr35 + st53: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof17 + goto _test_eof53 } - st_case_17: -//line plugins/parsers/influx/machine.go:4265 + st_case_53: switch ( m.data)[( m.p)] { case 10: - goto tr45 - case 11: - goto tr66 + goto tr89 case 13: - goto tr45 + goto tr90 case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 + goto tr88 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st54 + } + case ( m.data)[( m.p)] >= 9: + goto tr88 } - goto st17 -tr66: - ( m.cs) = 18 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr63: - ( m.cs) = 18 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st18: + goto tr35 + st54: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof18 + goto _test_eof54 } - st_case_18: -//line plugins/parsers/influx/machine.go:4321 + st_case_54: switch ( m.data)[( m.p)] { case 10: - goto tr45 - case 11: - goto tr63 + goto tr89 case 13: - goto tr45 + goto tr90 case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto tr64 + goto tr88 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st55 + } + case ( m.data)[( m.p)] >= 9: + goto tr88 } - goto tr62 -tr64: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st19 - st19: + goto tr35 + st55: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof19 + goto _test_eof55 } - st_case_19: -//line plugins/parsers/influx/machine.go:4353 - if ( m.data)[( m.p)] == 92 { - goto st20 + st_case_55: + switch ( m.data)[( m.p)] { + case 10: + goto tr89 + case 13: + goto tr90 + case 32: + goto tr88 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st56 } case ( m.data)[( m.p)] >= 9: - goto tr45 + goto tr88 } - goto st17 - st20: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - + goto tr35 + st56: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof20 + goto _test_eof56 } - st_case_20: -//line plugins/parsers/influx/machine.go:4374 + st_case_56: switch ( m.data)[( m.p)] { case 10: - goto tr45 - case 11: - goto tr66 + goto tr89 case 13: - goto tr45 + goto tr90 case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 + goto tr88 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st57 + } + case ( m.data)[( m.p)] >= 9: + goto tr88 } - goto st17 -tr56: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st21 - st21: + goto tr35 + st57: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof21 + goto _test_eof57 } - st_case_21: -//line plugins/parsers/influx/machine.go:4406 - if ( m.data)[( m.p)] == 92 { - goto st22 + st_case_57: + switch ( m.data)[( m.p)] { + case 10: + goto tr89 + case 13: + goto tr90 + case 32: + goto tr88 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st58 } case ( m.data)[( m.p)] >= 9: - goto tr2 + goto tr88 } - goto st15 - st22: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - + goto tr35 + st58: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof22 + goto _test_eof58 } - st_case_22: -//line plugins/parsers/influx/machine.go:4427 + st_case_58: switch ( m.data)[( m.p)] { case 10: - goto tr2 - case 11: - goto tr59 + goto tr89 case 13: - goto tr2 + goto tr90 case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr2 - case 92: - goto st21 + goto tr88 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st59 + } + case ( m.data)[( m.p)] >= 9: + goto tr88 } - goto st15 -tr51: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st23 - st23: + goto tr35 + st59: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof23 + goto _test_eof59 } - st_case_23: -//line plugins/parsers/influx/machine.go:4459 - if ( m.data)[( m.p)] == 92 { - goto st24 + st_case_59: + switch ( m.data)[( m.p)] { + case 10: + goto tr89 + case 13: + goto tr90 + case 32: + goto tr88 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st60 } case ( m.data)[( m.p)] >= 9: - goto tr2 + goto tr88 } - goto st13 - st24: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - + goto tr35 + st60: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof24 + goto _test_eof60 } - st_case_24: -//line plugins/parsers/influx/machine.go:4480 + st_case_60: switch ( m.data)[( m.p)] { + case 10: + goto tr89 + case 13: + goto tr90 case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr53 - case 92: - goto st23 + goto tr88 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st61 } case ( m.data)[( m.p)] >= 9: - goto tr2 + goto tr88 } - goto st13 -tr47: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st25 -tr423: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st25 - st25: + goto tr35 + st61: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof25 + goto _test_eof61 } - st_case_25: -//line plugins/parsers/influx/machine.go:4521 + st_case_61: switch ( m.data)[( m.p)] { case 10: - goto tr45 - case 11: - goto tr3 + goto tr89 case 13: - goto tr45 + goto tr90 case 32: - goto tr1 - case 34: - goto st28 - case 44: - goto tr4 - case 45: - goto tr72 - case 46: - goto tr73 - case 48: - goto tr74 - case 70: - goto tr76 - case 84: - goto tr77 - case 92: - goto st94 - case 102: - goto tr78 - case 116: - goto tr79 + goto tr88 } switch { case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr75 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st62 } case ( m.data)[( m.p)] >= 9: - goto tr1 + goto tr88 } - goto st1 -tr3: - ( m.cs) = 26 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st26: + goto tr35 + st62: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof26 + goto _test_eof62 } - st_case_26: -//line plugins/parsers/influx/machine.go:4579 + st_case_62: switch ( m.data)[( m.p)] { case 10: - goto tr45 - case 11: - goto tr49 + goto tr89 case 13: - goto tr45 + goto tr90 case 32: - goto tr1 - case 44: - goto tr4 - case 61: - goto st1 - case 92: - goto tr43 + goto tr88 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st63 + } + case ( m.data)[( m.p)] >= 9: + goto tr88 } - goto tr39 -tr43: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st27 - st27: + goto tr35 + st63: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof27 + goto _test_eof63 + } + st_case_63: + switch ( m.data)[( m.p)] { + case 10: + goto tr89 + case 13: + goto tr90 + case 32: + goto tr88 } - st_case_27: -//line plugins/parsers/influx/machine.go:4611 switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st64 } case ( m.data)[( m.p)] >= 9: - goto tr8 + goto tr88 } - goto st10 - st28: + goto tr35 + st64: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof28 + goto _test_eof64 } - st_case_28: + st_case_64: switch ( m.data)[( m.p)] { case 10: - goto tr24 - case 11: - goto tr82 + goto tr89 case 13: - goto tr23 + goto tr90 case 32: - goto tr81 - case 34: - goto tr83 - case 44: - goto tr84 - case 92: - goto tr85 + goto tr88 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr81 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st65 + } + case ( m.data)[( m.p)] >= 9: + goto tr88 } - goto tr80 -tr80: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st29 - st29: + goto tr35 + st65: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof29 + goto _test_eof65 } - st_case_29: -//line plugins/parsers/influx/machine.go:4657 + st_case_65: switch ( m.data)[( m.p)] { case 10: - goto tr28 - case 11: + goto tr89 + case 13: + goto tr90 + case 32: + goto tr88 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st66 + } + case ( m.data)[( m.p)] >= 9: goto tr88 + } + goto tr35 + st66: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof66 + } + st_case_66: + switch ( m.data)[( m.p)] { + case 10: + goto tr89 case 13: - goto st6 + goto tr90 case 32: - goto tr87 - case 34: + goto tr88 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st67 + } + case ( m.data)[( m.p)] >= 9: + goto tr88 + } + goto tr35 + st67: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof67 + } + st_case_67: + switch ( m.data)[( m.p)] { + case 10: goto tr89 - case 44: + case 13: goto tr90 - case 92: - goto st140 + case 32: + goto tr88 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st68 + } + case ( m.data)[( m.p)] >= 9: + goto tr88 + } + goto tr35 + st68: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof68 + } + st_case_68: + switch ( m.data)[( m.p)] { + case 10: + goto tr89 + case 13: + goto tr90 + case 32: + goto tr88 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st69 + } + case ( m.data)[( m.p)] >= 9: + goto tr88 + } + goto tr35 + st69: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof69 + } + st_case_69: + switch ( m.data)[( m.p)] { + case 10: + goto tr89 + case 13: + goto tr90 + case 32: + goto tr88 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 + goto tr88 } - goto st29 -tr87: - ( m.cs) = 30 -//line plugins/parsers/influx/machine.go.rl:86 + goto tr35 +tr113: + ( m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.SetMeasurement(m.text()) + err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again -tr81: - ( m.cs) = 30 -//line plugins/parsers/influx/machine.go.rl:86 +tr120: + ( m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.SetMeasurement(m.text()) + err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:28 + goto _again +tr125: + ( m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:121 - m.pb = m.p + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 34; + {( m.p)++; goto _out } + } goto _again -tr229: - ( m.cs) = 30 -//line plugins/parsers/influx/machine.go.rl:99 +tr130: + ( m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddTag(m.key, m.text()) + err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again - st30: + st11: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof30 + goto _test_eof11 } - st_case_30: -//line plugins/parsers/influx/machine.go:4726 + st_case_11: +//line plugins/parsers/influx/machine.go:1763 switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr94 - case 13: - goto st6 case 32: - goto st30 - case 34: - goto tr95 + goto tr7 case 44: - goto st6 + goto tr7 case 61: - goto st6 + goto tr7 case 92: - goto tr96 + goto tr8 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st30 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr7 } - goto tr92 -tr92: + goto tr5 +tr8: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st31 - st31: + goto st12 + st12: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof31 + goto _test_eof12 } - st_case_31: -//line plugins/parsers/influx/machine.go:4760 + st_case_12: +//line plugins/parsers/influx/machine.go:1789 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr7 + } + goto st3 +tr24: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st13 + st13: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof13 + } + st_case_13: +//line plugins/parsers/influx/machine.go:1805 switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 case 34: - goto tr98 - case 44: goto st6 - case 61: - goto tr99 case 92: - goto st75 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { goto st6 } - goto st31 -tr95: - ( m.cs) = 273 + goto tr7 +tr13: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr98: - ( m.cs) = 273 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr384: - ( m.cs) = 273 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - + goto st14 + st14: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof14 + } + st_case_14: +//line plugins/parsers/influx/machine.go:1824 + switch ( m.data)[( m.p)] { + case 46: + goto st15 + case 48: + goto st72 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st75 + } + goto tr7 +tr14: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto _again - st273: + goto st15 + st15: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof273 - } - st_case_273: -//line plugins/parsers/influx/machine.go:4833 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st274 - case 13: - goto st32 - case 32: - goto st271 - case 44: - goto st35 - case 61: - goto tr12 - case 92: - goto st34 + goto _test_eof15 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st271 + st_case_15: +//line plugins/parsers/influx/machine.go:1846 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st70 } - goto st3 - st274: + goto tr7 + st70: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof274 + goto _test_eof70 } - st_case_274: + st_case_70: switch ( m.data)[( m.p)] { case 10: - goto tr101 - case 11: - goto st274 + goto tr111 case 13: - goto st32 + goto tr112 case 32: - goto st271 + goto tr110 case 44: - goto tr103 - case 45: - goto tr465 - case 61: - goto tr12 - case 92: - goto st34 + goto tr113 + case 69: + goto st16 + case 101: + goto st16 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr466 + goto st70 } case ( m.data)[( m.p)] >= 9: - goto st271 + goto tr110 } - goto st3 -tr470: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr732: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr944: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr950: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr956: - ( m.cs) = 32 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st32: + goto tr82 + st16: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof32 + goto _test_eof16 } - st_case_32: -//line plugins/parsers/influx/machine.go:4956 - if ( m.data)[( m.p)] == 10 { - goto tr101 + st_case_16: + switch ( m.data)[( m.p)] { + case 34: + goto st17 + case 43: + goto st17 + case 45: + goto st17 } - goto st0 -tr465: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st33 - st33: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st71 + } + goto tr7 + st17: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof33 + goto _test_eof17 } - st_case_33: -//line plugins/parsers/influx/machine.go:4972 + st_case_17: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st71 + } + goto tr7 + st71: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof71 + } + st_case_71: switch ( m.data)[( m.p)] { + case 10: + goto tr111 + case 13: + goto tr112 case 32: - goto tr103 + goto tr110 case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + goto tr113 } switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr103 - } - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st275 + goto st71 } - default: - goto tr103 + case ( m.data)[( m.p)] >= 9: + goto tr110 } - goto st3 -tr466: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st275 - st275: + goto tr82 + st72: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof275 + goto _test_eof72 } - st_case_275: -//line plugins/parsers/influx/machine.go:5007 + st_case_72: switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr111 case 13: - goto tr470 + goto tr112 case 32: - goto tr467 + goto tr110 case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + goto tr113 + case 46: + goto st70 + case 69: + goto st16 + case 101: + goto st16 + case 105: + goto st74 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st278 + goto st73 } case ( m.data)[( m.p)] >= 9: - goto tr467 + goto tr110 } - goto st3 -tr467: - ( m.cs) = 276 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st276: + goto tr82 + st73: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof276 + goto _test_eof73 } - st_case_276: -//line plugins/parsers/influx/machine.go:5051 + st_case_73: switch ( m.data)[( m.p)] { case 10: - goto tr101 + goto tr111 case 13: - goto st32 + goto tr112 case 32: - goto st276 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st276 + goto tr110 + case 44: + goto tr113 + case 46: + goto st70 + case 69: + goto st16 + case 101: + goto st16 } - goto st0 -tr469: - ( m.cs) = 277 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st277: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof277 - } - st_case_277: -//line plugins/parsers/influx/machine.go:5082 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st277 - case 13: - goto st32 - case 32: - goto st276 - case 44: - goto tr8 - case 61: - goto tr12 - case 92: - goto st34 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st276 - } - goto st3 -tr10: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st34 - st34: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof34 - } - st_case_34: -//line plugins/parsers/influx/machine.go:5114 switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st73 } case ( m.data)[( m.p)] >= 9: - goto tr8 + goto tr110 } - goto st3 - st278: + goto tr82 + st74: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof278 + goto _test_eof74 } - st_case_278: + st_case_74: switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr118 case 13: - goto tr470 + goto tr119 case 32: - goto tr467 + goto tr117 case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + goto tr120 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st279 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr117 } - goto st3 - st279: + goto tr82 + st75: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof279 + goto _test_eof75 } - st_case_279: + st_case_75: switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr111 case 13: - goto tr470 + goto tr112 case 32: - goto tr467 + goto tr110 case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + goto tr113 + case 46: + goto st70 + case 69: + goto st16 + case 101: + goto st16 + case 105: + goto st74 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st280 + goto st75 } case ( m.data)[( m.p)] >= 9: - goto tr467 + goto tr110 } - goto st3 - st280: + goto tr82 +tr15: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st76 + st76: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof280 + goto _test_eof76 } - st_case_280: + st_case_76: +//line plugins/parsers/influx/machine.go:2053 switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr111 case 13: - goto tr470 + goto tr112 case 32: - goto tr467 + goto tr110 case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + goto tr113 + case 46: + goto st70 + case 69: + goto st16 + case 101: + goto st16 + case 105: + goto st74 + case 117: + goto st77 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st281 + goto st73 } case ( m.data)[( m.p)] >= 9: - goto tr467 + goto tr110 } - goto st3 - st281: + goto tr82 + st77: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof281 + goto _test_eof77 } - st_case_281: + st_case_77: switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr123 case 13: - goto tr470 + goto tr124 case 32: - goto tr467 + goto tr122 case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + goto tr125 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st282 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr122 } - goto st3 - st282: + goto tr82 +tr16: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st78 + st78: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof282 + goto _test_eof78 } - st_case_282: + st_case_78: +//line plugins/parsers/influx/machine.go:2113 switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr111 case 13: - goto tr470 + goto tr112 case 32: - goto tr467 + goto tr110 case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + goto tr113 + case 46: + goto st70 + case 69: + goto st16 + case 101: + goto st16 + case 105: + goto st74 + case 117: + goto st77 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st283 + goto st78 } case ( m.data)[( m.p)] >= 9: - goto tr467 + goto tr110 } - goto st3 - st283: + goto tr82 +tr17: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st79 + st79: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof283 + goto _test_eof79 } - st_case_283: + st_case_79: +//line plugins/parsers/influx/machine.go:2154 switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr128 case 13: - goto tr470 + goto tr129 case 32: - goto tr467 + goto tr127 case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + goto tr130 + case 65: + goto st18 + case 97: + goto st21 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st284 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr127 } - goto st3 - st284: + goto tr82 + st18: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof284 + goto _test_eof18 } - st_case_284: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + st_case_18: + if ( m.data)[( m.p)] == 76 { + goto st19 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st285 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + goto tr7 + st19: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof19 } - goto st3 - st285: + st_case_19: + if ( m.data)[( m.p)] == 83 { + goto st20 + } + goto tr7 + st20: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof20 + } + st_case_20: + if ( m.data)[( m.p)] == 69 { + goto st80 + } + goto tr7 + st80: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof285 + goto _test_eof80 } - st_case_285: + st_case_80: switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr128 case 13: - goto tr470 + goto tr129 case 32: - goto tr467 + goto tr127 case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + goto tr130 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st286 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr127 } - goto st3 - st286: + goto tr82 + st21: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof286 + goto _test_eof21 } - st_case_286: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + st_case_21: + if ( m.data)[( m.p)] == 108 { + goto st22 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st287 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + goto tr7 + st22: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof22 } - goto st3 - st287: + st_case_22: + if ( m.data)[( m.p)] == 115 { + goto st23 + } + goto tr7 + st23: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof23 + } + st_case_23: + if ( m.data)[( m.p)] == 101 { + goto st80 + } + goto tr7 +tr18: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st81 + st81: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof287 + goto _test_eof81 } - st_case_287: + st_case_81: +//line plugins/parsers/influx/machine.go:2257 switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr128 case 13: - goto tr470 + goto tr129 case 32: - goto tr467 + goto tr127 case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + goto tr130 + case 82: + goto st24 + case 114: + goto st25 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st288 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr127 } - goto st3 - st288: + goto tr82 + st24: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof24 + } + st_case_24: + if ( m.data)[( m.p)] == 85 { + goto st20 + } + goto tr7 + st25: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof25 + } + st_case_25: + if ( m.data)[( m.p)] == 117 { + goto st23 + } + goto tr7 +tr19: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st82 + st82: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof288 + goto _test_eof82 } - st_case_288: + st_case_82: +//line plugins/parsers/influx/machine.go:2305 switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr128 case 13: - goto tr470 + goto tr129 case 32: - goto tr467 + goto tr127 case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + goto tr130 + case 97: + goto st21 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st289 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr127 } - goto st3 - st289: + goto tr82 +tr20: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st83 + st83: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof289 + goto _test_eof83 } - st_case_289: + st_case_83: +//line plugins/parsers/influx/machine.go:2333 switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr128 case 13: - goto tr470 + goto tr129 case 32: - goto tr467 + goto tr127 case 44: - goto tr103 - case 61: - goto tr12 - case 92: - goto st34 + goto tr130 + case 114: + goto st25 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st290 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr127 } - goto st3 - st290: + goto tr82 +tr3: + ( m.cs) = 26 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 34; + {( m.p)++; goto _out } + } + + goto _again +tr57: + ( m.cs) = 26 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 34; + {( m.p)++; goto _out } + } + + goto _again + st26: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof290 + goto _test_eof26 } - st_case_290: + st_case_26: +//line plugins/parsers/influx/machine.go:2381 switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 case 32: - goto tr467 + goto tr2 case 44: - goto tr103 + goto tr2 case 61: - goto tr12 + goto tr2 case 92: - goto st34 + goto tr49 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st291 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 } - goto st3 - st291: + goto tr48 +tr48: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st27 + st27: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof291 + goto _test_eof27 } - st_case_291: + st_case_27: +//line plugins/parsers/influx/machine.go:2407 switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 case 32: - goto tr467 + goto tr2 case 44: - goto tr103 + goto tr2 case 61: - goto tr12 + goto tr51 case 92: - goto st34 + goto st32 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st292 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 } - goto st3 - st292: + goto st27 +tr51: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + + goto st28 + st28: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof292 + goto _test_eof28 } - st_case_292: + st_case_28: +//line plugins/parsers/influx/machine.go:2433 switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 case 32: - goto tr467 + goto tr2 case 44: - goto tr103 + goto tr2 case 61: - goto tr12 + goto tr2 case 92: - goto st34 + goto tr54 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st293 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 } - goto st3 - st293: + goto tr53 +tr53: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st29 + st29: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof293 + goto _test_eof29 } - st_case_293: + st_case_29: +//line plugins/parsers/influx/machine.go:2459 switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr2 case 13: - goto tr470 + goto tr2 case 32: - goto tr467 + goto tr56 case 44: - goto tr103 + goto tr57 case 61: - goto tr12 + goto tr2 case 92: - goto st34 + goto st30 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st294 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr56 } - goto st3 - st294: + goto st29 +tr54: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st30 + st30: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof30 + } + st_case_30: +//line plugins/parsers/influx/machine.go:2489 + if ( m.data)[( m.p)] == 92 { + goto st31 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + goto st29 + st31: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof294 + goto _test_eof31 } - st_case_294: + st_case_31: +//line plugins/parsers/influx/machine.go:2505 switch ( m.data)[( m.p)] { case 10: - goto tr468 - case 11: - goto tr469 + goto tr2 case 13: - goto tr470 + goto tr2 case 32: - goto tr467 + goto tr56 case 44: - goto tr103 + goto tr57 case 61: - goto tr12 + goto tr2 case 92: - goto st34 + goto st30 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st295 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr56 } - goto st3 - st295: + goto st29 +tr49: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st32 + st32: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof32 + } + st_case_32: +//line plugins/parsers/influx/machine.go:2535 + if ( m.data)[( m.p)] == 92 { + goto st33 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + goto st27 + st33: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof295 + goto _test_eof33 } - st_case_295: + st_case_33: +//line plugins/parsers/influx/machine.go:2551 switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr469 - case 13: - goto tr470 case 32: - goto tr467 + goto tr2 case 44: - goto tr103 + goto tr2 case 61: - goto tr12 + goto tr51 case 92: - goto st34 + goto st32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr467 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 } - goto st3 -tr922: - ( m.cs) = 35 -//line plugins/parsers/influx/machine.go.rl:130 + goto st27 + st34: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof34 + } + st_case_34: + if ( m.data)[( m.p)] == 10 { + goto tr62 + } + goto st34 +tr62: +//line plugins/parsers/influx/machine.go.rl:166 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1042: - ( m.cs) = 35 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1045: - ( m.cs) = 35 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1048: - ( m.cs) = 35 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- +//line plugins/parsers/influx/machine.go.rl:78 - ( m.cs) = 257; - {( m.p)++; goto _out } - } + {goto st85 } - goto _again - st35: + goto st84 + st84: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof35 + goto _test_eof84 } - st_case_35: -//line plugins/parsers/influx/machine.go:5716 + st_case_84: +//line plugins/parsers/influx/machine.go:2592 + goto st0 + st37: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof37 + } + st_case_37: switch ( m.data)[( m.p)] { case 32: - goto tr8 + goto tr31 + case 35: + goto tr31 case 44: - goto tr8 - case 61: - goto tr8 + goto tr31 case 92: - goto tr10 + goto tr66 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr31 } - goto tr6 -tr99: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() + goto tr65 +tr65: +//line plugins/parsers/influx/machine.go.rl:82 - goto st36 - st36: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof36 - } - st_case_36: -//line plugins/parsers/influx/machine.go:5747 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr105 - case 45: - goto tr106 - case 46: - goto tr107 - case 48: - goto tr108 - case 70: - goto tr110 - case 84: - goto tr111 - case 92: - goto st73 - case 102: - goto tr112 - case 116: - goto tr113 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr109 - } - goto st6 -tr105: - ( m.cs) = 296 -//line plugins/parsers/influx/machine.go.rl:148 + m.beginMetric = true - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- +//line plugins/parsers/influx/machine.go.rl:28 - ( m.cs) = 257; - {( m.p)++; goto _out } - } + m.pb = m.p - goto _again - st296: + goto st86 + st86: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof296 + goto _test_eof86 } - st_case_296: -//line plugins/parsers/influx/machine.go:5792 + st_case_86: +//line plugins/parsers/influx/machine.go:2628 switch ( m.data)[( m.p)] { case 10: - goto tr492 + goto tr138 case 13: - goto tr493 + goto tr139 case 32: - goto tr491 - case 34: - goto tr25 + goto tr2 case 44: - goto tr494 + goto tr140 case 92: - goto tr26 + goto st45 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr491 + goto tr2 } - goto tr23 -tr491: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st297 -tr980: - ( m.cs) = 297 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr985: - ( m.cs) = 297 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr988: - ( m.cs) = 297 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + goto st86 +tr67: +//line plugins/parsers/influx/machine.go.rl:166 - ( m.cs) = 257; - {( m.p)++; goto _out } - } + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr991: - ( m.cs) = 297 -//line plugins/parsers/influx/machine.go.rl:139 + goto st87 +tr138: + ( m.cs) = 87 +//line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.AddBool(m.key, m.text()) + err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } - goto _again - st297: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof297 - } - st_case_297: -//line plugins/parsers/influx/machine.go:5874 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 13: - goto st72 - case 32: - goto st297 - case 34: - goto tr29 - case 45: - goto tr497 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr498 - } - case ( m.data)[( m.p)] >= 9: - goto st297 - } - goto st6 -tr492: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto st298 -tr219: //line plugins/parsers/influx/machine.go.rl:166 m.lineno++ m.sol = m.p m.sol++ // next char will be the first column in the line - goto st298 -tr636: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:130 + goto _again +tr142: + ( m.cs) = 87 +//line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddFloat(m.key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } @@ -5933,25400 +2687,530 @@ tr636: m.sol++ // next char will be the first column in the line goto _again -tr600: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:166 + st87: +//line plugins/parsers/influx/machine.go.rl:172 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.finishMetric = true + ( m.cs) = 85; + {( m.p)++; goto _out } -//line plugins/parsers/influx/machine.go.rl:157 + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof87 + } + st_case_87: +//line plugins/parsers/influx/machine.go:2702 + goto st0 +tr139: + ( m.cs) = 38 +//line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetTimestamp(m.text()) + err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again -tr817: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:112 +tr143: + ( m.cs) = 38 +//line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddInt(m.key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - goto _again -tr822: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:121 + st38: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof38 + } + st_case_38: +//line plugins/parsers/influx/machine.go:2735 + if ( m.data)[( m.p)] == 10 { + goto tr67 + } + goto st0 +tr140: + ( m.cs) = 39 +//line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.AddUint(m.key, m.text()) + err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - goto _again -tr803: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:139 +tr144: + ( m.cs) = 39 +//line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddBool(m.key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; goto _out } } goto _again -tr758: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr791: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr797: - ( m.cs) = 298 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st298: -//line plugins/parsers/influx/machine.go.rl:172 - - m.finishMetric = true - ( m.cs) = 739; - {( m.p)++; goto _out } - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof298 - } - st_case_298: -//line plugins/parsers/influx/machine.go:6081 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr115 - case 13: - goto st6 - case 32: - goto st37 - case 34: - goto tr116 - case 35: - goto st6 - case 44: - goto st6 - case 92: - goto tr85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st37 - } - goto tr80 - st37: + st39: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof37 + goto _test_eof39 } - st_case_37: + st_case_39: +//line plugins/parsers/influx/machine.go:2771 switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr115 - case 13: - goto st6 case 32: - goto st37 - case 34: - goto tr116 - case 35: - goto st6 + goto tr2 case 44: - goto st6 + goto tr2 + case 61: + goto tr2 case 92: - goto tr85 + goto tr69 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st37 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 } - goto tr80 -tr115: + goto tr68 +tr68: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st38 - st38: + goto st40 + st40: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof38 + goto _test_eof40 } - st_case_38: -//line plugins/parsers/influx/machine.go:6142 + st_case_40: +//line plugins/parsers/influx/machine.go:2797 switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr118 - case 13: - goto st6 case 32: - goto tr117 - case 34: - goto tr83 - case 35: - goto st29 + goto tr2 case 44: - goto tr90 + goto tr2 + case 61: + goto tr71 case 92: - goto tr85 + goto st43 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr117 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 } - goto tr80 -tr117: - ( m.cs) = 39 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + goto st40 +tr71: +//line plugins/parsers/influx/machine.go.rl:95 - ( m.cs) = 257; - {( m.p)++; goto _out } - } + m.key = m.text() - goto _again - st39: + goto st41 + st41: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof39 + goto _test_eof41 } - st_case_39: -//line plugins/parsers/influx/machine.go:6183 + st_case_41: +//line plugins/parsers/influx/machine.go:2823 switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr121 - case 13: - goto st6 case 32: - goto st39 - case 34: - goto tr122 - case 35: - goto tr92 + goto tr2 case 44: - goto st6 + goto tr2 case 61: - goto tr80 + goto tr2 case 92: - goto tr123 + goto tr74 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st39 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 } - goto tr119 -tr119: + goto tr73 +tr73: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st40 - st40: + goto st88 + st88: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof40 + goto _test_eof88 } - st_case_40: -//line plugins/parsers/influx/machine.go:6219 + st_case_88: +//line plugins/parsers/influx/machine.go:2849 switch ( m.data)[( m.p)] { case 10: - goto tr28 - case 11: - goto tr125 + goto tr142 case 13: - goto st6 + goto tr143 case 32: - goto tr87 - case 34: - goto tr126 + goto tr2 case 44: - goto tr90 + goto tr144 case 61: - goto tr127 + goto tr2 case 92: - goto st92 + goto st42 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 + goto tr2 } - goto st40 -tr125: - ( m.cs) = 41 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr129: - ( m.cs) = 41 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - + goto st88 +tr74: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto _again - st41: + goto st42 + st42: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof41 + goto _test_eof42 } - st_case_41: -//line plugins/parsers/influx/machine.go:6277 + st_case_42: +//line plugins/parsers/influx/machine.go:2879 + if ( m.data)[( m.p)] == 92 { + goto st89 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + goto st88 + st89: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof89 + } + st_case_89: +//line plugins/parsers/influx/machine.go:2895 switch ( m.data)[( m.p)] { case 10: - goto tr28 - case 11: - goto tr129 + goto tr142 case 13: - goto st6 + goto tr143 case 32: - goto tr87 - case 34: - goto tr122 + goto tr2 case 44: - goto tr90 + goto tr144 case 61: - goto tr127 + goto tr2 case 92: - goto tr123 + goto st42 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 + goto tr2 } - goto tr119 -tr122: - ( m.cs) = 299 + goto st88 +tr69: //line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr126: - ( m.cs) = 299 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st299: + goto st43 + st43: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof43 + } + st_case_43: +//line plugins/parsers/influx/machine.go:2925 + if ( m.data)[( m.p)] == 92 { + goto st44 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + goto st40 + st44: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof299 + goto _test_eof44 } - st_case_299: -//line plugins/parsers/influx/machine.go:6335 + st_case_44: +//line plugins/parsers/influx/machine.go:2941 switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr500 - case 13: - goto st32 case 32: - goto tr499 + goto tr2 case 44: - goto tr501 + goto tr2 case 61: - goto tr47 + goto tr71 case 92: - goto st27 + goto st43 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr499 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 } - goto st10 -tr499: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr563: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr811: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr729: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr941: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr947: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr953: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1005: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1009: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1013: - ( m.cs) = 300 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } + goto st40 +tr66: +//line plugins/parsers/influx/machine.go.rl:82 -//line plugins/parsers/influx/machine.go.rl:139 + m.beginMetric = true - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- +//line plugins/parsers/influx/machine.go.rl:28 - ( m.cs) = 257; - {( m.p)++; goto _out } - } + m.pb = m.p - goto _again - st300: + goto st45 + st45: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof300 - } - st_case_300: -//line plugins/parsers/influx/machine.go:6571 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr503 - case 13: - goto st32 - case 32: - goto st300 - case 44: - goto tr103 - case 45: - goto tr465 - case 61: - goto tr103 - case 92: - goto tr10 + goto _test_eof45 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr466 - } - case ( m.data)[( m.p)] >= 9: - goto st300 + st_case_45: +//line plugins/parsers/influx/machine.go:2971 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st0 } - goto tr6 -tr503: -//line plugins/parsers/influx/machine.go.rl:28 + goto st86 +tr63: +//line plugins/parsers/influx/machine.go.rl:166 - m.pb = m.p + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st301 - st301: + goto st85 + st85: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof301 + goto _test_eof85 } - st_case_301: -//line plugins/parsers/influx/machine.go:6610 + st_case_85: +//line plugins/parsers/influx/machine.go:2989 switch ( m.data)[( m.p)] { case 10: - goto tr101 - case 11: - goto tr503 + goto tr63 case 13: - goto st32 + goto st35 case 32: - goto st300 - case 44: - goto tr103 - case 45: - goto tr465 - case 61: - goto tr12 - case 92: - goto tr10 + goto st85 + case 35: + goto st36 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr466 - } - case ( m.data)[( m.p)] >= 9: - goto st300 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st85 } - goto tr6 -tr500: - ( m.cs) = 302 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr504: - ( m.cs) = 302 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st302: + goto tr135 + st35: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof302 + goto _test_eof35 } - st_case_302: -//line plugins/parsers/influx/machine.go:6673 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr504 - case 13: - goto st32 - case 32: - goto tr499 - case 44: - goto tr4 - case 45: - goto tr505 - case 61: - goto tr47 - case 92: - goto tr43 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr506 - } - case ( m.data)[( m.p)] >= 9: - goto tr499 - } - goto tr39 -tr505: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st42 - st42: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof42 - } - st_case_42: -//line plugins/parsers/influx/machine.go:6712 - switch ( m.data)[( m.p)] { - case 10: - goto tr130 - case 11: - goto tr46 - case 13: - goto tr130 - case 32: - goto tr1 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st303 - } - case ( m.data)[( m.p)] >= 9: - goto tr1 - } - goto st10 -tr506: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st303 - st303: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof303 - } - st_case_303: -//line plugins/parsers/influx/machine.go:6749 - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st307 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 -tr512: - ( m.cs) = 304 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr572: - ( m.cs) = 304 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr507: - ( m.cs) = 304 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr569: - ( m.cs) = 304 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st304: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof304 - } - st_case_304: -//line plugins/parsers/influx/machine.go:6852 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr511 - case 13: - goto st32 - case 32: - goto st304 - case 44: - goto tr8 - case 61: - goto tr8 - case 92: - goto tr10 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st304 - } - goto tr6 -tr511: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st305 - st305: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof305 - } - st_case_305: -//line plugins/parsers/influx/machine.go:6884 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr511 - case 13: - goto st32 - case 32: - goto st304 - case 44: - goto tr8 - case 61: - goto tr12 - case 92: - goto tr10 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st304 - } - goto tr6 -tr513: - ( m.cs) = 306 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr508: - ( m.cs) = 306 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st306: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof306 - } - st_case_306: -//line plugins/parsers/influx/machine.go:6950 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr513 - case 13: - goto st32 - case 32: - goto tr512 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto tr43 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr512 - } - goto tr39 - st307: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof307 - } - st_case_307: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st308 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st308: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof308 - } - st_case_308: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st309 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 + st_case_35: + if ( m.data)[( m.p)] == 10 { + goto tr63 } - goto st10 - st309: + goto st0 + st36: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof309 - } - st_case_309: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 + goto _test_eof36 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st310 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 + st_case_36: + if ( m.data)[( m.p)] == 10 { + goto tr63 } - goto st10 - st310: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof310 - } - st_case_310: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st311 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st311: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof311 - } - st_case_311: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st312 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st312: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof312 - } - st_case_312: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st313 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st313: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof313 - } - st_case_313: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st314 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st314: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof314 - } - st_case_314: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st315 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st315: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof315 - } - st_case_315: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st316 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st316: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof316 - } - st_case_316: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st317 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st317: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof317 - } - st_case_317: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st318 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st318: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof318 - } - st_case_318: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st319 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st319: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof319 - } - st_case_319: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st320 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st320: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof320 - } - st_case_320: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st321 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st321: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof321 - } - st_case_321: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st322 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st322: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof322 - } - st_case_322: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st323 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st323: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof323 - } - st_case_323: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st324 - } - case ( m.data)[( m.p)] >= 9: - goto tr507 - } - goto st10 - st324: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof324 - } - st_case_324: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr508 - case 13: - goto tr470 - case 32: - goto tr507 - case 44: - goto tr4 - case 61: - goto tr47 - case 92: - goto st27 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr507 - } - goto st10 -tr501: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr565: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr813: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr733: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr945: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr951: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr957: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1007: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1011: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1015: - ( m.cs) = 43 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st43: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof43 - } - st_case_43: -//line plugins/parsers/influx/machine.go:7721 - switch ( m.data)[( m.p)] { - case 32: - goto tr45 - case 44: - goto tr45 - case 61: - goto tr45 - case 92: - goto tr133 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto tr132 -tr132: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st44 - st44: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof44 - } - st_case_44: -//line plugins/parsers/influx/machine.go:7752 - switch ( m.data)[( m.p)] { - case 32: - goto tr45 - case 44: - goto tr45 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st44 -tr135: -//line plugins/parsers/influx/machine.go.rl:95 - - m.key = m.text() - -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st45 - st45: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof45 - } - st_case_45: -//line plugins/parsers/influx/machine.go:7787 - switch ( m.data)[( m.p)] { - case 32: - goto tr45 - case 34: - goto tr137 - case 44: - goto tr45 - case 45: - goto tr138 - case 46: - goto tr139 - case 48: - goto tr140 - case 61: - goto tr45 - case 70: - goto tr142 - case 84: - goto tr143 - case 92: - goto tr56 - case 102: - goto tr144 - case 116: - goto tr145 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr45 - } - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr141 - } - default: - goto tr45 - } - goto tr55 -tr137: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st46 - st46: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof46 - } - st_case_46: -//line plugins/parsers/influx/machine.go:7838 - switch ( m.data)[( m.p)] { - case 10: - goto tr24 - case 11: - goto tr148 - case 13: - goto tr23 - case 32: - goto tr147 - case 34: - goto tr149 - case 44: - goto tr150 - case 61: - goto tr23 - case 92: - goto tr151 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr147 - } - goto tr146 -tr146: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st47 - st47: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof47 - } - st_case_47: -//line plugins/parsers/influx/machine.go:7872 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 -tr178: - ( m.cs) = 48 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr153: - ( m.cs) = 48 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr147: - ( m.cs) = 48 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st48: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof48 - } - st_case_48: -//line plugins/parsers/influx/machine.go:7943 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr160 - case 13: - goto st6 - case 32: - goto st48 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr161 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st48 - } - goto tr158 -tr158: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st49 - st49: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof49 - } - st_case_49: -//line plugins/parsers/influx/machine.go:7977 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st49 -tr163: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st50 - st50: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof50 - } - st_case_50: -//line plugins/parsers/influx/machine.go:8009 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr105 - case 45: - goto tr165 - case 46: - goto tr166 - case 48: - goto tr167 - case 70: - goto tr169 - case 84: - goto tr170 - case 92: - goto st73 - case 102: - goto tr171 - case 116: - goto tr172 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr168 - } - goto st6 -tr165: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st51 - st51: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof51 - } - st_case_51: -//line plugins/parsers/influx/machine.go:8047 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 46: - goto st52 - case 48: - goto st631 - case 92: - goto st73 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st632 - } - goto st6 -tr166: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st52 - st52: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof52 - } - st_case_52: -//line plugins/parsers/influx/machine.go:8075 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st325 - } - goto st6 - st325: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof325 - } - st_case_325: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 69: - goto st173 - case 92: - goto st73 - case 101: - goto st173 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st325 - } - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 -tr916: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st326 -tr531: - ( m.cs) = 326 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr923: - ( m.cs) = 326 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr925: - ( m.cs) = 326 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr928: - ( m.cs) = 326 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st326: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof326 - } - st_case_326: -//line plugins/parsers/influx/machine.go:8183 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 13: - goto st102 - case 32: - goto st326 - case 34: - goto tr29 - case 45: - goto tr538 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr539 - } - case ( m.data)[( m.p)] >= 9: - goto st326 - } - goto st6 -tr665: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto st327 -tr273: -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto st327 -tr532: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr674: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr737: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr743: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr749: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr891: - ( m.cs) = 327 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again - st327: -//line plugins/parsers/influx/machine.go.rl:172 - - m.finishMetric = true - ( m.cs) = 739; - {( m.p)++; goto _out } - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof327 - } - st_case_327: -//line plugins/parsers/influx/machine.go:8352 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr337 - case 13: - goto st6 - case 32: - goto st164 - case 34: - goto tr116 - case 35: - goto st6 - case 44: - goto st6 - case 92: - goto tr338 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st164 - } - goto tr335 -tr335: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st53 - st53: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof53 - } - st_case_53: -//line plugins/parsers/influx/machine.go:8386 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 -tr179: - ( m.cs) = 54 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st54: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof54 - } - st_case_54: -//line plugins/parsers/influx/machine.go:8425 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr183 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr122 - case 44: - goto tr180 - case 61: - goto st53 - case 92: - goto tr184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto tr182 -tr182: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st55 - st55: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof55 - } - st_case_55: -//line plugins/parsers/influx/machine.go:8459 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr186 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st55 -tr186: - ( m.cs) = 56 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr183: - ( m.cs) = 56 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st56: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof56 - } - st_case_56: -//line plugins/parsers/influx/machine.go:8517 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr183 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr122 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto tr184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto tr182 -tr180: - ( m.cs) = 57 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr156: - ( m.cs) = 57 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr150: - ( m.cs) = 57 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st57: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof57 - } - st_case_57: -//line plugins/parsers/influx/machine.go:8588 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr190 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr191 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr189 -tr189: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st58 - st58: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof58 - } - st_case_58: -//line plugins/parsers/influx/machine.go:8620 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr193 - case 44: - goto st6 - case 61: - goto tr194 - case 92: - goto st69 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st58 -tr190: - ( m.cs) = 328 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr193: - ( m.cs) = 328 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st328: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof328 - } - st_case_328: -//line plugins/parsers/influx/machine.go:8676 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st329 - case 13: - goto st32 - case 32: - goto st271 - case 44: - goto st35 - case 61: - goto tr53 - case 92: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st271 - } - goto st13 - st329: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof329 - } - st_case_329: - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st329 - case 13: - goto st32 - case 32: - goto st271 - case 44: - goto tr196 - case 45: - goto tr541 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr542 - } - case ( m.data)[( m.p)] >= 9: - goto st271 - } - goto st13 -tr541: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st59 - st59: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof59 - } - st_case_59: -//line plugins/parsers/influx/machine.go:8740 - switch ( m.data)[( m.p)] { - case 32: - goto tr196 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr196 - } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st330 - } - default: - goto tr196 - } - goto st13 -tr542: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st330 - st330: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof330 - } - st_case_330: -//line plugins/parsers/influx/machine.go:8775 - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st332 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 -tr543: - ( m.cs) = 331 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st331: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof331 - } - st_case_331: -//line plugins/parsers/influx/machine.go:8819 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st331 - case 13: - goto st32 - case 32: - goto st276 - case 44: - goto tr2 - case 61: - goto tr53 - case 92: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st276 - } - goto st13 - st332: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof332 - } - st_case_332: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st333 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st333: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof333 - } - st_case_333: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st334 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st334: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof334 - } - st_case_334: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st335 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st335: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof335 - } - st_case_335: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st336 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st336: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof336 - } - st_case_336: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st337 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st337: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof337 - } - st_case_337: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st338 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st338: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof338 - } - st_case_338: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st339 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st339: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof339 - } - st_case_339: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st340 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st340: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof340 - } - st_case_340: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st341 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st341: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof341 - } - st_case_341: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st342 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st342: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof342 - } - st_case_342: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st343 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st343: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof343 - } - st_case_343: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st344 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st344: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof344 - } - st_case_344: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st345 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st345: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof345 - } - st_case_345: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st346 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st346: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof346 - } - st_case_346: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st347 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st347: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof347 - } - st_case_347: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st348 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st348: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof348 - } - st_case_348: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st349 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st13 - st349: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof349 - } - st_case_349: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr543 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr196 - case 61: - goto tr53 - case 92: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr467 - } - goto st13 -tr194: -//line plugins/parsers/influx/machine.go.rl:95 - - m.key = m.text() - - goto st60 - st60: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof60 - } - st_case_60: -//line plugins/parsers/influx/machine.go:9386 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr149 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr151 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr146 -tr149: - ( m.cs) = 350 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr155: - ( m.cs) = 350 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st350: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof350 - } - st_case_350: -//line plugins/parsers/influx/machine.go:9442 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr564 - case 13: - goto st32 - case 32: - goto tr563 - case 44: - goto tr565 - case 61: - goto tr130 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr563 - } - goto st15 -tr564: - ( m.cs) = 351 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr731: - ( m.cs) = 351 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr943: - ( m.cs) = 351 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr949: - ( m.cs) = 351 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr955: - ( m.cs) = 351 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st351: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof351 - } - st_case_351: -//line plugins/parsers/influx/machine.go:9573 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr566 - case 13: - goto st32 - case 32: - goto tr563 - case 44: - goto tr60 - case 45: - goto tr567 - case 61: - goto tr130 - case 92: - goto tr64 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr568 - } - case ( m.data)[( m.p)] >= 9: - goto tr563 - } - goto tr62 -tr591: - ( m.cs) = 352 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr566: - ( m.cs) = 352 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st352: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof352 - } - st_case_352: -//line plugins/parsers/influx/machine.go:9636 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr566 - case 13: - goto st32 - case 32: - goto tr563 - case 44: - goto tr60 - case 45: - goto tr567 - case 61: - goto tr12 - case 92: - goto tr64 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr568 - } - case ( m.data)[( m.p)] >= 9: - goto tr563 - } - goto tr62 -tr567: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st61 - st61: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof61 - } - st_case_61: -//line plugins/parsers/influx/machine.go:9675 - switch ( m.data)[( m.p)] { - case 10: - goto tr130 - case 11: - goto tr66 - case 13: - goto tr130 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st353 - } - case ( m.data)[( m.p)] >= 9: - goto tr58 - } - goto st17 -tr568: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st353 - st353: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof353 - } - st_case_353: -//line plugins/parsers/influx/machine.go:9712 - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st355 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 -tr573: - ( m.cs) = 354 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr570: - ( m.cs) = 354 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st354: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof354 - } - st_case_354: -//line plugins/parsers/influx/machine.go:9783 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr573 - case 13: - goto st32 - case 32: - goto tr572 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto tr64 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr572 - } - goto tr62 - st355: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof355 - } - st_case_355: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st356 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st356: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof356 - } - st_case_356: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st357 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st357: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof357 - } - st_case_357: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st358 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st358: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof358 - } - st_case_358: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st359 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st359: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof359 - } - st_case_359: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st360 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st360: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof360 - } - st_case_360: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st361 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st361: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof361 - } - st_case_361: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st362 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st362: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof362 - } - st_case_362: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st363 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st363: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof363 - } - st_case_363: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st364 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st364: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof364 - } - st_case_364: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st365 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st365: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof365 - } - st_case_365: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st366 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st366: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof366 - } - st_case_366: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st367 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st367: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof367 - } - st_case_367: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st368 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st368: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof368 - } - st_case_368: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st369 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st369: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof369 - } - st_case_369: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st370 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st370: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof370 - } - st_case_370: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st371 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st371: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof371 - } - st_case_371: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st372 - } - case ( m.data)[( m.p)] >= 9: - goto tr569 - } - goto st17 - st372: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof372 - } - st_case_372: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr570 - case 13: - goto tr470 - case 32: - goto tr569 - case 44: - goto tr60 - case 61: - goto tr12 - case 92: - goto st19 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr569 - } - goto st17 -tr151: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st62 - st62: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof62 - } - st_case_62: -//line plugins/parsers/influx/machine.go:10350 - switch ( m.data)[( m.p)] { - case 34: - goto st47 - case 92: - goto st63 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st15 - st63: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof63 - } - st_case_63: -//line plugins/parsers/influx/machine.go:10374 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 -tr154: - ( m.cs) = 64 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr148: - ( m.cs) = 64 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st64: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof64 - } - st_case_64: -//line plugins/parsers/influx/machine.go:10432 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr201 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr202 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto tr203 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto tr200 -tr200: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st65 - st65: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof65 - } - st_case_65: -//line plugins/parsers/influx/machine.go:10466 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr205 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st65 -tr205: - ( m.cs) = 66 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr201: - ( m.cs) = 66 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st66: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof66 - } - st_case_66: -//line plugins/parsers/influx/machine.go:10524 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr201 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr202 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto tr203 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto tr200 -tr202: - ( m.cs) = 373 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr206: - ( m.cs) = 373 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st373: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof373 - } - st_case_373: -//line plugins/parsers/influx/machine.go:10582 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr591 - case 13: - goto st32 - case 32: - goto tr563 - case 44: - goto tr565 - case 61: - goto tr12 - case 92: - goto st19 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr563 - } - goto st17 -tr203: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st67 - st67: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof67 - } - st_case_67: -//line plugins/parsers/influx/machine.go:10614 - switch ( m.data)[( m.p)] { - case 34: - goto st65 - case 92: - goto st68 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st17 - st68: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof68 - } - st_case_68: -//line plugins/parsers/influx/machine.go:10638 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr205 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st65 -tr191: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st69 - st69: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof69 - } - st_case_69: -//line plugins/parsers/influx/machine.go:10672 - switch ( m.data)[( m.p)] { - case 34: - goto st58 - case 92: - goto st70 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st13 - st70: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof70 - } - st_case_70: -//line plugins/parsers/influx/machine.go:10696 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr193 - case 44: - goto st6 - case 61: - goto tr194 - case 92: - goto st69 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st58 -tr187: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st71 -tr344: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st71 - st71: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof71 - } - st_case_71: -//line plugins/parsers/influx/machine.go:10738 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr210 - case 44: - goto tr180 - case 45: - goto tr211 - case 46: - goto tr212 - case 48: - goto tr213 - case 70: - goto tr215 - case 84: - goto tr216 - case 92: - goto st155 - case 102: - goto tr217 - case 116: - goto tr218 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr214 - } - case ( m.data)[( m.p)] >= 9: - goto tr178 - } - goto st53 -tr210: - ( m.cs) = 374 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st374: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof374 - } - st_case_374: -//line plugins/parsers/influx/machine.go:10796 - switch ( m.data)[( m.p)] { - case 10: - goto tr492 - case 11: - goto tr593 - case 13: - goto tr493 - case 32: - goto tr592 - case 34: - goto tr83 - case 44: - goto tr594 - case 92: - goto tr85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr592 - } - goto tr80 -tr623: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr592: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr762: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr635: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr757: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr790: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr796: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr802: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr816: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr821: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr826: - ( m.cs) = 375 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st375: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof375 - } - st_case_375: -//line plugins/parsers/influx/machine.go:11049 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr596 - case 13: - goto st72 - case 32: - goto st375 - case 34: - goto tr95 - case 44: - goto st6 - case 45: - goto tr597 - case 61: - goto st6 - case 92: - goto tr96 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr598 - } - case ( m.data)[( m.p)] >= 9: - goto st375 - } - goto tr92 -tr596: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st376 - st376: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof376 - } - st_case_376: -//line plugins/parsers/influx/machine.go:11090 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr596 - case 13: - goto st72 - case 32: - goto st375 - case 34: - goto tr95 - case 44: - goto st6 - case 45: - goto tr597 - case 61: - goto tr99 - case 92: - goto tr96 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr598 - } - case ( m.data)[( m.p)] >= 9: - goto st375 - } - goto tr92 -tr493: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st72 -tr602: - ( m.cs) = 72 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr638: - ( m.cs) = 72 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr793: - ( m.cs) = 72 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr799: - ( m.cs) = 72 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr805: - ( m.cs) = 72 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st72: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof72 - } - st_case_72: -//line plugins/parsers/influx/machine.go:11196 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 34: - goto tr29 - case 92: - goto st73 - } - goto st6 -tr26: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st73 - st73: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof73 - } - st_case_73: -//line plugins/parsers/influx/machine.go:11217 - switch ( m.data)[( m.p)] { - case 34: - goto st6 - case 92: - goto st6 - } - goto tr8 -tr597: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st74 - st74: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof74 - } - st_case_74: -//line plugins/parsers/influx/machine.go:11236 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st377 - } - case ( m.data)[( m.p)] >= 12: - goto st6 - } - goto st31 -tr598: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st377 - st377: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof377 - } - st_case_377: -//line plugins/parsers/influx/machine.go:11273 - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st380 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 -tr599: - ( m.cs) = 378 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st378: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof378 - } - st_case_378: -//line plugins/parsers/influx/machine.go:11319 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 13: - goto st72 - case 32: - goto st378 - case 34: - goto tr29 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st378 - } - goto st6 -tr601: - ( m.cs) = 379 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st379: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof379 - } - st_case_379: -//line plugins/parsers/influx/machine.go:11354 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto st379 - case 13: - goto st72 - case 32: - goto st378 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st378 - } - goto st31 -tr96: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st75 - st75: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof75 - } - st_case_75: -//line plugins/parsers/influx/machine.go:11388 - switch ( m.data)[( m.p)] { - case 34: - goto st31 - case 92: - goto st31 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st3 - st380: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof380 - } - st_case_380: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st381 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st381: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof381 - } - st_case_381: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st382 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st382: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof382 - } - st_case_382: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st383 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st383: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof383 - } - st_case_383: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st384 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st384: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof384 - } - st_case_384: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st385 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st385: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof385 - } - st_case_385: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st386 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st386: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof386 - } - st_case_386: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st387 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st387: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof387 - } - st_case_387: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st388 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st388: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof388 - } - st_case_388: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st389 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st389: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof389 - } - st_case_389: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st390 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st390: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof390 - } - st_case_390: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st391 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st391: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof391 - } - st_case_391: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st392 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st392: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof392 - } - st_case_392: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st393 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st393: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof393 - } - st_case_393: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st394 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st394: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof394 - } - st_case_394: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st395 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st395: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof395 - } - st_case_395: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st396 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st396: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof396 - } - st_case_396: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st397 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st31 - st397: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof397 - } - st_case_397: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr601 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto st75 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr599 - } - goto st31 -tr593: - ( m.cs) = 398 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr637: - ( m.cs) = 398 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr818: - ( m.cs) = 398 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr823: - ( m.cs) = 398 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr827: - ( m.cs) = 398 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st398: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof398 - } - st_case_398: -//line plugins/parsers/influx/machine.go:12089 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr624 - case 13: - goto st72 - case 32: - goto tr623 - case 34: - goto tr122 - case 44: - goto tr90 - case 45: - goto tr625 - case 61: - goto st29 - case 92: - goto tr123 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr626 - } - case ( m.data)[( m.p)] >= 9: - goto tr623 - } - goto tr119 -tr624: - ( m.cs) = 399 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st399: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof399 - } - st_case_399: -//line plugins/parsers/influx/machine.go:12141 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr624 - case 13: - goto st72 - case 32: - goto tr623 - case 34: - goto tr122 - case 44: - goto tr90 - case 45: - goto tr625 - case 61: - goto tr127 - case 92: - goto tr123 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr626 - } - case ( m.data)[( m.p)] >= 9: - goto tr623 - } - goto tr119 -tr90: - ( m.cs) = 76 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr84: - ( m.cs) = 76 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr231: - ( m.cs) = 76 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st76: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof76 - } - st_case_76: -//line plugins/parsers/influx/machine.go:12219 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr190 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr222 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr221 -tr221: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st77 - st77: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof77 - } - st_case_77: -//line plugins/parsers/influx/machine.go:12251 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr193 - case 44: - goto st6 - case 61: - goto tr224 - case 92: - goto st87 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st77 -tr224: -//line plugins/parsers/influx/machine.go.rl:95 - - m.key = m.text() - - goto st78 - st78: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof78 - } - st_case_78: -//line plugins/parsers/influx/machine.go:12283 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr149 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr227 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr226 -tr226: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st79 - st79: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof79 - } - st_case_79: -//line plugins/parsers/influx/machine.go:12315 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 -tr230: - ( m.cs) = 80 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st80: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof80 - } - st_case_80: -//line plugins/parsers/influx/machine.go:12356 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr234 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr202 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto tr235 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto tr233 -tr233: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st81 - st81: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof81 - } - st_case_81: -//line plugins/parsers/influx/machine.go:12390 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr237 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st81 -tr237: - ( m.cs) = 82 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr234: - ( m.cs) = 82 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st82: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof82 - } - st_case_82: -//line plugins/parsers/influx/machine.go:12448 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr234 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr202 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto tr235 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto tr233 -tr235: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st83 - st83: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof83 - } - st_case_83: -//line plugins/parsers/influx/machine.go:12482 - switch ( m.data)[( m.p)] { - case 34: - goto st81 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st17 - st84: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof84 - } - st_case_84: -//line plugins/parsers/influx/machine.go:12506 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr237 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st81 -tr227: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st85 - st85: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof85 - } - st_case_85: -//line plugins/parsers/influx/machine.go:12540 - switch ( m.data)[( m.p)] { - case 34: - goto st79 - case 92: - goto st86 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st15 - st86: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof86 - } - st_case_86: -//line plugins/parsers/influx/machine.go:12564 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 -tr222: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st87 - st87: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof87 - } - st_case_87: -//line plugins/parsers/influx/machine.go:12598 - switch ( m.data)[( m.p)] { - case 34: - goto st77 - case 92: - goto st88 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st13 - st88: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof88 - } - st_case_88: -//line plugins/parsers/influx/machine.go:12622 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr193 - case 44: - goto st6 - case 61: - goto tr224 - case 92: - goto st87 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st77 -tr625: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st89 - st89: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof89 - } - st_case_89: -//line plugins/parsers/influx/machine.go:12654 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr125 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st400 - } - case ( m.data)[( m.p)] >= 9: - goto tr87 - } - goto st40 -tr626: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st400 - st400: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof400 - } - st_case_400: -//line plugins/parsers/influx/machine.go:12693 - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st544 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 -tr632: - ( m.cs) = 401 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr769: - ( m.cs) = 401 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr627: - ( m.cs) = 401 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr766: - ( m.cs) = 401 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st401: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof401 - } - st_case_401: -//line plugins/parsers/influx/machine.go:12798 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr631 - case 13: - goto st72 - case 32: - goto st401 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr96 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st401 - } - goto tr92 -tr631: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st402 - st402: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof402 - } - st_case_402: -//line plugins/parsers/influx/machine.go:12832 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr631 - case 13: - goto st72 - case 32: - goto st401 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto tr96 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st401 - } - goto tr92 -tr633: - ( m.cs) = 403 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr628: - ( m.cs) = 403 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st403: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof403 - } - st_case_403: -//line plugins/parsers/influx/machine.go:12900 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr633 - case 13: - goto st72 - case 32: - goto tr632 - case 34: - goto tr122 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto tr123 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr632 - } - goto tr119 -tr127: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st90 -tr381: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st90 - st90: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof90 - } - st_case_90: -//line plugins/parsers/influx/machine.go:12944 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr210 - case 44: - goto tr90 - case 45: - goto tr243 - case 46: - goto tr244 - case 48: - goto tr245 - case 70: - goto tr247 - case 84: - goto tr248 - case 92: - goto st140 - case 102: - goto tr249 - case 116: - goto tr250 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr246 - } - case ( m.data)[( m.p)] >= 9: - goto tr87 - } - goto st29 -tr88: - ( m.cs) = 91 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr82: - ( m.cs) = 91 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st91: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof91 - } - st_case_91: -//line plugins/parsers/influx/machine.go:13019 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr129 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr122 - case 44: - goto tr90 - case 61: - goto st29 - case 92: - goto tr123 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto tr119 -tr123: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st92 - st92: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof92 - } - st_case_92: -//line plugins/parsers/influx/machine.go:13053 - switch ( m.data)[( m.p)] { - case 34: - goto st40 - case 92: - goto st40 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st10 -tr243: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st93 - st93: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof93 - } - st_case_93: -//line plugins/parsers/influx/machine.go:13080 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 46: - goto st95 - case 48: - goto st532 - case 92: - goto st140 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st535 - } - case ( m.data)[( m.p)] >= 9: - goto tr87 - } - goto st29 -tr83: - ( m.cs) = 404 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr89: - ( m.cs) = 404 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr116: - ( m.cs) = 404 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st404: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof404 - } - st_case_404: -//line plugins/parsers/influx/machine.go:13162 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr634 - case 13: - goto st32 - case 32: - goto tr499 - case 44: - goto tr501 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr499 - } - goto st1 -tr634: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr812: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1006: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1010: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1014: - ( m.cs) = 405 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st405: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof405 - } - st_case_405: -//line plugins/parsers/influx/machine.go:13291 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr504 - case 13: - goto st32 - case 32: - goto tr499 - case 44: - goto tr4 - case 45: - goto tr505 - case 61: - goto st1 - case 92: - goto tr43 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr506 - } - case ( m.data)[( m.p)] >= 9: - goto tr499 - } - goto tr39 -tr35: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st94 -tr458: -//line plugins/parsers/influx/machine.go.rl:82 - - m.beginMetric = true - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st94 - st94: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof94 - } - st_case_94: -//line plugins/parsers/influx/machine.go:13340 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st0 - } - case ( m.data)[( m.p)] >= 9: - goto st0 - } - goto st1 -tr244: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st95 - st95: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof95 - } - st_case_95: -//line plugins/parsers/influx/machine.go:13361 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st406 - } - case ( m.data)[( m.p)] >= 9: - goto tr87 - } - goto st29 - st406: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof406 - } - st_case_406: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 69: - goto st138 - case 92: - goto st140 - case 101: - goto st138 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st406 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 -tr594: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr639: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr760: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr794: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr800: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr806: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr819: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr824: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr828: - ( m.cs) = 96 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st96: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof96 - } - st_case_96: -//line plugins/parsers/influx/machine.go:13627 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr256 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr257 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr255 -tr255: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st97 - st97: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof97 - } - st_case_97: -//line plugins/parsers/influx/machine.go:13659 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr259 - case 44: - goto st6 - case 61: - goto tr260 - case 92: - goto st136 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st97 -tr256: - ( m.cs) = 407 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr259: - ( m.cs) = 407 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st407: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof407 - } - st_case_407: -//line plugins/parsers/influx/machine.go:13715 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st408 - case 13: - goto st32 - case 32: - goto st271 - case 44: - goto st35 - case 61: - goto tr135 - case 92: - goto st99 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st271 - } - goto st44 - st408: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof408 - } - st_case_408: - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st408 - case 13: - goto st32 - case 32: - goto st271 - case 44: - goto tr130 - case 45: - goto tr642 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr643 - } - case ( m.data)[( m.p)] >= 9: - goto st271 - } - goto st44 -tr642: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st98 - st98: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof98 - } - st_case_98: -//line plugins/parsers/influx/machine.go:13779 - switch ( m.data)[( m.p)] { - case 32: - goto tr130 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr130 - } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st409 - } - default: - goto tr130 - } - goto st44 -tr643: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st409 - st409: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof409 - } - st_case_409: -//line plugins/parsers/influx/machine.go:13814 - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st411 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 -tr644: - ( m.cs) = 410 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st410: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof410 - } - st_case_410: -//line plugins/parsers/influx/machine.go:13858 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto st410 - case 13: - goto st32 - case 32: - goto st276 - case 44: - goto tr45 - case 61: - goto tr135 - case 92: - goto st99 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st276 - } - goto st44 -tr133: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st99 - st99: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof99 - } - st_case_99: -//line plugins/parsers/influx/machine.go:13890 - if ( m.data)[( m.p)] == 92 { - goto st100 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st44 - st100: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof100 - } - st_case_100: -//line plugins/parsers/influx/machine.go:13911 - switch ( m.data)[( m.p)] { - case 32: - goto tr45 - case 44: - goto tr45 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st44 - st411: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof411 - } - st_case_411: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st412 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st412: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof412 - } - st_case_412: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st413 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st413: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof413 - } - st_case_413: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st414 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st414: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof414 - } - st_case_414: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st415 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st415: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof415 - } - st_case_415: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st416 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st416: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof416 - } - st_case_416: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st417 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st417: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof417 - } - st_case_417: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st418 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st418: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof418 - } - st_case_418: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st419 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st419: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof419 - } - st_case_419: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st420 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st420: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof420 - } - st_case_420: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st421 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st421: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof421 - } - st_case_421: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st422 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st422: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof422 - } - st_case_422: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st423 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st423: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof423 - } - st_case_423: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st424 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st424: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof424 - } - st_case_424: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st425 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st425: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof425 - } - st_case_425: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st426 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st426: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof426 - } - st_case_426: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st427 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st427: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof427 - } - st_case_427: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st428 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto st44 - st428: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof428 - } - st_case_428: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 11: - goto tr644 - case 13: - goto tr470 - case 32: - goto tr467 - case 44: - goto tr130 - case 61: - goto tr135 - case 92: - goto st99 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr467 - } - goto st44 -tr260: -//line plugins/parsers/influx/machine.go.rl:95 - - m.key = m.text() - -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st101 - st101: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof101 - } - st_case_101: -//line plugins/parsers/influx/machine.go:14481 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr264 - case 44: - goto st6 - case 45: - goto tr265 - case 46: - goto tr266 - case 48: - goto tr267 - case 61: - goto st6 - case 70: - goto tr269 - case 84: - goto tr270 - case 92: - goto tr227 - case 102: - goto tr271 - case 116: - goto tr272 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr268 - } - case ( m.data)[( m.p)] >= 12: - goto st6 - } - goto tr226 -tr264: - ( m.cs) = 429 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st429: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof429 - } - st_case_429: -//line plugins/parsers/influx/machine.go:14543 - switch ( m.data)[( m.p)] { - case 10: - goto tr665 - case 11: - goto tr666 - case 13: - goto tr667 - case 32: - goto tr664 - case 34: - goto tr149 - case 44: - goto tr668 - case 61: - goto tr23 - case 92: - goto tr151 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr664 - } - goto tr146 -tr854: - ( m.cs) = 430 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr697: - ( m.cs) = 430 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr664: - ( m.cs) = 430 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr850: - ( m.cs) = 430 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr725: - ( m.cs) = 430 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr736: - ( m.cs) = 430 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr742: - ( m.cs) = 430 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr748: - ( m.cs) = 430 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr882: - ( m.cs) = 430 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr886: - ( m.cs) = 430 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr890: - ( m.cs) = 430 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st430: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof430 - } - st_case_430: -//line plugins/parsers/influx/machine.go:14798 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr670 - case 13: - goto st102 - case 32: - goto st430 - case 34: - goto tr95 - case 44: - goto st6 - case 45: - goto tr671 - case 61: - goto st6 - case 92: - goto tr161 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr672 - } - case ( m.data)[( m.p)] >= 9: - goto st430 - } - goto tr158 -tr670: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st431 - st431: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof431 - } - st_case_431: -//line plugins/parsers/influx/machine.go:14839 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr670 - case 13: - goto st102 - case 32: - goto st430 - case 34: - goto tr95 - case 44: - goto st6 - case 45: - goto tr671 - case 61: - goto tr163 - case 92: - goto tr161 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr672 - } - case ( m.data)[( m.p)] >= 9: - goto st430 - } - goto tr158 -tr667: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st102 -tr676: - ( m.cs) = 102 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr533: - ( m.cs) = 102 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr739: - ( m.cs) = 102 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr745: - ( m.cs) = 102 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr751: - ( m.cs) = 102 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st102: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof102 - } - st_case_102: -//line plugins/parsers/influx/machine.go:14945 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 34: - goto tr29 - case 92: - goto st73 - } - goto st6 -tr671: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st103 - st103: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof103 - } - st_case_103: -//line plugins/parsers/influx/machine.go:14966 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st432 - } - case ( m.data)[( m.p)] >= 12: - goto st6 - } - goto st49 -tr672: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st432 - st432: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof432 - } - st_case_432: -//line plugins/parsers/influx/machine.go:15003 - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st435 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 -tr673: - ( m.cs) = 433 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st433: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof433 - } - st_case_433: -//line plugins/parsers/influx/machine.go:15049 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 13: - goto st102 - case 32: - goto st433 - case 34: - goto tr29 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st433 - } - goto st6 -tr675: - ( m.cs) = 434 -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st434: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof434 - } - st_case_434: -//line plugins/parsers/influx/machine.go:15084 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto st434 - case 13: - goto st102 - case 32: - goto st433 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st433 - } - goto st49 -tr161: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st104 - st104: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof104 - } - st_case_104: -//line plugins/parsers/influx/machine.go:15118 - switch ( m.data)[( m.p)] { - case 34: - goto st49 - case 92: - goto st49 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st3 - st435: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof435 - } - st_case_435: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st436 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st436: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof436 - } - st_case_436: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st437 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st437: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof437 - } - st_case_437: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st438 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st438: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof438 - } - st_case_438: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st439 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st439: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof439 - } - st_case_439: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st440 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st440: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof440 - } - st_case_440: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st441 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st441: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof441 - } - st_case_441: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st442 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st442: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof442 - } - st_case_442: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st443 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st443: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof443 - } - st_case_443: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st444 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st444: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof444 - } - st_case_444: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st445 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st445: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof445 - } - st_case_445: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st446 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st446: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof446 - } - st_case_446: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st447 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st447: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof447 - } - st_case_447: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st448 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st448: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof448 - } - st_case_448: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st449 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st449: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof449 - } - st_case_449: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st450 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st450: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof450 - } - st_case_450: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st451 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st451: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof451 - } - st_case_451: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st452 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st49 - st452: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof452 - } - st_case_452: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr675 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto st104 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr673 - } - goto st49 -tr666: - ( m.cs) = 453 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr726: - ( m.cs) = 453 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr738: - ( m.cs) = 453 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr744: - ( m.cs) = 453 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr750: - ( m.cs) = 453 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st453: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof453 - } - st_case_453: -//line plugins/parsers/influx/machine.go:15819 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr698 - case 13: - goto st102 - case 32: - goto tr697 - case 34: - goto tr202 - case 44: - goto tr156 - case 45: - goto tr699 - case 61: - goto st6 - case 92: - goto tr203 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr700 - } - case ( m.data)[( m.p)] >= 9: - goto tr697 - } - goto tr200 -tr698: - ( m.cs) = 454 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st454: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof454 - } - st_case_454: -//line plugins/parsers/influx/machine.go:15871 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr698 - case 13: - goto st102 - case 32: - goto tr697 - case 34: - goto tr202 - case 44: - goto tr156 - case 45: - goto tr699 - case 61: - goto tr163 - case 92: - goto tr203 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr700 - } - case ( m.data)[( m.p)] >= 9: - goto tr697 - } - goto tr200 -tr699: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st105 - st105: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof105 - } - st_case_105: -//line plugins/parsers/influx/machine.go:15912 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr205 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st455 - } - case ( m.data)[( m.p)] >= 9: - goto tr153 - } - goto st65 -tr700: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st455 - st455: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof455 - } - st_case_455: -//line plugins/parsers/influx/machine.go:15951 - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st459 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 -tr861: - ( m.cs) = 456 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr706: - ( m.cs) = 456 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr858: - ( m.cs) = 456 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr701: - ( m.cs) = 456 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st456: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof456 - } - st_case_456: -//line plugins/parsers/influx/machine.go:16056 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr705 - case 13: - goto st102 - case 32: - goto st456 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr161 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st456 - } - goto tr158 -tr705: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st457 - st457: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof457 - } - st_case_457: -//line plugins/parsers/influx/machine.go:16090 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr705 - case 13: - goto st102 - case 32: - goto st456 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto tr161 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st456 - } - goto tr158 -tr707: - ( m.cs) = 458 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr702: - ( m.cs) = 458 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st458: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof458 - } - st_case_458: -//line plugins/parsers/influx/machine.go:16158 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr707 - case 13: - goto st102 - case 32: - goto tr706 - case 34: - goto tr202 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto tr203 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr706 - } - goto tr200 - st459: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof459 - } - st_case_459: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st460 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st460: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof460 - } - st_case_460: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st461 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st461: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof461 - } - st_case_461: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st462 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st462: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof462 - } - st_case_462: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st463 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st463: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof463 - } - st_case_463: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st464 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st464: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof464 - } - st_case_464: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st465 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st465: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof465 - } - st_case_465: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st466 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st466: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof466 - } - st_case_466: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st467 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st467: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof467 - } - st_case_467: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st468 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st468: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof468 - } - st_case_468: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st469 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st469: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof469 - } - st_case_469: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st470 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st470: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof470 - } - st_case_470: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st471 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st471: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof471 - } - st_case_471: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st472 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st472: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof472 - } - st_case_472: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st473 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st473: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof473 - } - st_case_473: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st474 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st474: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof474 - } - st_case_474: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st475 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st475: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof475 - } - st_case_475: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st476 - } - case ( m.data)[( m.p)] >= 9: - goto tr701 - } - goto st65 - st476: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof476 - } - st_case_476: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr702 - case 13: - goto tr676 - case 32: - goto tr701 - case 34: - goto tr206 - case 44: - goto tr156 - case 61: - goto tr163 - case 92: - goto st67 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr701 - } - goto st65 -tr668: - ( m.cs) = 106 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr852: - ( m.cs) = 106 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr727: - ( m.cs) = 106 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr740: - ( m.cs) = 106 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr746: - ( m.cs) = 106 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr752: - ( m.cs) = 106 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr884: - ( m.cs) = 106 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr888: - ( m.cs) = 106 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr893: - ( m.cs) = 106 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st106: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof106 - } - st_case_106: -//line plugins/parsers/influx/machine.go:16958 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr256 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr277 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr276 -tr276: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st107 - st107: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof107 - } - st_case_107: -//line plugins/parsers/influx/machine.go:16990 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr259 - case 44: - goto st6 - case 61: - goto tr279 - case 92: - goto st121 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st107 -tr279: -//line plugins/parsers/influx/machine.go.rl:95 - - m.key = m.text() - -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st108 - st108: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof108 - } - st_case_108: -//line plugins/parsers/influx/machine.go:17026 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr264 - case 44: - goto st6 - case 45: - goto tr281 - case 46: - goto tr282 - case 48: - goto tr283 - case 61: - goto st6 - case 70: - goto tr285 - case 84: - goto tr286 - case 92: - goto tr151 - case 102: - goto tr287 - case 116: - goto tr288 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr284 - } - case ( m.data)[( m.p)] >= 12: - goto st6 - } - goto tr146 -tr281: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st109 - st109: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof109 - } - st_case_109: -//line plugins/parsers/influx/machine.go:17077 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 46: - goto st110 - case 48: - goto st481 - case 61: - goto st6 - case 92: - goto st62 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st484 - } - case ( m.data)[( m.p)] >= 9: - goto tr153 - } - goto st47 -tr282: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st110 - st110: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof110 - } - st_case_110: -//line plugins/parsers/influx/machine.go:17120 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st477 - } - case ( m.data)[( m.p)] >= 9: - goto tr153 - } - goto st47 - st477: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof477 - } - st_case_477: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr726 - case 13: - goto tr533 - case 32: - goto tr725 - case 34: - goto tr155 - case 44: - goto tr727 - case 61: - goto st6 - case 69: - goto st111 - case 92: - goto st62 - case 101: - goto st111 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st477 - } - case ( m.data)[( m.p)] >= 9: - goto tr725 - } - goto st47 - st111: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof111 - } - st_case_111: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr293 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st480 - } - default: - goto st112 - } - goto st47 -tr293: - ( m.cs) = 478 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st478: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof478 - } - st_case_478: -//line plugins/parsers/influx/machine.go:17238 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr564 - case 13: - goto st32 - case 32: - goto tr563 - case 44: - goto tr565 - case 61: - goto tr130 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st479 - } - case ( m.data)[( m.p)] >= 9: - goto tr563 - } - goto st15 - st479: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof479 - } - st_case_479: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 61: - goto tr130 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st479 - } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 - st112: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof112 - } - st_case_112: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st480 - } - case ( m.data)[( m.p)] >= 9: - goto tr153 - } - goto st47 - st480: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof480 - } - st_case_480: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr726 - case 13: - goto tr533 - case 32: - goto tr725 - case 34: - goto tr155 - case 44: - goto tr727 - case 61: - goto st6 - case 92: - goto st62 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st480 - } - case ( m.data)[( m.p)] >= 9: - goto tr725 - } - goto st47 - st481: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof481 - } - st_case_481: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr726 - case 13: - goto tr533 - case 32: - goto tr725 - case 34: - goto tr155 - case 44: - goto tr727 - case 46: - goto st477 - case 61: - goto st6 - case 69: - goto st111 - case 92: - goto st62 - case 101: - goto st111 - case 105: - goto st483 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st482 - } - case ( m.data)[( m.p)] >= 9: - goto tr725 - } - goto st47 - st482: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof482 - } - st_case_482: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr726 - case 13: - goto tr533 - case 32: - goto tr725 - case 34: - goto tr155 - case 44: - goto tr727 - case 46: - goto st477 - case 61: - goto st6 - case 69: - goto st111 - case 92: - goto st62 - case 101: - goto st111 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st482 - } - case ( m.data)[( m.p)] >= 9: - goto tr725 - } - goto st47 - st483: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof483 - } - st_case_483: - switch ( m.data)[( m.p)] { - case 10: - goto tr737 - case 11: - goto tr738 - case 13: - goto tr739 - case 32: - goto tr736 - case 34: - goto tr155 - case 44: - goto tr740 - case 61: - goto st6 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr736 - } - goto st47 - st484: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof484 - } - st_case_484: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr726 - case 13: - goto tr533 - case 32: - goto tr725 - case 34: - goto tr155 - case 44: - goto tr727 - case 46: - goto st477 - case 61: - goto st6 - case 69: - goto st111 - case 92: - goto st62 - case 101: - goto st111 - case 105: - goto st483 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st484 - } - case ( m.data)[( m.p)] >= 9: - goto tr725 - } - goto st47 -tr283: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st485 - st485: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof485 - } - st_case_485: -//line plugins/parsers/influx/machine.go:17514 - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr726 - case 13: - goto tr533 - case 32: - goto tr725 - case 34: - goto tr155 - case 44: - goto tr727 - case 46: - goto st477 - case 61: - goto st6 - case 69: - goto st111 - case 92: - goto st62 - case 101: - goto st111 - case 105: - goto st483 - case 117: - goto st486 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st482 - } - case ( m.data)[( m.p)] >= 9: - goto tr725 - } - goto st47 - st486: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof486 - } - st_case_486: - switch ( m.data)[( m.p)] { - case 10: - goto tr743 - case 11: - goto tr744 - case 13: - goto tr745 - case 32: - goto tr742 - case 34: - goto tr155 - case 44: - goto tr746 - case 61: - goto st6 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr742 - } - goto st47 -tr284: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st487 - st487: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof487 - } - st_case_487: -//line plugins/parsers/influx/machine.go:17590 - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr726 - case 13: - goto tr533 - case 32: - goto tr725 - case 34: - goto tr155 - case 44: - goto tr727 - case 46: - goto st477 - case 61: - goto st6 - case 69: - goto st111 - case 92: - goto st62 - case 101: - goto st111 - case 105: - goto st483 - case 117: - goto st486 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st487 - } - case ( m.data)[( m.p)] >= 9: - goto tr725 - } - goto st47 -tr285: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st488 - st488: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof488 - } - st_case_488: -//line plugins/parsers/influx/machine.go:17639 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 11: - goto tr750 - case 13: - goto tr751 - case 32: - goto tr748 - case 34: - goto tr155 - case 44: - goto tr752 - case 61: - goto st6 - case 65: - goto st113 - case 92: - goto st62 - case 97: - goto st116 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr748 - } - goto st47 - st113: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof113 - } - st_case_113: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 76: - goto st114 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 - st114: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof114 - } - st_case_114: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 83: - goto st115 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 - st115: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof115 - } - st_case_115: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 69: - goto st489 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 - st489: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof489 - } - st_case_489: - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 11: - goto tr750 - case 13: - goto tr751 - case 32: - goto tr748 - case 34: - goto tr155 - case 44: - goto tr752 - case 61: - goto st6 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr748 - } - goto st47 - st116: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof116 - } - st_case_116: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - case 108: - goto st117 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 - st117: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof117 - } - st_case_117: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - case 115: - goto st118 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 - st118: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof118 - } - st_case_118: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - case 101: - goto st489 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 -tr286: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st490 - st490: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof490 - } - st_case_490: -//line plugins/parsers/influx/machine.go:17878 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 11: - goto tr750 - case 13: - goto tr751 - case 32: - goto tr748 - case 34: - goto tr155 - case 44: - goto tr752 - case 61: - goto st6 - case 82: - goto st119 - case 92: - goto st62 - case 114: - goto st120 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr748 - } - goto st47 - st119: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof119 - } - st_case_119: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 85: - goto st115 - case 92: - goto st62 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 - st120: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof120 - } - st_case_120: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr154 - case 13: - goto st6 - case 32: - goto tr153 - case 34: - goto tr155 - case 44: - goto tr156 - case 61: - goto st6 - case 92: - goto st62 - case 117: - goto st118 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr153 - } - goto st47 -tr287: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st491 - st491: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof491 - } - st_case_491: -//line plugins/parsers/influx/machine.go:17974 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 11: - goto tr750 - case 13: - goto tr751 - case 32: - goto tr748 - case 34: - goto tr155 - case 44: - goto tr752 - case 61: - goto st6 - case 92: - goto st62 - case 97: - goto st116 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr748 - } - goto st47 -tr288: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st492 - st492: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof492 - } - st_case_492: -//line plugins/parsers/influx/machine.go:18010 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 11: - goto tr750 - case 13: - goto tr751 - case 32: - goto tr748 - case 34: - goto tr155 - case 44: - goto tr752 - case 61: - goto st6 - case 92: - goto st62 - case 114: - goto st120 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr748 - } - goto st47 -tr277: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st121 - st121: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof121 - } - st_case_121: -//line plugins/parsers/influx/machine.go:18046 - switch ( m.data)[( m.p)] { - case 34: - goto st107 - case 92: - goto st122 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st44 - st122: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof122 - } - st_case_122: -//line plugins/parsers/influx/machine.go:18070 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr259 - case 44: - goto st6 - case 61: - goto tr279 - case 92: - goto st121 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st107 -tr265: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st123 - st123: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof123 - } - st_case_123: -//line plugins/parsers/influx/machine.go:18102 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 46: - goto st124 - case 48: - goto st517 - case 61: - goto st6 - case 92: - goto st85 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st520 - } - case ( m.data)[( m.p)] >= 9: - goto tr229 - } - goto st79 -tr266: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st124 - st124: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof124 - } - st_case_124: -//line plugins/parsers/influx/machine.go:18145 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st493 - } - case ( m.data)[( m.p)] >= 9: - goto tr229 - } - goto st79 - st493: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof493 - } - st_case_493: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 61: - goto st6 - case 69: - goto st126 - case 92: - goto st85 - case 101: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st493 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 -tr759: - ( m.cs) = 494 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr792: - ( m.cs) = 494 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr798: - ( m.cs) = 494 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr804: - ( m.cs) = 494 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st494: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof494 - } - st_case_494: -//line plugins/parsers/influx/machine.go:18306 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr763 - case 13: - goto st72 - case 32: - goto tr762 - case 34: - goto tr202 - case 44: - goto tr231 - case 45: - goto tr764 - case 61: - goto st6 - case 92: - goto tr235 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr765 - } - case ( m.data)[( m.p)] >= 9: - goto tr762 - } - goto tr233 -tr763: - ( m.cs) = 495 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st495: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof495 - } - st_case_495: -//line plugins/parsers/influx/machine.go:18358 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr763 - case 13: - goto st72 - case 32: - goto tr762 - case 34: - goto tr202 - case 44: - goto tr231 - case 45: - goto tr764 - case 61: - goto tr99 - case 92: - goto tr235 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr765 - } - case ( m.data)[( m.p)] >= 9: - goto tr762 - } - goto tr233 -tr764: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st125 - st125: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof125 - } - st_case_125: -//line plugins/parsers/influx/machine.go:18399 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr237 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st496 - } - case ( m.data)[( m.p)] >= 9: - goto tr229 - } - goto st81 -tr765: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st496 - st496: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof496 - } - st_case_496: -//line plugins/parsers/influx/machine.go:18438 - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st498 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 -tr770: - ( m.cs) = 497 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr767: - ( m.cs) = 497 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st497: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof497 - } - st_case_497: -//line plugins/parsers/influx/machine.go:18511 - switch ( m.data)[( m.p)] { - case 10: - goto tr219 - case 11: - goto tr770 - case 13: - goto st72 - case 32: - goto tr769 - case 34: - goto tr202 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto tr235 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr769 - } - goto tr233 - st498: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof498 - } - st_case_498: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st499 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st499: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof499 - } - st_case_499: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st500 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st500: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof500 - } - st_case_500: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st501 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st501: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof501 - } - st_case_501: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st502 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st502: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof502 - } - st_case_502: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st503 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st503: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof503 - } - st_case_503: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st504 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st504: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof504 - } - st_case_504: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st505 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st505: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof505 - } - st_case_505: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st506 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st506: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof506 - } - st_case_506: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st507 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st507: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof507 - } - st_case_507: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st508 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st508: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof508 - } - st_case_508: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st509 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st509: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof509 - } - st_case_509: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st510 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st510: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof510 - } - st_case_510: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st511 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st511: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof511 - } - st_case_511: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st512 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st512: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof512 - } - st_case_512: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st513 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st513: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof513 - } - st_case_513: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st514 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st514: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof514 - } - st_case_514: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st515 - } - case ( m.data)[( m.p)] >= 9: - goto tr766 - } - goto st81 - st515: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof515 - } - st_case_515: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr767 - case 13: - goto tr602 - case 32: - goto tr766 - case 34: - goto tr206 - case 44: - goto tr231 - case 61: - goto tr99 - case 92: - goto st83 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr766 - } - goto st81 - st126: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof126 - } - st_case_126: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr293 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st516 - } - default: - goto st127 - } - goto st79 - st127: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof127 - } - st_case_127: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st516 - } - case ( m.data)[( m.p)] >= 9: - goto tr229 - } - goto st79 - st516: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof516 - } - st_case_516: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 61: - goto st6 - case 92: - goto st85 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st516 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 - st517: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof517 - } - st_case_517: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 46: - goto st493 - case 61: - goto st6 - case 69: - goto st126 - case 92: - goto st85 - case 101: - goto st126 - case 105: - goto st519 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st518 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 - st518: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof518 - } - st_case_518: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 46: - goto st493 - case 61: - goto st6 - case 69: - goto st126 - case 92: - goto st85 - case 101: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st518 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 - st519: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof519 - } - st_case_519: - switch ( m.data)[( m.p)] { - case 10: - goto tr791 - case 11: - goto tr792 - case 13: - goto tr793 - case 32: - goto tr790 - case 34: - goto tr155 - case 44: - goto tr794 - case 61: - goto st6 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr790 - } - goto st79 - st520: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof520 - } - st_case_520: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 46: - goto st493 - case 61: - goto st6 - case 69: - goto st126 - case 92: - goto st85 - case 101: - goto st126 - case 105: - goto st519 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st520 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 -tr267: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st521 - st521: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof521 - } - st_case_521: -//line plugins/parsers/influx/machine.go:19361 - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 46: - goto st493 - case 61: - goto st6 - case 69: - goto st126 - case 92: - goto st85 - case 101: - goto st126 - case 105: - goto st519 - case 117: - goto st522 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st518 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 - st522: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof522 - } - st_case_522: - switch ( m.data)[( m.p)] { - case 10: - goto tr797 - case 11: - goto tr798 - case 13: - goto tr799 - case 32: - goto tr796 - case 34: - goto tr155 - case 44: - goto tr800 - case 61: - goto st6 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr796 - } - goto st79 -tr268: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st523 - st523: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof523 - } - st_case_523: -//line plugins/parsers/influx/machine.go:19437 - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 11: - goto tr759 - case 13: - goto tr638 - case 32: - goto tr757 - case 34: - goto tr155 - case 44: - goto tr760 - case 46: - goto st493 - case 61: - goto st6 - case 69: - goto st126 - case 92: - goto st85 - case 101: - goto st126 - case 105: - goto st519 - case 117: - goto st522 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st523 - } - case ( m.data)[( m.p)] >= 9: - goto tr757 - } - goto st79 -tr269: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st524 - st524: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof524 - } - st_case_524: -//line plugins/parsers/influx/machine.go:19486 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr804 - case 13: - goto tr805 - case 32: - goto tr802 - case 34: - goto tr155 - case 44: - goto tr806 - case 61: - goto st6 - case 65: - goto st128 - case 92: - goto st85 - case 97: - goto st131 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr802 - } - goto st79 - st128: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof128 - } - st_case_128: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 76: - goto st129 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 - st129: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof129 - } - st_case_129: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 83: - goto st130 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 - st130: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof130 - } - st_case_130: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 69: - goto st525 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 - st525: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof525 - } - st_case_525: - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr804 - case 13: - goto tr805 - case 32: - goto tr802 - case 34: - goto tr155 - case 44: - goto tr806 - case 61: - goto st6 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr802 - } - goto st79 - st131: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof131 - } - st_case_131: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - case 108: - goto st132 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 - st132: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof132 - } - st_case_132: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - case 115: - goto st133 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 - st133: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof133 - } - st_case_133: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - case 101: - goto st525 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 -tr270: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st526 - st526: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof526 - } - st_case_526: -//line plugins/parsers/influx/machine.go:19725 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr804 - case 13: - goto tr805 - case 32: - goto tr802 - case 34: - goto tr155 - case 44: - goto tr806 - case 61: - goto st6 - case 82: - goto st134 - case 92: - goto st85 - case 114: - goto st135 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr802 - } - goto st79 - st134: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof134 - } - st_case_134: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 85: - goto st130 - case 92: - goto st85 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 - st135: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof135 - } - st_case_135: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr230 - case 13: - goto st6 - case 32: - goto tr229 - case 34: - goto tr155 - case 44: - goto tr231 - case 61: - goto st6 - case 92: - goto st85 - case 117: - goto st133 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr229 - } - goto st79 -tr271: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st527 - st527: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof527 - } - st_case_527: -//line plugins/parsers/influx/machine.go:19821 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr804 - case 13: - goto tr805 - case 32: - goto tr802 - case 34: - goto tr155 - case 44: - goto tr806 - case 61: - goto st6 - case 92: - goto st85 - case 97: - goto st131 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr802 - } - goto st79 -tr272: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st528 - st528: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof528 - } - st_case_528: -//line plugins/parsers/influx/machine.go:19857 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr804 - case 13: - goto tr805 - case 32: - goto tr802 - case 34: - goto tr155 - case 44: - goto tr806 - case 61: - goto st6 - case 92: - goto st85 - case 114: - goto st135 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr802 - } - goto st79 -tr257: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st136 - st136: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof136 - } - st_case_136: -//line plugins/parsers/influx/machine.go:19893 - switch ( m.data)[( m.p)] { - case 34: - goto st97 - case 92: - goto st137 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr45 - } - case ( m.data)[( m.p)] >= 9: - goto tr45 - } - goto st44 - st137: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof137 - } - st_case_137: -//line plugins/parsers/influx/machine.go:19917 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr259 - case 44: - goto st6 - case 61: - goto tr260 - case 92: - goto st136 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st97 - st138: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof138 - } - st_case_138: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr315 - case 44: - goto tr90 - case 92: - goto st140 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st531 - } - default: - goto st139 - } - goto st29 -tr315: - ( m.cs) = 529 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st529: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof529 - } - st_case_529: -//line plugins/parsers/influx/machine.go:19990 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr634 - case 13: - goto st32 - case 32: - goto tr499 - case 44: - goto tr501 - case 92: - goto st94 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st530 - } - case ( m.data)[( m.p)] >= 9: - goto tr499 - } - goto st1 - st530: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof530 - } - st_case_530: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 92: - goto st94 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st530 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 - st139: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof139 - } - st_case_139: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st531 - } - case ( m.data)[( m.p)] >= 9: - goto tr87 - } - goto st29 - st531: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof531 - } - st_case_531: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 92: - goto st140 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st531 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 -tr85: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st140 - st140: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof140 - } - st_case_140: -//line plugins/parsers/influx/machine.go:20113 - switch ( m.data)[( m.p)] { - case 34: - goto st29 - case 92: - goto st29 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st1 - st532: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof532 - } - st_case_532: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 46: - goto st406 - case 69: - goto st138 - case 92: - goto st140 - case 101: - goto st138 - case 105: - goto st534 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st533 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 - st533: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof533 - } - st_case_533: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 46: - goto st406 - case 69: - goto st138 - case 92: - goto st140 - case 101: - goto st138 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st533 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 - st534: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof534 - } - st_case_534: - switch ( m.data)[( m.p)] { - case 10: - goto tr817 - case 11: - goto tr818 - case 13: - goto tr793 - case 32: - goto tr816 - case 34: - goto tr89 - case 44: - goto tr819 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr816 - } - goto st29 - st535: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof535 - } - st_case_535: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 46: - goto st406 - case 69: - goto st138 - case 92: - goto st140 - case 101: - goto st138 - case 105: - goto st534 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st535 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 -tr245: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st536 - st536: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof536 - } - st_case_536: -//line plugins/parsers/influx/machine.go:20277 - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 46: - goto st406 - case 69: - goto st138 - case 92: - goto st140 - case 101: - goto st138 - case 105: - goto st534 - case 117: - goto st537 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st533 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 - st537: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof537 - } - st_case_537: - switch ( m.data)[( m.p)] { - case 10: - goto tr822 - case 11: - goto tr823 - case 13: - goto tr799 - case 32: - goto tr821 - case 34: - goto tr89 - case 44: - goto tr824 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr821 - } - goto st29 -tr246: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st538 - st538: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof538 - } - st_case_538: -//line plugins/parsers/influx/machine.go:20349 - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 11: - goto tr637 - case 13: - goto tr638 - case 32: - goto tr635 - case 34: - goto tr89 - case 44: - goto tr639 - case 46: - goto st406 - case 69: - goto st138 - case 92: - goto st140 - case 101: - goto st138 - case 105: - goto st534 - case 117: - goto st537 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st538 - } - case ( m.data)[( m.p)] >= 9: - goto tr635 - } - goto st29 -tr247: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st539 - st539: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof539 - } - st_case_539: -//line plugins/parsers/influx/machine.go:20396 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr827 - case 13: - goto tr805 - case 32: - goto tr826 - case 34: - goto tr89 - case 44: - goto tr828 - case 65: - goto st141 - case 92: - goto st140 - case 97: - goto st144 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr826 - } - goto st29 - st141: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof141 - } - st_case_141: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 76: - goto st142 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 - st142: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof142 - } - st_case_142: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 83: - goto st143 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 - st143: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof143 - } - st_case_143: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 69: - goto st540 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 - st540: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof540 - } - st_case_540: - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr827 - case 13: - goto tr805 - case 32: - goto tr826 - case 34: - goto tr89 - case 44: - goto tr828 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr826 - } - goto st29 - st144: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof144 - } - st_case_144: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - case 108: - goto st145 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 - st145: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof145 - } - st_case_145: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - case 115: - goto st146 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 - st146: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof146 - } - st_case_146: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - case 101: - goto st540 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 -tr248: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st541 - st541: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof541 - } - st_case_541: -//line plugins/parsers/influx/machine.go:20619 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr827 - case 13: - goto tr805 - case 32: - goto tr826 - case 34: - goto tr89 - case 44: - goto tr828 - case 82: - goto st147 - case 92: - goto st140 - case 114: - goto st148 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr826 - } - goto st29 - st147: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof147 - } - st_case_147: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 85: - goto st143 - case 92: - goto st140 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 - st148: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof148 - } - st_case_148: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr88 - case 13: - goto st6 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st140 - case 117: - goto st146 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr87 - } - goto st29 -tr249: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st542 - st542: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof542 - } - st_case_542: -//line plugins/parsers/influx/machine.go:20709 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr827 - case 13: - goto tr805 - case 32: - goto tr826 - case 34: - goto tr89 - case 44: - goto tr828 - case 92: - goto st140 - case 97: - goto st144 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr826 - } - goto st29 -tr250: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st543 - st543: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof543 - } - st_case_543: -//line plugins/parsers/influx/machine.go:20743 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 11: - goto tr827 - case 13: - goto tr805 - case 32: - goto tr826 - case 34: - goto tr89 - case 44: - goto tr828 - case 92: - goto st140 - case 114: - goto st148 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr826 - } - goto st29 - st544: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof544 - } - st_case_544: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st545 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st545: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof545 - } - st_case_545: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st546 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st546: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof546 - } - st_case_546: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st547 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st547: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof547 - } - st_case_547: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st548 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st548: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof548 - } - st_case_548: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st549 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st549: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof549 - } - st_case_549: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st550 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st550: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof550 - } - st_case_550: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st551 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st551: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof551 - } - st_case_551: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st552 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st552: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof552 - } - st_case_552: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st553 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st553: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof553 - } - st_case_553: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st554 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st554: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof554 - } - st_case_554: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st555 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st555: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof555 - } - st_case_555: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st556 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st556: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof556 - } - st_case_556: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st557 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st557: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof557 - } - st_case_557: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st558 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st558: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof558 - } - st_case_558: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st559 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st559: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof559 - } - st_case_559: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st560 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st560: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof560 - } - st_case_560: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st561 - } - case ( m.data)[( m.p)] >= 9: - goto tr627 - } - goto st40 - st561: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof561 - } - st_case_561: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 11: - goto tr628 - case 13: - goto tr602 - case 32: - goto tr627 - case 34: - goto tr126 - case 44: - goto tr90 - case 61: - goto tr127 - case 92: - goto st92 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr627 - } - goto st40 -tr211: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st149 - st149: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof149 - } - st_case_149: -//line plugins/parsers/influx/machine.go:21348 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 46: - goto st150 - case 48: - goto st586 - case 92: - goto st155 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st589 - } - case ( m.data)[( m.p)] >= 9: - goto tr178 - } - goto st53 -tr212: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st150 - st150: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof150 - } - st_case_150: -//line plugins/parsers/influx/machine.go:21389 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st562 - } - case ( m.data)[( m.p)] >= 9: - goto tr178 - } - goto st53 - st562: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof562 - } - st_case_562: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 69: - goto st153 - case 92: - goto st155 - case 101: - goto st153 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st562 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 -tr851: - ( m.cs) = 563 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr883: - ( m.cs) = 563 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr887: - ( m.cs) = 563 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr892: - ( m.cs) = 563 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st563: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof563 - } - st_case_563: -//line plugins/parsers/influx/machine.go:21546 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr855 - case 13: - goto st102 - case 32: - goto tr854 - case 34: - goto tr122 - case 44: - goto tr180 - case 45: - goto tr856 - case 61: - goto st53 - case 92: - goto tr184 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr857 - } - case ( m.data)[( m.p)] >= 9: - goto tr854 - } - goto tr182 -tr855: - ( m.cs) = 564 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again - st564: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof564 - } - st_case_564: -//line plugins/parsers/influx/machine.go:21598 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr855 - case 13: - goto st102 - case 32: - goto tr854 - case 34: - goto tr122 - case 44: - goto tr180 - case 45: - goto tr856 - case 61: - goto tr187 - case 92: - goto tr184 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr857 - } - case ( m.data)[( m.p)] >= 9: - goto tr854 - } - goto tr182 -tr856: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st151 - st151: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof151 - } - st_case_151: -//line plugins/parsers/influx/machine.go:21639 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr186 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st565 - } - case ( m.data)[( m.p)] >= 9: - goto tr178 - } - goto st55 -tr857: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st565 - st565: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof565 - } - st_case_565: -//line plugins/parsers/influx/machine.go:21678 - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st567 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 -tr862: - ( m.cs) = 566 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto _again -tr859: - ( m.cs) = 566 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st566: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof566 - } - st_case_566: -//line plugins/parsers/influx/machine.go:21751 - switch ( m.data)[( m.p)] { - case 10: - goto tr273 - case 11: - goto tr862 - case 13: - goto st102 - case 32: - goto tr861 - case 34: - goto tr122 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto tr184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr861 - } - goto tr182 -tr184: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st152 - st152: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof152 - } - st_case_152: -//line plugins/parsers/influx/machine.go:21785 - switch ( m.data)[( m.p)] { - case 34: - goto st55 - case 92: - goto st55 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st10 - st567: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof567 - } - st_case_567: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st568 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st568: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof568 - } - st_case_568: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st569 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st569: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof569 - } - st_case_569: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st570 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st570: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof570 - } - st_case_570: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st571 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st571: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof571 - } - st_case_571: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st572 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st572: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof572 - } - st_case_572: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st573 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st573: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof573 - } - st_case_573: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st574 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st574: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof574 - } - st_case_574: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st575 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st575: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof575 - } - st_case_575: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st576 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st576: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof576 - } - st_case_576: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st577 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st577: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof577 - } - st_case_577: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st578 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st578: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof578 - } - st_case_578: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st579 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st579: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof579 - } - st_case_579: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st580 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st580: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof580 - } - st_case_580: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st581 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st581: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof581 - } - st_case_581: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st582 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st582: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof582 - } - st_case_582: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st583 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st583: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof583 - } - st_case_583: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st584 - } - case ( m.data)[( m.p)] >= 9: - goto tr858 - } - goto st55 - st584: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof584 - } - st_case_584: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 11: - goto tr859 - case 13: - goto tr676 - case 32: - goto tr858 - case 34: - goto tr126 - case 44: - goto tr180 - case 61: - goto tr187 - case 92: - goto st152 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr858 - } - goto st55 - st153: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof153 - } - st_case_153: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr315 - case 44: - goto tr180 - case 92: - goto st155 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st585 - } - default: - goto st154 - } - goto st53 - st154: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof154 - } - st_case_154: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st585 - } - case ( m.data)[( m.p)] >= 9: - goto tr178 - } - goto st53 - st585: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof585 - } - st_case_585: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 92: - goto st155 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st585 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 -tr338: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st155 - st155: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof155 - } - st_case_155: -//line plugins/parsers/influx/machine.go:22477 - switch ( m.data)[( m.p)] { - case 34: - goto st53 - case 92: - goto st53 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st1 - st586: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof586 - } - st_case_586: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 46: - goto st562 - case 69: - goto st153 - case 92: - goto st155 - case 101: - goto st153 - case 105: - goto st588 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st587 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 - st587: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof587 - } - st_case_587: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 46: - goto st562 - case 69: - goto st153 - case 92: - goto st155 - case 101: - goto st153 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st587 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 - st588: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof588 - } - st_case_588: - switch ( m.data)[( m.p)] { - case 10: - goto tr737 - case 11: - goto tr883 - case 13: - goto tr739 - case 32: - goto tr882 - case 34: - goto tr89 - case 44: - goto tr884 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr882 - } - goto st53 - st589: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof589 - } - st_case_589: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 46: - goto st562 - case 69: - goto st153 - case 92: - goto st155 - case 101: - goto st153 - case 105: - goto st588 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st589 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 -tr213: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st590 - st590: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof590 - } - st_case_590: -//line plugins/parsers/influx/machine.go:22641 - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 46: - goto st562 - case 69: - goto st153 - case 92: - goto st155 - case 101: - goto st153 - case 105: - goto st588 - case 117: - goto st591 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st587 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 - st591: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof591 - } - st_case_591: - switch ( m.data)[( m.p)] { - case 10: - goto tr743 - case 11: - goto tr887 - case 13: - goto tr745 - case 32: - goto tr886 - case 34: - goto tr89 - case 44: - goto tr888 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr886 - } - goto st53 -tr214: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st592 - st592: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof592 - } - st_case_592: -//line plugins/parsers/influx/machine.go:22713 - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 11: - goto tr851 - case 13: - goto tr533 - case 32: - goto tr850 - case 34: - goto tr89 - case 44: - goto tr852 - case 46: - goto st562 - case 69: - goto st153 - case 92: - goto st155 - case 101: - goto st153 - case 105: - goto st588 - case 117: - goto st591 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st592 - } - case ( m.data)[( m.p)] >= 9: - goto tr850 - } - goto st53 -tr215: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st593 - st593: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof593 - } - st_case_593: -//line plugins/parsers/influx/machine.go:22760 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 11: - goto tr892 - case 13: - goto tr751 - case 32: - goto tr890 - case 34: - goto tr89 - case 44: - goto tr893 - case 65: - goto st156 - case 92: - goto st155 - case 97: - goto st159 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr890 - } - goto st53 - st156: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof156 - } - st_case_156: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 76: - goto st157 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 - st157: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof157 - } - st_case_157: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 83: - goto st158 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 - st158: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof158 - } - st_case_158: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 69: - goto st594 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 - st594: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof594 - } - st_case_594: - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 11: - goto tr892 - case 13: - goto tr751 - case 32: - goto tr890 - case 34: - goto tr89 - case 44: - goto tr893 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr890 - } - goto st53 - st159: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof159 - } - st_case_159: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - case 108: - goto st160 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 - st160: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof160 - } - st_case_160: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - case 115: - goto st161 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 - st161: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof161 - } - st_case_161: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - case 101: - goto st594 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 -tr216: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st595 - st595: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof595 - } - st_case_595: -//line plugins/parsers/influx/machine.go:22983 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 11: - goto tr892 - case 13: - goto tr751 - case 32: - goto tr890 - case 34: - goto tr89 - case 44: - goto tr893 - case 82: - goto st162 - case 92: - goto st155 - case 114: - goto st163 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr890 - } - goto st53 - st162: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof162 - } - st_case_162: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 85: - goto st158 - case 92: - goto st155 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 - st163: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof163 - } - st_case_163: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr179 - case 13: - goto st6 - case 32: - goto tr178 - case 34: - goto tr89 - case 44: - goto tr180 - case 92: - goto st155 - case 117: - goto st161 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr178 - } - goto st53 -tr217: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st596 - st596: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof596 - } - st_case_596: -//line plugins/parsers/influx/machine.go:23073 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 11: - goto tr892 - case 13: - goto tr751 - case 32: - goto tr890 - case 34: - goto tr89 - case 44: - goto tr893 - case 92: - goto st155 - case 97: - goto st159 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr890 - } - goto st53 -tr218: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st597 - st597: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof597 - } - st_case_597: -//line plugins/parsers/influx/machine.go:23107 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 11: - goto tr892 - case 13: - goto tr751 - case 32: - goto tr890 - case 34: - goto tr89 - case 44: - goto tr893 - case 92: - goto st155 - case 114: - goto st163 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr890 - } - goto st53 - st164: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof164 - } - st_case_164: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr337 - case 13: - goto st6 - case 32: - goto st164 - case 34: - goto tr116 - case 35: - goto st6 - case 44: - goto st6 - case 92: - goto tr338 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st164 - } - goto tr335 -tr337: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st165 - st165: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof165 - } - st_case_165: -//line plugins/parsers/influx/machine.go:23168 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr340 - case 13: - goto st6 - case 32: - goto tr339 - case 34: - goto tr83 - case 35: - goto st53 - case 44: - goto tr180 - case 92: - goto tr338 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr339 - } - goto tr335 -tr339: - ( m.cs) = 166 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st166: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof166 - } - st_case_166: -//line plugins/parsers/influx/machine.go:23209 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr342 - case 13: - goto st6 - case 32: - goto st166 - case 34: - goto tr122 - case 35: - goto tr158 - case 44: - goto st6 - case 61: - goto tr335 - case 92: - goto tr184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st166 - } - goto tr182 -tr342: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st167 -tr343: - ( m.cs) = 167 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st167: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof167 - } - st_case_167: -//line plugins/parsers/influx/machine.go:23262 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr343 - case 13: - goto st6 - case 32: - goto tr339 - case 34: - goto tr122 - case 44: - goto tr180 - case 61: - goto tr344 - case 92: - goto tr184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr339 - } - goto tr182 -tr340: - ( m.cs) = 168 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st168: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof168 - } - st_case_168: -//line plugins/parsers/influx/machine.go:23307 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr343 - case 13: - goto st6 - case 32: - goto tr339 - case 34: - goto tr122 - case 44: - goto tr180 - case 61: - goto tr335 - case 92: - goto tr184 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr339 - } - goto tr182 -tr538: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st169 - st169: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof169 - } - st_case_169: -//line plugins/parsers/influx/machine.go:23341 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st598 - } - goto st6 -tr539: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st598 - st598: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof598 - } - st_case_598: -//line plugins/parsers/influx/machine.go:23365 - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st599 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st599: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof599 - } - st_case_599: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st600 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st600: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof600 - } - st_case_600: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st601 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st601: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof601 - } - st_case_601: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st602 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st602: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof602 - } - st_case_602: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st603 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st603: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof603 - } - st_case_603: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st604 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st604: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof604 - } - st_case_604: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st605 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st605: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof605 - } - st_case_605: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st606 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st606: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof606 - } - st_case_606: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st607 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st607: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof607 - } - st_case_607: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st608 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st608: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof608 - } - st_case_608: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st609 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st609: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof609 - } - st_case_609: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st610 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st610: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof610 - } - st_case_610: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st611 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st611: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof611 - } - st_case_611: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st612 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st612: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof612 - } - st_case_612: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st613 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st613: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof613 - } - st_case_613: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st614 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st614: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof614 - } - st_case_614: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st615 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st615: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof615 - } - st_case_615: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st616 - } - case ( m.data)[( m.p)] >= 9: - goto tr673 - } - goto st6 - st616: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof616 - } - st_case_616: - switch ( m.data)[( m.p)] { - case 10: - goto tr674 - case 13: - goto tr676 - case 32: - goto tr673 - case 34: - goto tr29 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr673 - } - goto st6 -tr917: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st170 -tr534: - ( m.cs) = 170 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr924: - ( m.cs) = 170 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr926: - ( m.cs) = 170 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr929: - ( m.cs) = 170 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st170: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof170 - } - st_case_170: -//line plugins/parsers/influx/machine.go:23913 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr347 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr346 -tr346: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st171 - st171: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof171 - } - st_case_171: -//line plugins/parsers/influx/machine.go:23945 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr349 - case 92: - goto st183 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st171 -tr349: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st172 - st172: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof172 - } - st_case_172: -//line plugins/parsers/influx/machine.go:23977 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr351 - case 45: - goto tr165 - case 46: - goto tr166 - case 48: - goto tr167 - case 70: - goto tr352 - case 84: - goto tr353 - case 92: - goto st73 - case 102: - goto tr354 - case 116: - goto tr355 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr168 - } - goto st6 -tr351: - ( m.cs) = 617 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st617: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof617 - } - st_case_617: -//line plugins/parsers/influx/machine.go:24022 - switch ( m.data)[( m.p)] { - case 10: - goto tr665 - case 13: - goto tr667 - case 32: - goto tr916 - case 34: - goto tr25 - case 44: - goto tr917 - case 92: - goto tr26 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr916 - } - goto tr23 -tr167: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st618 - st618: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof618 - } - st_case_618: -//line plugins/parsers/influx/machine.go:24052 - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 46: - goto st325 - case 69: - goto st173 - case 92: - goto st73 - case 101: - goto st173 - case 105: - goto st623 - case 117: - goto st624 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st619 - } - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 - st619: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof619 - } - st_case_619: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 46: - goto st325 - case 69: - goto st173 - case 92: - goto st73 - case 101: - goto st173 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st619 - } - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 - st173: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof173 - } - st_case_173: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr356 - case 43: - goto st174 - case 45: - goto st174 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st622 - } - goto st6 -tr356: - ( m.cs) = 620 -//line plugins/parsers/influx/machine.go.rl:148 - - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st620: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof620 - } - st_case_620: -//line plugins/parsers/influx/machine.go:24159 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 13: - goto st32 - case 32: - goto st271 - case 44: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st621 - } - case ( m.data)[( m.p)] >= 9: - goto st271 - } - goto tr103 - st621: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof621 - } - st_case_621: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 13: - goto tr732 - case 32: - goto tr921 - case 44: - goto tr922 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st621 - } - case ( m.data)[( m.p)] >= 9: - goto tr921 - } - goto tr103 - st174: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof174 - } - st_case_174: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st622 - } - goto st6 - st622: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof622 - } - st_case_622: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st622 - } - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 - st623: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof623 - } - st_case_623: - switch ( m.data)[( m.p)] { - case 10: - goto tr737 - case 13: - goto tr739 - case 32: - goto tr923 - case 34: - goto tr29 - case 44: - goto tr924 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr923 - } - goto st6 - st624: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof624 - } - st_case_624: - switch ( m.data)[( m.p)] { - case 10: - goto tr743 - case 13: - goto tr745 - case 32: - goto tr925 - case 34: - goto tr29 - case 44: - goto tr926 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr925 - } - goto st6 -tr168: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st625 - st625: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof625 - } - st_case_625: -//line plugins/parsers/influx/machine.go:24305 - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 46: - goto st325 - case 69: - goto st173 - case 92: - goto st73 - case 101: - goto st173 - case 105: - goto st623 - case 117: - goto st624 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st625 - } - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 -tr352: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st626 - st626: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof626 - } - st_case_626: -//line plugins/parsers/influx/machine.go:24350 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 65: - goto st175 - case 92: - goto st73 - case 97: - goto st178 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 - st175: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof175 - } - st_case_175: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 76: - goto st176 - case 92: - goto st73 - } - goto st6 - st176: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof176 - } - st_case_176: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 83: - goto st177 - case 92: - goto st73 - } - goto st6 - st177: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof177 - } - st_case_177: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 69: - goto st627 - case 92: - goto st73 - } - goto st6 - st627: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof627 - } - st_case_627: - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 - st178: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof178 - } - st_case_178: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 108: - goto st179 - } - goto st6 - st179: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof179 - } - st_case_179: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 115: - goto st180 - } - goto st6 - st180: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof180 - } - st_case_180: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 101: - goto st627 - } - goto st6 -tr353: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st628 - st628: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof628 - } - st_case_628: -//line plugins/parsers/influx/machine.go:24503 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 82: - goto st181 - case 92: - goto st73 - case 114: - goto st182 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 - st181: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof181 - } - st_case_181: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 85: - goto st177 - case 92: - goto st73 - } - goto st6 - st182: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof182 - } - st_case_182: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 117: - goto st180 - } - goto st6 -tr354: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st629 - st629: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof629 - } - st_case_629: -//line plugins/parsers/influx/machine.go:24569 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 92: - goto st73 - case 97: - goto st178 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 -tr355: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st630 - st630: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof630 - } - st_case_630: -//line plugins/parsers/influx/machine.go:24601 - switch ( m.data)[( m.p)] { - case 10: - goto tr749 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 92: - goto st73 - case 114: - goto st182 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 -tr347: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st183 - st183: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof183 - } - st_case_183: -//line plugins/parsers/influx/machine.go:24633 - switch ( m.data)[( m.p)] { - case 34: - goto st171 - case 92: - goto st171 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st3 - st631: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof631 - } - st_case_631: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 46: - goto st325 - case 69: - goto st173 - case 92: - goto st73 - case 101: - goto st173 - case 105: - goto st623 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st619 - } - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 - st632: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof632 - } - st_case_632: - switch ( m.data)[( m.p)] { - case 10: - goto tr532 - case 13: - goto tr533 - case 32: - goto tr531 - case 34: - goto tr29 - case 44: - goto tr534 - case 46: - goto st325 - case 69: - goto st173 - case 92: - goto st73 - case 101: - goto st173 - case 105: - goto st623 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st632 - } - case ( m.data)[( m.p)] >= 9: - goto tr531 - } - goto st6 -tr169: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st633 - st633: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof633 - } - st_case_633: -//line plugins/parsers/influx/machine.go:24732 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 65: - goto st184 - case 92: - goto st73 - case 97: - goto st187 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 - st184: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof184 - } - st_case_184: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 76: - goto st185 - case 92: - goto st73 - } - goto st6 - st185: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof185 - } - st_case_185: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 83: - goto st186 - case 92: - goto st73 - } - goto st6 - st186: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof186 - } - st_case_186: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 69: - goto st634 - case 92: - goto st73 - } - goto st6 - st634: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof634 - } - st_case_634: - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 - st187: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof187 - } - st_case_187: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 108: - goto st188 - } - goto st6 - st188: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof188 - } - st_case_188: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 115: - goto st189 - } - goto st6 - st189: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof189 - } - st_case_189: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 101: - goto st634 - } - goto st6 -tr170: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st635 - st635: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof635 - } - st_case_635: -//line plugins/parsers/influx/machine.go:24885 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 82: - goto st190 - case 92: - goto st73 - case 114: - goto st191 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 - st190: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof190 - } - st_case_190: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 85: - goto st186 - case 92: - goto st73 - } - goto st6 - st191: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof191 - } - st_case_191: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 117: - goto st189 - } - goto st6 -tr171: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st636 - st636: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof636 - } - st_case_636: -//line plugins/parsers/influx/machine.go:24951 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 92: - goto st73 - case 97: - goto st187 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 -tr172: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st637 - st637: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof637 - } - st_case_637: -//line plugins/parsers/influx/machine.go:24983 - switch ( m.data)[( m.p)] { - case 10: - goto tr891 - case 13: - goto tr751 - case 32: - goto tr928 - case 34: - goto tr29 - case 44: - goto tr929 - case 92: - goto st73 - case 114: - goto st191 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr928 - } - goto st6 -tr160: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st192 - st192: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof192 - } - st_case_192: -//line plugins/parsers/influx/machine.go:25015 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr160 - case 13: - goto st6 - case 32: - goto st48 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto tr163 - case 92: - goto tr161 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st48 - } - goto tr158 -tr138: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st193 - st193: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof193 - } - st_case_193: -//line plugins/parsers/influx/machine.go:25049 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 46: - goto st194 - case 48: - goto st639 - case 61: - goto tr45 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st642 - } - case ( m.data)[( m.p)] >= 9: - goto tr58 - } - goto st15 -tr139: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st194 - st194: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof194 - } - st_case_194: -//line plugins/parsers/influx/machine.go:25090 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st638 - } - case ( m.data)[( m.p)] >= 9: - goto tr58 - } - goto st15 - st638: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof638 - } - st_case_638: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 61: - goto tr130 - case 69: - goto st195 - case 92: - goto st21 - case 101: - goto st195 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st638 - } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 - st195: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof195 - } - st_case_195: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 34: - goto st196 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st479 - } - default: - goto st196 - } - goto st15 - st196: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof196 - } - st_case_196: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st479 - } - case ( m.data)[( m.p)] >= 9: - goto tr58 - } - goto st15 - st639: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof639 - } - st_case_639: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 46: - goto st638 - case 61: - goto tr130 - case 69: - goto st195 - case 92: - goto st21 - case 101: - goto st195 - case 105: - goto st641 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st640 - } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 - st640: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof640 - } - st_case_640: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 46: - goto st638 - case 61: - goto tr130 - case 69: - goto st195 - case 92: - goto st21 - case 101: - goto st195 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st640 - } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 - st641: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof641 - } - st_case_641: - switch ( m.data)[( m.p)] { - case 10: - goto tr942 - case 11: - goto tr943 - case 13: - goto tr944 - case 32: - goto tr941 - case 44: - goto tr945 - case 61: - goto tr130 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr941 - } - goto st15 - st642: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof642 - } - st_case_642: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 46: - goto st638 - case 61: - goto tr130 - case 69: - goto st195 - case 92: - goto st21 - case 101: - goto st195 - case 105: - goto st641 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st642 - } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 -tr140: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st643 - st643: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof643 - } - st_case_643: -//line plugins/parsers/influx/machine.go:25364 - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 46: - goto st638 - case 61: - goto tr130 - case 69: - goto st195 - case 92: - goto st21 - case 101: - goto st195 - case 105: - goto st641 - case 117: - goto st644 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st640 - } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 - st644: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof644 - } - st_case_644: - switch ( m.data)[( m.p)] { - case 10: - goto tr948 - case 11: - goto tr949 - case 13: - goto tr950 - case 32: - goto tr947 - case 44: - goto tr951 - case 61: - goto tr130 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr947 - } - goto st15 -tr141: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st645 - st645: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof645 - } - st_case_645: -//line plugins/parsers/influx/machine.go:25436 - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr731 - case 13: - goto tr732 - case 32: - goto tr729 - case 44: - goto tr733 - case 46: - goto st638 - case 61: - goto tr130 - case 69: - goto st195 - case 92: - goto st21 - case 101: - goto st195 - case 105: - goto st641 - case 117: - goto st644 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st645 - } - case ( m.data)[( m.p)] >= 9: - goto tr729 - } - goto st15 -tr142: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st646 - st646: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof646 - } - st_case_646: -//line plugins/parsers/influx/machine.go:25483 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr955 - case 13: - goto tr956 - case 32: - goto tr953 - case 44: - goto tr957 - case 61: - goto tr130 - case 65: - goto st197 - case 92: - goto st21 - case 97: - goto st200 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr953 - } - goto st15 - st197: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof197 - } - st_case_197: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 76: - goto st198 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 - st198: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof198 - } - st_case_198: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 83: - goto st199 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 - st199: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof199 - } - st_case_199: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 69: - goto st647 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 - st647: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof647 - } - st_case_647: - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr955 - case 13: - goto tr956 - case 32: - goto tr953 - case 44: - goto tr957 - case 61: - goto tr130 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr953 - } - goto st15 - st200: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof200 - } - st_case_200: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - case 108: - goto st201 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 - st201: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof201 - } - st_case_201: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - case 115: - goto st202 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 - st202: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof202 - } - st_case_202: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - case 101: - goto st647 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 -tr143: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st648 - st648: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof648 - } - st_case_648: -//line plugins/parsers/influx/machine.go:25706 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr955 - case 13: - goto tr956 - case 32: - goto tr953 - case 44: - goto tr957 - case 61: - goto tr130 - case 82: - goto st203 - case 92: - goto st21 - case 114: - goto st204 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr953 - } - goto st15 - st203: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof203 - } - st_case_203: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 85: - goto st199 - case 92: - goto st21 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 - st204: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof204 - } - st_case_204: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr59 - case 13: - goto tr45 - case 32: - goto tr58 - case 44: - goto tr60 - case 61: - goto tr45 - case 92: - goto st21 - case 117: - goto st202 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr58 - } - goto st15 -tr144: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st649 - st649: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof649 - } - st_case_649: -//line plugins/parsers/influx/machine.go:25796 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr955 - case 13: - goto tr956 - case 32: - goto tr953 - case 44: - goto tr957 - case 61: - goto tr130 - case 92: - goto st21 - case 97: - goto st200 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr953 - } - goto st15 -tr145: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st650 - st650: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof650 - } - st_case_650: -//line plugins/parsers/influx/machine.go:25830 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr955 - case 13: - goto tr956 - case 32: - goto tr953 - case 44: - goto tr957 - case 61: - goto tr130 - case 92: - goto st21 - case 114: - goto st204 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr953 - } - goto st15 -tr121: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st205 -tr380: - ( m.cs) = 205 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st205: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof205 - } - st_case_205: -//line plugins/parsers/influx/machine.go:25881 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr380 - case 13: - goto st6 - case 32: - goto tr117 - case 34: - goto tr122 - case 44: - goto tr90 - case 61: - goto tr381 - case 92: - goto tr123 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr117 - } - goto tr119 -tr118: - ( m.cs) = 206 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st206: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof206 - } - st_case_206: -//line plugins/parsers/influx/machine.go:25926 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr380 - case 13: - goto st6 - case 32: - goto tr117 - case 34: - goto tr122 - case 44: - goto tr90 - case 61: - goto tr80 - case 92: - goto tr123 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr117 - } - goto tr119 -tr497: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st207 - st207: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof207 - } - st_case_207: -//line plugins/parsers/influx/machine.go:25960 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st651 - } - goto st6 -tr498: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st651 - st651: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof651 - } - st_case_651: -//line plugins/parsers/influx/machine.go:25984 - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st652 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st652: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof652 - } - st_case_652: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st653 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st653: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof653 - } - st_case_653: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st654 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st654: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof654 - } - st_case_654: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st655 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st655: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof655 - } - st_case_655: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st656 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st656: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof656 - } - st_case_656: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st657 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st657: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof657 - } - st_case_657: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st658 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st658: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof658 - } - st_case_658: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st659 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st659: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof659 - } - st_case_659: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st660 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st660: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof660 - } - st_case_660: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st661 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st661: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof661 - } - st_case_661: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st662 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st662: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof662 - } - st_case_662: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st663 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st663: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof663 - } - st_case_663: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st664 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st664: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof664 - } - st_case_664: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st665 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st665: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof665 - } - st_case_665: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st666 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st666: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof666 - } - st_case_666: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st667 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st667: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof667 - } - st_case_667: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st668 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st668: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof668 - } - st_case_668: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st669 - } - case ( m.data)[( m.p)] >= 9: - goto tr599 - } - goto st6 - st669: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof669 - } - st_case_669: - switch ( m.data)[( m.p)] { - case 10: - goto tr600 - case 13: - goto tr602 - case 32: - goto tr599 - case 34: - goto tr29 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr599 - } - goto st6 -tr494: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st208 -tr981: - ( m.cs) = 208 -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr986: - ( m.cs) = 208 -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr989: - ( m.cs) = 208 -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr992: - ( m.cs) = 208 -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st208: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof208 - } - st_case_208: -//line plugins/parsers/influx/machine.go:26532 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr384 - case 44: - goto st6 - case 61: - goto st6 - case 92: - goto tr385 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto tr383 -tr383: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st209 - st209: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof209 - } - st_case_209: -//line plugins/parsers/influx/machine.go:26564 - switch ( m.data)[( m.p)] { - case 9: - goto st6 - case 10: - goto tr28 - case 32: - goto st6 - case 34: - goto tr98 - case 44: - goto st6 - case 61: - goto tr387 - case 92: - goto st223 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st6 - } - goto st209 -tr387: -//line plugins/parsers/influx/machine.go.rl:108 - - m.key = m.text() - - goto st210 - st210: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof210 - } - st_case_210: -//line plugins/parsers/influx/machine.go:26596 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr351 - case 45: - goto tr389 - case 46: - goto tr390 - case 48: - goto tr391 - case 70: - goto tr110 - case 84: - goto tr111 - case 92: - goto st73 - case 102: - goto tr112 - case 116: - goto tr113 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr392 - } - goto st6 -tr389: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st211 - st211: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof211 - } - st_case_211: -//line plugins/parsers/influx/machine.go:26634 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 46: - goto st212 - case 48: - goto st672 - case 92: - goto st73 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st675 - } - goto st6 -tr390: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st212 - st212: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof212 - } - st_case_212: -//line plugins/parsers/influx/machine.go:26662 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st670 - } - goto st6 - st670: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof670 - } - st_case_670: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 69: - goto st213 - case 92: - goto st73 - case 101: - goto st213 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st670 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st213: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof213 - } - st_case_213: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr356 - case 43: - goto st214 - case 45: - goto st214 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st671 - } - goto st6 - st214: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof214 - } - st_case_214: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st671 - } - goto st6 - st671: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof671 - } - st_case_671: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st671 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st672: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof672 - } - st_case_672: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st670 - case 69: - goto st213 - case 92: - goto st73 - case 101: - goto st213 - case 105: - goto st674 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st673 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st673: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof673 - } - st_case_673: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st670 - case 69: - goto st213 - case 92: - goto st73 - case 101: - goto st213 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st673 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st674: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof674 - } - st_case_674: - switch ( m.data)[( m.p)] { - case 10: - goto tr791 - case 13: - goto tr793 - case 32: - goto tr985 - case 34: - goto tr29 - case 44: - goto tr986 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr985 - } - goto st6 - st675: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof675 - } - st_case_675: - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st670 - case 69: - goto st213 - case 92: - goto st73 - case 101: - goto st213 - case 105: - goto st674 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st675 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 -tr391: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st676 - st676: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof676 - } - st_case_676: -//line plugins/parsers/influx/machine.go:26913 - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st670 - case 69: - goto st213 - case 92: - goto st73 - case 101: - goto st213 - case 105: - goto st674 - case 117: - goto st677 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st673 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st677: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof677 - } - st_case_677: - switch ( m.data)[( m.p)] { - case 10: - goto tr797 - case 13: - goto tr799 - case 32: - goto tr988 - case 34: - goto tr29 - case 44: - goto tr989 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr988 - } - goto st6 -tr392: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st678 - st678: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof678 - } - st_case_678: -//line plugins/parsers/influx/machine.go:26981 - switch ( m.data)[( m.p)] { - case 10: - goto tr758 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st670 - case 69: - goto st213 - case 92: - goto st73 - case 101: - goto st213 - case 105: - goto st674 - case 117: - goto st677 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st678 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 -tr110: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st679 - st679: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof679 - } - st_case_679: -//line plugins/parsers/influx/machine.go:27026 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 13: - goto tr805 - case 32: - goto tr991 - case 34: - goto tr29 - case 44: - goto tr992 - case 65: - goto st215 - case 92: - goto st73 - case 97: - goto st218 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr991 - } - goto st6 - st215: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof215 - } - st_case_215: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 76: - goto st216 - case 92: - goto st73 - } - goto st6 - st216: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof216 - } - st_case_216: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 83: - goto st217 - case 92: - goto st73 - } - goto st6 - st217: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof217 - } - st_case_217: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 69: - goto st680 - case 92: - goto st73 - } - goto st6 - st680: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof680 - } - st_case_680: - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 13: - goto tr805 - case 32: - goto tr991 - case 34: - goto tr29 - case 44: - goto tr992 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr991 - } - goto st6 - st218: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof218 - } - st_case_218: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 108: - goto st219 - } - goto st6 - st219: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof219 - } - st_case_219: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 115: - goto st220 - } - goto st6 - st220: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof220 - } - st_case_220: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 101: - goto st680 - } - goto st6 -tr111: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st681 - st681: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof681 - } - st_case_681: -//line plugins/parsers/influx/machine.go:27179 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 13: - goto tr805 - case 32: - goto tr991 - case 34: - goto tr29 - case 44: - goto tr992 - case 82: - goto st221 - case 92: - goto st73 - case 114: - goto st222 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr991 - } - goto st6 - st221: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof221 - } - st_case_221: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 85: - goto st217 - case 92: - goto st73 - } - goto st6 - st222: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof222 - } - st_case_222: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - case 117: - goto st220 - } - goto st6 -tr112: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st682 - st682: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof682 - } - st_case_682: -//line plugins/parsers/influx/machine.go:27245 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 13: - goto tr805 - case 32: - goto tr991 - case 34: - goto tr29 - case 44: - goto tr992 - case 92: - goto st73 - case 97: - goto st218 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr991 - } - goto st6 -tr113: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st683 - st683: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof683 - } - st_case_683: -//line plugins/parsers/influx/machine.go:27277 - switch ( m.data)[( m.p)] { - case 10: - goto tr803 - case 13: - goto tr805 - case 32: - goto tr991 - case 34: - goto tr29 - case 44: - goto tr992 - case 92: - goto st73 - case 114: - goto st222 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr991 - } - goto st6 -tr385: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st223 - st223: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof223 - } - st_case_223: -//line plugins/parsers/influx/machine.go:27309 - switch ( m.data)[( m.p)] { - case 34: - goto st209 - case 92: - goto st209 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr8 - } - case ( m.data)[( m.p)] >= 9: - goto tr8 - } - goto st3 -tr106: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st224 - st224: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof224 - } - st_case_224: -//line plugins/parsers/influx/machine.go:27336 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 46: - goto st225 - case 48: - goto st686 - case 92: - goto st73 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st689 - } - goto st6 -tr107: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st225 - st225: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof225 - } - st_case_225: -//line plugins/parsers/influx/machine.go:27364 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st684 - } - goto st6 - st684: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof684 - } - st_case_684: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 69: - goto st226 - case 92: - goto st73 - case 101: - goto st226 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st684 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st226: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof226 - } - st_case_226: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr356 - case 43: - goto st227 - case 45: - goto st227 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st685 - } - goto st6 - st227: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof227 - } - st_case_227: - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 34: - goto tr29 - case 92: - goto st73 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st685 - } - goto st6 - st685: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof685 - } - st_case_685: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 92: - goto st73 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st685 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st686: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof686 - } - st_case_686: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st684 - case 69: - goto st226 - case 92: - goto st73 - case 101: - goto st226 - case 105: - goto st688 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st687 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st687: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof687 - } - st_case_687: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st684 - case 69: - goto st226 - case 92: - goto st73 - case 101: - goto st226 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st687 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st688: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof688 - } - st_case_688: - switch ( m.data)[( m.p)] { - case 10: - goto tr817 - case 13: - goto tr793 - case 32: - goto tr985 - case 34: - goto tr29 - case 44: - goto tr986 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr985 - } - goto st6 - st689: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof689 - } - st_case_689: - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st684 - case 69: - goto st226 - case 92: - goto st73 - case 101: - goto st226 - case 105: - goto st688 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st689 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 -tr108: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st690 - st690: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof690 - } - st_case_690: -//line plugins/parsers/influx/machine.go:27615 - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st684 - case 69: - goto st226 - case 92: - goto st73 - case 101: - goto st226 - case 105: - goto st688 - case 117: - goto st691 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st687 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 - st691: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof691 - } - st_case_691: - switch ( m.data)[( m.p)] { - case 10: - goto tr822 - case 13: - goto tr799 - case 32: - goto tr988 - case 34: - goto tr29 - case 44: - goto tr989 - case 92: - goto st73 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr988 - } - goto st6 -tr109: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st692 - st692: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof692 - } - st_case_692: -//line plugins/parsers/influx/machine.go:27683 - switch ( m.data)[( m.p)] { - case 10: - goto tr636 - case 13: - goto tr638 - case 32: - goto tr980 - case 34: - goto tr29 - case 44: - goto tr981 - case 46: - goto st684 - case 69: - goto st226 - case 92: - goto st73 - case 101: - goto st226 - case 105: - goto st688 - case 117: - goto st691 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st692 - } - case ( m.data)[( m.p)] >= 9: - goto tr980 - } - goto st6 -tr94: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st228 - st228: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof228 - } - st_case_228: -//line plugins/parsers/influx/machine.go:27728 - switch ( m.data)[( m.p)] { - case 10: - goto tr28 - case 11: - goto tr94 - case 13: - goto st6 - case 32: - goto st30 - case 34: - goto tr95 - case 44: - goto st6 - case 61: - goto tr99 - case 92: - goto tr96 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st30 - } - goto tr92 -tr72: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st229 - st229: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof229 - } - st_case_229: -//line plugins/parsers/influx/machine.go:27762 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 46: - goto st230 - case 48: - goto st694 - case 92: - goto st94 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st697 - } - case ( m.data)[( m.p)] >= 9: - goto tr1 - } - goto st1 -tr73: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st230 - st230: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof230 - } - st_case_230: -//line plugins/parsers/influx/machine.go:27801 - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 92: - goto st94 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st693 - } - case ( m.data)[( m.p)] >= 9: - goto tr1 - } - goto st1 - st693: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof693 - } - st_case_693: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 69: - goto st231 - case 92: - goto st94 - case 101: - goto st231 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st693 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 - st231: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof231 - } - st_case_231: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 34: - goto st232 - case 44: - goto tr4 - case 92: - goto st94 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st530 - } - default: - goto st232 - } - goto st1 - st232: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof232 - } - st_case_232: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 92: - goto st94 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st530 - } - case ( m.data)[( m.p)] >= 9: - goto tr1 - } - goto st1 - st694: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof694 - } - st_case_694: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 46: - goto st693 - case 69: - goto st231 - case 92: - goto st94 - case 101: - goto st231 - case 105: - goto st696 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st695 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 - st695: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof695 - } - st_case_695: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 46: - goto st693 - case 69: - goto st231 - case 92: - goto st94 - case 101: - goto st231 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st695 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 - st696: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof696 - } - st_case_696: - switch ( m.data)[( m.p)] { - case 10: - goto tr942 - case 11: - goto tr1006 - case 13: - goto tr944 - case 32: - goto tr1005 - case 44: - goto tr1007 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1005 - } - goto st1 - st697: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof697 - } - st_case_697: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 46: - goto st693 - case 69: - goto st231 - case 92: - goto st94 - case 101: - goto st231 - case 105: - goto st696 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st697 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 -tr74: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st698 - st698: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof698 - } - st_case_698: -//line plugins/parsers/influx/machine.go:28059 - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 46: - goto st693 - case 69: - goto st231 - case 92: - goto st94 - case 101: - goto st231 - case 105: - goto st696 - case 117: - goto st699 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st695 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 - st699: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof699 - } - st_case_699: - switch ( m.data)[( m.p)] { - case 10: - goto tr948 - case 11: - goto tr1010 - case 13: - goto tr950 - case 32: - goto tr1009 - case 44: - goto tr1011 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1009 - } - goto st1 -tr75: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st700 - st700: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof700 - } - st_case_700: -//line plugins/parsers/influx/machine.go:28127 - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 11: - goto tr812 - case 13: - goto tr732 - case 32: - goto tr811 - case 44: - goto tr813 - case 46: - goto st693 - case 69: - goto st231 - case 92: - goto st94 - case 101: - goto st231 - case 105: - goto st696 - case 117: - goto st699 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st700 - } - case ( m.data)[( m.p)] >= 9: - goto tr811 - } - goto st1 -tr76: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st701 - st701: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof701 - } - st_case_701: -//line plugins/parsers/influx/machine.go:28172 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr1014 - case 13: - goto tr956 - case 32: - goto tr1013 - case 44: - goto tr1015 - case 65: - goto st233 - case 92: - goto st94 - case 97: - goto st236 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1013 - } - goto st1 - st233: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof233 - } - st_case_233: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 76: - goto st234 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 - st234: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof234 - } - st_case_234: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 83: - goto st235 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 - st235: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof235 - } - st_case_235: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 69: - goto st702 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 - st702: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof702 - } - st_case_702: - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr1014 - case 13: - goto tr956 - case 32: - goto tr1013 - case 44: - goto tr1015 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1013 - } - goto st1 - st236: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof236 - } - st_case_236: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 92: - goto st94 - case 108: - goto st237 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 - st237: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof237 - } - st_case_237: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 92: - goto st94 - case 115: - goto st238 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 - st238: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof238 - } - st_case_238: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 92: - goto st94 - case 101: - goto st702 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 -tr77: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st703 - st703: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof703 - } - st_case_703: -//line plugins/parsers/influx/machine.go:28379 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr1014 - case 13: - goto tr956 - case 32: - goto tr1013 - case 44: - goto tr1015 - case 82: - goto st239 - case 92: - goto st94 - case 114: - goto st240 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1013 - } - goto st1 - st239: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof239 - } - st_case_239: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 85: - goto st235 - case 92: - goto st94 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 - st240: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof240 - } - st_case_240: - switch ( m.data)[( m.p)] { - case 10: - goto tr45 - case 11: - goto tr3 - case 13: - goto tr45 - case 32: - goto tr1 - case 44: - goto tr4 - case 92: - goto st94 - case 117: - goto st238 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1 - } - goto st1 -tr78: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st704 - st704: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof704 - } - st_case_704: -//line plugins/parsers/influx/machine.go:28463 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr1014 - case 13: - goto tr956 - case 32: - goto tr1013 - case 44: - goto tr1015 - case 92: - goto st94 - case 97: - goto st236 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1013 - } - goto st1 -tr79: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st705 - st705: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof705 - } - st_case_705: -//line plugins/parsers/influx/machine.go:28495 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 11: - goto tr1014 - case 13: - goto tr956 - case 32: - goto tr1013 - case 44: - goto tr1015 - case 92: - goto st94 - case 114: - goto st240 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1013 - } - goto st1 -tr42: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st241 -tr422: - ( m.cs) = 241 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st241: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof241 - } - st_case_241: -//line plugins/parsers/influx/machine.go:28544 - switch ( m.data)[( m.p)] { - case 10: - goto tr421 - case 11: - goto tr422 - case 13: - goto tr421 - case 32: - goto tr36 - case 44: - goto tr4 - case 61: - goto tr423 - case 92: - goto tr43 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr36 - } - goto tr39 -tr38: - ( m.cs) = 242 -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st242: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof242 - } - st_case_242: -//line plugins/parsers/influx/machine.go:28587 - switch ( m.data)[( m.p)] { - case 10: - goto tr421 - case 11: - goto tr422 - case 13: - goto tr421 - case 32: - goto tr36 - case 44: - goto tr4 - case 61: - goto tr31 - case 92: - goto tr43 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr36 - } - goto tr39 -tr462: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st243 - st243: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof243 - } - st_case_243: -//line plugins/parsers/influx/machine.go:28619 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st706 - } - goto tr424 -tr463: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st706 - st706: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof706 - } - st_case_706: -//line plugins/parsers/influx/machine.go:28635 - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st707 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st707: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof707 - } - st_case_707: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st708 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st708: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof708 - } - st_case_708: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st709 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st709: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof709 - } - st_case_709: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st710 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st710: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof710 - } - st_case_710: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st711 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st711: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof711 - } - st_case_711: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st712 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st712: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof712 - } - st_case_712: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st713 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st713: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof713 - } - st_case_713: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st714 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st714: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof714 - } - st_case_714: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st715 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st715: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof715 - } - st_case_715: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st716 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st716: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof716 - } - st_case_716: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st717 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st717: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof717 - } - st_case_717: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st718 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st718: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof718 - } - st_case_718: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st719 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st719: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof719 - } - st_case_719: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st720 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st720: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof720 - } - st_case_720: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st721 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st721: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof721 - } - st_case_721: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st722 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st722: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof722 - } - st_case_722: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st723 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st723: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof723 - } - st_case_723: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st724 - } - case ( m.data)[( m.p)] >= 9: - goto tr467 - } - goto tr424 - st724: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof724 - } - st_case_724: - switch ( m.data)[( m.p)] { - case 10: - goto tr468 - case 13: - goto tr470 - case 32: - goto tr467 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr467 - } - goto tr424 -tr15: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st244 - st244: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof244 - } - st_case_244: -//line plugins/parsers/influx/machine.go:29055 - switch ( m.data)[( m.p)] { - case 46: - goto st245 - case 48: - goto st726 - } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st729 - } - goto tr8 -tr16: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st245 - st245: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof245 - } - st_case_245: -//line plugins/parsers/influx/machine.go:29077 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st725 - } - goto tr8 - st725: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof725 - } - st_case_725: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 13: - goto tr732 - case 32: - goto tr921 - case 44: - goto tr922 - case 69: - goto st246 - case 101: - goto st246 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st725 - } - case ( m.data)[( m.p)] >= 9: - goto tr921 - } - goto tr103 - st246: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof246 - } - st_case_246: - switch ( m.data)[( m.p)] { - case 34: - goto st247 - case 43: - goto st247 - case 45: - goto st247 - } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st621 - } - goto tr8 - st247: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof247 - } - st_case_247: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st621 - } - goto tr8 - st726: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof726 - } - st_case_726: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 13: - goto tr732 - case 32: - goto tr921 - case 44: - goto tr922 - case 46: - goto st725 - case 69: - goto st246 - case 101: - goto st246 - case 105: - goto st728 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st727 - } - case ( m.data)[( m.p)] >= 9: - goto tr921 - } - goto tr103 - st727: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof727 - } - st_case_727: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 13: - goto tr732 - case 32: - goto tr921 - case 44: - goto tr922 - case 46: - goto st725 - case 69: - goto st246 - case 101: - goto st246 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st727 - } - case ( m.data)[( m.p)] >= 9: - goto tr921 - } - goto tr103 - st728: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof728 - } - st_case_728: - switch ( m.data)[( m.p)] { - case 10: - goto tr942 - case 13: - goto tr944 - case 32: - goto tr1041 - case 44: - goto tr1042 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1041 - } - goto tr103 - st729: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof729 - } - st_case_729: - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 13: - goto tr732 - case 32: - goto tr921 - case 44: - goto tr922 - case 46: - goto st725 - case 69: - goto st246 - case 101: - goto st246 - case 105: - goto st728 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st729 - } - case ( m.data)[( m.p)] >= 9: - goto tr921 - } - goto tr103 -tr17: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st730 - st730: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof730 - } - st_case_730: -//line plugins/parsers/influx/machine.go:29260 - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 13: - goto tr732 - case 32: - goto tr921 - case 44: - goto tr922 - case 46: - goto st725 - case 69: - goto st246 - case 101: - goto st246 - case 105: - goto st728 - case 117: - goto st731 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st727 - } - case ( m.data)[( m.p)] >= 9: - goto tr921 - } - goto tr103 - st731: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof731 - } - st_case_731: - switch ( m.data)[( m.p)] { - case 10: - goto tr948 - case 13: - goto tr950 - case 32: - goto tr1044 - case 44: - goto tr1045 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1044 - } - goto tr103 -tr18: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st732 - st732: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof732 - } - st_case_732: -//line plugins/parsers/influx/machine.go:29320 - switch ( m.data)[( m.p)] { - case 10: - goto tr730 - case 13: - goto tr732 - case 32: - goto tr921 - case 44: - goto tr922 - case 46: - goto st725 - case 69: - goto st246 - case 101: - goto st246 - case 105: - goto st728 - case 117: - goto st731 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st732 - } - case ( m.data)[( m.p)] >= 9: - goto tr921 - } - goto tr103 -tr19: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st733 - st733: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof733 - } - st_case_733: -//line plugins/parsers/influx/machine.go:29361 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 13: - goto tr956 - case 32: - goto tr1047 - case 44: - goto tr1048 - case 65: - goto st248 - case 97: - goto st251 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1047 - } - goto tr103 - st248: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof248 - } - st_case_248: - if ( m.data)[( m.p)] == 76 { - goto st249 - } - goto tr8 - st249: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof249 - } - st_case_249: - if ( m.data)[( m.p)] == 83 { - goto st250 - } - goto tr8 - st250: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof250 - } - st_case_250: - if ( m.data)[( m.p)] == 69 { - goto st734 - } - goto tr8 - st734: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof734 - } - st_case_734: - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 13: - goto tr956 - case 32: - goto tr1047 - case 44: - goto tr1048 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1047 - } - goto tr103 - st251: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof251 - } - st_case_251: - if ( m.data)[( m.p)] == 108 { - goto st252 - } - goto tr8 - st252: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof252 - } - st_case_252: - if ( m.data)[( m.p)] == 115 { - goto st253 - } - goto tr8 - st253: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof253 - } - st_case_253: - if ( m.data)[( m.p)] == 101 { - goto st734 - } - goto tr8 -tr20: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st735 - st735: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof735 - } - st_case_735: -//line plugins/parsers/influx/machine.go:29464 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 13: - goto tr956 - case 32: - goto tr1047 - case 44: - goto tr1048 - case 82: - goto st254 - case 114: - goto st255 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1047 - } - goto tr103 - st254: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof254 - } - st_case_254: - if ( m.data)[( m.p)] == 85 { - goto st250 - } - goto tr8 - st255: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof255 - } - st_case_255: - if ( m.data)[( m.p)] == 117 { - goto st253 - } - goto tr8 -tr21: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st736 - st736: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof736 - } - st_case_736: -//line plugins/parsers/influx/machine.go:29512 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 13: - goto tr956 - case 32: - goto tr1047 - case 44: - goto tr1048 - case 97: - goto st251 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1047 - } - goto tr103 -tr22: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st737 - st737: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof737 - } - st_case_737: -//line plugins/parsers/influx/machine.go:29540 - switch ( m.data)[( m.p)] { - case 10: - goto tr954 - case 13: - goto tr956 - case 32: - goto tr1047 - case 44: - goto tr1048 - case 114: - goto st255 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr1047 - } - goto tr103 -tr9: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st256 - st256: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof256 - } - st_case_256: -//line plugins/parsers/influx/machine.go:29568 - switch ( m.data)[( m.p)] { - case 10: - goto tr8 - case 11: - goto tr9 - case 13: - goto tr8 - case 32: - goto st2 - case 44: - goto tr8 - case 61: - goto tr12 - case 92: - goto tr10 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st2 - } - goto tr6 - st257: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof257 - } - st_case_257: - if ( m.data)[( m.p)] == 10 { - goto tr438 - } - goto st257 -tr438: -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - -//line plugins/parsers/influx/machine.go.rl:78 - - {goto st739 } - - goto st738 - st738: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof738 - } - st_case_738: -//line plugins/parsers/influx/machine.go:29615 - goto st0 - st260: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof260 - } - st_case_260: - switch ( m.data)[( m.p)] { - case 32: - goto tr33 - case 35: - goto tr33 - case 44: - goto tr33 - case 92: - goto tr442 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr33 - } - case ( m.data)[( m.p)] >= 9: - goto tr33 - } - goto tr441 -tr441: -//line plugins/parsers/influx/machine.go.rl:82 - - m.beginMetric = true - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st740 - st740: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof740 - } - st_case_740: -//line plugins/parsers/influx/machine.go:29656 - switch ( m.data)[( m.p)] { - case 9: - goto tr2 - case 10: - goto tr1056 - case 12: - goto tr2 - case 13: - goto tr1057 - case 32: - goto tr2 - case 44: - goto tr1058 - case 92: - goto st268 - } - goto st740 -tr443: -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto st741 -tr1056: - ( m.cs) = 741 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again -tr1060: - ( m.cs) = 741 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto _again - st741: -//line plugins/parsers/influx/machine.go.rl:172 - - m.finishMetric = true - ( m.cs) = 739; - {( m.p)++; goto _out } - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof741 - } - st_case_741: -//line plugins/parsers/influx/machine.go:29731 - goto st0 -tr1057: - ( m.cs) = 261 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1061: - ( m.cs) = 261 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st261: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof261 - } - st_case_261: -//line plugins/parsers/influx/machine.go:29764 - if ( m.data)[( m.p)] == 10 { - goto tr443 - } - goto st0 -tr1058: - ( m.cs) = 262 -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again -tr1062: - ( m.cs) = 262 -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; goto _out } - } - - goto _again - st262: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof262 - } - st_case_262: -//line plugins/parsers/influx/machine.go:29800 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr2 - case 92: - goto tr445 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto tr444 -tr444: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st263 - st263: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof263 - } - st_case_263: -//line plugins/parsers/influx/machine.go:29831 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr447 - case 92: - goto st266 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto st263 -tr447: -//line plugins/parsers/influx/machine.go.rl:95 - - m.key = m.text() - - goto st264 - st264: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof264 - } - st_case_264: -//line plugins/parsers/influx/machine.go:29862 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr2 - case 92: - goto tr450 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto tr449 -tr449: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st742 - st742: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof742 - } - st_case_742: -//line plugins/parsers/influx/machine.go:29893 - switch ( m.data)[( m.p)] { - case 9: - goto tr2 - case 10: - goto tr1060 - case 12: - goto tr2 - case 13: - goto tr1061 - case 32: - goto tr2 - case 44: - goto tr1062 - case 61: - goto tr2 - case 92: - goto st265 - } - goto st742 -tr450: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st265 - st265: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof265 - } - st_case_265: -//line plugins/parsers/influx/machine.go:29924 - if ( m.data)[( m.p)] == 92 { - goto st743 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto st742 - st743: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof743 - } - st_case_743: -//line plugins/parsers/influx/machine.go:29945 - switch ( m.data)[( m.p)] { - case 9: - goto tr2 - case 10: - goto tr1060 - case 12: - goto tr2 - case 13: - goto tr1061 - case 32: - goto tr2 - case 44: - goto tr1062 - case 61: - goto tr2 - case 92: - goto st265 - } - goto st742 -tr445: -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st266 - st266: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof266 - } - st_case_266: -//line plugins/parsers/influx/machine.go:29976 - if ( m.data)[( m.p)] == 92 { - goto st267 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto st263 - st267: -//line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof267 - } - st_case_267: -//line plugins/parsers/influx/machine.go:29997 - switch ( m.data)[( m.p)] { - case 32: - goto tr2 - case 44: - goto tr2 - case 61: - goto tr447 - case 92: - goto st266 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr2 - } - case ( m.data)[( m.p)] >= 9: - goto tr2 - } - goto st263 -tr442: -//line plugins/parsers/influx/machine.go.rl:82 - - m.beginMetric = true - -//line plugins/parsers/influx/machine.go.rl:28 - - m.pb = m.p - - goto st268 - st268: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof268 - } - st_case_268: -//line plugins/parsers/influx/machine.go:30032 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st0 - } - case ( m.data)[( m.p)] >= 9: - goto st0 - } - goto st740 -tr439: -//line plugins/parsers/influx/machine.go.rl:166 - - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line - - goto st739 - st739: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof739 - } - st_case_739: -//line plugins/parsers/influx/machine.go:30055 - switch ( m.data)[( m.p)] { - case 10: - goto tr439 - case 13: - goto st258 - case 32: - goto st739 - case 35: - goto st259 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st739 - } - goto tr1053 - st258: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof258 - } - st_case_258: - if ( m.data)[( m.p)] == 10 { - goto tr439 - } - goto st0 - st259: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof259 - } - st_case_259: - if ( m.data)[( m.p)] == 10 { - goto tr439 - } - goto st259 - st_out: - _test_eof269: ( m.cs) = 269; goto _test_eof - _test_eof1: ( m.cs) = 1; goto _test_eof - _test_eof2: ( m.cs) = 2; goto _test_eof - _test_eof3: ( m.cs) = 3; goto _test_eof - _test_eof4: ( m.cs) = 4; goto _test_eof - _test_eof5: ( m.cs) = 5; goto _test_eof - _test_eof6: ( m.cs) = 6; goto _test_eof - _test_eof270: ( m.cs) = 270; goto _test_eof - _test_eof271: ( m.cs) = 271; goto _test_eof - _test_eof272: ( m.cs) = 272; goto _test_eof - _test_eof7: ( m.cs) = 7; goto _test_eof - _test_eof8: ( m.cs) = 8; goto _test_eof - _test_eof9: ( m.cs) = 9; goto _test_eof - _test_eof10: ( m.cs) = 10; goto _test_eof - _test_eof11: ( m.cs) = 11; goto _test_eof - _test_eof12: ( m.cs) = 12; goto _test_eof - _test_eof13: ( m.cs) = 13; goto _test_eof - _test_eof14: ( m.cs) = 14; goto _test_eof - _test_eof15: ( m.cs) = 15; goto _test_eof - _test_eof16: ( m.cs) = 16; goto _test_eof - _test_eof17: ( m.cs) = 17; goto _test_eof - _test_eof18: ( m.cs) = 18; goto _test_eof - _test_eof19: ( m.cs) = 19; goto _test_eof - _test_eof20: ( m.cs) = 20; goto _test_eof - _test_eof21: ( m.cs) = 21; goto _test_eof - _test_eof22: ( m.cs) = 22; goto _test_eof - _test_eof23: ( m.cs) = 23; goto _test_eof - _test_eof24: ( m.cs) = 24; goto _test_eof - _test_eof25: ( m.cs) = 25; goto _test_eof - _test_eof26: ( m.cs) = 26; goto _test_eof - _test_eof27: ( m.cs) = 27; goto _test_eof - _test_eof28: ( m.cs) = 28; goto _test_eof - _test_eof29: ( m.cs) = 29; goto _test_eof - _test_eof30: ( m.cs) = 30; goto _test_eof - _test_eof31: ( m.cs) = 31; goto _test_eof - _test_eof273: ( m.cs) = 273; goto _test_eof - _test_eof274: ( m.cs) = 274; goto _test_eof - _test_eof32: ( m.cs) = 32; goto _test_eof - _test_eof33: ( m.cs) = 33; goto _test_eof - _test_eof275: ( m.cs) = 275; goto _test_eof - _test_eof276: ( m.cs) = 276; goto _test_eof - _test_eof277: ( m.cs) = 277; goto _test_eof - _test_eof34: ( m.cs) = 34; goto _test_eof - _test_eof278: ( m.cs) = 278; goto _test_eof - _test_eof279: ( m.cs) = 279; goto _test_eof - _test_eof280: ( m.cs) = 280; goto _test_eof - _test_eof281: ( m.cs) = 281; goto _test_eof - _test_eof282: ( m.cs) = 282; goto _test_eof - _test_eof283: ( m.cs) = 283; goto _test_eof - _test_eof284: ( m.cs) = 284; goto _test_eof - _test_eof285: ( m.cs) = 285; goto _test_eof - _test_eof286: ( m.cs) = 286; goto _test_eof - _test_eof287: ( m.cs) = 287; goto _test_eof - _test_eof288: ( m.cs) = 288; goto _test_eof - _test_eof289: ( m.cs) = 289; goto _test_eof - _test_eof290: ( m.cs) = 290; goto _test_eof - _test_eof291: ( m.cs) = 291; goto _test_eof - _test_eof292: ( m.cs) = 292; goto _test_eof - _test_eof293: ( m.cs) = 293; goto _test_eof - _test_eof294: ( m.cs) = 294; goto _test_eof - _test_eof295: ( m.cs) = 295; goto _test_eof - _test_eof35: ( m.cs) = 35; goto _test_eof - _test_eof36: ( m.cs) = 36; goto _test_eof - _test_eof296: ( m.cs) = 296; goto _test_eof - _test_eof297: ( m.cs) = 297; goto _test_eof - _test_eof298: ( m.cs) = 298; goto _test_eof - _test_eof37: ( m.cs) = 37; goto _test_eof - _test_eof38: ( m.cs) = 38; goto _test_eof - _test_eof39: ( m.cs) = 39; goto _test_eof - _test_eof40: ( m.cs) = 40; goto _test_eof - _test_eof41: ( m.cs) = 41; goto _test_eof - _test_eof299: ( m.cs) = 299; goto _test_eof - _test_eof300: ( m.cs) = 300; goto _test_eof - _test_eof301: ( m.cs) = 301; goto _test_eof - _test_eof302: ( m.cs) = 302; goto _test_eof - _test_eof42: ( m.cs) = 42; goto _test_eof - _test_eof303: ( m.cs) = 303; goto _test_eof - _test_eof304: ( m.cs) = 304; goto _test_eof - _test_eof305: ( m.cs) = 305; goto _test_eof - _test_eof306: ( m.cs) = 306; goto _test_eof - _test_eof307: ( m.cs) = 307; goto _test_eof - _test_eof308: ( m.cs) = 308; goto _test_eof - _test_eof309: ( m.cs) = 309; goto _test_eof - _test_eof310: ( m.cs) = 310; goto _test_eof - _test_eof311: ( m.cs) = 311; goto _test_eof - _test_eof312: ( m.cs) = 312; goto _test_eof - _test_eof313: ( m.cs) = 313; goto _test_eof - _test_eof314: ( m.cs) = 314; goto _test_eof - _test_eof315: ( m.cs) = 315; goto _test_eof - _test_eof316: ( m.cs) = 316; goto _test_eof - _test_eof317: ( m.cs) = 317; goto _test_eof - _test_eof318: ( m.cs) = 318; goto _test_eof - _test_eof319: ( m.cs) = 319; goto _test_eof - _test_eof320: ( m.cs) = 320; goto _test_eof - _test_eof321: ( m.cs) = 321; goto _test_eof - _test_eof322: ( m.cs) = 322; goto _test_eof - _test_eof323: ( m.cs) = 323; goto _test_eof - _test_eof324: ( m.cs) = 324; goto _test_eof - _test_eof43: ( m.cs) = 43; goto _test_eof - _test_eof44: ( m.cs) = 44; goto _test_eof - _test_eof45: ( m.cs) = 45; goto _test_eof - _test_eof46: ( m.cs) = 46; goto _test_eof - _test_eof47: ( m.cs) = 47; goto _test_eof - _test_eof48: ( m.cs) = 48; goto _test_eof - _test_eof49: ( m.cs) = 49; goto _test_eof - _test_eof50: ( m.cs) = 50; goto _test_eof - _test_eof51: ( m.cs) = 51; goto _test_eof - _test_eof52: ( m.cs) = 52; goto _test_eof - _test_eof325: ( m.cs) = 325; goto _test_eof - _test_eof326: ( m.cs) = 326; goto _test_eof - _test_eof327: ( m.cs) = 327; goto _test_eof - _test_eof53: ( m.cs) = 53; goto _test_eof - _test_eof54: ( m.cs) = 54; goto _test_eof - _test_eof55: ( m.cs) = 55; goto _test_eof - _test_eof56: ( m.cs) = 56; goto _test_eof - _test_eof57: ( m.cs) = 57; goto _test_eof - _test_eof58: ( m.cs) = 58; goto _test_eof - _test_eof328: ( m.cs) = 328; goto _test_eof - _test_eof329: ( m.cs) = 329; goto _test_eof - _test_eof59: ( m.cs) = 59; goto _test_eof - _test_eof330: ( m.cs) = 330; goto _test_eof - _test_eof331: ( m.cs) = 331; goto _test_eof - _test_eof332: ( m.cs) = 332; goto _test_eof - _test_eof333: ( m.cs) = 333; goto _test_eof - _test_eof334: ( m.cs) = 334; goto _test_eof - _test_eof335: ( m.cs) = 335; goto _test_eof - _test_eof336: ( m.cs) = 336; goto _test_eof - _test_eof337: ( m.cs) = 337; goto _test_eof - _test_eof338: ( m.cs) = 338; goto _test_eof - _test_eof339: ( m.cs) = 339; goto _test_eof - _test_eof340: ( m.cs) = 340; goto _test_eof - _test_eof341: ( m.cs) = 341; goto _test_eof - _test_eof342: ( m.cs) = 342; goto _test_eof - _test_eof343: ( m.cs) = 343; goto _test_eof - _test_eof344: ( m.cs) = 344; goto _test_eof - _test_eof345: ( m.cs) = 345; goto _test_eof - _test_eof346: ( m.cs) = 346; goto _test_eof - _test_eof347: ( m.cs) = 347; goto _test_eof - _test_eof348: ( m.cs) = 348; goto _test_eof - _test_eof349: ( m.cs) = 349; goto _test_eof - _test_eof60: ( m.cs) = 60; goto _test_eof - _test_eof350: ( m.cs) = 350; goto _test_eof - _test_eof351: ( m.cs) = 351; goto _test_eof - _test_eof352: ( m.cs) = 352; goto _test_eof - _test_eof61: ( m.cs) = 61; goto _test_eof - _test_eof353: ( m.cs) = 353; goto _test_eof - _test_eof354: ( m.cs) = 354; goto _test_eof - _test_eof355: ( m.cs) = 355; goto _test_eof - _test_eof356: ( m.cs) = 356; goto _test_eof - _test_eof357: ( m.cs) = 357; goto _test_eof - _test_eof358: ( m.cs) = 358; goto _test_eof - _test_eof359: ( m.cs) = 359; goto _test_eof - _test_eof360: ( m.cs) = 360; goto _test_eof - _test_eof361: ( m.cs) = 361; goto _test_eof - _test_eof362: ( m.cs) = 362; goto _test_eof - _test_eof363: ( m.cs) = 363; goto _test_eof - _test_eof364: ( m.cs) = 364; goto _test_eof - _test_eof365: ( m.cs) = 365; goto _test_eof - _test_eof366: ( m.cs) = 366; goto _test_eof - _test_eof367: ( m.cs) = 367; goto _test_eof - _test_eof368: ( m.cs) = 368; goto _test_eof - _test_eof369: ( m.cs) = 369; goto _test_eof - _test_eof370: ( m.cs) = 370; goto _test_eof - _test_eof371: ( m.cs) = 371; goto _test_eof - _test_eof372: ( m.cs) = 372; goto _test_eof - _test_eof62: ( m.cs) = 62; goto _test_eof - _test_eof63: ( m.cs) = 63; goto _test_eof - _test_eof64: ( m.cs) = 64; goto _test_eof - _test_eof65: ( m.cs) = 65; goto _test_eof - _test_eof66: ( m.cs) = 66; goto _test_eof - _test_eof373: ( m.cs) = 373; goto _test_eof - _test_eof67: ( m.cs) = 67; goto _test_eof - _test_eof68: ( m.cs) = 68; goto _test_eof - _test_eof69: ( m.cs) = 69; goto _test_eof - _test_eof70: ( m.cs) = 70; goto _test_eof - _test_eof71: ( m.cs) = 71; goto _test_eof - _test_eof374: ( m.cs) = 374; goto _test_eof - _test_eof375: ( m.cs) = 375; goto _test_eof - _test_eof376: ( m.cs) = 376; goto _test_eof - _test_eof72: ( m.cs) = 72; goto _test_eof - _test_eof73: ( m.cs) = 73; goto _test_eof - _test_eof74: ( m.cs) = 74; goto _test_eof - _test_eof377: ( m.cs) = 377; goto _test_eof - _test_eof378: ( m.cs) = 378; goto _test_eof - _test_eof379: ( m.cs) = 379; goto _test_eof - _test_eof75: ( m.cs) = 75; goto _test_eof - _test_eof380: ( m.cs) = 380; goto _test_eof - _test_eof381: ( m.cs) = 381; goto _test_eof - _test_eof382: ( m.cs) = 382; goto _test_eof - _test_eof383: ( m.cs) = 383; goto _test_eof - _test_eof384: ( m.cs) = 384; goto _test_eof - _test_eof385: ( m.cs) = 385; goto _test_eof - _test_eof386: ( m.cs) = 386; goto _test_eof - _test_eof387: ( m.cs) = 387; goto _test_eof - _test_eof388: ( m.cs) = 388; goto _test_eof - _test_eof389: ( m.cs) = 389; goto _test_eof - _test_eof390: ( m.cs) = 390; goto _test_eof - _test_eof391: ( m.cs) = 391; goto _test_eof - _test_eof392: ( m.cs) = 392; goto _test_eof - _test_eof393: ( m.cs) = 393; goto _test_eof - _test_eof394: ( m.cs) = 394; goto _test_eof - _test_eof395: ( m.cs) = 395; goto _test_eof - _test_eof396: ( m.cs) = 396; goto _test_eof - _test_eof397: ( m.cs) = 397; goto _test_eof - _test_eof398: ( m.cs) = 398; goto _test_eof - _test_eof399: ( m.cs) = 399; goto _test_eof - _test_eof76: ( m.cs) = 76; goto _test_eof - _test_eof77: ( m.cs) = 77; goto _test_eof - _test_eof78: ( m.cs) = 78; goto _test_eof - _test_eof79: ( m.cs) = 79; goto _test_eof - _test_eof80: ( m.cs) = 80; goto _test_eof - _test_eof81: ( m.cs) = 81; goto _test_eof - _test_eof82: ( m.cs) = 82; goto _test_eof - _test_eof83: ( m.cs) = 83; goto _test_eof - _test_eof84: ( m.cs) = 84; goto _test_eof - _test_eof85: ( m.cs) = 85; goto _test_eof - _test_eof86: ( m.cs) = 86; goto _test_eof - _test_eof87: ( m.cs) = 87; goto _test_eof - _test_eof88: ( m.cs) = 88; goto _test_eof - _test_eof89: ( m.cs) = 89; goto _test_eof - _test_eof400: ( m.cs) = 400; goto _test_eof - _test_eof401: ( m.cs) = 401; goto _test_eof - _test_eof402: ( m.cs) = 402; goto _test_eof - _test_eof403: ( m.cs) = 403; goto _test_eof - _test_eof90: ( m.cs) = 90; goto _test_eof - _test_eof91: ( m.cs) = 91; goto _test_eof - _test_eof92: ( m.cs) = 92; goto _test_eof - _test_eof93: ( m.cs) = 93; goto _test_eof - _test_eof404: ( m.cs) = 404; goto _test_eof - _test_eof405: ( m.cs) = 405; goto _test_eof - _test_eof94: ( m.cs) = 94; goto _test_eof - _test_eof95: ( m.cs) = 95; goto _test_eof - _test_eof406: ( m.cs) = 406; goto _test_eof - _test_eof96: ( m.cs) = 96; goto _test_eof - _test_eof97: ( m.cs) = 97; goto _test_eof - _test_eof407: ( m.cs) = 407; goto _test_eof - _test_eof408: ( m.cs) = 408; goto _test_eof - _test_eof98: ( m.cs) = 98; goto _test_eof - _test_eof409: ( m.cs) = 409; goto _test_eof - _test_eof410: ( m.cs) = 410; goto _test_eof - _test_eof99: ( m.cs) = 99; goto _test_eof - _test_eof100: ( m.cs) = 100; goto _test_eof - _test_eof411: ( m.cs) = 411; goto _test_eof - _test_eof412: ( m.cs) = 412; goto _test_eof - _test_eof413: ( m.cs) = 413; goto _test_eof - _test_eof414: ( m.cs) = 414; goto _test_eof - _test_eof415: ( m.cs) = 415; goto _test_eof - _test_eof416: ( m.cs) = 416; goto _test_eof - _test_eof417: ( m.cs) = 417; goto _test_eof - _test_eof418: ( m.cs) = 418; goto _test_eof - _test_eof419: ( m.cs) = 419; goto _test_eof - _test_eof420: ( m.cs) = 420; goto _test_eof - _test_eof421: ( m.cs) = 421; goto _test_eof - _test_eof422: ( m.cs) = 422; goto _test_eof - _test_eof423: ( m.cs) = 423; goto _test_eof - _test_eof424: ( m.cs) = 424; goto _test_eof - _test_eof425: ( m.cs) = 425; goto _test_eof - _test_eof426: ( m.cs) = 426; goto _test_eof - _test_eof427: ( m.cs) = 427; goto _test_eof - _test_eof428: ( m.cs) = 428; goto _test_eof - _test_eof101: ( m.cs) = 101; goto _test_eof - _test_eof429: ( m.cs) = 429; goto _test_eof - _test_eof430: ( m.cs) = 430; goto _test_eof - _test_eof431: ( m.cs) = 431; goto _test_eof - _test_eof102: ( m.cs) = 102; goto _test_eof - _test_eof103: ( m.cs) = 103; goto _test_eof - _test_eof432: ( m.cs) = 432; goto _test_eof - _test_eof433: ( m.cs) = 433; goto _test_eof - _test_eof434: ( m.cs) = 434; goto _test_eof - _test_eof104: ( m.cs) = 104; goto _test_eof - _test_eof435: ( m.cs) = 435; goto _test_eof - _test_eof436: ( m.cs) = 436; goto _test_eof - _test_eof437: ( m.cs) = 437; goto _test_eof - _test_eof438: ( m.cs) = 438; goto _test_eof - _test_eof439: ( m.cs) = 439; goto _test_eof - _test_eof440: ( m.cs) = 440; goto _test_eof - _test_eof441: ( m.cs) = 441; goto _test_eof - _test_eof442: ( m.cs) = 442; goto _test_eof - _test_eof443: ( m.cs) = 443; goto _test_eof - _test_eof444: ( m.cs) = 444; goto _test_eof - _test_eof445: ( m.cs) = 445; goto _test_eof - _test_eof446: ( m.cs) = 446; goto _test_eof - _test_eof447: ( m.cs) = 447; goto _test_eof - _test_eof448: ( m.cs) = 448; goto _test_eof - _test_eof449: ( m.cs) = 449; goto _test_eof - _test_eof450: ( m.cs) = 450; goto _test_eof - _test_eof451: ( m.cs) = 451; goto _test_eof - _test_eof452: ( m.cs) = 452; goto _test_eof - _test_eof453: ( m.cs) = 453; goto _test_eof - _test_eof454: ( m.cs) = 454; goto _test_eof - _test_eof105: ( m.cs) = 105; goto _test_eof - _test_eof455: ( m.cs) = 455; goto _test_eof - _test_eof456: ( m.cs) = 456; goto _test_eof - _test_eof457: ( m.cs) = 457; goto _test_eof - _test_eof458: ( m.cs) = 458; goto _test_eof - _test_eof459: ( m.cs) = 459; goto _test_eof - _test_eof460: ( m.cs) = 460; goto _test_eof - _test_eof461: ( m.cs) = 461; goto _test_eof - _test_eof462: ( m.cs) = 462; goto _test_eof - _test_eof463: ( m.cs) = 463; goto _test_eof - _test_eof464: ( m.cs) = 464; goto _test_eof - _test_eof465: ( m.cs) = 465; goto _test_eof - _test_eof466: ( m.cs) = 466; goto _test_eof - _test_eof467: ( m.cs) = 467; goto _test_eof - _test_eof468: ( m.cs) = 468; goto _test_eof - _test_eof469: ( m.cs) = 469; goto _test_eof - _test_eof470: ( m.cs) = 470; goto _test_eof - _test_eof471: ( m.cs) = 471; goto _test_eof - _test_eof472: ( m.cs) = 472; goto _test_eof - _test_eof473: ( m.cs) = 473; goto _test_eof - _test_eof474: ( m.cs) = 474; goto _test_eof - _test_eof475: ( m.cs) = 475; goto _test_eof - _test_eof476: ( m.cs) = 476; goto _test_eof - _test_eof106: ( m.cs) = 106; goto _test_eof - _test_eof107: ( m.cs) = 107; goto _test_eof - _test_eof108: ( m.cs) = 108; goto _test_eof - _test_eof109: ( m.cs) = 109; goto _test_eof - _test_eof110: ( m.cs) = 110; goto _test_eof - _test_eof477: ( m.cs) = 477; goto _test_eof - _test_eof111: ( m.cs) = 111; goto _test_eof - _test_eof478: ( m.cs) = 478; goto _test_eof - _test_eof479: ( m.cs) = 479; goto _test_eof - _test_eof112: ( m.cs) = 112; goto _test_eof - _test_eof480: ( m.cs) = 480; goto _test_eof - _test_eof481: ( m.cs) = 481; goto _test_eof - _test_eof482: ( m.cs) = 482; goto _test_eof - _test_eof483: ( m.cs) = 483; goto _test_eof - _test_eof484: ( m.cs) = 484; goto _test_eof - _test_eof485: ( m.cs) = 485; goto _test_eof - _test_eof486: ( m.cs) = 486; goto _test_eof - _test_eof487: ( m.cs) = 487; goto _test_eof - _test_eof488: ( m.cs) = 488; goto _test_eof - _test_eof113: ( m.cs) = 113; goto _test_eof - _test_eof114: ( m.cs) = 114; goto _test_eof - _test_eof115: ( m.cs) = 115; goto _test_eof - _test_eof489: ( m.cs) = 489; goto _test_eof - _test_eof116: ( m.cs) = 116; goto _test_eof - _test_eof117: ( m.cs) = 117; goto _test_eof - _test_eof118: ( m.cs) = 118; goto _test_eof - _test_eof490: ( m.cs) = 490; goto _test_eof - _test_eof119: ( m.cs) = 119; goto _test_eof - _test_eof120: ( m.cs) = 120; goto _test_eof - _test_eof491: ( m.cs) = 491; goto _test_eof - _test_eof492: ( m.cs) = 492; goto _test_eof - _test_eof121: ( m.cs) = 121; goto _test_eof - _test_eof122: ( m.cs) = 122; goto _test_eof - _test_eof123: ( m.cs) = 123; goto _test_eof - _test_eof124: ( m.cs) = 124; goto _test_eof - _test_eof493: ( m.cs) = 493; goto _test_eof - _test_eof494: ( m.cs) = 494; goto _test_eof - _test_eof495: ( m.cs) = 495; goto _test_eof - _test_eof125: ( m.cs) = 125; goto _test_eof - _test_eof496: ( m.cs) = 496; goto _test_eof - _test_eof497: ( m.cs) = 497; goto _test_eof - _test_eof498: ( m.cs) = 498; goto _test_eof - _test_eof499: ( m.cs) = 499; goto _test_eof - _test_eof500: ( m.cs) = 500; goto _test_eof - _test_eof501: ( m.cs) = 501; goto _test_eof - _test_eof502: ( m.cs) = 502; goto _test_eof - _test_eof503: ( m.cs) = 503; goto _test_eof - _test_eof504: ( m.cs) = 504; goto _test_eof - _test_eof505: ( m.cs) = 505; goto _test_eof - _test_eof506: ( m.cs) = 506; goto _test_eof - _test_eof507: ( m.cs) = 507; goto _test_eof - _test_eof508: ( m.cs) = 508; goto _test_eof - _test_eof509: ( m.cs) = 509; goto _test_eof - _test_eof510: ( m.cs) = 510; goto _test_eof - _test_eof511: ( m.cs) = 511; goto _test_eof - _test_eof512: ( m.cs) = 512; goto _test_eof - _test_eof513: ( m.cs) = 513; goto _test_eof - _test_eof514: ( m.cs) = 514; goto _test_eof - _test_eof515: ( m.cs) = 515; goto _test_eof - _test_eof126: ( m.cs) = 126; goto _test_eof - _test_eof127: ( m.cs) = 127; goto _test_eof - _test_eof516: ( m.cs) = 516; goto _test_eof - _test_eof517: ( m.cs) = 517; goto _test_eof - _test_eof518: ( m.cs) = 518; goto _test_eof - _test_eof519: ( m.cs) = 519; goto _test_eof - _test_eof520: ( m.cs) = 520; goto _test_eof - _test_eof521: ( m.cs) = 521; goto _test_eof - _test_eof522: ( m.cs) = 522; goto _test_eof - _test_eof523: ( m.cs) = 523; goto _test_eof - _test_eof524: ( m.cs) = 524; goto _test_eof - _test_eof128: ( m.cs) = 128; goto _test_eof - _test_eof129: ( m.cs) = 129; goto _test_eof - _test_eof130: ( m.cs) = 130; goto _test_eof - _test_eof525: ( m.cs) = 525; goto _test_eof - _test_eof131: ( m.cs) = 131; goto _test_eof - _test_eof132: ( m.cs) = 132; goto _test_eof - _test_eof133: ( m.cs) = 133; goto _test_eof - _test_eof526: ( m.cs) = 526; goto _test_eof - _test_eof134: ( m.cs) = 134; goto _test_eof - _test_eof135: ( m.cs) = 135; goto _test_eof - _test_eof527: ( m.cs) = 527; goto _test_eof - _test_eof528: ( m.cs) = 528; goto _test_eof - _test_eof136: ( m.cs) = 136; goto _test_eof - _test_eof137: ( m.cs) = 137; goto _test_eof - _test_eof138: ( m.cs) = 138; goto _test_eof - _test_eof529: ( m.cs) = 529; goto _test_eof - _test_eof530: ( m.cs) = 530; goto _test_eof - _test_eof139: ( m.cs) = 139; goto _test_eof - _test_eof531: ( m.cs) = 531; goto _test_eof - _test_eof140: ( m.cs) = 140; goto _test_eof - _test_eof532: ( m.cs) = 532; goto _test_eof - _test_eof533: ( m.cs) = 533; goto _test_eof - _test_eof534: ( m.cs) = 534; goto _test_eof - _test_eof535: ( m.cs) = 535; goto _test_eof - _test_eof536: ( m.cs) = 536; goto _test_eof - _test_eof537: ( m.cs) = 537; goto _test_eof - _test_eof538: ( m.cs) = 538; goto _test_eof - _test_eof539: ( m.cs) = 539; goto _test_eof - _test_eof141: ( m.cs) = 141; goto _test_eof - _test_eof142: ( m.cs) = 142; goto _test_eof - _test_eof143: ( m.cs) = 143; goto _test_eof - _test_eof540: ( m.cs) = 540; goto _test_eof - _test_eof144: ( m.cs) = 144; goto _test_eof - _test_eof145: ( m.cs) = 145; goto _test_eof - _test_eof146: ( m.cs) = 146; goto _test_eof - _test_eof541: ( m.cs) = 541; goto _test_eof - _test_eof147: ( m.cs) = 147; goto _test_eof - _test_eof148: ( m.cs) = 148; goto _test_eof - _test_eof542: ( m.cs) = 542; goto _test_eof - _test_eof543: ( m.cs) = 543; goto _test_eof - _test_eof544: ( m.cs) = 544; goto _test_eof - _test_eof545: ( m.cs) = 545; goto _test_eof - _test_eof546: ( m.cs) = 546; goto _test_eof - _test_eof547: ( m.cs) = 547; goto _test_eof - _test_eof548: ( m.cs) = 548; goto _test_eof - _test_eof549: ( m.cs) = 549; goto _test_eof - _test_eof550: ( m.cs) = 550; goto _test_eof - _test_eof551: ( m.cs) = 551; goto _test_eof - _test_eof552: ( m.cs) = 552; goto _test_eof - _test_eof553: ( m.cs) = 553; goto _test_eof - _test_eof554: ( m.cs) = 554; goto _test_eof - _test_eof555: ( m.cs) = 555; goto _test_eof - _test_eof556: ( m.cs) = 556; goto _test_eof - _test_eof557: ( m.cs) = 557; goto _test_eof - _test_eof558: ( m.cs) = 558; goto _test_eof - _test_eof559: ( m.cs) = 559; goto _test_eof - _test_eof560: ( m.cs) = 560; goto _test_eof - _test_eof561: ( m.cs) = 561; goto _test_eof - _test_eof149: ( m.cs) = 149; goto _test_eof - _test_eof150: ( m.cs) = 150; goto _test_eof - _test_eof562: ( m.cs) = 562; goto _test_eof - _test_eof563: ( m.cs) = 563; goto _test_eof - _test_eof564: ( m.cs) = 564; goto _test_eof - _test_eof151: ( m.cs) = 151; goto _test_eof - _test_eof565: ( m.cs) = 565; goto _test_eof - _test_eof566: ( m.cs) = 566; goto _test_eof - _test_eof152: ( m.cs) = 152; goto _test_eof - _test_eof567: ( m.cs) = 567; goto _test_eof - _test_eof568: ( m.cs) = 568; goto _test_eof - _test_eof569: ( m.cs) = 569; goto _test_eof - _test_eof570: ( m.cs) = 570; goto _test_eof - _test_eof571: ( m.cs) = 571; goto _test_eof - _test_eof572: ( m.cs) = 572; goto _test_eof - _test_eof573: ( m.cs) = 573; goto _test_eof - _test_eof574: ( m.cs) = 574; goto _test_eof - _test_eof575: ( m.cs) = 575; goto _test_eof - _test_eof576: ( m.cs) = 576; goto _test_eof - _test_eof577: ( m.cs) = 577; goto _test_eof - _test_eof578: ( m.cs) = 578; goto _test_eof - _test_eof579: ( m.cs) = 579; goto _test_eof - _test_eof580: ( m.cs) = 580; goto _test_eof - _test_eof581: ( m.cs) = 581; goto _test_eof - _test_eof582: ( m.cs) = 582; goto _test_eof - _test_eof583: ( m.cs) = 583; goto _test_eof - _test_eof584: ( m.cs) = 584; goto _test_eof - _test_eof153: ( m.cs) = 153; goto _test_eof - _test_eof154: ( m.cs) = 154; goto _test_eof - _test_eof585: ( m.cs) = 585; goto _test_eof - _test_eof155: ( m.cs) = 155; goto _test_eof - _test_eof586: ( m.cs) = 586; goto _test_eof - _test_eof587: ( m.cs) = 587; goto _test_eof - _test_eof588: ( m.cs) = 588; goto _test_eof - _test_eof589: ( m.cs) = 589; goto _test_eof - _test_eof590: ( m.cs) = 590; goto _test_eof - _test_eof591: ( m.cs) = 591; goto _test_eof - _test_eof592: ( m.cs) = 592; goto _test_eof - _test_eof593: ( m.cs) = 593; goto _test_eof - _test_eof156: ( m.cs) = 156; goto _test_eof - _test_eof157: ( m.cs) = 157; goto _test_eof - _test_eof158: ( m.cs) = 158; goto _test_eof - _test_eof594: ( m.cs) = 594; goto _test_eof - _test_eof159: ( m.cs) = 159; goto _test_eof - _test_eof160: ( m.cs) = 160; goto _test_eof - _test_eof161: ( m.cs) = 161; goto _test_eof - _test_eof595: ( m.cs) = 595; goto _test_eof - _test_eof162: ( m.cs) = 162; goto _test_eof - _test_eof163: ( m.cs) = 163; goto _test_eof - _test_eof596: ( m.cs) = 596; goto _test_eof - _test_eof597: ( m.cs) = 597; goto _test_eof - _test_eof164: ( m.cs) = 164; goto _test_eof - _test_eof165: ( m.cs) = 165; goto _test_eof - _test_eof166: ( m.cs) = 166; goto _test_eof - _test_eof167: ( m.cs) = 167; goto _test_eof - _test_eof168: ( m.cs) = 168; goto _test_eof - _test_eof169: ( m.cs) = 169; goto _test_eof - _test_eof598: ( m.cs) = 598; goto _test_eof - _test_eof599: ( m.cs) = 599; goto _test_eof - _test_eof600: ( m.cs) = 600; goto _test_eof - _test_eof601: ( m.cs) = 601; goto _test_eof - _test_eof602: ( m.cs) = 602; goto _test_eof - _test_eof603: ( m.cs) = 603; goto _test_eof - _test_eof604: ( m.cs) = 604; goto _test_eof - _test_eof605: ( m.cs) = 605; goto _test_eof - _test_eof606: ( m.cs) = 606; goto _test_eof - _test_eof607: ( m.cs) = 607; goto _test_eof - _test_eof608: ( m.cs) = 608; goto _test_eof - _test_eof609: ( m.cs) = 609; goto _test_eof - _test_eof610: ( m.cs) = 610; goto _test_eof - _test_eof611: ( m.cs) = 611; goto _test_eof - _test_eof612: ( m.cs) = 612; goto _test_eof - _test_eof613: ( m.cs) = 613; goto _test_eof - _test_eof614: ( m.cs) = 614; goto _test_eof - _test_eof615: ( m.cs) = 615; goto _test_eof - _test_eof616: ( m.cs) = 616; goto _test_eof - _test_eof170: ( m.cs) = 170; goto _test_eof - _test_eof171: ( m.cs) = 171; goto _test_eof - _test_eof172: ( m.cs) = 172; goto _test_eof - _test_eof617: ( m.cs) = 617; goto _test_eof - _test_eof618: ( m.cs) = 618; goto _test_eof - _test_eof619: ( m.cs) = 619; goto _test_eof - _test_eof173: ( m.cs) = 173; goto _test_eof - _test_eof620: ( m.cs) = 620; goto _test_eof - _test_eof621: ( m.cs) = 621; goto _test_eof - _test_eof174: ( m.cs) = 174; goto _test_eof - _test_eof622: ( m.cs) = 622; goto _test_eof - _test_eof623: ( m.cs) = 623; goto _test_eof - _test_eof624: ( m.cs) = 624; goto _test_eof - _test_eof625: ( m.cs) = 625; goto _test_eof - _test_eof626: ( m.cs) = 626; goto _test_eof - _test_eof175: ( m.cs) = 175; goto _test_eof - _test_eof176: ( m.cs) = 176; goto _test_eof - _test_eof177: ( m.cs) = 177; goto _test_eof - _test_eof627: ( m.cs) = 627; goto _test_eof - _test_eof178: ( m.cs) = 178; goto _test_eof - _test_eof179: ( m.cs) = 179; goto _test_eof - _test_eof180: ( m.cs) = 180; goto _test_eof - _test_eof628: ( m.cs) = 628; goto _test_eof - _test_eof181: ( m.cs) = 181; goto _test_eof - _test_eof182: ( m.cs) = 182; goto _test_eof - _test_eof629: ( m.cs) = 629; goto _test_eof - _test_eof630: ( m.cs) = 630; goto _test_eof - _test_eof183: ( m.cs) = 183; goto _test_eof - _test_eof631: ( m.cs) = 631; goto _test_eof - _test_eof632: ( m.cs) = 632; goto _test_eof - _test_eof633: ( m.cs) = 633; goto _test_eof - _test_eof184: ( m.cs) = 184; goto _test_eof - _test_eof185: ( m.cs) = 185; goto _test_eof - _test_eof186: ( m.cs) = 186; goto _test_eof - _test_eof634: ( m.cs) = 634; goto _test_eof - _test_eof187: ( m.cs) = 187; goto _test_eof - _test_eof188: ( m.cs) = 188; goto _test_eof - _test_eof189: ( m.cs) = 189; goto _test_eof - _test_eof635: ( m.cs) = 635; goto _test_eof - _test_eof190: ( m.cs) = 190; goto _test_eof - _test_eof191: ( m.cs) = 191; goto _test_eof - _test_eof636: ( m.cs) = 636; goto _test_eof - _test_eof637: ( m.cs) = 637; goto _test_eof - _test_eof192: ( m.cs) = 192; goto _test_eof - _test_eof193: ( m.cs) = 193; goto _test_eof - _test_eof194: ( m.cs) = 194; goto _test_eof - _test_eof638: ( m.cs) = 638; goto _test_eof - _test_eof195: ( m.cs) = 195; goto _test_eof - _test_eof196: ( m.cs) = 196; goto _test_eof - _test_eof639: ( m.cs) = 639; goto _test_eof - _test_eof640: ( m.cs) = 640; goto _test_eof - _test_eof641: ( m.cs) = 641; goto _test_eof - _test_eof642: ( m.cs) = 642; goto _test_eof - _test_eof643: ( m.cs) = 643; goto _test_eof - _test_eof644: ( m.cs) = 644; goto _test_eof - _test_eof645: ( m.cs) = 645; goto _test_eof - _test_eof646: ( m.cs) = 646; goto _test_eof - _test_eof197: ( m.cs) = 197; goto _test_eof - _test_eof198: ( m.cs) = 198; goto _test_eof - _test_eof199: ( m.cs) = 199; goto _test_eof - _test_eof647: ( m.cs) = 647; goto _test_eof - _test_eof200: ( m.cs) = 200; goto _test_eof - _test_eof201: ( m.cs) = 201; goto _test_eof - _test_eof202: ( m.cs) = 202; goto _test_eof - _test_eof648: ( m.cs) = 648; goto _test_eof - _test_eof203: ( m.cs) = 203; goto _test_eof - _test_eof204: ( m.cs) = 204; goto _test_eof - _test_eof649: ( m.cs) = 649; goto _test_eof - _test_eof650: ( m.cs) = 650; goto _test_eof - _test_eof205: ( m.cs) = 205; goto _test_eof - _test_eof206: ( m.cs) = 206; goto _test_eof - _test_eof207: ( m.cs) = 207; goto _test_eof - _test_eof651: ( m.cs) = 651; goto _test_eof - _test_eof652: ( m.cs) = 652; goto _test_eof - _test_eof653: ( m.cs) = 653; goto _test_eof - _test_eof654: ( m.cs) = 654; goto _test_eof - _test_eof655: ( m.cs) = 655; goto _test_eof - _test_eof656: ( m.cs) = 656; goto _test_eof - _test_eof657: ( m.cs) = 657; goto _test_eof - _test_eof658: ( m.cs) = 658; goto _test_eof - _test_eof659: ( m.cs) = 659; goto _test_eof - _test_eof660: ( m.cs) = 660; goto _test_eof - _test_eof661: ( m.cs) = 661; goto _test_eof - _test_eof662: ( m.cs) = 662; goto _test_eof - _test_eof663: ( m.cs) = 663; goto _test_eof - _test_eof664: ( m.cs) = 664; goto _test_eof - _test_eof665: ( m.cs) = 665; goto _test_eof - _test_eof666: ( m.cs) = 666; goto _test_eof - _test_eof667: ( m.cs) = 667; goto _test_eof - _test_eof668: ( m.cs) = 668; goto _test_eof - _test_eof669: ( m.cs) = 669; goto _test_eof - _test_eof208: ( m.cs) = 208; goto _test_eof - _test_eof209: ( m.cs) = 209; goto _test_eof - _test_eof210: ( m.cs) = 210; goto _test_eof - _test_eof211: ( m.cs) = 211; goto _test_eof - _test_eof212: ( m.cs) = 212; goto _test_eof - _test_eof670: ( m.cs) = 670; goto _test_eof - _test_eof213: ( m.cs) = 213; goto _test_eof - _test_eof214: ( m.cs) = 214; goto _test_eof - _test_eof671: ( m.cs) = 671; goto _test_eof - _test_eof672: ( m.cs) = 672; goto _test_eof - _test_eof673: ( m.cs) = 673; goto _test_eof - _test_eof674: ( m.cs) = 674; goto _test_eof - _test_eof675: ( m.cs) = 675; goto _test_eof - _test_eof676: ( m.cs) = 676; goto _test_eof - _test_eof677: ( m.cs) = 677; goto _test_eof - _test_eof678: ( m.cs) = 678; goto _test_eof - _test_eof679: ( m.cs) = 679; goto _test_eof - _test_eof215: ( m.cs) = 215; goto _test_eof - _test_eof216: ( m.cs) = 216; goto _test_eof - _test_eof217: ( m.cs) = 217; goto _test_eof - _test_eof680: ( m.cs) = 680; goto _test_eof - _test_eof218: ( m.cs) = 218; goto _test_eof - _test_eof219: ( m.cs) = 219; goto _test_eof - _test_eof220: ( m.cs) = 220; goto _test_eof - _test_eof681: ( m.cs) = 681; goto _test_eof - _test_eof221: ( m.cs) = 221; goto _test_eof - _test_eof222: ( m.cs) = 222; goto _test_eof - _test_eof682: ( m.cs) = 682; goto _test_eof - _test_eof683: ( m.cs) = 683; goto _test_eof - _test_eof223: ( m.cs) = 223; goto _test_eof - _test_eof224: ( m.cs) = 224; goto _test_eof - _test_eof225: ( m.cs) = 225; goto _test_eof - _test_eof684: ( m.cs) = 684; goto _test_eof - _test_eof226: ( m.cs) = 226; goto _test_eof - _test_eof227: ( m.cs) = 227; goto _test_eof - _test_eof685: ( m.cs) = 685; goto _test_eof - _test_eof686: ( m.cs) = 686; goto _test_eof - _test_eof687: ( m.cs) = 687; goto _test_eof - _test_eof688: ( m.cs) = 688; goto _test_eof - _test_eof689: ( m.cs) = 689; goto _test_eof - _test_eof690: ( m.cs) = 690; goto _test_eof - _test_eof691: ( m.cs) = 691; goto _test_eof - _test_eof692: ( m.cs) = 692; goto _test_eof - _test_eof228: ( m.cs) = 228; goto _test_eof - _test_eof229: ( m.cs) = 229; goto _test_eof - _test_eof230: ( m.cs) = 230; goto _test_eof - _test_eof693: ( m.cs) = 693; goto _test_eof - _test_eof231: ( m.cs) = 231; goto _test_eof - _test_eof232: ( m.cs) = 232; goto _test_eof - _test_eof694: ( m.cs) = 694; goto _test_eof - _test_eof695: ( m.cs) = 695; goto _test_eof - _test_eof696: ( m.cs) = 696; goto _test_eof - _test_eof697: ( m.cs) = 697; goto _test_eof - _test_eof698: ( m.cs) = 698; goto _test_eof - _test_eof699: ( m.cs) = 699; goto _test_eof - _test_eof700: ( m.cs) = 700; goto _test_eof - _test_eof701: ( m.cs) = 701; goto _test_eof - _test_eof233: ( m.cs) = 233; goto _test_eof - _test_eof234: ( m.cs) = 234; goto _test_eof - _test_eof235: ( m.cs) = 235; goto _test_eof - _test_eof702: ( m.cs) = 702; goto _test_eof - _test_eof236: ( m.cs) = 236; goto _test_eof - _test_eof237: ( m.cs) = 237; goto _test_eof - _test_eof238: ( m.cs) = 238; goto _test_eof - _test_eof703: ( m.cs) = 703; goto _test_eof - _test_eof239: ( m.cs) = 239; goto _test_eof - _test_eof240: ( m.cs) = 240; goto _test_eof - _test_eof704: ( m.cs) = 704; goto _test_eof - _test_eof705: ( m.cs) = 705; goto _test_eof - _test_eof241: ( m.cs) = 241; goto _test_eof - _test_eof242: ( m.cs) = 242; goto _test_eof - _test_eof243: ( m.cs) = 243; goto _test_eof - _test_eof706: ( m.cs) = 706; goto _test_eof - _test_eof707: ( m.cs) = 707; goto _test_eof - _test_eof708: ( m.cs) = 708; goto _test_eof - _test_eof709: ( m.cs) = 709; goto _test_eof - _test_eof710: ( m.cs) = 710; goto _test_eof - _test_eof711: ( m.cs) = 711; goto _test_eof - _test_eof712: ( m.cs) = 712; goto _test_eof - _test_eof713: ( m.cs) = 713; goto _test_eof - _test_eof714: ( m.cs) = 714; goto _test_eof - _test_eof715: ( m.cs) = 715; goto _test_eof - _test_eof716: ( m.cs) = 716; goto _test_eof - _test_eof717: ( m.cs) = 717; goto _test_eof - _test_eof718: ( m.cs) = 718; goto _test_eof - _test_eof719: ( m.cs) = 719; goto _test_eof - _test_eof720: ( m.cs) = 720; goto _test_eof - _test_eof721: ( m.cs) = 721; goto _test_eof - _test_eof722: ( m.cs) = 722; goto _test_eof - _test_eof723: ( m.cs) = 723; goto _test_eof - _test_eof724: ( m.cs) = 724; goto _test_eof - _test_eof244: ( m.cs) = 244; goto _test_eof - _test_eof245: ( m.cs) = 245; goto _test_eof - _test_eof725: ( m.cs) = 725; goto _test_eof - _test_eof246: ( m.cs) = 246; goto _test_eof - _test_eof247: ( m.cs) = 247; goto _test_eof - _test_eof726: ( m.cs) = 726; goto _test_eof - _test_eof727: ( m.cs) = 727; goto _test_eof - _test_eof728: ( m.cs) = 728; goto _test_eof - _test_eof729: ( m.cs) = 729; goto _test_eof - _test_eof730: ( m.cs) = 730; goto _test_eof - _test_eof731: ( m.cs) = 731; goto _test_eof - _test_eof732: ( m.cs) = 732; goto _test_eof - _test_eof733: ( m.cs) = 733; goto _test_eof - _test_eof248: ( m.cs) = 248; goto _test_eof - _test_eof249: ( m.cs) = 249; goto _test_eof - _test_eof250: ( m.cs) = 250; goto _test_eof - _test_eof734: ( m.cs) = 734; goto _test_eof - _test_eof251: ( m.cs) = 251; goto _test_eof - _test_eof252: ( m.cs) = 252; goto _test_eof - _test_eof253: ( m.cs) = 253; goto _test_eof - _test_eof735: ( m.cs) = 735; goto _test_eof - _test_eof254: ( m.cs) = 254; goto _test_eof - _test_eof255: ( m.cs) = 255; goto _test_eof - _test_eof736: ( m.cs) = 736; goto _test_eof - _test_eof737: ( m.cs) = 737; goto _test_eof - _test_eof256: ( m.cs) = 256; goto _test_eof - _test_eof257: ( m.cs) = 257; goto _test_eof - _test_eof738: ( m.cs) = 738; goto _test_eof - _test_eof260: ( m.cs) = 260; goto _test_eof - _test_eof740: ( m.cs) = 740; goto _test_eof - _test_eof741: ( m.cs) = 741; goto _test_eof - _test_eof261: ( m.cs) = 261; goto _test_eof - _test_eof262: ( m.cs) = 262; goto _test_eof - _test_eof263: ( m.cs) = 263; goto _test_eof - _test_eof264: ( m.cs) = 264; goto _test_eof - _test_eof742: ( m.cs) = 742; goto _test_eof - _test_eof265: ( m.cs) = 265; goto _test_eof - _test_eof743: ( m.cs) = 743; goto _test_eof - _test_eof266: ( m.cs) = 266; goto _test_eof - _test_eof267: ( m.cs) = 267; goto _test_eof - _test_eof268: ( m.cs) = 268; goto _test_eof - _test_eof739: ( m.cs) = 739; goto _test_eof - _test_eof258: ( m.cs) = 258; goto _test_eof - _test_eof259: ( m.cs) = 259; goto _test_eof - - _test_eof: {} - if ( m.p) == ( m.eof) { - switch ( m.cs) { - case 7, 260: -//line plugins/parsers/influx/machine.go.rl:32 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 2, 3, 4, 5, 6, 27, 30, 31, 34, 35, 36, 48, 49, 50, 51, 52, 72, 73, 75, 92, 102, 104, 140, 152, 155, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256: -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 12, 13, 14, 21, 23, 24, 262, 263, 264, 265, 266, 267: -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 243: -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 740: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - - case 742, 743: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - - case 270, 271, 272, 273, 274, 276, 277, 296, 297, 298, 300, 301, 304, 305, 326, 327, 328, 329, 331, 375, 376, 378, 379, 401, 402, 407, 408, 410, 430, 431, 433, 434, 456, 457, 617, 620: -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 9, 37, 39, 164, 166: -//line plugins/parsers/influx/machine.go.rl:32 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 33, 74, 103, 169, 207: -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 19, 43, 44, 45, 57, 58, 60, 62, 67, 69, 70, 76, 77, 78, 83, 85, 87, 88, 96, 97, 99, 100, 101, 106, 107, 108, 121, 122, 136, 137: -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 59: -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 269: -//line plugins/parsers/influx/machine.go.rl:82 - - m.beginMetric = true - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 1: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 299, 302, 306, 374, 398, 399, 403, 404, 405, 529, 563, 564, 566: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 15, 22: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 350, 351, 352, 354, 373, 429, 453, 454, 458, 478, 494, 495, 497: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 623, 674, 688, 728: -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 624, 677, 691, 731: -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 325, 618, 619, 621, 622, 625, 631, 632, 670, 671, 672, 673, 675, 676, 678, 684, 685, 686, 687, 689, 690, 692, 725, 726, 727, 729, 730, 732: -//line plugins/parsers/influx/machine.go.rl:130 - - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 626, 627, 628, 629, 630, 633, 634, 635, 636, 637, 679, 680, 681, 682, 683, 733, 734, 735, 736, 737: -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 275, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 330, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 377, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 409, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 432, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724: -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 8: -//line plugins/parsers/influx/machine.go.rl:32 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } + goto st36 + st_out: + _test_eof46: ( m.cs) = 46; goto _test_eof + _test_eof1: ( m.cs) = 1; goto _test_eof + _test_eof2: ( m.cs) = 2; goto _test_eof + _test_eof3: ( m.cs) = 3; goto _test_eof + _test_eof4: ( m.cs) = 4; goto _test_eof + _test_eof5: ( m.cs) = 5; goto _test_eof + _test_eof6: ( m.cs) = 6; goto _test_eof + _test_eof47: ( m.cs) = 47; goto _test_eof + _test_eof48: ( m.cs) = 48; goto _test_eof + _test_eof49: ( m.cs) = 49; goto _test_eof + _test_eof7: ( m.cs) = 7; goto _test_eof + _test_eof8: ( m.cs) = 8; goto _test_eof + _test_eof9: ( m.cs) = 9; goto _test_eof + _test_eof10: ( m.cs) = 10; goto _test_eof + _test_eof50: ( m.cs) = 50; goto _test_eof + _test_eof51: ( m.cs) = 51; goto _test_eof + _test_eof52: ( m.cs) = 52; goto _test_eof + _test_eof53: ( m.cs) = 53; goto _test_eof + _test_eof54: ( m.cs) = 54; goto _test_eof + _test_eof55: ( m.cs) = 55; goto _test_eof + _test_eof56: ( m.cs) = 56; goto _test_eof + _test_eof57: ( m.cs) = 57; goto _test_eof + _test_eof58: ( m.cs) = 58; goto _test_eof + _test_eof59: ( m.cs) = 59; goto _test_eof + _test_eof60: ( m.cs) = 60; goto _test_eof + _test_eof61: ( m.cs) = 61; goto _test_eof + _test_eof62: ( m.cs) = 62; goto _test_eof + _test_eof63: ( m.cs) = 63; goto _test_eof + _test_eof64: ( m.cs) = 64; goto _test_eof + _test_eof65: ( m.cs) = 65; goto _test_eof + _test_eof66: ( m.cs) = 66; goto _test_eof + _test_eof67: ( m.cs) = 67; goto _test_eof + _test_eof68: ( m.cs) = 68; goto _test_eof + _test_eof69: ( m.cs) = 69; goto _test_eof + _test_eof11: ( m.cs) = 11; goto _test_eof + _test_eof12: ( m.cs) = 12; goto _test_eof + _test_eof13: ( m.cs) = 13; goto _test_eof + _test_eof14: ( m.cs) = 14; goto _test_eof + _test_eof15: ( m.cs) = 15; goto _test_eof + _test_eof70: ( m.cs) = 70; goto _test_eof + _test_eof16: ( m.cs) = 16; goto _test_eof + _test_eof17: ( m.cs) = 17; goto _test_eof + _test_eof71: ( m.cs) = 71; goto _test_eof + _test_eof72: ( m.cs) = 72; goto _test_eof + _test_eof73: ( m.cs) = 73; goto _test_eof + _test_eof74: ( m.cs) = 74; goto _test_eof + _test_eof75: ( m.cs) = 75; goto _test_eof + _test_eof76: ( m.cs) = 76; goto _test_eof + _test_eof77: ( m.cs) = 77; goto _test_eof + _test_eof78: ( m.cs) = 78; goto _test_eof + _test_eof79: ( m.cs) = 79; goto _test_eof + _test_eof18: ( m.cs) = 18; goto _test_eof + _test_eof19: ( m.cs) = 19; goto _test_eof + _test_eof20: ( m.cs) = 20; goto _test_eof + _test_eof80: ( m.cs) = 80; goto _test_eof + _test_eof21: ( m.cs) = 21; goto _test_eof + _test_eof22: ( m.cs) = 22; goto _test_eof + _test_eof23: ( m.cs) = 23; goto _test_eof + _test_eof81: ( m.cs) = 81; goto _test_eof + _test_eof24: ( m.cs) = 24; goto _test_eof + _test_eof25: ( m.cs) = 25; goto _test_eof + _test_eof82: ( m.cs) = 82; goto _test_eof + _test_eof83: ( m.cs) = 83; goto _test_eof + _test_eof26: ( m.cs) = 26; goto _test_eof + _test_eof27: ( m.cs) = 27; goto _test_eof + _test_eof28: ( m.cs) = 28; goto _test_eof + _test_eof29: ( m.cs) = 29; goto _test_eof + _test_eof30: ( m.cs) = 30; goto _test_eof + _test_eof31: ( m.cs) = 31; goto _test_eof + _test_eof32: ( m.cs) = 32; goto _test_eof + _test_eof33: ( m.cs) = 33; goto _test_eof + _test_eof34: ( m.cs) = 34; goto _test_eof + _test_eof84: ( m.cs) = 84; goto _test_eof + _test_eof37: ( m.cs) = 37; goto _test_eof + _test_eof86: ( m.cs) = 86; goto _test_eof + _test_eof87: ( m.cs) = 87; goto _test_eof + _test_eof38: ( m.cs) = 38; goto _test_eof + _test_eof39: ( m.cs) = 39; goto _test_eof + _test_eof40: ( m.cs) = 40; goto _test_eof + _test_eof41: ( m.cs) = 41; goto _test_eof + _test_eof88: ( m.cs) = 88; goto _test_eof + _test_eof42: ( m.cs) = 42; goto _test_eof + _test_eof89: ( m.cs) = 89; goto _test_eof + _test_eof43: ( m.cs) = 43; goto _test_eof + _test_eof44: ( m.cs) = 44; goto _test_eof + _test_eof45: ( m.cs) = 45; goto _test_eof + _test_eof85: ( m.cs) = 85; goto _test_eof + _test_eof35: ( m.cs) = 35; goto _test_eof + _test_eof36: ( m.cs) = 36; goto _test_eof - case 98: -//line plugins/parsers/influx/machine.go.rl:46 + _test_eof: {} + if ( m.p) == ( m.eof) { + switch ( m.cs) { + case 7, 37: +//line plugins/parsers/influx/machine.go.rl:32 - err = ErrTagParse + err = ErrNameParse ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } + case 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25: //line plugins/parsers/influx/machine.go.rl:39 err = ErrFieldParse ( m.p)-- - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } - case 10, 11, 25, 26, 28, 29, 40, 41, 53, 54, 55, 56, 71, 90, 91, 93, 95, 138, 139, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 153, 154, 156, 157, 158, 159, 160, 161, 162, 163, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - + case 26, 27, 28, 30, 32, 33, 39, 40, 41, 42, 43, 44: //line plugins/parsers/influx/machine.go.rl:46 err = ErrTagParse ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:39 + case 10: +//line plugins/parsers/influx/machine.go.rl:53 - err = ErrFieldParse + err = ErrTimestampParse ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } - case 534, 588, 696: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:112 - - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 537, 591, 699: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:121 - - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:178 - - m.finishMetric = true - - case 406, 530, 531, 532, 533, 535, 536, 538, 562, 585, 586, 587, 589, 590, 592, 693, 694, 695, 697, 698, 700: + case 86: //line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:130 + case 88, 89: +//line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddFloat(m.key, m.text()) + err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } } + case 47, 48, 49, 51: //line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 539, 540, 541, 542, 543, 593, 594, 595, 596, 597, 701, 702, 703, 704, 705: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:139 - - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + case 46: +//line plugins/parsers/influx/machine.go.rl:82 - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:178 m.finishMetric = true - case 303, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 400, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 565, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584: + case 1: //line plugins/parsers/influx/machine.go.rl:86 err = m.handler.SetMeasurement(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } } -//line plugins/parsers/influx/machine.go.rl:157 - - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } +//line plugins/parsers/influx/machine.go.rl:46 -//line plugins/parsers/influx/machine.go.rl:178 + err = ErrTagParse + ( m.p)-- - m.finishMetric = true + ( m.cs) = 34; + {( m.p)++; ( m.cs) = 0; goto _out } - case 16, 17, 18, 20, 46, 47, 63, 64, 65, 66, 68, 79, 80, 81, 82, 84, 86, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 123, 124, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204: + case 29, 31: //line plugins/parsers/influx/machine.go.rl:99 err = m.handler.AddTag(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } } @@ -31335,35 +3219,17 @@ tr439: err = ErrTagParse ( m.p)-- - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } - case 483, 519, 641: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - + case 74: //line plugins/parsers/influx/machine.go.rl:112 err = m.handler.AddInt(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } } @@ -31371,24 +3237,14 @@ tr439: m.finishMetric = true - case 486, 522, 644: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - + case 77: //line plugins/parsers/influx/machine.go.rl:121 err = m.handler.AddUint(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } } @@ -31396,24 +3252,14 @@ tr439: m.finishMetric = true - case 477, 479, 480, 481, 482, 484, 485, 487, 493, 516, 517, 518, 520, 521, 523, 638, 639, 640, 642, 643, 645: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - + case 70, 71, 72, 73, 75, 76, 78: //line plugins/parsers/influx/machine.go.rl:130 err = m.handler.AddFloat(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } } @@ -31421,24 +3267,14 @@ tr439: m.finishMetric = true - case 488, 489, 490, 491, 492, 524, 525, 526, 527, 528, 646, 647, 648, 649, 650: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - + case 79, 80, 81, 82, 83: //line plugins/parsers/influx/machine.go.rl:139 err = m.handler.AddBool(m.key, m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } } @@ -31446,24 +3282,14 @@ tr439: m.finishMetric = true - case 353, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 455, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 496, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - + case 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69: //line plugins/parsers/influx/machine.go.rl:157 err = m.handler.SetTimestamp(m.text()) if err != nil { ( m.p)-- - ( m.cs) = 257; + ( m.cs) = 34; {( m.p)++; ( m.cs) = 0; goto _out } } @@ -31471,112 +3297,7 @@ tr439: m.finishMetric = true - case 38, 165, 167, 168, 205, 206, 241, 242: -//line plugins/parsers/influx/machine.go.rl:32 - - err = ErrNameParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 42, 89, 151: -//line plugins/parsers/influx/machine.go.rl:86 - - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - - case 61, 105, 125: -//line plugins/parsers/influx/machine.go.rl:99 - - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - } - -//line plugins/parsers/influx/machine.go.rl:46 - - err = ErrTagParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:39 - - err = ErrFieldParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go.rl:53 - - err = ErrTimestampParse - ( m.p)-- - - ( m.cs) = 257; - {( m.p)++; ( m.cs) = 0; goto _out } - -//line plugins/parsers/influx/machine.go:31580 +//line plugins/parsers/influx/machine.go:3301 } } diff --git a/plugins/parsers/influx/machine.go.rl b/plugins/parsers/influx/machine.go.rl index f8f40cd7c1dc0..29f4307860ea2 100644 --- a/plugins/parsers/influx/machine.go.rl +++ b/plugins/parsers/influx/machine.go.rl @@ -204,7 +204,7 @@ timestamp = ('-'? digit{1,19}) >begin %timestamp; fieldkeychar = - [^\t\n\f\r ,=\\] | ( '\\' [^\t\n\f\r] ); + [^\t\n\v\f\r ,=\\] | ( '\\' [^\t\n\v\f\r] ); fieldkey = fieldkeychar+ >begin %fieldkey; @@ -245,7 +245,7 @@ fieldset = field ( ',' field )*; tagchar = - [^\t\n\f\r ,=\\] | ( '\\' [^\t\n\f\r\\] ) | '\\\\' %to{ fhold; }; + [^\t\n\v\f\r ,=\\] | ( '\\' [^\t\n\v\f\r\\] ) | '\\\\' %to{ fhold; }; tagkey = tagchar+ >begin %tagkey; @@ -257,7 +257,7 @@ tagset = ((',' tagkey '=' tagvalue) $err(tagset_error))*; measurement_chars = - [^\t\n\f\r ,\\] | ( '\\' [^\t\n\f\r] ); + [^\t\n\v\f\r ,\\] | ( '\\' [^\t\n\v\f\r] ); measurement_start = measurement_chars - '#'; From 8d95d3b5294259afa7505228645408378340b32a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Jan 2021 14:19:35 -0500 Subject: [PATCH 185/761] Bump github.com/Shopify/sarama from 1.27.1 to 1.27.2 (#8715) Bumps [github.com/Shopify/sarama](https://github.com/Shopify/sarama) from 1.27.1 to 1.27.2. - [Release notes](https://github.com/Shopify/sarama/releases) - [Changelog](https://github.com/Shopify/sarama/blob/master/CHANGELOG.md) - [Commits](https://github.com/Shopify/sarama/compare/v1.27.1...v1.27.2) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 19c7f581ccc77..24847ba9bce6b 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee github.com/Microsoft/ApplicationInsights-Go v0.4.2 github.com/Microsoft/go-winio v0.4.9 // indirect - github.com/Shopify/sarama v1.27.1 + github.com/Shopify/sarama v1.27.2 github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect github.com/aerospike/aerospike-client-go v1.27.0 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 diff --git a/go.sum b/go.sum index c3e278600f876..99694dc7f0d0b 100644 --- a/go.sum +++ b/go.sum @@ -86,8 +86,8 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.27.1 h1:iUlzHymqWsITyttu6KxazcAz8WEj5FqcwFK/oEi7rE8= -github.com/Shopify/sarama v1.27.1/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= +github.com/Shopify/sarama v1.27.2 h1:1EyY1dsxNDUQEv0O/4TsjosHI2CgB1uo9H/v56xzTxc= +github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= From eb42f11cd6191221c6b815ec57933cd9f81b23f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Jan 2021 14:20:40 -0500 Subject: [PATCH 186/761] Bump github.com/nsqio/go-nsq from 1.0.7 to 1.0.8 (#8714) Bumps [github.com/nsqio/go-nsq](https://github.com/nsqio/go-nsq) from 1.0.7 to 1.0.8. - [Release notes](https://github.com/nsqio/go-nsq/releases) - [Changelog](https://github.com/nsqio/go-nsq/blob/master/ChangeLog.md) - [Commits](https://github.com/nsqio/go-nsq/compare/v1.0.7...v1.0.8) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 24847ba9bce6b..bd0ec9345ffbc 100644 --- a/go.mod +++ b/go.mod @@ -99,7 +99,7 @@ require ( github.com/nats-io/nats-server/v2 v2.1.4 github.com/nats-io/nats.go v1.9.1 github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 - github.com/nsqio/go-nsq v1.0.7 + github.com/nsqio/go-nsq v1.0.8 github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 github.com/opencontainers/go-digest v1.0.0-rc1 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect diff --git a/go.sum b/go.sum index 99694dc7f0d0b..79588d467c4f9 100644 --- a/go.sum +++ b/go.sum @@ -477,8 +477,8 @@ github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 h1:9YEHXplqlVkOltThchh+RxeO github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY= -github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= +github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk= +github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= From 358633bc4d15e7b848c89e747f4cfd00d7a6b15d Mon Sep 17 00:00:00 2001 From: "A. Binzxxxxxx" Date: Fri, 22 Jan 2021 00:14:21 +0100 Subject: [PATCH 187/761] Add setting to enable caching in ipmitool (#8335) --- plugins/inputs/ipmi_sensor/README.md | 9 +++++++ plugins/inputs/ipmi_sensor/ipmi.go | 38 ++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index f620b93cb659e..609409985cb35 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -61,6 +61,15 @@ Any of the following parameters will be added to the aformentioned query if they ## Optionally provide the hex key for the IMPI connection. # hex_key = "" + + ## If ipmitool should use a cache + ## for me ipmitool runs about 2 to 10 times faster with cache enabled on HP G10 servers (when using ubuntu20.04) + ## the cache file may not work well for you if some sensors come up late + # use_cache = false + + ## Path to the ipmitools cache file (defaults to OS temp dir) + ## The provided path must exist and must be writable + # cache_path = "" ``` ### Measurements diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index 5572a195b2c29..eb344f539e695 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -5,7 +5,9 @@ import ( "bytes" "fmt" "log" + "os" "os/exec" + "path/filepath" "regexp" "strconv" "strings" @@ -34,6 +36,8 @@ type Ipmi struct { Timeout internal.Duration MetricVersion int UseSudo bool + UseCache bool + CachePath string } var sampleConfig = ` @@ -69,6 +73,15 @@ var sampleConfig = ` ## Optionally provide the hex key for the IMPI connection. # hex_key = "" + + ## If ipmitool should use a cache + ## for me ipmitool runs about 2 to 10 times faster with cache enabled on HP G10 servers (when using ubuntu20.04) + ## the cache file may not work well for you if some sensors come up late + # use_cache = false + + ## Path to the ipmitools cache file (defaults to OS temp dir) + ## The provided path must exist and must be writable + # cache_path = "" ` // SampleConfig returns the documentation about the sample configuration @@ -119,6 +132,29 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { opts = conn.options() } opts = append(opts, "sdr") + if m.UseCache { + cacheFile := filepath.Join(m.CachePath, server+"_ipmi_cache") + _, err := os.Stat(cacheFile) + if os.IsNotExist(err) { + dumpOpts := opts + // init cache file + dumpOpts = append(dumpOpts, "dump") + dumpOpts = append(dumpOpts, cacheFile) + name := m.Path + if m.UseSudo { + // -n - avoid prompting the user for input of any kind + dumpOpts = append([]string{"-n", name}, dumpOpts...) + name = "sudo" + } + cmd := execCommand(name, dumpOpts...) + out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration) + if err != nil { + return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) + } + } + opts = append(opts, "-S") + opts = append(opts, cacheFile) + } if m.MetricVersion == 2 { opts = append(opts, "elist") } @@ -294,6 +330,8 @@ func init() { m.Path = path } m.Timeout = internal.Duration{Duration: time.Second * 20} + m.UseCache = false + m.CachePath = os.TempDir() inputs.Add("ipmi_sensor", func() telegraf.Input { m := m return &m From eda1dbc4dce580ab73214e992eb3f258412aa3d1 Mon Sep 17 00:00:00 2001 From: viperstars Date: Wed, 27 Jan 2021 02:02:23 +0800 Subject: [PATCH 188/761] fix x509 cert timeout issue (#8741) --- plugins/inputs/x509_cert/x509_cert.go | 2 +- plugins/inputs/x509_cert/x509_cert_test.go | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index b4a8a0716ffb5..529b4c76dfc2d 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -215,7 +215,7 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { return nil } - certs, err := c.getCert(u, c.Timeout.Duration*time.Second) + certs, err := c.getCert(u, c.Timeout.Duration) if err != nil { acc.AddError(fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error())) } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 58f86a65473f9..bb882a470c96d 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -350,6 +350,24 @@ func TestGatherCert(t *testing.T) { assert.True(t, acc.HasMeasurement("x509_cert")) } +func TestGatherCertMustNotTimeout(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + duration := time.Duration(15) * time.Second + m := &X509Cert{ + Sources: []string{"https://www.influxdata.com:443"}, + Timeout: internal.Duration{Duration: duration}, + } + m.Init() + + var acc testutil.Accumulator + err := m.Gather(&acc) + require.NoError(t, err) + require.Empty(t, acc.Errors) + assert.True(t, acc.HasMeasurement("x509_cert")) +} + func TestServerName(t *testing.T) { tests := []struct { name string From d41569caed8da4b0f8a533528e8c9f7d4e6ace3f Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 26 Jan 2021 13:06:12 -0500 Subject: [PATCH 189/761] Integration test audit (#8725) --- CONTRIBUTING.md | 8 +- Makefile | 4 + docker-compose.yml | 3 + docs/INTEGRATION_TESTS.md | 61 +++++++++++++ internal/internal_test.go | 5 +- logger/event_logger_test.go | 4 +- plugins/inputs/aerospike/aerospike_test.go | 20 ++-- plugins/inputs/disque/disque_test.go | 4 +- plugins/inputs/dns_query/dns_query_test.go | 14 ++- plugins/inputs/dovecot/dovecot_test.go | 2 +- plugins/inputs/mcrouter/mcrouter_test.go | 91 ++++++++++++++----- plugins/inputs/memcached/memcached_test.go | 2 +- plugins/inputs/mysql/mysql_test.go | 13 ++- plugins/inputs/opcua/opcua_client_test.go | 6 +- plugins/inputs/openldap/openldap_test.go | 38 +++----- plugins/inputs/pgbouncer/pgbouncer_test.go | 9 +- plugins/inputs/ping/ping_test.go | 4 +- plugins/inputs/postgresql/postgresql_test.go | 12 +-- plugins/inputs/postgresql/service.go | 7 +- .../postgresql_extensible_test.go | 8 +- .../procstat/native_finder_windows_test.go | 6 +- plugins/inputs/prometheus/prometheus_test.go | 2 +- plugins/inputs/redis/redis_test.go | 2 +- plugins/inputs/sqlserver/sqlserver_test.go | 8 +- .../win_perf_counters_integration_test.go | 29 +++--- .../win_services_integration_test.go | 6 +- plugins/inputs/x509_cert/x509_cert_test.go | 8 +- plugins/inputs/zookeeper/zookeeper_test.go | 2 +- plugins/outputs/cratedb/cratedb_test.go | 16 ++-- .../elasticsearch/elasticsearch_test.go | 8 +- plugins/outputs/exec/exec_test.go | 7 +- plugins/outputs/kafka/kafka_test.go | 6 +- plugins/outputs/mqtt/mqtt_test.go | 2 +- plugins/outputs/nats/nats_test.go | 2 +- plugins/outputs/nsq/nsq_test.go | 2 +- plugins/outputs/opentsdb/opentsdb_test.go | 71 +++++++-------- .../outputs/riemann_legacy/riemann_test.go | 4 +- plugins/processors/ifname/ifname_test.go | 11 +-- 38 files changed, 305 insertions(+), 202 deletions(-) create mode 100644 docs/INTEGRATION_TESTS.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0b2ad0ede3f28..a5ff6b2977560 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -61,7 +61,13 @@ running. You can start the containers with: docker-compose up ``` -And run the full test suite with: +To run only the integration tests use: + +``` +make test-integration +``` + +To run the full test suite use: ``` make test-all ``` diff --git a/Makefile b/Makefile index 284cbaf86d938..ac2281499602c 100644 --- a/Makefile +++ b/Makefile @@ -98,6 +98,10 @@ go-install: test: go test -short $(race_detector) ./... +.PHONY: test-integration +test-integration: + go test -run Integration $(race_detector) ./... + .PHONY: fmt fmt: @gofmt -s -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)) diff --git a/docker-compose.yml b/docker-compose.yml index 092a7b9144c3e..1da9d2a3b0de6 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -40,6 +40,9 @@ services: - MYSQL_ALLOW_EMPTY_PASSWORD=yes ports: - "3306:3306" + # removes warning "mbind operation not permitted" enables you to see the docker logs + cap_add: + - SYS_NICE # CAP_SYS_NICE memcached: image: memcached ports: diff --git a/docs/INTEGRATION_TESTS.md b/docs/INTEGRATION_TESTS.md new file mode 100644 index 0000000000000..b7af829588c8b --- /dev/null +++ b/docs/INTEGRATION_TESTS.md @@ -0,0 +1,61 @@ +# Integration Tests + +To run our current integration test suite: + +Running the integration tests requires several docker containers to be +running. You can start the containers with: +``` +docker-compose up +``` + +To run only the integration tests use: + +``` +make test-integration +``` + +Use `make docker-kill` to stop the containers. + +Contributing integration tests: + +- Add Integration to the end of the test name so it will be run with the above command. +- Writes tests where no library is being used in the plugin +- There is poor code coverage +- It has dynamic code that only gets run at runtime eg: SQL + +Current areas we have integration tests: + +| Area | What it does | +|------------------------------------|-------------------------------------------| +| Inputs: Aerospike | | +| Inputs: Disque | | +| Inputs: Dovecot | | +| Inputs: Mcrouter | | +| Inputs: Memcached | | +| Inputs: Mysql | | +| Inputs: Opcua | | +| Inputs: Openldap | | +| Inputs: Pgbouncer | | +| Inputs: Postgresql | | +| Inputs: Postgresql extensible | | +| Inputs: Procstat / Native windows | | +| Inputs: Prometheus | | +| Inputs: Redis | | +| Inputs: Sqlserver | | +| Inputs: Win perf counters | | +| Inputs: Win services | | +| Inputs: Zookeeper | | +| Outputs: Cratedb / Postgres | | +| Outputs: Elasticsearch | | +| Outputs: Kafka | | +| Outputs: MQTT | | +| Outputs: Nats | | +| Outputs: NSQ | | + +Areas we would benefit most from new integration tests: + +| Area | +|------------------------------------| +| SNMP | +| MYSQL | +| SQLSERVER | diff --git a/internal/internal_test.go b/internal/internal_test.go index 25f0503ba20a8..2161a300b2956 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -52,9 +52,8 @@ var ( ) func TestRunTimeout(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test due to random failures.") - } + t.Skip("Skipping test due to random failures & a data race when running test-all.") + if sleepbin == "" { t.Skip("'sleep' binary not available on OS, skipping.") } diff --git a/logger/event_logger_test.go b/logger/event_logger_test.go index 4dddeb2ec2b85..05c27b1757e87 100644 --- a/logger/event_logger_test.go +++ b/logger/event_logger_test.go @@ -45,7 +45,7 @@ func getEventLog(t *testing.T, since time.Time) []Event { return events.Events } -func TestEventLog(t *testing.T) { +func TestEventLogIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -68,7 +68,7 @@ func TestEventLog(t *testing.T) { assert.Contains(t, events, Event{Message: "Err message", Level: Error}) } -func TestRestrictedEventLog(t *testing.T) { +func TestRestrictedEventLogIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index e88c078b7ae5e..e48e2d7f23de4 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAerospikeStatistics(t *testing.T) { +func TestAerospikeStatisticsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -34,7 +34,7 @@ func TestAerospikeStatistics(t *testing.T) { } -func TestAerospikeStatisticsPartialErr(t *testing.T) { +func TestAerospikeStatisticsPartialErrIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -58,7 +58,7 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) { assert.Equal(t, namespaceName, "test") } -func TestSelectNamepsaces(t *testing.T) { +func TestSelectNamepsacesIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -92,7 +92,7 @@ func TestSelectNamepsaces(t *testing.T) { assert.False(t, acc.HasInt64Field("aerospke_namespace", "appeals_tx_remaining")) } -func TestDisableQueryNamespaces(t *testing.T) { +func TestDisableQueryNamespacesIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -119,7 +119,7 @@ func TestDisableQueryNamespaces(t *testing.T) { assert.True(t, acc.HasMeasurement("aerospike_namespace")) } -func TestQuerySets(t *testing.T) { +func TestQuerySetsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -168,7 +168,7 @@ func TestQuerySets(t *testing.T) { } -func TestSelectQuerySets(t *testing.T) { +func TestSelectQuerySetsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -218,7 +218,7 @@ func TestSelectQuerySets(t *testing.T) { } -func TestDisableTTLHistogram(t *testing.T) { +func TestDisableTTLHistogramIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -238,7 +238,7 @@ func TestDisableTTLHistogram(t *testing.T) { assert.False(t, acc.HasMeasurement("aerospike_histogram_ttl")) } -func TestTTLHistogram(t *testing.T) { +func TestTTLHistogramIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } else { @@ -266,7 +266,7 @@ func TestTTLHistogram(t *testing.T) { assert.True(t, FindTagValue(&acc, "aerospike_histogram_ttl", "namespace", "test")) } -func TestDisableObjectSizeLinearHistogram(t *testing.T) { +func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -286,7 +286,7 @@ func TestDisableObjectSizeLinearHistogram(t *testing.T) { assert.False(t, acc.HasMeasurement("aerospike_histogram_object_size_linear")) } -func TestObjectSizeLinearHistogram(t *testing.T) { +func TestObjectSizeLinearHistogramIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") diff --git a/plugins/inputs/disque/disque_test.go b/plugins/inputs/disque/disque_test.go index 1e5b764f9c820..e215e78a5f777 100644 --- a/plugins/inputs/disque/disque_test.go +++ b/plugins/inputs/disque/disque_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestDisqueGeneratesMetrics(t *testing.T) { +func TestDisqueGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -76,7 +76,7 @@ func TestDisqueGeneratesMetrics(t *testing.T) { acc.AssertContainsFields(t, "disque", fields) } -func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) { +func TestDisqueCanPullStatsFromMultipleServersIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/dns_query/dns_query_test.go b/plugins/inputs/dns_query/dns_query_test.go index 5a1379764cff0..3fa2accbc9ec3 100644 --- a/plugins/inputs/dns_query/dns_query_test.go +++ b/plugins/inputs/dns_query/dns_query_test.go @@ -67,8 +67,13 @@ func TestGatheringRootDomain(t *testing.T) { "server": "8.8.8.8", "domain": ".", "record_type": "MX", + "rcode": "NOERROR", + "result": "success", + } + fields := map[string]interface{}{ + "rcode_value": int(0), + "result_code": uint64(0), } - fields := map[string]interface{}{} err := acc.GatherError(dnsConfig.Gather) assert.NoError(t, err) @@ -93,8 +98,13 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { "server": "8.8.8.8", "domain": "google.com", "record_type": "NS", + "rcode": "NOERROR", + "result": "success", + } + fields := map[string]interface{}{ + "rcode_value": int(0), + "result_code": uint64(0), } - fields := map[string]interface{}{} err := acc.GatherError(dnsConfig.Gather) assert.NoError(t, err) diff --git a/plugins/inputs/dovecot/dovecot_test.go b/plugins/inputs/dovecot/dovecot_test.go index c801d4f0ca5f7..a9c799a274ecb 100644 --- a/plugins/inputs/dovecot/dovecot_test.go +++ b/plugins/inputs/dovecot/dovecot_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestDovecot(t *testing.T) { +func TestDovecotIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") diff --git a/plugins/inputs/mcrouter/mcrouter_test.go b/plugins/inputs/mcrouter/mcrouter_test.go index e17c13b6d6655..a9b525d46b79c 100644 --- a/plugins/inputs/mcrouter/mcrouter_test.go +++ b/plugins/inputs/mcrouter/mcrouter_test.go @@ -44,7 +44,7 @@ func TestAddressParsing(t *testing.T) { } } -func TestMcrouterGeneratesMetrics(t *testing.T) { +func TestMcrouterGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -58,26 +58,75 @@ func TestMcrouterGeneratesMetrics(t *testing.T) { err := acc.GatherError(m.Gather) require.NoError(t, err) - intMetrics := []string{"uptime", "num_servers", "num_servers_new", "num_servers_up", - "num_servers_down", "num_servers_closed", "num_clients", - "num_suspect_servers", "destination_batches_sum", "destination_requests_sum", - "outstanding_route_get_reqs_queued", "outstanding_route_update_reqs_queued", - "outstanding_route_get_avg_queue_size", "outstanding_route_update_avg_queue_size", - "outstanding_route_get_avg_wait_time_sec", "outstanding_route_update_avg_wait_time_sec", - "retrans_closed_connections", "destination_pending_reqs", "destination_inflight_reqs", - "destination_batch_size", "asynclog_requests", "proxy_reqs_processing", - "proxy_reqs_waiting", "client_queue_notify_period", - "ps_num_minor_faults", "ps_num_major_faults", - "ps_vsize", "ps_rss", "fibers_allocated", "fibers_pool_size", "fibers_stack_high_watermark", - "successful_client_connections", "duration_us", "destination_max_pending_reqs", - "destination_max_inflight_reqs", "retrans_per_kbyte_max", "cmd_get_count", "cmd_delete_out", - "cmd_lease_get", "cmd_set", "cmd_get_out_all", "cmd_get_out", "cmd_lease_set_count", - "cmd_other_out_all", "cmd_lease_get_out", "cmd_set_count", "cmd_lease_set_out", - "cmd_delete_count", "cmd_other", "cmd_delete", "cmd_get", "cmd_lease_set", "cmd_set_out", - "cmd_lease_get_count", "cmd_other_out", "cmd_lease_get_out_all", "cmd_set_out_all", - "cmd_other_count", "cmd_delete_out_all", "cmd_lease_set_out_all"} - - floatMetrics := []string{"rusage_system", "rusage_user", "ps_user_time_sec", "ps_system_time_sec"} + intMetrics := []string{ + "uptime", + // "num_servers", + // "num_servers_new", + // "num_servers_up", + // "num_servers_down", + // "num_servers_closed", + // "num_clients", + // "num_suspect_servers", + // "destination_batches_sum", + // "destination_requests_sum", + // "outstanding_route_get_reqs_queued", + // "outstanding_route_update_reqs_queued", + // "outstanding_route_get_avg_queue_size", + // "outstanding_route_update_avg_queue_size", + // "outstanding_route_get_avg_wait_time_sec", + // "outstanding_route_update_avg_wait_time_sec", + // "retrans_closed_connections", + // "destination_pending_reqs", + // "destination_inflight_reqs", + // "destination_batch_size", + // "asynclog_requests", + // "proxy_reqs_processing", + // "proxy_reqs_waiting", + // "client_queue_notify_period", + // "ps_num_minor_faults", + // "ps_num_major_faults", + // "ps_vsize", + // "ps_rss", + // "fibers_allocated", + // "fibers_pool_size", + // "fibers_stack_high_watermark", + // "successful_client_connections", + // "duration_us", + // "destination_max_pending_reqs", + // "destination_max_inflight_reqs", + // "retrans_per_kbyte_max", + // "cmd_get_count", + // "cmd_delete_out", + // "cmd_lease_get", + "cmd_set", + // "cmd_get_out_all", + // "cmd_get_out", + // "cmd_lease_set_count", + // "cmd_other_out_all", + // "cmd_lease_get_out", + // "cmd_set_count", + // "cmd_lease_set_out", + // "cmd_delete_count", + // "cmd_other", + // "cmd_delete", + "cmd_get", + // "cmd_lease_set", + // "cmd_set_out", + // "cmd_lease_get_count", + // "cmd_other_out", + // "cmd_lease_get_out_all", + // "cmd_set_out_all", + // "cmd_other_count", + // "cmd_delete_out_all", + // "cmd_lease_set_out_all" + } + + floatMetrics := []string{ + "rusage_system", + "rusage_user", + // "ps_user_time_sec", + // "ps_system_time_sec", + } for _, metric := range intMetrics { assert.True(t, acc.HasInt64Field("mcrouter", metric), metric) diff --git a/plugins/inputs/memcached/memcached_test.go b/plugins/inputs/memcached/memcached_test.go index 3c8a239f06d73..1d0807625b31b 100644 --- a/plugins/inputs/memcached/memcached_test.go +++ b/plugins/inputs/memcached/memcached_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestMemcachedGeneratesMetrics(t *testing.T) { +func TestMemcachedGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index 4d8d5ff6e2a38..0cdcd4b1cd345 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestMysqlDefaultsToLocal(t *testing.T) { +func TestMysqlDefaultsToLocalIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -26,7 +26,7 @@ func TestMysqlDefaultsToLocal(t *testing.T) { assert.True(t, acc.HasMeasurement("mysql")) } -func TestMysqlMultipleInstances(t *testing.T) { +func TestMysqlMultipleInstancesIntegration(t *testing.T) { // Invoke Gather() from two separate configurations and // confirm they don't interfere with each other if testing.Short() { @@ -34,8 +34,10 @@ func TestMysqlMultipleInstances(t *testing.T) { } testServer := "root@tcp(127.0.0.1:3306)/?tls=false" m := &Mysql{ - Servers: []string{testServer}, - IntervalSlow: "30s", + Servers: []string{testServer}, + IntervalSlow: "30s", + GatherGlobalVars: true, + MetricVersion: 2, } var acc, acc2 testutil.Accumulator @@ -46,7 +48,8 @@ func TestMysqlMultipleInstances(t *testing.T) { assert.True(t, acc.HasMeasurement("mysql_variables")) m2 := &Mysql{ - Servers: []string{testServer}, + Servers: []string{testServer}, + MetricVersion: 2, } err = m2.Gather(&acc2) require.NoError(t, err) diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index 26dd2fbd4f40d..f13e5ba9a1ee4 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -19,10 +19,8 @@ type OPCTags struct { Want string } -func TestClient1(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestClient1Integration(t *testing.T) { + t.Skip("Skipping due to dial tcp 195.254.227.245:4840: connect: connection refused") var testopctags = []OPCTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, diff --git a/plugins/inputs/openldap/openldap_test.go b/plugins/inputs/openldap/openldap_test.go index 76d9cc3a9dd42..b3e171b22e9db 100644 --- a/plugins/inputs/openldap/openldap_test.go +++ b/plugins/inputs/openldap/openldap_test.go @@ -33,7 +33,7 @@ func TestOpenldapMockResult(t *testing.T) { commonTests(t, o, &acc) } -func TestOpenldapNoConnection(t *testing.T) { +func TestOpenldapNoConnectionIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -50,10 +50,8 @@ func TestOpenldapNoConnection(t *testing.T) { assert.NotEmpty(t, acc.Errors) // test that we set an error } -func TestOpenldapGeneratesMetrics(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestOpenldapGeneratesMetricsIntegration(t *testing.T) { + t.Skip("skipping test as unable to read LDAP response packet: unexpected EOF") o := &Openldap{ Host: testutil.GetLocalHost(), @@ -66,10 +64,8 @@ func TestOpenldapGeneratesMetrics(t *testing.T) { commonTests(t, o, &acc) } -func TestOpenldapStartTLS(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestOpenldapStartTLSIntegration(t *testing.T) { + t.Skip("skipping test as unable to read LDAP response packet: unexpected EOF") o := &Openldap{ Host: testutil.GetLocalHost(), @@ -84,10 +80,8 @@ func TestOpenldapStartTLS(t *testing.T) { commonTests(t, o, &acc) } -func TestOpenldapLDAPS(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestOpenldapLDAPSIntegration(t *testing.T) { + t.Skip("skipping test as unable to read LDAP response packet: unexpected EOF") o := &Openldap{ Host: testutil.GetLocalHost(), @@ -102,10 +96,8 @@ func TestOpenldapLDAPS(t *testing.T) { commonTests(t, o, &acc) } -func TestOpenldapInvalidSSL(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestOpenldapInvalidSSLIntegration(t *testing.T) { + t.Skip("skipping test as unable to read LDAP response packet: unexpected EOF") o := &Openldap{ Host: testutil.GetLocalHost(), @@ -121,10 +113,8 @@ func TestOpenldapInvalidSSL(t *testing.T) { assert.NotEmpty(t, acc.Errors) // test that we set an error } -func TestOpenldapBind(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestOpenldapBindIntegration(t *testing.T) { + t.Skip("skipping test as unable to read LDAP response packet: unexpected EOF") o := &Openldap{ Host: testutil.GetLocalHost(), @@ -149,10 +139,8 @@ func commonTests(t *testing.T, o *Openldap, acc *testutil.Accumulator) { assert.True(t, acc.HasInt64Field("openldap", "total_connections"), "Has an integer field called total_connections") } -func TestOpenldapReverseMetrics(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestOpenldapReverseMetricsIntegration(t *testing.T) { + t.Skip("skipping test as unable to read LDAP response packet: unexpected EOF") o := &Openldap{ Host: testutil.GetLocalHost(), diff --git a/plugins/inputs/pgbouncer/pgbouncer_test.go b/plugins/inputs/pgbouncer/pgbouncer_test.go index 44e28c7f3335e..7de58a78bd013 100644 --- a/plugins/inputs/pgbouncer/pgbouncer_test.go +++ b/plugins/inputs/pgbouncer/pgbouncer_test.go @@ -2,17 +2,16 @@ package pgbouncer import ( "fmt" + "testing" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" ) -func TestPgBouncerGeneratesMetrics(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestPgBouncerGeneratesMetricsIntegration(t *testing.T) { + t.Skip("Skipping due to not allowed (SQLSTATE 08P01)") p := &PgBouncer{ Service: postgresql.Service{ diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index e3d725de33253..7aadba223e224 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -403,9 +403,7 @@ func mockHostResolver(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, // Test that Gather function works using native ping func TestPingGatherNative(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test due to permission requirements.") - } + t.Skip("Skipping test due to permission requirements.") var acc testutil.Accumulator p := Ping{ diff --git a/plugins/inputs/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go index b23321019f5f8..934d06414b7e6 100644 --- a/plugins/inputs/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestPostgresqlGeneratesMetrics(t *testing.T) { +func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -94,7 +94,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { assert.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) } -func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { +func TestPostgresqlTagsMetricsWithDatabaseNameIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -120,7 +120,7 @@ func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { assert.Equal(t, "postgres", point.Tags["db"]) } -func TestPostgresqlDefaultsToAllDatabases(t *testing.T) { +func TestPostgresqlDefaultsToAllDatabasesIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -153,7 +153,7 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) { assert.True(t, found) } -func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { +func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -176,7 +176,7 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { } } -func TestPostgresqlDatabaseWhitelistTest(t *testing.T) { +func TestPostgresqlDatabaseWhitelistTestIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -216,7 +216,7 @@ func TestPostgresqlDatabaseWhitelistTest(t *testing.T) { assert.False(t, foundTemplate1) } -func TestPostgresqlDatabaseBlacklistTest(t *testing.T) { +func TestPostgresqlDatabaseBlacklistTestIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index 96a9a63175658..580ae20e50f07 100644 --- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -3,15 +3,16 @@ package postgresql import ( "database/sql" "fmt" - "github.com/jackc/pgx" - "github.com/jackc/pgx/pgtype" - "github.com/jackc/pgx/stdlib" "net" "net/url" "regexp" "sort" "strings" + "github.com/jackc/pgx" + "github.com/jackc/pgx/pgtype" + "github.com/jackc/pgx/stdlib" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" ) diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index ac0ad05c8bd88..f78e46199a122 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -32,7 +32,7 @@ func queryRunner(t *testing.T, q query) *testutil.Accumulator { return &acc } -func TestPostgresqlGeneratesMetrics(t *testing.T) { +func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -99,7 +99,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { assert.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) } -func TestPostgresqlQueryOutputTests(t *testing.T) { +func TestPostgresqlQueryOutputTestsIntegration(t *testing.T) { const measurement = "postgresql" if testing.Short() { @@ -148,7 +148,7 @@ func TestPostgresqlQueryOutputTests(t *testing.T) { } } -func TestPostgresqlFieldOutput(t *testing.T) { +func TestPostgresqlFieldOutputIntegration(t *testing.T) { const measurement = "postgresql" if testing.Short() { t.Skip("Skipping integration test in short mode") @@ -237,7 +237,7 @@ func TestPostgresqlSqlScript(t *testing.T) { require.NoError(t, acc.GatherError(p.Gather)) } -func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { +func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/procstat/native_finder_windows_test.go b/plugins/inputs/procstat/native_finder_windows_test.go index ef9c5ffb11523..6f3067545364e 100644 --- a/plugins/inputs/procstat/native_finder_windows_test.go +++ b/plugins/inputs/procstat/native_finder_windows_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestGather_RealPattern(t *testing.T) { +func TestGather_RealPatternIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -22,7 +22,7 @@ func TestGather_RealPattern(t *testing.T) { assert.Equal(t, len(pids) > 0, true) } -func TestGather_RealFullPattern(t *testing.T) { +func TestGather_RealFullPatternIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -34,7 +34,7 @@ func TestGather_RealFullPattern(t *testing.T) { assert.Equal(t, len(pids) > 0, true) } -func TestGather_RealUser(t *testing.T) { +func TestGather_RealUserIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index d33cba273c276..1c1411b881a1d 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -99,7 +99,7 @@ func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { assert.True(t, acc.TagValue("test_metric", "url") == ts.URL) } -func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) { +func TestPrometheusGeneratesMetricsAlthoughFirstDNSFailsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index 5765b18607e00..7ce1112787d4f 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -28,7 +28,7 @@ func (t *testClient) Do(returnType string, args ...interface{}) (interface{}, er return 2, nil } -func TestRedisConnect(t *testing.T) { +func TestRedisConnectIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index 8f5d355ef4df3..f9306ee2f98fd 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -113,12 +113,11 @@ func TestSqlServer_ParseMetrics(t *testing.T) { } } -func TestSqlServer_MultipleInstance(t *testing.T) { +func TestSqlServer_MultipleInstanceIntegration(t *testing.T) { // Invoke Gather() from two separate configurations and // confirm they don't interfere with each other - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } + t.Skip("Skipping as unable to open tcp connection with host '127.0.0.1:1433") + testServer := "Server=127.0.0.1;Port=1433;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" s := &SQLServer{ Servers: []string{testServer}, @@ -139,7 +138,6 @@ func TestSqlServer_MultipleInstance(t *testing.T) { require.NoError(t, err) assert.Equal(t, s.isInitialized, true) assert.Equal(t, s2.isInitialized, true) - // acc includes size metrics, and excludes memory metrics assert.False(t, acc.HasMeasurement("Memory breakdown (%)")) assert.True(t, acc.HasMeasurement("Log size (bytes)")) diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go index 78917c2f2261f..43b20eb611577 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go @@ -8,13 +8,14 @@ import ( "testing" "time" + "strings" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "strings" ) -func TestWinPerformanceQueryImpl(t *testing.T) { +func TestWinPerformanceQueryImplIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -116,7 +117,7 @@ func TestWinPerformanceQueryImpl(t *testing.T) { } -func TestWinPerfcountersConfigGet1(t *testing.T) { +func TestWinPerfcountersConfigGet1Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -150,7 +151,7 @@ func TestWinPerfcountersConfigGet1(t *testing.T) { require.NoError(t, err) } -func TestWinPerfcountersConfigGet2(t *testing.T) { +func TestWinPerfcountersConfigGet2Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -196,7 +197,7 @@ func TestWinPerfcountersConfigGet2(t *testing.T) { } } -func TestWinPerfcountersConfigGet3(t *testing.T) { +func TestWinPerfcountersConfigGet3Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -245,7 +246,7 @@ func TestWinPerfcountersConfigGet3(t *testing.T) { } } -func TestWinPerfcountersConfigGet4(t *testing.T) { +func TestWinPerfcountersConfigGet4Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -294,7 +295,7 @@ func TestWinPerfcountersConfigGet4(t *testing.T) { } } -func TestWinPerfcountersConfigGet5(t *testing.T) { +func TestWinPerfcountersConfigGet5Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -342,7 +343,7 @@ func TestWinPerfcountersConfigGet5(t *testing.T) { } } -func TestWinPerfcountersConfigGet6(t *testing.T) { +func TestWinPerfcountersConfigGet6Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -376,7 +377,7 @@ func TestWinPerfcountersConfigGet6(t *testing.T) { require.NoError(t, err) } -func TestWinPerfcountersConfigGet7(t *testing.T) { +func TestWinPerfcountersConfigGet7Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -424,7 +425,7 @@ func TestWinPerfcountersConfigGet7(t *testing.T) { } } -func TestWinPerfcountersConfigError1(t *testing.T) { +func TestWinPerfcountersConfigError1Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -458,7 +459,7 @@ func TestWinPerfcountersConfigError1(t *testing.T) { require.Error(t, err) } -func TestWinPerfcountersConfigError2(t *testing.T) { +func TestWinPerfcountersConfigError2Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -494,7 +495,7 @@ func TestWinPerfcountersConfigError2(t *testing.T) { require.Error(t, err) } -func TestWinPerfcountersConfigError3(t *testing.T) { +func TestWinPerfcountersConfigError3Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -528,7 +529,7 @@ func TestWinPerfcountersConfigError3(t *testing.T) { require.Error(t, err) } -func TestWinPerfcountersCollect1(t *testing.T) { +func TestWinPerfcountersCollect1Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -572,7 +573,7 @@ func TestWinPerfcountersCollect1(t *testing.T) { } } -func TestWinPerfcountersCollect2(t *testing.T) { +func TestWinPerfcountersCollect2Integration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/win_services/win_services_integration_test.go b/plugins/inputs/win_services/win_services_integration_test.go index 028954f13a609..998aa1ed5eb2f 100644 --- a/plugins/inputs/win_services/win_services_integration_test.go +++ b/plugins/inputs/win_services/win_services_integration_test.go @@ -13,7 +13,7 @@ import ( var InvalidServices = []string{"XYZ1@", "ZYZ@", "SDF_@#"} var KnownServices = []string{"LanmanServer", "TermService"} -func TestList(t *testing.T) { +func TestListIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -33,7 +33,7 @@ func TestList(t *testing.T) { require.Equal(t, services[1], KnownServices[1]) } -func TestEmptyList(t *testing.T) { +func TestEmptyListIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -51,7 +51,7 @@ func TestEmptyList(t *testing.T) { require.Condition(t, func() bool { return len(services) > 20 }, "Too few service") } -func TestGatherErrors(t *testing.T) { +func TestGatherErrorsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index bb882a470c96d..0a0d0575adadc 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -27,10 +27,8 @@ var pki = testutil.NewPKI("../../../testutil/pki") // Make sure X509Cert implements telegraf.Input var _ telegraf.Input = &X509Cert{} -func TestGatherRemote(t *testing.T) { - if testing.Short() { - t.Skip("Skipping network-dependent test in short mode.") - } +func TestGatherRemoteIntegration(t *testing.T) { + t.Skip("Skipping network-dependent test due to race condition when test-all") tmpfile, err := ioutil.TempFile("", "example") if err != nil { @@ -333,7 +331,7 @@ func TestStrings(t *testing.T) { } } -func TestGatherCert(t *testing.T) { +func TestGatherCertIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/inputs/zookeeper/zookeeper_test.go b/plugins/inputs/zookeeper/zookeeper_test.go index 37cabbada78fc..bbc2a37cb5cb4 100644 --- a/plugins/inputs/zookeeper/zookeeper_test.go +++ b/plugins/inputs/zookeeper/zookeeper_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestZookeeperGeneratesMetrics(t *testing.T) { +func TestZookeeperGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/outputs/cratedb/cratedb_test.go b/plugins/outputs/cratedb/cratedb_test.go index 0cd93e8273810..993c9d7cbfeb2 100644 --- a/plugins/outputs/cratedb/cratedb_test.go +++ b/plugins/outputs/cratedb/cratedb_test.go @@ -2,6 +2,7 @@ package cratedb import ( "database/sql" + "fmt" "os" "strings" "testing" @@ -14,17 +15,15 @@ import ( "github.com/stretchr/testify/require" ) -func TestConnectAndWrite(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestConnectAndWriteIntegration(t *testing.T) { + t.Skip("Skipping due to trust authentication failure") if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" { t.Skip("Skipping test on CircleCI due to docker failures") } url := testURL() - table := "test" + table := "test-1" // dropSQL drops our table before each test. This simplifies changing the // schema during development :). @@ -94,10 +93,8 @@ VALUES } } -func Test_escapeValue(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func Test_escapeValueIntegration(t *testing.T) { + t.Skip("Skipping due to trust authentication failure") if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" { t.Skip("Skipping test on CircleCI due to docker failures") @@ -135,6 +132,7 @@ func Test_escapeValue(t *testing.T) { } url := testURL() + fmt.Println("url", url) db, err := sql.Open("pgx", url) require.NoError(t, err) defer db.Close() diff --git a/plugins/outputs/elasticsearch/elasticsearch_test.go b/plugins/outputs/elasticsearch/elasticsearch_test.go index e2a583402dfcc..b0caf8448c6ec 100644 --- a/plugins/outputs/elasticsearch/elasticsearch_test.go +++ b/plugins/outputs/elasticsearch/elasticsearch_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestConnectAndWrite(t *testing.T) { +func TestConnectAndWriteIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -38,7 +38,7 @@ func TestConnectAndWrite(t *testing.T) { } -func TestTemplateManagementEmptyTemplate(t *testing.T) { +func TestTemplateManagementEmptyTemplateIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -61,7 +61,7 @@ func TestTemplateManagementEmptyTemplate(t *testing.T) { } -func TestTemplateManagement(t *testing.T) { +func TestTemplateManagementIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -87,7 +87,7 @@ func TestTemplateManagement(t *testing.T) { require.NoError(t, err) } -func TestTemplateInvalidIndexPattern(t *testing.T) { +func TestTemplateInvalidIndexPatternIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/outputs/exec/exec_test.go b/plugins/outputs/exec/exec_test.go index 850ba7328a03b..5758c307b56e7 100644 --- a/plugins/outputs/exec/exec_test.go +++ b/plugins/outputs/exec/exec_test.go @@ -6,18 +6,15 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestExec(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test due to OS/executable dependencies") - } + t.Skip("Skipping test due to OS/executable dependencies and race condition when ran as part of a test-all") tests := []struct { name string diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index 4e93515febc4b..52b020813975b 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -17,7 +17,7 @@ type topicSuffixTestpair struct { expectedTopic string } -func TestConnectAndWrite(t *testing.T) { +func TestConnectAndWriteIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -43,7 +43,7 @@ func TestConnectAndWrite(t *testing.T) { k.Close() } -func TestTopicSuffixes(t *testing.T) { +func TestTopicSuffixesIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -90,7 +90,7 @@ func TestTopicSuffixes(t *testing.T) { } } -func TestValidateTopicSuffixMethod(t *testing.T) { +func TestValidateTopicSuffixMethodIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go index 260eb0c640c54..8affce1c93ddf 100644 --- a/plugins/outputs/mqtt/mqtt_test.go +++ b/plugins/outputs/mqtt/mqtt_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestConnectAndWrite(t *testing.T) { +func TestConnectAndWriteIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/outputs/nats/nats_test.go b/plugins/outputs/nats/nats_test.go index 432c9241875c4..30004f6ae543d 100644 --- a/plugins/outputs/nats/nats_test.go +++ b/plugins/outputs/nats/nats_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestConnectAndWrite(t *testing.T) { +func TestConnectAndWriteIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/outputs/nsq/nsq_test.go b/plugins/outputs/nsq/nsq_test.go index e2b0fc31d43e5..f7f55ddf34d07 100644 --- a/plugins/outputs/nsq/nsq_test.go +++ b/plugins/outputs/nsq/nsq_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestConnectAndWrite(t *testing.T) { +func TestConnectAndWriteIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } diff --git a/plugins/outputs/opentsdb/opentsdb_test.go b/plugins/outputs/opentsdb/opentsdb_test.go index 096337c5c6648..16d764ebe7eb8 100644 --- a/plugins/outputs/opentsdb/opentsdb_test.go +++ b/plugins/outputs/opentsdb/opentsdb_test.go @@ -164,41 +164,38 @@ func BenchmarkHttpSend(b *testing.B) { o.Write(metrics) } } +func TestWriteIntegration(t *testing.T) { + t.Skip("Skip as OpenTSDB not running") -// func TestWrite(t *testing.T) { -// if testing.Short() { -// t.Skip("Skipping integration test in short mode") -// } - -// o := &OpenTSDB{ -// Host: testutil.GetLocalHost(), -// Port: 4242, -// Prefix: "prefix.test.", -// } - -// // Verify that we can connect to the OpenTSDB instance -// err := o.Connect() -// require.NoError(t, err) - -// // Verify that we can successfully write data to OpenTSDB -// err = o.Write(testutil.MockMetrics()) -// require.NoError(t, err) - -// // Verify positive and negative test cases of writing data -// metrics := testutil.MockMetrics() -// metrics = append(metrics, testutil.TestMetric(float64(1.0), -// "justametric.float")) -// metrics = append(metrics, testutil.TestMetric(int64(123456789), -// "justametric.int")) -// metrics = append(metrics, testutil.TestMetric(uint64(123456789012345), -// "justametric.uint")) -// metrics = append(metrics, testutil.TestMetric("Lorem Ipsum", -// "justametric.string")) -// metrics = append(metrics, testutil.TestMetric(float64(42.0), -// "justametric.anotherfloat")) -// metrics = append(metrics, testutil.TestMetric(float64(42.0), -// "metric w/ specialchars")) - -// err = o.Write(metrics) -// require.NoError(t, err) -// } + o := &OpenTSDB{ + Host: testutil.GetLocalHost(), + Port: 4242, + Prefix: "prefix.test.", + } + + // Verify that we can connect to the OpenTSDB instance + err := o.Connect() + require.NoError(t, err) + + // Verify that we can successfully write data to OpenTSDB + err = o.Write(testutil.MockMetrics()) + require.NoError(t, err) + + // Verify positive and negative test cases of writing data + metrics := testutil.MockMetrics() + metrics = append(metrics, testutil.TestMetric(float64(1.0), + "justametric.float")) + metrics = append(metrics, testutil.TestMetric(int64(123456789), + "justametric.int")) + metrics = append(metrics, testutil.TestMetric(uint64(123456789012345), + "justametric.uint")) + metrics = append(metrics, testutil.TestMetric("Lorem Ipsum", + "justametric.string")) + metrics = append(metrics, testutil.TestMetric(float64(42.0), + "justametric.anotherfloat")) + metrics = append(metrics, testutil.TestMetric(float64(42.0), + "metric w/ specialchars")) + + err = o.Write(metrics) + require.NoError(t, err) +} diff --git a/plugins/outputs/riemann_legacy/riemann_test.go b/plugins/outputs/riemann_legacy/riemann_test.go index e57cbb43cc2c4..6450956ff1275 100644 --- a/plugins/outputs/riemann_legacy/riemann_test.go +++ b/plugins/outputs/riemann_legacy/riemann_test.go @@ -8,9 +8,7 @@ import ( ) func TestConnectAndWrite(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } + t.Skip("Skipping legacy integration test") url := testutil.GetLocalHost() + ":5555" diff --git a/plugins/processors/ifname/ifname_test.go b/plugins/processors/ifname/ifname_test.go index 25e130e3a7ac7..cea03cfd3fe62 100644 --- a/plugins/processors/ifname/ifname_test.go +++ b/plugins/processors/ifname/ifname_test.go @@ -16,9 +16,7 @@ import ( ) func TestTable(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } + t.Skip("Skipping test due to connect failures") d := IfName{} d.Init() @@ -43,10 +41,9 @@ func TestTable(t *testing.T) { require.NotEmpty(t, m) } -func TestIfName(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } +func TestIfNameIntegration(t *testing.T) { + t.Skip("Skipping test due to connect failures") + d := IfName{ SourceTag: "ifIndex", DestTag: "ifName", From c2379896312ba0582a1e7a9be79f9dc826a955a2 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 26 Jan 2021 16:02:43 -0600 Subject: [PATCH 190/761] Use go-ping for "native" execution in Ping plugin (#8679) * Use go-ping for "native" execution in Ping plugin * Check for ipv6 and deadline out of go func * ensure dns failure * Move interval and timeout calc to init Removed dns failure check, 3rd parties libary responsibility * Rename timeout to avoid conflict * Move native ping to interface Update tests * Check for zero length --- docs/LICENSE_OF_DEPENDENCIES.md | 2 +- go.mod | 2 +- go.sum | 5 +- plugins/inputs/ping/ping.go | 371 +++++++++---------------------- plugins/inputs/ping/ping_test.go | 127 ++++++++--- 5 files changed, 207 insertions(+), 300 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 642c79673b18c..14c46448c3b4a 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -51,9 +51,9 @@ following works: - github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 1.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE) - github.com/ericchiang/k8s [Apache License 2.0](https://github.com/ericchiang/k8s/blob/master/LICENSE) - github.com/ghodss/yaml [MIT License](https://github.com/ghodss/yaml/blob/master/LICENSE) -- github.com/glinton/ping [MIT License](https://github.com/glinton/ping/blob/master/LICENSE) - github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) - github.com/go-ole/go-ole [MIT License](https://github.com/go-ole/go-ole/blob/master/LICENSE) +- github.com/go-ping/ping [MIT License](https://github.com/go-ping/ping/blob/master/LICENSE) - github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE) - github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) - github.com/goburrow/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/goburrow/modbus/blob/master/LICENSE) diff --git a/go.mod b/go.mod index bd0ec9345ffbc..45a9a48ba618e 100644 --- a/go.mod +++ b/go.mod @@ -49,9 +49,9 @@ require ( github.com/eclipse/paho.mqtt.golang v1.2.0 github.com/ericchiang/k8s v1.2.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 github.com/go-logfmt/logfmt v0.4.0 github.com/go-ole/go-ole v1.2.1 // indirect + github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663 github.com/go-redis/redis v6.15.9+incompatible github.com/go-sql-driver/mysql v1.5.0 github.com/goburrow/modbus v0.1.0 diff --git a/go.sum b/go.sum index 79588d467c4f9..18fc73ab9df13 100644 --- a/go.sum +++ b/go.sum @@ -208,8 +208,6 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 h1:YpooqMW354GG47PXNBiaCv6yCQizyP3MXD9NUPrCEQ8= -github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -225,6 +223,8 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663 h1:jI2GiiRh+pPbey52EVmbU6kuLiXqwy4CXZ4gwUBj8Y0= +github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= @@ -698,7 +698,6 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 87f7af8e7489f..f242a80b85400 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -1,23 +1,16 @@ package ping import ( - "context" "errors" "fmt" - "log" "math" - "net" - "os/exec" "runtime" - "sort" "strings" "sync" - "sync/atomic" "time" - "github.com/glinton/ping" + "github.com/go-ping/ping" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -26,13 +19,16 @@ import ( // for unit test purposes (see ping_test.go) type HostPinger func(binary string, timeout float64, args ...string) (string, error) -type HostResolver func(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) - -type IsCorrectNetwork func(ip net.IPAddr) bool - type Ping struct { + // wg is used to wait for ping with multiple URLs wg sync.WaitGroup + // Pre-calculated interval and timeout + calcInterval time.Duration + calcTimeout time.Duration + + Log telegraf.Logger `toml:"-"` + // Interval at which to ping (ping -i ) PingInterval float64 `toml:"ping_interval"` @@ -67,11 +63,7 @@ type Ping struct { // host ping function pingHost HostPinger - // resolve host function - resolveHost HostResolver - - // listenAddr is the address associated with the interface defined. - listenAddr string + nativePingFunc NativePingFunc // Calculate the given percentiles when using native method Percentiles []int @@ -134,10 +126,6 @@ func (*Ping) SampleConfig() string { } func (p *Ping) Gather(acc telegraf.Accumulator) error { - if p.Interface != "" && p.listenAddr == "" { - p.listenAddr = getAddr(p.Interface) - } - for _, host := range p.Urls { p.wg.Add(1) go func(host string) { @@ -157,204 +145,113 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { return nil } -func getAddr(iface string) string { - if addr := net.ParseIP(iface); addr != nil { - return addr.String() - } +type pingStats struct { + ping.Statistics + ttl int +} - ifaces, err := net.Interfaces() - if err != nil { - return "" - } +type NativePingFunc func(destination string) (*pingStats, error) - var ip net.IP - for i := range ifaces { - if ifaces[i].Name == iface { - addrs, err := ifaces[i].Addrs() - if err != nil { - return "" - } - if len(addrs) > 0 { - switch v := addrs[0].(type) { - case *net.IPNet: - ip = v.IP - case *net.IPAddr: - ip = v.IP - } - if len(ip) == 0 { - return "" - } - return ip.String() - } - } - } - - return "" -} +func (p *Ping) nativePing(destination string) (*pingStats, error) { + ps := &pingStats{} -func hostPinger(binary string, timeout float64, args ...string) (string, error) { - bin, err := exec.LookPath(binary) + pinger, err := ping.NewPinger(destination) if err != nil { - return "", err + return nil, fmt.Errorf("Failed to create new pinger: %w", err) } - c := exec.Command(bin, args...) - out, err := internal.CombinedOutputTimeout(c, - time.Second*time.Duration(timeout+5)) - return string(out), err -} -func filterIPs(addrs []net.IPAddr, filterFunc IsCorrectNetwork) []net.IPAddr { - n := 0 - for _, x := range addrs { - if filterFunc(x) { - addrs[n] = x - n++ - } + // Required for windows. Despite the method name, this should work without the need to elevate privileges and has been tested on Windows 10 + if runtime.GOOS == "windows" { + pinger.SetPrivileged(true) } - return addrs[:n] -} -func hostResolver(ctx context.Context, ipv6 bool, destination string) (*net.IPAddr, error) { - resolver := &net.Resolver{} - ips, err := resolver.LookupIPAddr(ctx, destination) + if p.IPv6 { + pinger.SetNetwork("ip6") + } - if err != nil { - return nil, err + pinger.Interval = p.calcInterval + pinger.Timeout = p.calcTimeout + + if p.Deadline > 0 { + // If deadline is set ping exits regardless of how many packets have been sent or received + timer := time.AfterFunc(time.Duration(p.Deadline)*time.Second, func() { + pinger.Stop() + }) + defer timer.Stop() } - if ipv6 { - ips = filterIPs(ips, isV6) - } else { - ips = filterIPs(ips, isV4) + // Get Time to live (TTL) of first response, matching original implementation + once := &sync.Once{} + pinger.OnRecv = func(pkt *ping.Packet) { + once.Do(func() { + ps.ttl = pkt.Ttl + }) } - if len(ips) == 0 { - return nil, errors.New("Cannot resolve ip address") + pinger.Count = p.Count + err = pinger.Run() + if err != nil { + return nil, fmt.Errorf("Failed to run pinger: %w", err) } - return &ips[0], err -} -func isV4(ip net.IPAddr) bool { - return ip.IP.To4() != nil -} + ps.Statistics = *pinger.Statistics() -func isV6(ip net.IPAddr) bool { - return !isV4(ip) + return ps, nil } func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { - ctx := context.Background() - interval := p.PingInterval - if interval < 0.2 { - interval = 0.2 - } - - timeout := p.Timeout - if timeout == 0 { - timeout = 5 - } - tick := time.NewTicker(time.Duration(interval * float64(time.Second))) - defer tick.Stop() - - if p.Deadline > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Duration(p.Deadline)*time.Second) - defer cancel() - } + tags := map[string]string{"url": destination} + fields := map[string]interface{}{} - host, err := p.resolveHost(ctx, p.IPv6, destination) + stats, err := p.nativePingFunc(destination) if err != nil { - acc.AddFields( - "ping", - map[string]interface{}{"result_code": 1}, - map[string]string{"url": destination}, - ) - acc.AddError(err) + if strings.Contains(err.Error(), "unknown") { + fields["result_code"] = 1 + } else { + fields["result_code"] = 2 + } + acc.AddFields("ping", fields, tags) return } - resps := make(chan *ping.Response) - rsps := []*ping.Response{} - - r := &sync.WaitGroup{} - r.Add(1) - go func() { - for res := range resps { - rsps = append(rsps, res) - } - r.Done() - }() - - wg := &sync.WaitGroup{} - c := ping.Client{} - - var doErr error - var packetsSent int32 - - type sentReq struct { - err error - sent bool + fields = map[string]interface{}{ + "result_code": 0, + "packets_transmitted": stats.PacketsSent, + "packets_received": stats.PacketsRecv, } - sents := make(chan sentReq) - r.Add(1) - go func() { - for sent := range sents { - if sent.err != nil { - doErr = sent.err - } - if sent.sent { - atomic.AddInt32(&packetsSent, 1) - } - } - r.Done() - }() - - for i := 0; i < p.Count; i++ { - select { - case <-ctx.Done(): - goto finish - case <-tick.C: - ctx, cancel := context.WithTimeout(ctx, time.Duration(timeout*float64(time.Second))) - defer cancel() - - wg.Add(1) - go func(seq int) { - defer wg.Done() - resp, err := c.Do(ctx, &ping.Request{ - Dst: net.ParseIP(host.String()), - Src: net.ParseIP(p.listenAddr), - Seq: seq, - }) - - sent := sentReq{err: err, sent: true} - if err != nil { - if strings.Contains(err.Error(), "not permitted") { - sent.sent = false - } - sents <- sent - return - } - - resps <- resp - sents <- sent - }(i + 1) - } + if stats.PacketsSent == 0 { + fields["result_code"] = 2 + acc.AddFields("ping", fields, tags) + return } -finish: - wg.Wait() - close(resps) - close(sents) + if stats.PacketsRecv == 0 { + fields["result_code"] = 1 + fields["percent_packet_loss"] = float64(100) + acc.AddFields("ping", fields, tags) + return + } - r.Wait() + for _, perc := range p.Percentiles { + var value = percentile(durationSlice(stats.Rtts), perc) + var field = fmt.Sprintf("percentile%v_ms", perc) + fields[field] = float64(value.Nanoseconds()) / float64(time.Millisecond) + } - if doErr != nil && strings.Contains(doErr.Error(), "not permitted") { - log.Printf("D! [inputs.ping] %s", doErr.Error()) + // Set TTL only on supported platform. See golang.org/x/net/ipv4/payload_cmsg.go + switch runtime.GOOS { + case "aix", "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + fields["ttl"] = stats.ttl } - tags, fields := onFin(packetsSent, rsps, doErr, destination, p.Percentiles) + fields["percent_packet_loss"] = float64(stats.PacketLoss) + fields["minimum_response_ms"] = float64(stats.MinRtt) / float64(time.Millisecond) + fields["average_response_ms"] = float64(stats.AvgRtt) / float64(time.Millisecond) + fields["maximum_response_ms"] = float64(stats.MaxRtt) / float64(time.Millisecond) + fields["standard_deviation_ms"] = float64(stats.StdDevRtt) / float64(time.Millisecond) + acc.AddFields("ping", fields, tags) } @@ -366,6 +263,9 @@ func (p durationSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // R7 from Hyndman and Fan (1996), which matches Excel func percentile(values durationSlice, perc int) time.Duration { + if len(values) == 0 { + return 0 + } if perc < 0 { perc = 0 } @@ -388,91 +288,24 @@ func percentile(values durationSlice, perc int) time.Duration { } } -func onFin(packetsSent int32, resps []*ping.Response, err error, destination string, percentiles []int) (map[string]string, map[string]interface{}) { - packetsRcvd := len(resps) - - tags := map[string]string{"url": destination} - fields := map[string]interface{}{ - "result_code": 0, - "packets_transmitted": packetsSent, - "packets_received": packetsRcvd, - } - - if packetsSent == 0 { - if err != nil { - fields["result_code"] = 2 - } - return tags, fields - } - - if packetsRcvd == 0 { - if err != nil { - fields["result_code"] = 1 - } - fields["percent_packet_loss"] = float64(100) - return tags, fields +// Init ensures the plugin is configured correctly. +func (p *Ping) Init() error { + if p.Count < 1 { + return errors.New("bad number of packets to transmit") } - fields["percent_packet_loss"] = float64(int(packetsSent)-packetsRcvd) / float64(packetsSent) * 100 - ttl := resps[0].TTL - - var min, max, avg, total time.Duration - - if len(percentiles) > 0 { - var rtt []time.Duration - for _, resp := range resps { - rtt = append(rtt, resp.RTT) - total += resp.RTT - } - sort.Sort(durationSlice(rtt)) - min = rtt[0] - max = rtt[len(rtt)-1] - - for _, perc := range percentiles { - var value = percentile(durationSlice(rtt), perc) - var field = fmt.Sprintf("percentile%v_ms", perc) - fields[field] = float64(value.Nanoseconds()) / float64(time.Millisecond) - } + // The interval cannot be below 0.2 seconds, matching ping implementation: https://linux.die.net/man/8/ping + if p.PingInterval < 0.2 { + p.calcInterval = time.Duration(.2 * float64(time.Second)) } else { - min = resps[0].RTT - max = resps[0].RTT - - for _, res := range resps { - if res.RTT < min { - min = res.RTT - } - if res.RTT > max { - max = res.RTT - } - total += res.RTT - } - } - - avg = total / time.Duration(packetsRcvd) - var sumsquares time.Duration - for _, res := range resps { - sumsquares += (res.RTT - avg) * (res.RTT - avg) - } - stdDev := time.Duration(math.Sqrt(float64(sumsquares / time.Duration(packetsRcvd)))) - - // Set TTL only on supported platform. See golang.org/x/net/ipv4/payload_cmsg.go - switch runtime.GOOS { - case "aix", "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": - fields["ttl"] = ttl + p.calcInterval = time.Duration(p.PingInterval * float64(time.Second)) } - fields["minimum_response_ms"] = float64(min.Nanoseconds()) / float64(time.Millisecond) - fields["average_response_ms"] = float64(avg.Nanoseconds()) / float64(time.Millisecond) - fields["maximum_response_ms"] = float64(max.Nanoseconds()) / float64(time.Millisecond) - fields["standard_deviation_ms"] = float64(stdDev.Nanoseconds()) / float64(time.Millisecond) - - return tags, fields -} - -// Init ensures the plugin is configured correctly. -func (p *Ping) Init() error { - if p.Count < 1 { - return errors.New("bad number of packets to transmit") + // If no timeout is given default to 5 seconds, matching original implementation + if p.Timeout == 0 { + p.calcTimeout = time.Duration(5) * time.Second + } else { + p.calcTimeout = time.Duration(p.Timeout) * time.Second } return nil @@ -480,9 +313,7 @@ func (p *Ping) Init() error { func init() { inputs.Add("ping", func() telegraf.Input { - return &Ping{ - pingHost: hostPinger, - resolveHost: hostResolver, + p := &Ping{ PingInterval: 1.0, Count: 1, Timeout: 1.0, @@ -492,5 +323,7 @@ func init() { Arguments: []string{}, Percentiles: []int{}, } + p.nativePingFunc = p.nativePing + return p }) } diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 7aadba223e224..0afa53706ab5d 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -5,11 +5,14 @@ package ping import ( "context" "errors" + "fmt" "net" "reflect" "sort" "testing" + "time" + "github.com/go-ping/ping" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -403,43 +406,115 @@ func mockHostResolver(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, // Test that Gather function works using native ping func TestPingGatherNative(t *testing.T) { - t.Skip("Skipping test due to permission requirements.") + type test struct { + P *Ping + } - var acc testutil.Accumulator - p := Ping{ + fakePingFunc := func(destination string) (*pingStats, error) { + s := &pingStats{ + Statistics: ping.Statistics{ + PacketsSent: 5, + PacketsRecv: 5, + Rtts: []time.Duration{ + 1 * time.Millisecond, + 2 * time.Millisecond, + 3 * time.Millisecond, + 4 * time.Millisecond, + 5 * time.Millisecond, + }, + }, + ttl: 1, + } + + return s, nil + } + + tests := []test{ + { + P: &Ping{ + Urls: []string{"localhost", "127.0.0.2"}, + Method: "native", + Count: 5, + Percentiles: []int{50, 95, 99}, + nativePingFunc: fakePingFunc, + }, + }, + { + P: &Ping{ + Urls: []string{"localhost", "127.0.0.2"}, + Method: "native", + Count: 5, + PingInterval: 1, + Percentiles: []int{50, 95, 99}, + nativePingFunc: fakePingFunc, + }, + }, + } + + for _, tc := range tests { + var acc testutil.Accumulator + err := tc.P.Init() + require.NoError(t, err) + require.NoError(t, acc.GatherError(tc.P.Gather)) + assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) + assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) + assert.True(t, acc.HasField("ping", "percentile50_ms")) + assert.True(t, acc.HasField("ping", "percentile95_ms")) + assert.True(t, acc.HasField("ping", "percentile99_ms")) + assert.True(t, acc.HasField("ping", "percent_packet_loss")) + assert.True(t, acc.HasField("ping", "minimum_response_ms")) + assert.True(t, acc.HasField("ping", "average_response_ms")) + assert.True(t, acc.HasField("ping", "maximum_response_ms")) + assert.True(t, acc.HasField("ping", "standard_deviation_ms")) + } + +} + +func TestNoPacketsSent(t *testing.T) { + p := &Ping{ Urls: []string{"localhost", "127.0.0.2"}, Method: "native", Count: 5, - resolveHost: mockHostResolver, Percentiles: []int{50, 95, 99}, + nativePingFunc: func(destination string) (*pingStats, error) { + s := &pingStats{ + Statistics: ping.Statistics{ + PacketsSent: 0, + PacketsRecv: 0, + }, + } + + return s, nil + }, } - assert.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) - assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) - assert.True(t, acc.HasField("ping", "percentile50_ms")) - assert.True(t, acc.HasField("ping", "percentile95_ms")) - assert.True(t, acc.HasField("ping", "percentile99_ms")) -} - -func mockHostResolverError(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) { - return nil, errors.New("myMock error") + var testAcc testutil.Accumulator + err := p.Init() + require.NoError(t, err) + p.pingToURLNative("localhost", &testAcc) + require.Zero(t, testAcc.Errors) + require.True(t, testAcc.HasField("ping", "result_code")) + require.Equal(t, 2, testAcc.Metrics[0].Fields["result_code"]) } // Test failed DNS resolutions func TestDNSLookupError(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test due to permission requirements.") - } - - var acc testutil.Accumulator - p := Ping{ - Urls: []string{"localhost"}, - Method: "native", - IPv6: false, - resolveHost: mockHostResolverError, + p := &Ping{ + Count: 1, + Log: testutil.Logger{}, + Urls: []string{"localhost"}, + Method: "native", + IPv6: false, + nativePingFunc: func(destination string) (*pingStats, error) { + return nil, fmt.Errorf("unknown") + }, } - acc.GatherError(p.Gather) - assert.True(t, len(acc.Errors) > 0) + var testAcc testutil.Accumulator + err := p.Init() + require.NoError(t, err) + p.pingToURLNative("localhost", &testAcc) + require.Zero(t, testAcc.Errors) + require.True(t, testAcc.HasField("ping", "result_code")) + require.Equal(t, 1, testAcc.Metrics[0].Fields["result_code"]) } From fa16231770d6b57d7411a916b3ab22d244b990b8 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 27 Jan 2021 19:58:40 +0100 Subject: [PATCH 191/761] Update grok-library to v1.0.1 with dots and dash-patterns fixed. (#8673) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 45a9a48ba618e..912eb3f414c79 100644 --- a/go.mod +++ b/go.mod @@ -127,7 +127,7 @@ require ( github.com/tidwall/gjson v1.6.0 github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect - github.com/vjeantet/grok v1.0.0 + github.com/vjeantet/grok v1.0.1 github.com/vmware/govmomi v0.19.0 github.com/wavefronthq/wavefront-sdk-go v0.9.2 github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf diff --git a/go.sum b/go.sum index 18fc73ab9df13..6ccd8b5bacbbd 100644 --- a/go.sum +++ b/go.sum @@ -600,8 +600,8 @@ github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Su github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vjeantet/grok v1.0.0 h1:uxMqatJP6MOFXsj6C1tZBnqqAThQEeqnizUZ48gSJQQ= -github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= +github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= +github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY= github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk= From 8707a2d847890013e05e4fb1f1718db9315f2a71 Mon Sep 17 00:00:00 2001 From: omgold Date: Wed, 27 Jan 2021 21:36:29 +0100 Subject: [PATCH 192/761] Fix crash in lustre2 input plugin, when field name and value (#7967) are not separated by whitespace, which happens when numbers grow large (#7966) Co-authored-by: Oliver Mangold --- plugins/inputs/lustre2/lustre2.go | 11 +++++++++-- plugins/inputs/lustre2/lustre2_test.go | 4 ++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 44e046c7337f0..06c70de78d51d 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -15,6 +15,7 @@ package lustre2 import ( "io/ioutil" "path/filepath" + "regexp" "strconv" "strings" @@ -367,6 +368,8 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a return err } + fieldSplitter := regexp.MustCompile(`[ :]+`) + for _, file := range files { /* Turn /proc/fs/lustre/obdfilter//stats and similar * into just the object store target name @@ -397,7 +400,11 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a if len(line) < 1 { continue } - parts := strings.Fields(line) + + parts := fieldSplitter.Split(line, -1) + if len(parts[0]) == 0 { + parts = parts[1:] + } var fields map[string]interface{} fields, ok := l.allFields[tags{name, jobid}] @@ -408,7 +415,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a for _, wanted := range wantedFields { var data uint64 - if strings.TrimSuffix(parts[0], ":") == wanted.inProc { + if parts[0] == wanted.inProc { wantedField := wanted.field // if not set, assume field[1]. Shouldn't be field[0], as // that's a string diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 1fb55d30491ce..7741c83ac530a 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -47,7 +47,7 @@ const obdfilterJobStatsContents = `job_stats: - job_id: cluster-testjob1 snapshot_time: 1461772761 read_bytes: { samples: 1, unit: bytes, min: 4096, max: 4096, sum: 4096 } - write_bytes: { samples: 25, unit: bytes, min: 1048576, max: 1048576, sum: 26214400 } + write_bytes: { samples: 25, unit: bytes, min: 1048576, max:16777216, sum: 26214400 } getattr: { samples: 0, unit: reqs } setattr: { samples: 0, unit: reqs } punch: { samples: 1, unit: reqs } @@ -259,7 +259,7 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { "jobstats_read_bytes": uint64(4096), "jobstats_write_calls": uint64(25), "jobstats_write_min_size": uint64(1048576), - "jobstats_write_max_size": uint64(1048576), + "jobstats_write_max_size": uint64(16777216), "jobstats_write_bytes": uint64(26214400), "jobstats_ost_getattr": uint64(0), "jobstats_ost_setattr": uint64(0), From 5c4c0e1494f8f10bc7f783bd7f56ab2f1228831a Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Thu, 28 Jan 2021 04:39:28 +0800 Subject: [PATCH 193/761] improve mntr regex to match user specific keys. (#7533) * improve mntr regex to match user specific keys. * Update plugins/inputs/zookeeper/zookeeper.go Co-authored-by: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Co-authored-by: guoxu Co-authored-by: Sven Rebhan <36194019+srebhan@users.noreply.github.com> --- plugins/inputs/zookeeper/zookeeper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index 5259e25b7163e..dd8ff7ea4e3b9 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -17,7 +17,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -var zookeeperFormatRE = regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`) +var zookeeperFormatRE = regexp.MustCompile(`^zk_(\w[\w\.\-]*)\s+([\w\.\-]+)`) // Zookeeper is a zookeeper plugin type Zookeeper struct { From c0524dbe2ec342de392c6d94159a3eed78bb89fb Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 27 Jan 2021 15:58:31 -0500 Subject: [PATCH 194/761] Add geoip external project reference --- EXTERNAL_PLUGINS.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index a529122dcc94c..0a165a412ec07 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -16,3 +16,7 @@ Pull requests welcome. ## Outputs - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. + +## Processors + - [geoip](https://github.com/a-bali/telegraf-geoip) - Add GeoIP information to IP addresses. + From 52aaadfd79c29a15c8f2d02444e041fd98492bd1 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 27 Jan 2021 16:01:06 -0500 Subject: [PATCH 195/761] Fix issue with elasticsearch output being really noisy about some errors (#8748) --- plugins/outputs/elasticsearch/elasticsearch.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go index b17a945b3523c..352d0357933b5 100644 --- a/plugins/outputs/elasticsearch/elasticsearch.go +++ b/plugins/outputs/elasticsearch/elasticsearch.go @@ -309,6 +309,7 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { if res.Errors { for id, err := range res.Failed() { log.Printf("E! Elasticsearch indexing failure, id: %d, error: %s, caused by: %s, %s", id, err.Error.Reason, err.Error.CausedBy["reason"], err.Error.CausedBy["type"]) + break } return fmt.Errorf("W! Elasticsearch failed to index %d metrics", len(res.Failed())) } From cbe99ef59678bcdaa18ec94e524b1a526465a4cf Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 27 Jan 2021 16:02:37 -0500 Subject: [PATCH 196/761] add line about measurement being specified in docs (#8734) --- docs/TEMPLATE_PATTERN.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/TEMPLATE_PATTERN.md b/docs/TEMPLATE_PATTERN.md index 4244369d7dcab..42a5abea56f30 100644 --- a/docs/TEMPLATE_PATTERN.md +++ b/docs/TEMPLATE_PATTERN.md @@ -22,6 +22,7 @@ correspond to the field name. Any part of the template that is not a keyword is treated as a tag key. This can also be specified multiple times. +**NOTE:** `measurement` must be specified in your template. **NOTE:** `field*` cannot be used in conjunction with `measurement*`. ### Examples From 9c7cf99fa71c12c8df39dd244d7f104fa7e6ecad Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 27 Jan 2021 16:07:42 -0500 Subject: [PATCH 197/761] [outputs.influxdb_v2] add exponential backoff, and respect client error responses (#8662) * [outputs.influxdb_v2] add exponential backoff, and respect client error responses * add test * Update to 60 seconds * fix test --- plugins/outputs/influxdb_v2/http.go | 94 +++++++++++++------ .../outputs/influxdb_v2/http_internal_test.go | 27 ++++++ 2 files changed, 94 insertions(+), 27 deletions(-) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index 2a32c5f4c60ea..0d94452389269 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -9,6 +9,7 @@ import ( "io" "io/ioutil" "log" + "math" "net" "net/http" "net/url" @@ -36,7 +37,7 @@ func (e APIError) Error() string { const ( defaultRequestTimeout = time.Second * 5 - defaultMaxWait = 10 // seconds + defaultMaxWait = 60 // seconds defaultDatabase = "telegraf" ) @@ -70,6 +71,7 @@ type httpClient struct { serializer *influx.Serializer url *url.URL retryTime time.Time + retryCount int } func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { @@ -233,7 +235,18 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te } defer resp.Body.Close() - if resp.StatusCode == http.StatusNoContent { + switch resp.StatusCode { + case + // this is the expected response: + http.StatusNoContent, + // but if we get these we should still accept it as delivered: + http.StatusOK, + http.StatusCreated, + http.StatusAccepted, + http.StatusPartialContent, + http.StatusMultiStatus, + http.StatusAlreadyReported: + c.retryCount = 0 return nil } @@ -245,33 +258,37 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te } switch resp.StatusCode { - case http.StatusBadRequest, http.StatusRequestEntityTooLarge: - log.Printf("E! [outputs.influxdb_v2] Failed to write metric: %s\n", desc) + case + // request was malformed: + http.StatusBadRequest, + // request was too large: + http.StatusRequestEntityTooLarge, + // request was received but server refused to process it due to a semantic problem with the request. + // for example, submitting metrics outside the retention period. + // Clients should *not* repeat the request and the metrics should be dropped. + http.StatusUnprocessableEntity, + http.StatusNotAcceptable: + log.Printf("E! [outputs.influxdb_v2] Failed to write metric (will be dropped: %s): %s\n", resp.Status, desc) return nil case http.StatusUnauthorized, http.StatusForbidden: - return fmt.Errorf("failed to write metric: %s", desc) - case http.StatusTooManyRequests: - retryAfter := resp.Header.Get("Retry-After") - retry, err := strconv.Atoi(retryAfter) - if err != nil { - return errors.New("rate limit exceeded") - } - if retry > defaultMaxWait { - retry = defaultMaxWait - } - c.retryTime = time.Now().Add(time.Duration(retry) * time.Second) - return fmt.Errorf("waiting %ds for server before sending metric again", retry) - case http.StatusServiceUnavailable: - retryAfter := resp.Header.Get("Retry-After") - retry, err := strconv.Atoi(retryAfter) - if err != nil { - return errors.New("server responded: service unavailable") - } - if retry > defaultMaxWait { - retry = defaultMaxWait - } - c.retryTime = time.Now().Add(time.Duration(retry) * time.Second) - return fmt.Errorf("waiting %ds for server before sending metric again", retry) + return fmt.Errorf("failed to write metric (%s): %s", resp.Status, desc) + case http.StatusTooManyRequests, + http.StatusServiceUnavailable, + http.StatusBadGateway, + http.StatusGatewayTimeout: + // ^ these handle the cases where the server is likely overloaded, and may not be able to say so. + c.retryCount++ + retryDuration := c.getRetryDuration(resp.Header) + c.retryTime = time.Now().Add(retryDuration) + log.Printf("W! [outputs.influxdb_v2] Failed to write; will retry in %s. (%s)\n", retryDuration, resp.Status) + return fmt.Errorf("waiting %s for server before sending metric again", retryDuration) + } + + // if it's any other 4xx code, the client should not retry as it's the client's mistake. + // retrying will not make the request magically work. + if len(resp.Status) > 0 && resp.Status[0] == '4' { + log.Printf("E! [outputs.influxdb_v2] Failed to write metric (will be dropped: %s): %s\n", resp.Status, desc) + return nil } // This is only until platform spec is fully implemented. As of the @@ -287,6 +304,29 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te } } +// retryDuration takes the longer of the Retry-After header and our own back-off calculation +func (c *httpClient) getRetryDuration(headers http.Header) time.Duration { + // basic exponential backoff (x^2)/40 (denominator to widen the slope) + // at 40 denominator, it'll take 35 retries to hit the max defaultMaxWait of 30s + backoff := math.Pow(float64(c.retryCount), 2) / 40 + + // get any value from the header, if available + retryAfterHeader := float64(0) + retryAfterHeaderString := headers.Get("Retry-After") + if len(retryAfterHeaderString) > 0 { + var err error + retryAfterHeader, err = strconv.ParseFloat(retryAfterHeaderString, 64) + if err != nil { + // there was a value but we couldn't parse it? guess minimum 10 sec + retryAfterHeader = 10 + } + } + // take the highest value from both, but not over the max wait. + retry := math.Max(backoff, retryAfterHeader) + retry = math.Min(retry, defaultMaxWait) + return time.Duration(retry) * time.Second +} + func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { var err error diff --git a/plugins/outputs/influxdb_v2/http_internal_test.go b/plugins/outputs/influxdb_v2/http_internal_test.go index e9685da129aa7..2ff4990fa8a3b 100644 --- a/plugins/outputs/influxdb_v2/http_internal_test.go +++ b/plugins/outputs/influxdb_v2/http_internal_test.go @@ -1,8 +1,11 @@ package influxdb_v2 import ( + "fmt" + "net/http" "net/url" "testing" + "time" "github.com/stretchr/testify/require" ) @@ -45,3 +48,27 @@ func TestMakeWriteURL(t *testing.T) { } } } + +func TestExponentialBackoffCalculation(t *testing.T) { + c := &httpClient{} + tests := []struct { + retryCount int + expected time.Duration + }{ + {retryCount: 0, expected: 0}, + {retryCount: 1, expected: 0}, + {retryCount: 5, expected: 0}, + {retryCount: 10, expected: 2 * time.Second}, + {retryCount: 30, expected: 22 * time.Second}, + {retryCount: 40, expected: 40 * time.Second}, + {retryCount: 50, expected: 60 * time.Second}, + {retryCount: 100, expected: 60 * time.Second}, + {retryCount: 1000, expected: 60 * time.Second}, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_retries", test.retryCount), func(t *testing.T) { + c.retryCount = test.retryCount + require.EqualValues(t, test.expected, c.getRetryDuration(http.Header{})) + }) + } +} From d415d9f0fb293e59846a13e6736c88cc538c8060 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 27 Jan 2021 16:59:18 -0500 Subject: [PATCH 198/761] Update changelog (cherry picked from commit 057f626cd85bb9813706ca029ab7a5ef2c6ab2e0) --- CHANGELOG.md | 39 +++++++++++++++++++++++++++++++++++++++ etc/telegraf.conf | 47 ++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 79 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e33f1c6cc430..226888781d448 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,42 @@ +## v1.17.1 [2021-01-27] + +#### Release Notes + + Included a few more changes that add configuration options to plugins as it's been while since the last release + + - [#8335](https://github.com/influxdata/telegraf/pull/8335) `inputs.ipmi_sensor` Add setting to enable caching in ipmitool + - [#8616](https://github.com/influxdata/telegraf/pull/8616) Add Event Log support for Windows + - [#8602](https://github.com/influxdata/telegraf/pull/8602) `inputs.postgresql_extensible` Add timestamp column support to postgresql_extensible + - [#8627](https://github.com/influxdata/telegraf/pull/8627) `parsers.csv` Added ability to define skip values in csv parser + - [#8646](https://github.com/influxdata/telegraf/pull/8646) link to Open Hardware Monitor + - [#8055](https://github.com/influxdata/telegraf/pull/8055) `outputs.http` outputs/http: add option to control idle connection timeout + - [#7897](https://github.com/influxdata/telegraf/pull/7897) `common.tls` common/tls: Allow specifying SNI hostnames + - [#8541](https://github.com/influxdata/telegraf/pull/8541) `inputs.snmp` Extended the internal snmp wrapper to support AES192, AES192C, AES256, and AES256C + - [#6165](https://github.com/influxdata/telegraf/pull/6165) `inputs.procstat` Provide method to include core count when reporting cpu_usage in procstat input + - [#8287](https://github.com/influxdata/telegraf/pull/8287) `inputs.jenkins` Add support for an inclusive job list in Jenkins plugin + - [#8524](https://github.com/influxdata/telegraf/pull/8524) `inputs.ipmi_sensor` Add hex_key parameter for IPMI input plugin connection + +#### Bugfixes + + - [#8662](https://github.com/influxdata/telegraf/pull/8662) `outputs.influxdb_v2` [outputs.influxdb_v2] add exponential backoff, and respect client error responses + - [#8748](https://github.com/influxdata/telegraf/pull/8748) `outputs.elasticsearch` Fix issue with elasticsearch output being really noisy about some errors + - [#7533](https://github.com/influxdata/telegraf/pull/7533) `inputs.zookeeper` improve mntr regex to match user specific keys. + - [#7967](https://github.com/influxdata/telegraf/pull/7967) `inputs.lustre2` Fix crash in lustre2 input plugin, when field name and value + - [#8673](https://github.com/influxdata/telegraf/pull/8673) Update grok-library to v1.0.1 with dots and dash-patterns fixed. + - [#8679](https://github.com/influxdata/telegraf/pull/8679) `inputs.ping` Use go-ping for "native" execution in Ping plugin + - [#8741](https://github.com/influxdata/telegraf/pull/8741) `inputs.x509_cert` fix x509 cert timeout issue + - [#8714](https://github.com/influxdata/telegraf/pull/8714) Bump github.com/nsqio/go-nsq from 1.0.7 to 1.0.8 + - [#8715](https://github.com/influxdata/telegraf/pull/8715) Bump github.com/Shopify/sarama from 1.27.1 to 1.27.2 + - [#8712](https://github.com/influxdata/telegraf/pull/8712) Bump github.com/newrelic/newrelic-telemetry-sdk-go from 0.2.0 to 0.5.1 + - [#8659](https://github.com/influxdata/telegraf/pull/8659) `inputs.gnmi` GNMI plugin should not take off the first character of field keys when no 'alias path' exists. + - [#8609](https://github.com/influxdata/telegraf/pull/8609) `inputs.webhooks` Use the 'measurement' json field from the particle webhook as the measurment name, or if it's blank, use the 'name' field of the event's json. + - [#8658](https://github.com/influxdata/telegraf/pull/8658) `inputs.procstat` Procstat input plugin should use the same timestamp in all metrics in the same Gather() cycle. + - [#8391](https://github.com/influxdata/telegraf/pull/8391) `aggregators.merge` Optimize SeriesGrouper & aggregators.merge + - [#8545](https://github.com/influxdata/telegraf/pull/8545) `inputs.prometheus` Using mime-type in prometheus parser to handle protocol-buffer responses + - [#8588](https://github.com/influxdata/telegraf/pull/8588) `inputs.snmp` Input SNMP plugin - upgrade gosnmp library to version 1.29.0 + - [#8502](https://github.com/influxdata/telegraf/pull/8502) `inputs.http_listener_v2` Fix Stop() bug when plugin fails to start + + ## v1.17.0 [2020-12-18] #### Release Notes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index dc74540e4c6e1..1fe44afa3cdac 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -744,6 +744,11 @@ # # [outputs.http.headers] # # # Should be set manually to "application/json" for json data_format # # Content-Type = "text/plain; charset=utf-8" +# +# ## Idle (keep-alive) connection timeout. +# ## Maximum amount of time before idle connection is closed. +# ## Zero means no limit. +# # idle_conn_timeout = 0 # # Configuration for sending metrics to InfluxDB @@ -1787,10 +1792,10 @@ # # Map enum values according to given table. # [[processors.enum]] # [[processors.enum.mapping]] -# ## Name of the field to map +# ## Name of the field to map. Globs accepted. # field = "status" # -# ## Name of the tag to map +# ## Name of the tag to map. Globs accepted. # # tag = "status" # # ## Destination tag or field to be used for the mapped value. By default the @@ -2326,9 +2331,9 @@ percpu = true ## Whether to report total system cpu stats or not totalcpu = true - ## If true, collect raw CPU time metrics. + ## If true, collect raw CPU time metrics collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. + ## If true, compute and report the sum of all non-idle CPU states report_active = false @@ -3630,6 +3635,18 @@ # # ## Schema Version: (Optional, defaults to version 1) # metric_version = 2 +# +# ## Optionally provide the hex key for the IMPI connection. +# # hex_key = "" +# +# ## If ipmitool should use a cache +# ## for me ipmitool runs about 2 to 10 times faster with cache enabled on HP G10 servers (when using ubuntu20.04) +# ## the cache file may not work well for you if some sensors come up late +# # use_cache = false +# +# ## Path to the ipmitools cache file (defaults to OS temp dir) +# ## The provided path must exist and must be writable +# # cache_path = "" # # Gather packets and bytes counters from Linux ipsets @@ -3701,11 +3718,14 @@ # ## empty will use default value 10 # # max_subjob_per_layer = 10 # -# ## Jobs to exclude from gathering -# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] +# ## Jobs to include or exclude from gathering +# ## When using both lists, job_exclude has priority. +# ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"] +# # job_include = [ "*" ] +# # job_exclude = [ ] # # ## Nodes to exclude from gathering -# # node_exclude = [ "node1", "node2" ] +# # node_exclude = [ ] # # ## Worker pool for jenkins plugin only # ## Empty this field will use default value 5 @@ -4936,6 +4956,9 @@ # ## When true add the full cmdline as a tag. # # cmdline_tag = false # +# ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. +# # mode = "irix" +# # ## Add the PID as a tag instead of as a field. When collecting multiple # ## processes with otherwise matching tags this setting should be enabled to # ## ensure each process has a unique identity. @@ -7038,6 +7061,15 @@ # ## The script option can be used to specify the .sql file path. # ## If script and sqlquery options specified at same time, sqlquery will be used # ## +# ## the tagvalue field is used to define custom tags (separated by comas). +# ## the query is expected to return columns which match the names of the +# ## defined tags. The values in these columns must be of a string-type, +# ## a number-type or a blob-type. +# ## +# ## The timestamp field is used to override the data points timestamp value. By +# ## default, all rows inserted with current time. By setting a timestamp column, +# ## the row will be inserted with that column's value. +# ## # ## Structure : # ## [[inputs.postgresql_extensible.query]] # ## sqlquery string @@ -7045,6 +7077,7 @@ # ## withdbname boolean # ## tagvalue string (comma separated) # ## measurement string +# ## timestamp string # [[inputs.postgresql_extensible.query]] # sqlquery="SELECT * FROM pg_stat_database" # version=901 From 03fe914c5941294a0f34e97fa5b877284d349335 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 28 Jan 2021 09:54:29 -0600 Subject: [PATCH 199/761] Resolve regression, re-add missing function (#8764) --- plugins/inputs/ping/ping.go | 14 ++++++++++++++ plugins/inputs/ping/ping_test.go | 16 ++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index f242a80b85400..7550559dd469e 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "math" + "os/exec" "runtime" "strings" "sync" @@ -11,6 +12,7 @@ import ( "github.com/go-ping/ping" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -311,9 +313,21 @@ func (p *Ping) Init() error { return nil } +func hostPinger(binary string, timeout float64, args ...string) (string, error) { + bin, err := exec.LookPath(binary) + if err != nil { + return "", err + } + c := exec.Command(bin, args...) + out, err := internal.CombinedOutputTimeout(c, + time.Second*time.Duration(timeout+5)) + return string(out), err +} + func init() { inputs.Add("ping", func() telegraf.Input { p := &Ping{ + pingHost: hostPinger, PingInterval: 1.0, Count: 1, Timeout: 1.0, diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 0afa53706ab5d..9f88cc17da844 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -13,6 +13,7 @@ import ( "time" "github.com/go-ping/ping" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -261,6 +262,21 @@ func TestPingGather(t *testing.T) { acc.AssertContainsTaggedFields(t, "ping", fields, tags) } +func TestPingGatherIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode, retrieves systems ping utility") + } + + var acc testutil.Accumulator + p, ok := inputs.Inputs["ping"]().(*Ping) + require.True(t, ok) + p.Urls = []string{"localhost", "influxdata.com"} + err := acc.GatherError(p.Gather) + require.NoError(t, err) + require.Equal(t, 0, acc.Metrics[0].Fields["result_code"]) + require.Equal(t, 0, acc.Metrics[1].Fields["result_code"]) +} + var lossyPingOutput = ` PING www.google.com (216.58.218.164) 56(84) bytes of data. 64 bytes from host.net (216.58.218.164): icmp_seq=1 ttl=63 time=35.2 ms From 244178e5ca7600d9b7c3096774a74e4020ea75c0 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 28 Jan 2021 11:30:00 -0600 Subject: [PATCH 200/761] Set interface for native (#8770) Support both name and IP --- plugins/inputs/ping/ping.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 7550559dd469e..1bec73f4ea585 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "math" + "net" "os/exec" "runtime" "strings" @@ -29,6 +30,8 @@ type Ping struct { calcInterval time.Duration calcTimeout time.Duration + sourceAddress string + Log telegraf.Logger `toml:"-"` // Interval at which to ping (ping -i ) @@ -171,6 +174,7 @@ func (p *Ping) nativePing(destination string) (*pingStats, error) { pinger.SetNetwork("ip6") } + pinger.Source = p.sourceAddress pinger.Interval = p.calcInterval pinger.Timeout = p.calcTimeout @@ -310,6 +314,23 @@ func (p *Ping) Init() error { p.calcTimeout = time.Duration(p.Timeout) * time.Second } + // Support either an IP address or interface name + if p.Interface != "" { + if addr := net.ParseIP(p.Interface); addr != nil { + p.sourceAddress = p.Interface + } else { + i, err := net.InterfaceByName(p.Interface) + if err != nil { + return fmt.Errorf("Failed to get interface: %w", err) + } + addrs, err := i.Addrs() + if err != nil { + return fmt.Errorf("Failed to get the address of interface: %w", err) + } + p.sourceAddress = addrs[0].(*net.IPNet).IP.String() + } + } + return nil } From 13520ba6e5cacae82f72b627fb040b8b37c5a7d6 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 28 Jan 2021 16:59:56 -0500 Subject: [PATCH 201/761] Update changelog (cherry picked from commit ddf4147dd5b0ca168168658d079a316e9469f2c9) --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 226888781d448..33fb315909385 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +## v1.17.2 [2021-01-28] + +#### Bugfixes + + - [#8770](https://github.com/influxdata/telegraf/pull/8770) `inputs.ping` Set interface for native + - [#8764](https://github.com/influxdata/telegraf/pull/8764) `inputs.ping` Resolve regression, re-add missing function + + ## v1.17.1 [2021-01-27] #### Release Notes From c43de16bcec5259aef2273b5a13341723b583c40 Mon Sep 17 00:00:00 2001 From: Mike Summers Date: Mon, 1 Feb 2021 11:54:42 -0600 Subject: [PATCH 202/761] Add HTTP proxy setting to New Relic output plugin (#8749) --- plugins/outputs/newrelic/README.md | 4 +++ plugins/outputs/newrelic/newrelic.go | 44 +++++++++++++++++++---- plugins/outputs/newrelic/newrelic_test.go | 8 +++++ 3 files changed, 49 insertions(+), 7 deletions(-) diff --git a/plugins/outputs/newrelic/README.md b/plugins/outputs/newrelic/README.md index fbafd06adb8d4..462c0c3152f55 100644 --- a/plugins/outputs/newrelic/README.md +++ b/plugins/outputs/newrelic/README.md @@ -17,6 +17,10 @@ Telegraf minimum version: Telegraf 1.15.0 ## Timeout for writes to the New Relic API. # timeout = "15s" + + ## HTTP Proxy override. If unset use values from the standard + ## proxy environment variables to determine proxy, if any. + # http_proxy = "http://corporate.proxy:3128" ``` [Metrics API]: https://docs.newrelic.com/docs/data-ingest-apis/get-data-new-relic/metric-api/introduction-metric-api diff --git a/plugins/outputs/newrelic/newrelic.go b/plugins/outputs/newrelic/newrelic.go index da000c222c823..883e8911f60d5 100644 --- a/plugins/outputs/newrelic/newrelic.go +++ b/plugins/outputs/newrelic/newrelic.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "net/http" + "net/url" "time" "github.com/influxdata/telegraf" @@ -19,12 +20,13 @@ type NewRelic struct { InsightsKey string `toml:"insights_key"` MetricPrefix string `toml:"metric_prefix"` Timeout internal.Duration `toml:"timeout"` + HttpProxy string `toml:"http_proxy"` harvestor *telemetry.Harvester dc *cumulative.DeltaCalculator savedErrors map[int]interface{} errorCount int - Client http.Client `toml:"-"` + client http.Client `toml:"-"` } // Description returns a one-sentence description on the Output @@ -43,6 +45,10 @@ func (nr *NewRelic) SampleConfig() string { ## Timeout for writes to the New Relic API. # timeout = "15s" + + ## HTTP Proxy override. If unset use values from the standard + ## proxy environment variables to determine proxy, if any. + # http_proxy = "http://corporate.proxy:3128" ` } @@ -51,14 +57,18 @@ func (nr *NewRelic) Connect() error { if nr.InsightsKey == "" { return fmt.Errorf("InsightKey is a required for newrelic") } - var err error + err := nr.initClient() + if err != nil { + return err + } + nr.harvestor, err = telemetry.NewHarvester(telemetry.ConfigAPIKey(nr.InsightsKey), telemetry.ConfigHarvestPeriod(0), func(cfg *telemetry.Config) { cfg.Product = "NewRelic-Telegraf-Plugin" cfg.ProductVersion = "1.0" cfg.HarvestTimeout = nr.Timeout.Duration - cfg.Client = &nr.Client + cfg.Client = &nr.client cfg.ErrorLogger = func(e map[string]interface{}) { var errorString string for k, v := range e { @@ -79,7 +89,7 @@ func (nr *NewRelic) Connect() error { // Close any connections to the Output func (nr *NewRelic) Close() error { nr.errorCount = 0 - nr.Client.CloseIdleConnections() + nr.client.CloseIdleConnections() return nil } @@ -108,7 +118,7 @@ func (nr *NewRelic) Write(metrics []telegraf.Metric) error { case uint64: mvalue = float64(n) case float64: - mvalue = float64(n) + mvalue = n case bool: mvalue = float64(0) if n { @@ -119,7 +129,7 @@ func (nr *NewRelic) Write(metrics []telegraf.Metric) error { // we just skip continue default: - return fmt.Errorf("Undefined field type: %T", field.Value) + return fmt.Errorf("undefined field type: %T", field.Value) } switch metric.Type() { @@ -152,7 +162,27 @@ func init() { outputs.Add("newrelic", func() telegraf.Output { return &NewRelic{ Timeout: internal.Duration{Duration: time.Second * 15}, - Client: http.Client{}, } }) } + +func (nr *NewRelic) initClient() error { + if nr.HttpProxy == "" { + nr.client = http.Client{} + return nil + } + + proxyURL, err := url.Parse(nr.HttpProxy) + if err != nil { + return err + } + + transport := &http.Transport{ + Proxy: http.ProxyURL(proxyURL), + } + + nr.client = http.Client{ + Transport: transport, + } + return nil +} diff --git a/plugins/outputs/newrelic/newrelic_test.go b/plugins/outputs/newrelic/newrelic_test.go index aa23950c72611..d6613e55fa535 100644 --- a/plugins/outputs/newrelic/newrelic_test.go +++ b/plugins/outputs/newrelic/newrelic_test.go @@ -168,6 +168,14 @@ func TestNewRelic_Connect(t *testing.T) { }, wantErr: false, }, + { + name: "Test: HTTP Proxy", + newrelic: &NewRelic{ + InsightsKey: "12121212", + HttpProxy: "https://my.proxy", + }, + wantErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 8ddbab47a46e256281392ad0aac876715189c117 Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Mon, 1 Feb 2021 20:34:44 +0100 Subject: [PATCH 203/761] Allow to provide constants to a starlark script (#8772) --- plugins/processors/starlark/README.md | 30 ++++ plugins/processors/starlark/field_dict.go | 50 ++++-- plugins/processors/starlark/starlark.go | 24 ++- plugins/processors/starlark/starlark_test.go | 160 ++++++++++++++++++- 4 files changed, 249 insertions(+), 15 deletions(-) diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index a22296f48f3da..03d9f7a939250 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -30,6 +30,13 @@ def apply(metric): ## File containing a Starlark script. # script = "/usr/local/bin/myscript.star" + + ## The constants of the Starlark script. + # [processors.starlark.constants] + # max_size = 10 + # threshold = 0.75 + # default_name = "Julia" + # debug_mode = true ``` ### Usage @@ -182,6 +189,29 @@ def apply(metric): def failing(metric): json.decode("non-json-content") ``` +**How to reuse the same script but with different parameters?** + +In case you have a generic script that you would like to reuse for different instances of the plugin, you can use constants as input parameters of your script. + +So for example, assuming that you have the next configuration: + +```toml +[[processors.starlark]] + script = "/usr/local/bin/myscript.star" + + [processors.starlark.constants] + somecustomnum = 10 + somecustomstr = "mycustomfield" +``` + +Your script could then use the constants defined in the configuration as follows: + +```python +def apply(metric): + if metric.fields[somecustomstr] >= somecustomnum: + metric.fields.clear() + return metric +``` ### Examples diff --git a/plugins/processors/starlark/field_dict.go b/plugins/processors/starlark/field_dict.go index e0c0349b617a1..1e48ac7c02cc2 100644 --- a/plugins/processors/starlark/field_dict.go +++ b/plugins/processors/starlark/field_dict.go @@ -3,6 +3,7 @@ package starlark import ( "errors" "fmt" + "reflect" "strings" "github.com/influxdata/telegraf" @@ -210,17 +211,44 @@ func (i *FieldIterator) Done() { // AsStarlarkValue converts a field value to a starlark.Value. func asStarlarkValue(value interface{}) (starlark.Value, error) { - switch v := value.(type) { - case float64: - return starlark.Float(v), nil - case int64: - return starlark.MakeInt64(v), nil - case uint64: - return starlark.MakeUint64(v), nil - case string: - return starlark.String(v), nil - case bool: - return starlark.Bool(v), nil + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.Slice: + length := v.Len() + array := make([]starlark.Value, length) + for i := 0; i < length; i++ { + sVal, err := asStarlarkValue(v.Index(i).Interface()) + if err != nil { + return starlark.None, err + } + array[i] = sVal + } + return starlark.NewList(array), nil + case reflect.Map: + dict := starlark.NewDict(v.Len()) + iter := v.MapRange() + for iter.Next() { + sKey, err := asStarlarkValue(iter.Key().Interface()) + if err != nil { + return starlark.None, err + } + sValue, err := asStarlarkValue(iter.Value().Interface()) + if err != nil { + return starlark.None, err + } + dict.SetKey(sKey, sValue) + } + return dict, nil + case reflect.Float32, reflect.Float64: + return starlark.Float(v.Float()), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return starlark.MakeInt64(v.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return starlark.MakeUint64(v.Uint()), nil + case reflect.String: + return starlark.String(v.String()), nil + case reflect.Bool: + return starlark.Bool(v.Bool()), nil } return starlark.None, errors.New("invalid type") diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go index 9a055ce56db6f..64666398d2e50 100644 --- a/plugins/processors/starlark/starlark.go +++ b/plugins/processors/starlark/starlark.go @@ -27,12 +27,20 @@ def apply(metric): ## File containing a Starlark script. # script = "/usr/local/bin/myscript.star" + + ## The constants of the Starlark script. + # [processors.starlark.constants] + # max_size = 10 + # threshold = 0.75 + # default_name = "Julia" + # debug_mode = true ` ) type Starlark struct { - Source string `toml:"source"` - Script string `toml:"script"` + Source string `toml:"source"` + Script string `toml:"script"` + Constants map[string]interface{} `toml:"constants"` Log telegraf.Logger `toml:"-"` @@ -61,6 +69,7 @@ func (s *Starlark) Init() error { builtins["Metric"] = starlark.NewBuiltin("Metric", newMetric) builtins["deepcopy"] = starlark.NewBuiltin("deepcopy", deepcopy) builtins["catch"] = starlark.NewBuiltin("catch", catch) + s.addConstants(&builtins) program, err := s.sourceProgram(builtins) if err != nil { @@ -197,6 +206,17 @@ func (s *Starlark) Stop() error { return nil } +// Add all the constants defined in the plugin as constants of the script +func (s *Starlark) addConstants(builtins *starlark.StringDict) { + for key, val := range s.Constants { + sVal, err := asStarlarkValue(val) + if err != nil { + s.Log.Errorf("Unsupported type: %T", val) + } + (*builtins)[key] = sVal + } +} + func containsMetric(metrics []telegraf.Metric, metric telegraf.Metric) bool { for _, m := range metrics { if m == metric { diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index afcb721025d55..f506e26ecfa0b 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -1,6 +1,7 @@ package starlark import ( + "errors" "fmt" "io/ioutil" "os" @@ -10,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -250,6 +252,7 @@ func TestMetric(t *testing.T) { var tests = []struct { name string source string + constants map[string]interface{} input []telegraf.Metric expected []telegraf.Metric expectedErrorStr string @@ -2418,13 +2421,64 @@ def process(metric): ), }, }, + { + name: "support constants", + source: ` +def apply(metric): + metric.fields["p1"] = max_size + metric.fields["p2"] = threshold + metric.fields["p3"] = default_name + metric.fields["p4"] = debug_mode + metric.fields["p5"] = supported_values[0] + metric.fields["p6"] = supported_values[1] + metric.fields["p7"] = supported_entries[2] + metric.fields["p8"] = supported_entries["3"] + return metric + `, + constants: map[string]interface{}{ + "max_size": 10, + "threshold": 0.75, + "default_name": "Julia", + "debug_mode": true, + "supported_values": []interface{}{2, "3"}, + "supported_entries": map[interface{}]interface{}{ + 2: "two", + "3": "three", + }, + "unsupported_type": time.Now(), + }, + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "p1": 10, + "p2": 0.75, + "p3": "Julia", + "p4": true, + "p5": 2, + "p6": "3", + "p7": "two", + "p8": "three", + }, + time.Unix(0, 0), + ), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { plugin := &Starlark{ - Source: tt.source, - Log: testutil.Logger{}, + Source: tt.source, + Log: testutil.Logger{}, + Constants: tt.constants, } err := plugin.Init() require.NoError(t, err) @@ -2451,6 +2505,108 @@ def process(metric): } } +// Tests the behavior of the plugin according the provided TOML configuration. +func TestConfig(t *testing.T) { + var tests = []struct { + name string + config string + input []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "support constants from configuration", + config: ` +[[processors.starlark]] + source = ''' +def apply(metric): + metric.fields["p1"] = max_size + metric.fields["p2"] = threshold + metric.fields["p3"] = default_name + metric.fields["p4"] = debug_mode + metric.fields["p5"] = supported_values[0] + metric.fields["p6"] = supported_values[1] + metric.fields["p7"] = supported_entries["2"] + metric.fields["p8"] = supported_entries["3"] + return metric +''' + [processors.starlark.constants] + max_size = 10 + threshold = 0.75 + default_name = "Elsa" + debug_mode = true + supported_values = ["2", "3"] + supported_entries = { "2" = "two", "3" = "three" } + unsupported_type = 2009-06-12 + `, + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "p1": 10, + "p2": 0.75, + "p3": "Elsa", + "p4": true, + "p5": "2", + "p6": "3", + "p7": "two", + "p8": "three", + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + plugin, err := buildPlugin(tt.config) + require.NoError(t, err) + err = plugin.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + + err = plugin.Start(&acc) + require.NoError(t, err) + + for _, m := range tt.input { + err = plugin.Add(m, &acc) + require.NoError(t, err) + } + + err = plugin.Stop() + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} + +// Build a Starlark plugin from the provided configuration. +func buildPlugin(configContent string) (*Starlark, error) { + c := config.NewConfig() + err := c.LoadConfigData([]byte(configContent)) + if err != nil { + return nil, err + } + if len(c.Processors) != 1 { + return nil, errors.New("Only one processor was expected") + } + plugin, ok := (c.Processors[0].Processor).(*Starlark) + if !ok { + return nil, errors.New("Only a Starlark processor was expected") + } + plugin.Log = testutil.Logger{} + return plugin, nil +} + func TestScript(t *testing.T) { var tests = []struct { name string From f2cf447e638ea6371334602b07872a98394acf35 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 1 Feb 2021 14:43:24 -0600 Subject: [PATCH 204/761] Update go-ping to latest version (#8771) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 912eb3f414c79..3f8ead8b69429 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.4.0 github.com/go-ole/go-ole v1.2.1 // indirect - github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663 + github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible github.com/go-sql-driver/mysql v1.5.0 github.com/goburrow/modbus v0.1.0 diff --git a/go.sum b/go.sum index 6ccd8b5bacbbd..021e9859293a8 100644 --- a/go.sum +++ b/go.sum @@ -223,8 +223,8 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663 h1:jI2GiiRh+pPbey52EVmbU6kuLiXqwy4CXZ4gwUBj8Y0= -github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI= +github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c h1:fWdhUpCuoeNIPiQ+pkAmmERYEjhVx5/cbVGK7T99OkI= +github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= From 3b8df55b9c8c15353b8778ab3a19f0b1a5021d29 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Mon, 1 Feb 2021 13:46:30 -0800 Subject: [PATCH 205/761] Update CHANGELOG.md (#8782) separate out open hardware monitoring external plugin --- CHANGELOG.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 33fb315909385..21fabb86a6e47 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,6 @@ - [#8616](https://github.com/influxdata/telegraf/pull/8616) Add Event Log support for Windows - [#8602](https://github.com/influxdata/telegraf/pull/8602) `inputs.postgresql_extensible` Add timestamp column support to postgresql_extensible - [#8627](https://github.com/influxdata/telegraf/pull/8627) `parsers.csv` Added ability to define skip values in csv parser - - [#8646](https://github.com/influxdata/telegraf/pull/8646) link to Open Hardware Monitor - [#8055](https://github.com/influxdata/telegraf/pull/8055) `outputs.http` outputs/http: add option to control idle connection timeout - [#7897](https://github.com/influxdata/telegraf/pull/7897) `common.tls` common/tls: Allow specifying SNI hostnames - [#8541](https://github.com/influxdata/telegraf/pull/8541) `inputs.snmp` Extended the internal snmp wrapper to support AES192, AES192C, AES256, and AES256C @@ -44,6 +43,10 @@ - [#8588](https://github.com/influxdata/telegraf/pull/8588) `inputs.snmp` Input SNMP plugin - upgrade gosnmp library to version 1.29.0 - [#8502](https://github.com/influxdata/telegraf/pull/8502) `inputs.http_listener_v2` Fix Stop() bug when plugin fails to start +#### New External Plugins + + - [#8646](https://github.com/influxdata/telegraf/pull/8646) [Open Hardware Monitoring](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) Input Plugin + ## v1.17.0 [2020-12-18] From 7e78a08eba1c6b5a846ba2ffdb58c431a5ff1e4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patryk=20Ma=C5=82ek?= <69143962+pmalek-sumo@users.noreply.github.com> Date: Thu, 4 Feb 2021 23:02:27 +0100 Subject: [PATCH 206/761] AWS EC2 metadata processor Using StreamingProcessor (#8707) --- docs/LICENSE_OF_DEPENDENCIES.md | 9 + go.mod | 7 +- go.sum | 20 ++ .../parallel/ordered.go | 0 .../parallel/parallel.go | 0 .../parallel/parallel_test.go | 2 +- .../parallel/unordered.go | 0 plugins/processors/all/all.go | 1 + plugins/processors/aws/ec2/README.md | 52 +++ plugins/processors/aws/ec2/ec2.go | 310 ++++++++++++++++++ plugins/processors/aws/ec2/ec2_test.go | 59 ++++ plugins/processors/ifname/ifname.go | 2 +- plugins/processors/reverse_dns/reversedns.go | 2 +- 13 files changed, 460 insertions(+), 4 deletions(-) rename plugins/{processors/reverse_dns => common}/parallel/ordered.go (100%) rename plugins/{processors/reverse_dns => common}/parallel/parallel.go (100%) rename plugins/{processors/reverse_dns => common}/parallel/parallel_test.go (96%) rename plugins/{processors/reverse_dns => common}/parallel/unordered.go (100%) create mode 100644 plugins/processors/aws/ec2/README.md create mode 100644 plugins/processors/aws/ec2/ec2.go create mode 100644 plugins/processors/aws/ec2/ec2_test.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 14c46448c3b4a..154f13a88bde0 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -25,6 +25,15 @@ following works: - github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE) - github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING) - github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/config [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/credentials [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/feature/ec2/imds [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/ec2/imds/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/ec2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/presigned-url/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/sso [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/sts [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/sts/LICENSE.txt) +- github.com/aws/smithy-go [Apache License 2.0](https://github.com/aws/smithy-go/blob/main/LICENSE) - github.com/benbjohnson/clock [MIT License](https://github.com/benbjohnson/clock/blob/master/LICENSE) - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) - github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 3f8ead8b69429..f65401c5a141c 100644 --- a/go.mod +++ b/go.mod @@ -27,6 +27,11 @@ require ( github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-metrics v0.3.0 // indirect github.com/aws/aws-sdk-go v1.34.34 + github.com/aws/aws-sdk-go-v2 v1.1.0 + github.com/aws/aws-sdk-go-v2/config v1.1.0 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.1 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 + github.com/aws/smithy-go v1.0.0 github.com/benbjohnson/clock v1.0.3 github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 @@ -62,7 +67,7 @@ require ( github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/protobuf v1.3.5 github.com/golang/snappy v0.0.1 - github.com/google/go-cmp v0.5.2 + github.com/google/go-cmp v0.5.4 github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.12 github.com/gorilla/mux v1.6.2 diff --git a/go.sum b/go.sum index 021e9859293a8..220a706d09e11 100644 --- a/go.sum +++ b/go.sum @@ -115,6 +115,24 @@ github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUq github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYKI= github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go-v2 v1.1.0 h1:sKP6QWxdN1oRYjl+k6S3bpgBI+XUx/0mqVOLIw4lR/Q= +github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= +github.com/aws/aws-sdk-go-v2/config v1.1.0 h1:f3QVGpAcKrWpYNhKB8hE/buMjcfei95buQ5xdr/xYcU= +github.com/aws/aws-sdk-go-v2/config v1.1.0/go.mod h1:zfTyI6wH8yiZEvb6hGVza+S5oIB2lts2M7TDB4zMoeo= +github.com/aws/aws-sdk-go-v2/credentials v1.1.0 h1:RV0yzjGSNnJhTBco+01lwvWlc2m8gqBfha3D9dQDk78= +github.com/aws/aws-sdk-go-v2/credentials v1.1.0/go.mod h1:cV0qgln5tz/76IxAV0EsJVmmR5ZzKSQwWixsIvzk6lY= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.1 h1:eoT5e1jJf8Vcacu+mkEe1cgsgEAkuabpjhgq03GiXKc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.1/go.mod h1:b+8dhYiS3m1xpzTZWk5EuQml/vSmPhKlzM/bAm/fttY= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 h1:+VnEgB1yp+7KlOsk6FXX/v/fU9uL5oSujIMkKQBBmp8= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0/go.mod h1:/6514fU/SRcY3+ousB1zjUqiXjruSuti2qcfE70osOc= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1 h1:E7zGGgca12s7jA3VqirtaltXj5Wwe5eUIsUlNl1v+d8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1/go.mod h1:PISaKWylTYAyruocNk4Lr9miOOJjOcVBd7twCPbydDk= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.0 h1:oQ/FE7bk1MldOs6RBTr+D7uMv1RfQ8WxxBRuH4lYEEo= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.0/go.mod h1:VnS0vieB4YxutHFP9ROJ3ciT3T/XJZjxxv9L39eo8OQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.1.0 h1:X9oTTSm14wc0ef4dit7aIB02UIw1kVi/imV7zLhFDdM= +github.com/aws/aws-sdk-go-v2/service/sts v1.1.0/go.mod h1:A15vQm/MsXL3a410CxwKQ5IBoSvIg+cr10fEFzPgEYs= +github.com/aws/smithy-go v1.0.0 h1:hkhcRKG9rJ4Fn+RbfXY7Tz7b3ITLDyolBnLLBhwbg/c= +github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= @@ -287,6 +305,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= diff --git a/plugins/processors/reverse_dns/parallel/ordered.go b/plugins/common/parallel/ordered.go similarity index 100% rename from plugins/processors/reverse_dns/parallel/ordered.go rename to plugins/common/parallel/ordered.go diff --git a/plugins/processors/reverse_dns/parallel/parallel.go b/plugins/common/parallel/parallel.go similarity index 100% rename from plugins/processors/reverse_dns/parallel/parallel.go rename to plugins/common/parallel/parallel.go diff --git a/plugins/processors/reverse_dns/parallel/parallel_test.go b/plugins/common/parallel/parallel_test.go similarity index 96% rename from plugins/processors/reverse_dns/parallel/parallel_test.go rename to plugins/common/parallel/parallel_test.go index 0d2839a24f4cd..c24f67e17c79d 100644 --- a/plugins/processors/reverse_dns/parallel/parallel_test.go +++ b/plugins/common/parallel/parallel_test.go @@ -7,7 +7,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/plugins/processors/reverse_dns/parallel" + "github.com/influxdata/telegraf/plugins/common/parallel" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/processors/reverse_dns/parallel/unordered.go b/plugins/common/parallel/unordered.go similarity index 100% rename from plugins/processors/reverse_dns/parallel/unordered.go rename to plugins/common/parallel/unordered.go diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index c84ee81110ee5..face81ad39241 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -1,6 +1,7 @@ package all import ( + _ "github.com/influxdata/telegraf/plugins/processors/aws/ec2" _ "github.com/influxdata/telegraf/plugins/processors/clone" _ "github.com/influxdata/telegraf/plugins/processors/converter" _ "github.com/influxdata/telegraf/plugins/processors/date" diff --git a/plugins/processors/aws/ec2/README.md b/plugins/processors/aws/ec2/README.md new file mode 100644 index 0000000000000..583ca536e62c9 --- /dev/null +++ b/plugins/processors/aws/ec2/README.md @@ -0,0 +1,52 @@ +# AWS EC2 Metadata Processor Plugin + +AWS EC2 Metadata processor plugin appends metadata gathered from [AWS IMDS][] +to metrics associated with EC2 instances. + +[AWS IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html + +## Configuration + +```toml +[[processors.aws_ec2]] + ## Tags to attach to metrics. Available tags: + ## * accountId + ## * architecture + ## * availabilityZone + ## * billingProducts + ## * imageId + ## * instanceId + ## * instanceType + ## * kernelId + ## * pendingTime + ## * privateIp + ## * ramdiskId + ## * region + ## * version + tags = [] + + ## Timeout for http requests made by against AWS EC2 metadata endpoint. + timeout = "10s" + + ## ordered controls whether or not the metrics need to stay in the same order + ## this plugin received them in. If false, this plugin will change the order + ## with requests hitting cached results moving through immediately and not + ## waiting on slower lookups. This may cause issues for you if you are + ## depending on the order of metrics staying the same. If so, set this to true. + ## Keeping the metrics ordered may be slightly slower. + ordered = false +``` + +## Example + +Append `accountId` and `instanceId` to metrics tags: + +```toml +[[processors.aws_ec2]] + tags = [ "accountId", "instanceId"] +``` + +```diff +- cpu,hostname=localhost time_idle=42 ++ cpu,hostname=localhost,accountId=123456789,instanceId=i-123456789123 time_idle=42 +``` diff --git a/plugins/processors/aws/ec2/ec2.go b/plugins/processors/aws/ec2/ec2.go new file mode 100644 index 0000000000000..8d22a65305ccd --- /dev/null +++ b/plugins/processors/aws/ec2/ec2.go @@ -0,0 +1,310 @@ +package ec2 + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/parallel" + "github.com/influxdata/telegraf/plugins/processors" +) + +type AwsEc2Processor struct { + ImdsTags []string `toml:"imds_tags"` + EC2Tags []string `toml:"ec2_tags"` + Timeout config.Duration `toml:"timeout"` + Ordered bool `toml:"ordered"` + MaxParallelCalls int `toml:"max_parallel_calls"` + + Log telegraf.Logger `toml:"-"` + imdsClient *imds.Client `toml:"-"` + imdsTags map[string]struct{} `toml:"-"` + ec2Client *ec2.Client `toml:"-"` + parallel parallel.Parallel `toml:"-"` + instanceID string `toml:"-"` +} + +const sampleConfig = ` + ## Instance identity document tags to attach to metrics. + ## For more information see: + ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html + ## + ## Available tags: + ## * accountId + ## * architecture + ## * availabilityZone + ## * billingProducts + ## * imageId + ## * instanceId + ## * instanceType + ## * kernelId + ## * pendingTime + ## * privateIp + ## * ramdiskId + ## * region + ## * version + imds_tags = [] + + ## EC2 instance tags retrieved with DescribeTags action. + ## In case tag is empty upon retrieval it's omitted when tagging metrics. + ## Note that in order for this to work, role attached to EC2 instance or AWS + ## credentials available from the environment must have a policy attached, that + ## allows ec2:DescribeTags. + ## + ## For more information see: + ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html + ec2_tags = [] + + ## Timeout for http requests made by against aws ec2 metadata endpoint. + timeout = "10s" + + ## ordered controls whether or not the metrics need to stay in the same order + ## this plugin received them in. If false, this plugin will change the order + ## with requests hitting cached results moving through immediately and not + ## waiting on slower lookups. This may cause issues for you if you are + ## depending on the order of metrics staying the same. If so, set this to true. + ## Keeping the metrics ordered may be slightly slower. + ordered = false + + ## max_parallel_calls is the maximum number of AWS API calls to be in flight + ## at the same time. + ## It's probably best to keep this number fairly low. + max_parallel_calls = 10 +` + +const ( + DefaultMaxOrderedQueueSize = 10_000 + DefaultMaxParallelCalls = 10 + DefaultTimeout = 10 * time.Second +) + +var allowedImdsTags = map[string]struct{}{ + "accountId": {}, + "architecture": {}, + "availabilityZone": {}, + "billingProducts": {}, + "imageId": {}, + "instanceId": {}, + "instanceType": {}, + "kernelId": {}, + "pendingTime": {}, + "privateIp": {}, + "ramdiskId": {}, + "region": {}, + "version": {}, +} + +func (r *AwsEc2Processor) SampleConfig() string { + return sampleConfig +} + +func (r *AwsEc2Processor) Description() string { + return "Attach AWS EC2 metadata to metrics" +} + +func (r *AwsEc2Processor) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { + r.parallel.Enqueue(metric) + return nil +} + +func (r *AwsEc2Processor) Init() error { + r.Log.Debug("Initializing AWS EC2 Processor") + if len(r.EC2Tags) == 0 && len(r.ImdsTags) == 0 { + return errors.New("no tags specified in configuration") + } + + ctx := context.Background() + cfg, err := awsconfig.LoadDefaultConfig(ctx) + if err != nil { + return fmt.Errorf("failed loading default AWS config: %w", err) + } + r.imdsClient = imds.NewFromConfig(cfg) + + iido, err := r.imdsClient.GetInstanceIdentityDocument( + ctx, + &imds.GetInstanceIdentityDocumentInput{}, + ) + if err != nil { + return fmt.Errorf("failed getting instance identity document: %w", err) + } + + r.instanceID = iido.InstanceID + + if len(r.EC2Tags) > 0 { + // Add region to AWS config when creating EC2 service client since it's required. + cfg.Region = iido.Region + + r.ec2Client = ec2.NewFromConfig(cfg) + + // Chceck if instance is allowed to call DescribeTags. + _, err = r.ec2Client.DescribeTags(ctx, &ec2.DescribeTagsInput{ + DryRun: true, + }) + var ae smithy.APIError + if errors.As(err, &ae) { + if ae.ErrorCode() != "DryRunOperation" { + return fmt.Errorf("instance doesn't have permissions to call DescribeTags: %w", err) + } + } else if err != nil { + return fmt.Errorf("error calling DescribeTags: %w", err) + } + } + + for _, tag := range r.ImdsTags { + if len(tag) > 0 && isImdsTagAllowed(tag) { + r.imdsTags[tag] = struct{}{} + } else { + return fmt.Errorf("not allowed metadata tag specified in configuration: %s", tag) + } + } + if len(r.imdsTags) == 0 && len(r.EC2Tags) == 0 { + return errors.New("no allowed metadata tags specified in configuration") + } + + return nil +} + +func (r *AwsEc2Processor) Start(acc telegraf.Accumulator) error { + if r.Ordered { + r.parallel = parallel.NewOrdered(acc, r.asyncAdd, DefaultMaxOrderedQueueSize, r.MaxParallelCalls) + } else { + r.parallel = parallel.NewUnordered(acc, r.asyncAdd, r.MaxParallelCalls) + } + + return nil +} + +func (r *AwsEc2Processor) Stop() error { + if r.parallel == nil { + return errors.New("Trying to stop unstarted AWS EC2 Processor") + } + r.parallel.Stop() + return nil +} + +func (r *AwsEc2Processor) asyncAdd(metric telegraf.Metric) []telegraf.Metric { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(r.Timeout)) + defer cancel() + + // Add IMDS Instance Identity Document tags. + if len(r.imdsTags) > 0 { + iido, err := r.imdsClient.GetInstanceIdentityDocument( + ctx, + &imds.GetInstanceIdentityDocumentInput{}, + ) + if err != nil { + r.Log.Errorf("Error when calling GetInstanceIdentityDocument: %v", err) + return []telegraf.Metric{metric} + } + + for tag := range r.imdsTags { + if v := getTagFromInstanceIdentityDocument(iido, tag); v != "" { + metric.AddTag(tag, v) + } + } + } + + // Add EC2 instance tags. + if len(r.EC2Tags) > 0 { + dto, err := r.ec2Client.DescribeTags(ctx, &ec2.DescribeTagsInput{ + Filters: createFilterFromTags(r.instanceID, r.EC2Tags), + }) + if err != nil { + r.Log.Errorf("Error during EC2 DescribeTags: %v", err) + return []telegraf.Metric{metric} + } + + for _, tag := range r.EC2Tags { + if v := getTagFromDescribeTags(dto, tag); v != "" { + metric.AddTag(tag, v) + } + } + } + + return []telegraf.Metric{metric} +} + +func init() { + processors.AddStreaming("aws_ec2", func() telegraf.StreamingProcessor { + return newAwsEc2Processor() + }) +} + +func newAwsEc2Processor() *AwsEc2Processor { + return &AwsEc2Processor{ + MaxParallelCalls: DefaultMaxParallelCalls, + Timeout: config.Duration(DefaultTimeout), + imdsTags: make(map[string]struct{}), + } +} + +func createFilterFromTags(instanceID string, tagNames []string) []types.Filter { + return []types.Filter{ + { + Name: aws.String("resource-id"), + Values: []string{instanceID}, + }, + { + Name: aws.String("key"), + Values: tagNames, + }, + } +} + +func getTagFromDescribeTags(o *ec2.DescribeTagsOutput, tag string) string { + for _, t := range o.Tags { + if *t.Key == tag { + return *t.Value + } + } + return "" +} + +func getTagFromInstanceIdentityDocument(o *imds.GetInstanceIdentityDocumentOutput, tag string) string { + switch tag { + case "accountId": + return o.AccountID + case "architecture": + return o.Architecture + case "availabilityZone": + return o.AvailabilityZone + case "billingProducts": + return strings.Join(o.BillingProducts, ",") + case "imageId": + return o.ImageID + case "instanceId": + return o.InstanceID + case "instanceType": + return o.InstanceType + case "kernelId": + return o.KernelID + case "pendingTime": + return o.PendingTime.String() + case "privateIp": + return o.PrivateIP + case "ramdiskId": + return o.RamdiskID + case "region": + return o.Region + case "version": + return o.Version + default: + return "" + } +} + +func isImdsTagAllowed(tag string) bool { + _, ok := allowedImdsTags[tag] + return ok +} diff --git a/plugins/processors/aws/ec2/ec2_test.go b/plugins/processors/aws/ec2/ec2_test.go new file mode 100644 index 0000000000000..8eb599206ff99 --- /dev/null +++ b/plugins/processors/aws/ec2/ec2_test.go @@ -0,0 +1,59 @@ +package ec2 + +import ( + "testing" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestBasicStartup(t *testing.T) { + p := newAwsEc2Processor() + p.Log = &testutil.Logger{} + p.ImdsTags = []string{"accountId", "instanceId"} + acc := &testutil.Accumulator{} + require.NoError(t, p.Start(acc)) + require.NoError(t, p.Stop()) + + require.Len(t, acc.GetTelegrafMetrics(), 0) + require.Len(t, acc.Errors, 0) +} + +func TestBasicStartupWithEC2Tags(t *testing.T) { + p := newAwsEc2Processor() + p.Log = &testutil.Logger{} + p.ImdsTags = []string{"accountId", "instanceId"} + p.EC2Tags = []string{"Name"} + acc := &testutil.Accumulator{} + require.NoError(t, p.Start(acc)) + require.NoError(t, p.Stop()) + + require.Len(t, acc.GetTelegrafMetrics(), 0) + require.Len(t, acc.Errors, 0) +} + +func TestBasicInitNoTagsReturnAnError(t *testing.T) { + p := newAwsEc2Processor() + p.Log = &testutil.Logger{} + p.ImdsTags = []string{} + err := p.Init() + require.Error(t, err) +} + +func TestBasicInitInvalidTagsReturnAnError(t *testing.T) { + p := newAwsEc2Processor() + p.Log = &testutil.Logger{} + p.ImdsTags = []string{"dummy", "qwerty"} + err := p.Init() + require.Error(t, err) +} + +func TestLoadingConfig(t *testing.T) { + confFile := []byte("[[processors.aws_ec2]]" + "\n" + sampleConfig) + c := config.NewConfig() + err := c.LoadConfigData(confFile) + require.NoError(t, err) + + require.Len(t, c.Processors, 1) +} diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index a5666bf0030a8..68b41e9f4baf5 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -10,9 +10,9 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/snmp" + "github.com/influxdata/telegraf/plugins/common/parallel" si "github.com/influxdata/telegraf/plugins/inputs/snmp" "github.com/influxdata/telegraf/plugins/processors" - "github.com/influxdata/telegraf/plugins/processors/reverse_dns/parallel" ) var sampleConfig = ` diff --git a/plugins/processors/reverse_dns/reversedns.go b/plugins/processors/reverse_dns/reversedns.go index bef79a01c92eb..616294fc5e54d 100644 --- a/plugins/processors/reverse_dns/reversedns.go +++ b/plugins/processors/reverse_dns/reversedns.go @@ -5,8 +5,8 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/parallel" "github.com/influxdata/telegraf/plugins/processors" - "github.com/influxdata/telegraf/plugins/processors/reverse_dns/parallel" ) const sampleConfig = ` From 86e50f85b39fe9afe1b62b8e1f5ef8c268ff1894 Mon Sep 17 00:00:00 2001 From: Sam Arnold Date: Fri, 5 Feb 2021 10:57:10 -0400 Subject: [PATCH 207/761] Increase build verbosity with go mod download -x (#8496) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ac2281499602c..f74a3fd556b84 100644 --- a/Makefile +++ b/Makefile @@ -83,7 +83,7 @@ help: .PHONY: deps deps: - go mod download + go mod download -x .PHONY: telegraf telegraf: From 90392e16d19c9dacfa068ab31aaabe02c3cecd29 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 8 Feb 2021 09:36:23 -0600 Subject: [PATCH 208/761] Update README for inputs.ping with correct cmd for native ping on Linux (#8787) * Update readme to enable native ping * Provide more information on the numbers being set * reference for ping_group_range already exists --- plugins/inputs/ping/README.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 83a91a2eeb96d..7293a17081a71 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -127,12 +127,10 @@ setting capabilities. [man 7 capabilities]: http://man7.org/linux/man-pages/man7/capabilities.7.html -When Telegraf cannot listen on a privileged ICMP socket it will attempt to use -ICMP echo sockets. If you wish to use this method you must ensure Telegraf's -group, usually `telegraf`, is allowed to use ICMP echo sockets: +On Linux the default behaviour is to restrict creation of ping sockets for everybody. Execute the below command to enable creation of ping sockets for all possible user groups. The integers provided to ping_group_range defines the range of user groups that are permited to create ping sockets, were 2147483647 (the max of a signed int 2^31) is the max group identifier (GID). ```sh -$ sysctl -w net.ipv4.ping_group_range="GROUP_ID_LOW GROUP_ID_HIGH" +$ sudo sysctl -w net.ipv4.ping_group_range="0 2147483647" ``` Reference [`man 7 icmp`][man 7 icmp] for more information about ICMP echo From ba66d4facbda8899e176c53d3cc0394c78196e25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Mon, 8 Feb 2021 17:18:40 +0100 Subject: [PATCH 209/761] Revive fixes - part 1 (#8797) * Revive fixes regarding following set of rules: [rule.blank-imports] [rule.context-as-argument] [rule.context-keys-type] [rule.dot-imports] [rule.error-return] [rule.error-strings] [rule.indent-error-flow] [rule.errorf] --- config/aws/credentials.go | 4 +- internal/exec_unix.go | 2 +- internal/exec_windows.go | 2 +- internal/internal.go | 10 ++-- internal/internal_test.go | 6 +- models/buffer.go | 3 +- models/running_output_test.go | 4 +- plugins/aggregators/all/all.go | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/apache/apache.go | 6 +- plugins/inputs/bind/bind.go | 4 +- plugins/inputs/bind/bind_test.go | 2 +- plugins/inputs/cassandra/cassandra.go | 32 +++++----- .../cisco_telemetry_mdt.go | 6 +- plugins/inputs/docker_log/docker_log.go | 3 +- plugins/inputs/dovecot/dovecot.go | 3 +- plugins/inputs/haproxy/haproxy.go | 7 +-- plugins/inputs/intel_rdt/intel_rdt.go | 7 +-- plugins/inputs/jolokia/jolokia.go | 15 +++-- plugins/inputs/jolokia/jolokia_test.go | 2 +- .../openconfig_telemetry.go | 18 +++--- .../kafka_consumer_legacy.go | 6 +- plugins/inputs/kernel/kernel.go | 2 +- plugins/inputs/kernel_vmstat/kernel_vmstat.go | 2 +- .../inputs/minecraft/internal/rcon/rcon.go | 10 ++-- plugins/inputs/modbus/modbus.go | 7 +-- plugins/inputs/modbus/modbus_test.go | 5 ++ plugins/inputs/monit/monit.go | 6 +- plugins/inputs/multifile/multifile.go | 3 +- plugins/inputs/mysql/mysql.go | 22 ++++--- plugins/inputs/opcua/opcua_client.go | 25 ++++---- plugins/inputs/openldap/openldap.go | 18 +++--- plugins/inputs/opensmtpd/opensmtpd.go | 3 +- plugins/inputs/passenger/passenger.go | 2 +- plugins/inputs/passenger/passenger_test.go | 2 +- plugins/inputs/phpfpm/fcgi_client.go | 2 +- plugins/inputs/phpfpm/phpfpm.go | 5 +- plugins/inputs/ping/ping.go | 16 ++--- .../postgresql_extensible.go | 2 +- plugins/inputs/processes/processes_test.go | 2 +- plugins/inputs/procstat/process.go | 2 +- plugins/inputs/prometheus/parser.go | 6 +- plugins/inputs/rethinkdb/rethinkdb.go | 4 +- plugins/inputs/rethinkdb/rethinkdb_server.go | 28 ++++----- plugins/inputs/snmp_legacy/snmp_legacy.go | 18 +++--- plugins/inputs/system/ps.go | 2 +- plugins/inputs/tail/tail.go | 18 +++--- plugins/inputs/varnish/varnish.go | 3 +- plugins/inputs/zookeeper/zookeeper.go | 5 +- plugins/outputs/all/all.go | 1 + plugins/outputs/amon/amon.go | 18 +++--- plugins/outputs/amqp/amqp.go | 32 +++++----- .../application_insights.go | 59 ++++++++----------- .../application_insights_test.go | 9 +++ .../application_insights/transmitter.go | 8 +-- .../outputs/azure_monitor/azure_monitor.go | 34 +++++------ .../azure_monitor/azure_monitor_test.go | 8 +++ plugins/outputs/cloud_pubsub/pubsub.go | 8 +-- plugins/outputs/cratedb/cratedb.go | 5 +- plugins/outputs/datadog/datadog.go | 18 +++--- plugins/outputs/datadog/datadog_test.go | 2 +- plugins/outputs/exec/exec.go | 2 +- plugins/outputs/graphite/graphite.go | 37 ++++++------ plugins/outputs/graphite/graphite_test.go | 11 +++- plugins/outputs/health/compares.go | 3 +- plugins/outputs/health/health.go | 13 ++-- plugins/outputs/health/health_test.go | 10 ++++ plugins/outputs/instrumental/instrumental.go | 9 +-- plugins/outputs/librato/librato.go | 47 +++++++-------- plugins/outputs/librato/librato_test.go | 20 +++---- plugins/outputs/logzio/logzio.go | 12 ++-- plugins/outputs/opentsdb/opentsdb.go | 29 ++++----- .../prometheus_client/prometheus_client.go | 3 +- plugins/outputs/riemann/riemann.go | 24 ++++---- plugins/outputs/riemann/riemann_test.go | 9 ++- plugins/outputs/riemann_legacy/riemann.go | 5 +- plugins/outputs/wavefront/wavefront.go | 9 ++- plugins/parsers/influx/escape.go | 9 +-- plugins/parsers/prometheus/parser.go | 18 +++--- plugins/processors/all/all.go | 1 + plugins/processors/converter/converter.go | 15 ++--- plugins/processors/ifname/ifname.go | 3 +- plugins/processors/ifname/ttl_cache.go | 6 +- plugins/processors/strings/strings.go | 8 +-- plugins/processors/topk/topk.go | 33 +++++------ plugins/serializers/graphite/graphite.go | 11 ++-- plugins/serializers/influx/escape.go | 9 +-- plugins/serializers/influx/influx.go | 10 ++-- 88 files changed, 460 insertions(+), 472 deletions(-) diff --git a/config/aws/credentials.go b/config/aws/credentials.go index f9c98edbf0a4f..d697d96ac335d 100644 --- a/config/aws/credentials.go +++ b/config/aws/credentials.go @@ -22,9 +22,9 @@ type CredentialConfig struct { func (c *CredentialConfig) Credentials() client.ConfigProvider { if c.RoleARN != "" { return c.assumeCredentials() - } else { - return c.rootCredentials() } + + return c.rootCredentials() } func (c *CredentialConfig) rootCredentials() client.ConfigProvider { diff --git a/internal/exec_unix.go b/internal/exec_unix.go index d41aae825d6d5..60b606cfb5f32 100644 --- a/internal/exec_unix.go +++ b/internal/exec_unix.go @@ -50,7 +50,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { // If SIGTERM was sent then treat any process error as a timeout. if termSent { - return TimeoutErr + return ErrTimeout } // Otherwise there was an error unrelated to termination. diff --git a/internal/exec_windows.go b/internal/exec_windows.go index f010bdd96756b..7bab1baf3ac3f 100644 --- a/internal/exec_windows.go +++ b/internal/exec_windows.go @@ -33,7 +33,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { // If SIGTERM was sent then treat any process error as a timeout. if termSent { - return TimeoutErr + return ErrTimeout } // Otherwise there was an error unrelated to termination. diff --git a/internal/internal.go b/internal/internal.go index 777128f667bf6..8b0dfff1fd418 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -26,11 +26,9 @@ import ( const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" var ( - TimeoutErr = errors.New("Command timed out.") - - NotImplementedError = errors.New("not implemented yet") - - VersionAlreadySetError = errors.New("version has already been set") + ErrTimeout = errors.New("command timed out") + ErrorNotImplemented = errors.New("not implemented yet") + ErrorVersionAlreadySet = errors.New("version has already been set") ) // Set via the main module @@ -58,7 +56,7 @@ type ReadWaitCloser struct { // SetVersion sets the telegraf agent version func SetVersion(v string) error { if version != "" { - return VersionAlreadySetError + return ErrorVersionAlreadySet } version = v return nil diff --git a/internal/internal_test.go b/internal/internal_test.go index 2161a300b2956..890a787bf258c 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -62,7 +62,7 @@ func TestRunTimeout(t *testing.T) { err := RunTimeout(cmd, time.Millisecond*20) elapsed := time.Since(start) - assert.Equal(t, TimeoutErr, err) + assert.Equal(t, ErrTimeout, err) // Verify that command gets killed in 20ms, with some breathing room assert.True(t, elapsed < time.Millisecond*75) } @@ -102,7 +102,7 @@ func TestCombinedOutputTimeout(t *testing.T) { _, err := CombinedOutputTimeout(cmd, time.Millisecond*20) elapsed := time.Since(start) - assert.Equal(t, TimeoutErr, err) + assert.Equal(t, ErrTimeout, err) // Verify that command gets killed in 20ms, with some breathing room assert.True(t, elapsed < time.Millisecond*75) } @@ -273,7 +273,7 @@ func TestVersionAlreadySet(t *testing.T) { err = SetVersion("bar") assert.Error(t, err) - assert.IsType(t, VersionAlreadySetError, err) + assert.IsType(t, ErrorVersionAlreadySet, err) assert.Equal(t, "foo", Version()) } diff --git a/models/buffer.go b/models/buffer.go index 9cc1a3d889f38..6cd1a6c71ae26 100644 --- a/models/buffer.go +++ b/models/buffer.go @@ -226,9 +226,8 @@ func (b *Buffer) Reject(batch []telegraf.Metric) { func (b *Buffer) dist(begin, end int) int { if begin <= end { return end - begin - } else { - return b.cap - begin + end } + return b.cap - begin + end } // next returns the next index with wrapping. diff --git a/models/running_output_test.go b/models/running_output_test.go index 38f79f9db397d..abde752bc15e2 100644 --- a/models/running_output_test.go +++ b/models/running_output_test.go @@ -541,7 +541,7 @@ func (m *mockOutput) Write(metrics []telegraf.Metric) error { m.Lock() defer m.Unlock() if m.failWrite { - return fmt.Errorf("Failed Write!") + return fmt.Errorf("failed write") } if m.metrics == nil { @@ -583,7 +583,7 @@ func (m *perfOutput) SampleConfig() string { func (m *perfOutput) Write(metrics []telegraf.Metric) error { if m.failWrite { - return fmt.Errorf("Failed Write!") + return fmt.Errorf("failed write") } return nil } diff --git a/plugins/aggregators/all/all.go b/plugins/aggregators/all/all.go index eabfaa4bf8460..f59e9450d3a49 100644 --- a/plugins/aggregators/all/all.go +++ b/plugins/aggregators/all/all.go @@ -1,6 +1,7 @@ package all import ( + //Blank imports for plugins to register themselves _ "github.com/influxdata/telegraf/plugins/aggregators/basicstats" _ "github.com/influxdata/telegraf/plugins/aggregators/final" _ "github.com/influxdata/telegraf/plugins/aggregators/histogram" diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index e732f2871f0ee..e20d43479344e 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -1,6 +1,7 @@ package all import ( + //Blank imports for plugins to register themselves _ "github.com/influxdata/telegraf/plugins/inputs/activemq" _ "github.com/influxdata/telegraf/plugins/inputs/aerospike" _ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer" diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index ff7341b838f75..0220b43530495 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -77,7 +77,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error { for _, u := range n.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) continue } @@ -111,7 +111,7 @@ func (n *Apache) createHttpClient() (*http.Client, error) { func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { req, err := http.NewRequest("GET", addr.String(), nil) if err != nil { - return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err) + return fmt.Errorf("error on new request to %s : %s", addr.String(), err) } if len(n.Username) != 0 && len(n.Password) != 0 { @@ -120,7 +120,7 @@ func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { resp, err := n.client.Do(req) if err != nil { - return fmt.Errorf("error on request to %s : %s\n", addr.String(), err) + return fmt.Errorf("error on request to %s : %s", addr.String(), err) } defer resp.Body.Close() diff --git a/plugins/inputs/bind/bind.go b/plugins/inputs/bind/bind.go index e27fdfc38ec71..7247b23a4d6fa 100644 --- a/plugins/inputs/bind/bind.go +++ b/plugins/inputs/bind/bind.go @@ -58,7 +58,7 @@ func (b *Bind) Gather(acc telegraf.Accumulator) error { for _, u := range b.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) continue } @@ -88,7 +88,7 @@ func (b *Bind) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { // BIND 9.9+ return b.readStatsXMLv3(addr, acc) default: - return fmt.Errorf("URL %s is ambiguous. Please check plugin documentation for supported URL formats.", + return fmt.Errorf("provided URL %s is ambiguous, please check plugin documentation for supported URL formats", addr) } } diff --git a/plugins/inputs/bind/bind_test.go b/plugins/inputs/bind/bind_test.go index 7ca79c1ef19a4..f7849e1735255 100644 --- a/plugins/inputs/bind/bind_test.go +++ b/plugins/inputs/bind/bind_test.go @@ -623,5 +623,5 @@ func TestBindUnparseableURL(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.Contains(t, err.Error(), "Unable to parse address") + assert.Contains(t, err.Error(), "unable to parse address") } diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index 6f6f86e32f592..cfb077bd64963 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io/ioutil" - "log" "net/http" "net/url" "strings" @@ -28,9 +27,10 @@ func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error type Cassandra struct { jClient JolokiaClient - Context string - Servers []string - Metrics []string + Context string `toml:"context"` + Servers []string `toml:"servers"` + Metrics []string `toml:"metrics"` + Log telegraf.Logger `toml:"-"` } type javaMetric struct { @@ -125,8 +125,7 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) { } j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags) } else { - j.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n", - j.metric, out)) + j.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", j.metric, out)) } } @@ -157,8 +156,7 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) { addCassandraMetric(k, c, v.(map[string]interface{})) } } else { - c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n", - c.metric, out)) + c.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", c.metric, out)) return } } else { @@ -166,8 +164,7 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) { addCassandraMetric(r.(map[string]interface{})["mbean"].(string), c, values.(map[string]interface{})) } else { - c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n", - c.metric, out)) + c.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", c.metric, out)) return } } @@ -215,7 +212,7 @@ func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", requestUrl, resp.StatusCode, http.StatusText(resp.StatusCode), @@ -232,8 +229,8 @@ func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) // Unmarshal json var jsonOut map[string]interface{} - if err = json.Unmarshal([]byte(body), &jsonOut); err != nil { - return nil, errors.New("Error decoding JSON response") + if err = json.Unmarshal(body, &jsonOut); err != nil { + return nil, errors.New("error decoding JSON response") } return jsonOut, nil @@ -263,8 +260,8 @@ func parseServerTokens(server string) map[string]string { return serverTokens } -func (c *Cassandra) Start(acc telegraf.Accumulator) error { - log.Println("W! DEPRECATED: The cassandra plugin has been deprecated. " + +func (c *Cassandra) Start(_ telegraf.Accumulator) error { + c.Log.Warn("DEPRECATED: The cassandra plugin has been deprecated. " + "Please use the jolokia2 plugin instead. " + "https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2") return nil @@ -290,8 +287,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error { m = newCassandraMetric(serverTokens["host"], metric, acc) } else { // unsupported metric type - acc.AddError(fmt.Errorf("E! Unsupported Cassandra metric [%s], skipping", - metric)) + acc.AddError(fmt.Errorf("unsupported Cassandra metric [%s], skipping", metric)) continue } @@ -313,7 +309,7 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error { continue } if out["status"] != 200.0 { - acc.AddError(fmt.Errorf("URL returned with status %v - %s\n", out["status"], requestUrl)) + acc.AddError(fmt.Errorf("provided URL returned with status %v - %s", out["status"], requestUrl)) continue } m.addTagsFields(out) diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index 1a669e96f878e..db34ba94d5f0a 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -20,8 +20,8 @@ import ( internaltls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "google.golang.org/grpc" - "google.golang.org/grpc/credentials" // Register GRPC gzip decoder to support compressed telemetry - _ "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/credentials" + _ "google.golang.org/grpc/encoding/gzip" // Register GRPC gzip decoder to support compressed telemetry "google.golang.org/grpc/peer" ) @@ -261,7 +261,7 @@ func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) { msg := &telemetry.Telemetry{} err := proto.Unmarshal(data, msg) if err != nil { - c.acc.AddError(fmt.Errorf("Cisco MDT failed to decode: %v", err)) + c.acc.AddError(fmt.Errorf("failed to decode: %v", err)) return } diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index 27462ec5a66e7..4ae09e71cca65 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -308,9 +308,8 @@ func (d *DockerLogs) tailContainerLogs( // multiplexed. if hasTTY { return tailStream(acc, tags, container.ID, logReader, "tty") - } else { - return tailMultiplexed(acc, tags, container.ID, logReader) } + return tailMultiplexed(acc, tags, container.ID, logReader) } func parseLine(line []byte) (time.Time, string, error) { diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index 66282c43423b2..6c85acadcb39d 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -53,8 +53,7 @@ const defaultPort = "24242" // Reads stats from all configured servers. func (d *Dovecot) Gather(acc telegraf.Accumulator) error { if !validQuery[d.Type] { - return fmt.Errorf("Error: %s is not a valid query type\n", - d.Type) + return fmt.Errorf("error: %s is not a valid query type", d.Type) } if len(d.Servers) == 0 { diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 9ec9512ea170c..0abc90dbbf3f8 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -123,13 +123,13 @@ func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) erro c, err := net.Dial("unix", socketPath) if err != nil { - return fmt.Errorf("Could not connect to socket '%s': %s", addr, err) + return fmt.Errorf("could not connect to socket '%s': %s", addr, err) } _, errw := c.Write([]byte("show stat\n")) if errw != nil { - return fmt.Errorf("Could not write to socket '%s': %s", addr, errw) + return fmt.Errorf("could not write to socket '%s': %s", addr, errw) } return g.importCsvResult(c, acc, socketPath) @@ -202,9 +202,8 @@ func getSocketAddr(sock string) string { if len(socketAddr) >= 2 { return socketAddr[1] - } else { - return socketAddr[0] } + return socketAddr[0] } var typeNames = []string{"frontend", "backend", "server", "listener"} diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index bcbc1c72a9597..3b56d76e7f972 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -239,17 +239,16 @@ func (r *IntelRDT) createArgsAndStartPQOS(ctx context.Context) { if len(r.parsedCores) != 0 { coresArg := createArgCores(r.parsedCores) args = append(args, coresArg) - go r.readData(args, nil, ctx) - + go r.readData(ctx, args, nil) } else if len(r.processesPIDsMap) != 0 { processArg := createArgProcess(r.processesPIDsMap) args = append(args, processArg) - go r.readData(args, r.processesPIDsMap, ctx) + go r.readData(ctx, args, r.processesPIDsMap) } return } -func (r *IntelRDT) readData(args []string, processesPIDsAssociation map[string]string, ctx context.Context) { +func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAssociation map[string]string) { r.wg.Add(1) defer r.wg.Done() diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 317a47efbd115..db2440f4ffa16 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "log" "net/http" "net/url" "time" @@ -57,6 +56,7 @@ type Jolokia struct { ResponseHeaderTimeout internal.Duration `toml:"response_header_timeout"` ClientTimeout internal.Duration `toml:"client_timeout"` + Log telegraf.Logger `toml:"-"` } const sampleConfig = ` @@ -143,7 +143,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", req.RequestURI, resp.StatusCode, http.StatusText(resp.StatusCode), @@ -161,7 +161,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) // Unmarshal json var jsonOut []map[string]interface{} if err = json.Unmarshal([]byte(body), &jsonOut); err != nil { - return nil, fmt.Errorf("Error decoding JSON response: %s: %s", err, body) + return nil, fmt.Errorf("error decoding JSON response: %s: %s", err, body) } return jsonOut, nil @@ -259,9 +259,8 @@ func (j *Jolokia) extractValues(measurement string, value interface{}, fields ma } func (j *Jolokia) Gather(acc telegraf.Accumulator) error { - if j.jClient == nil { - log.Println("W! DEPRECATED: the jolokia plugin has been deprecated " + + j.Log.Warn("DEPRECATED: the jolokia plugin has been deprecated " + "in favor of the jolokia2 plugin " + "(https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2)") @@ -299,18 +298,18 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error { } for i, resp := range out { if status, ok := resp["status"]; ok && status != float64(200) { - acc.AddError(fmt.Errorf("Not expected status value in response body (%s:%s mbean=\"%s\" attribute=\"%s\"): %3.f", + acc.AddError(fmt.Errorf("not expected status value in response body (%s:%s mbean=\"%s\" attribute=\"%s\"): %3.f", server.Host, server.Port, metrics[i].Mbean, metrics[i].Attribute, status)) continue } else if !ok { - acc.AddError(fmt.Errorf("Missing status in response body")) + acc.AddError(fmt.Errorf("missing status in response body")) continue } if values, ok := resp["value"]; ok { j.extractValues(metrics[i].Name, values, fields) } else { - acc.AddError(fmt.Errorf("Missing key 'value' in output response\n")) + acc.AddError(fmt.Errorf("missing key 'value' in output response")) } } diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index a1ca60604cf00..88f2ab6a19068 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -264,5 +264,5 @@ func TestHttpInvalidJson(t *testing.T) { assert.Error(t, err) assert.Equal(t, 0, len(acc.Metrics)) - assert.Contains(t, err.Error(), "Error decoding JSON response") + assert.Contains(t, err.Error(), "error decoding JSON response") } diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index 0c6fc9e052d43..acc56b187b3e4 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -298,17 +298,15 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, acc.AddError(fmt.Errorf("could not subscribe to %s: %v", grpcServer, err)) return - } else { - // Retry with delay. If delay is not provided, use default - if m.RetryDelay.Duration > 0 { - m.Log.Debugf("Retrying %s with timeout %v", grpcServer, - m.RetryDelay.Duration) - time.Sleep(m.RetryDelay.Duration) - continue - } else { - return - } } + + // Retry with delay. If delay is not provided, use default + if m.RetryDelay.Duration > 0 { + m.Log.Debugf("Retrying %s with timeout %v", grpcServer, m.RetryDelay.Duration) + time.Sleep(m.RetryDelay.Duration) + continue + } + return } for { r, err := stream.Recv() diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go index 939fc8850ef5f..bc884a118c69d 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go @@ -140,11 +140,11 @@ func (k *Kafka) receiver() { return case err := <-k.errs: if err != nil { - k.acc.AddError(fmt.Errorf("Consumer Error: %s\n", err)) + k.acc.AddError(fmt.Errorf("consumer Error: %s", err)) } case msg := <-k.in: if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen { - k.acc.AddError(fmt.Errorf("Message longer than max_message_len (%d > %d)", + k.acc.AddError(fmt.Errorf("message longer than max_message_len (%d > %d)", len(msg.Value), k.MaxMessageLen)) } else { metrics, err := k.parser.Parse(msg.Value) @@ -173,7 +173,7 @@ func (k *Kafka) Stop() { defer k.Unlock() close(k.done) if err := k.Consumer.Close(); err != nil { - k.acc.AddError(fmt.Errorf("Error closing consumer: %s\n", err.Error())) + k.acc.AddError(fmt.Errorf("error closing consumer: %s", err.Error())) } } diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index 461c9564a38e9..ea55803d6b354 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -104,7 +104,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { func (k *Kernel) getProcStat() ([]byte, error) { if _, err := os.Stat(k.statFile); os.IsNotExist(err) { - return nil, fmt.Errorf("kernel: %s does not exist!", k.statFile) + return nil, fmt.Errorf("kernel: %s does not exist", k.statFile) } else if err != nil { return nil, err } diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat.go b/plugins/inputs/kernel_vmstat/kernel_vmstat.go index ffc56d97d154e..7ebb9ab25153b 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat.go @@ -56,7 +56,7 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error { func (k *KernelVmstat) getProcVmstat() ([]byte, error) { if _, err := os.Stat(k.statFile); os.IsNotExist(err) { - return nil, fmt.Errorf("kernel_vmstat: %s does not exist!", k.statFile) + return nil, fmt.Errorf("kernel_vmstat: %s does not exist", k.statFile) } else if err != nil { return nil, err } diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go index f9e49e6e62d4e..e36a46bb07163 100644 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ b/plugins/inputs/minecraft/internal/rcon/rcon.go @@ -32,11 +32,11 @@ const ( // Rcon package errors. var ( - ErrInvalidWrite = errors.New("Failed to write the payload correctly to remote connection.") - ErrInvalidRead = errors.New("Failed to read the response correctly from remote connection.") - ErrInvalidChallenge = errors.New("Server failed to mirror request challenge.") - ErrUnauthorizedRequest = errors.New("Client not authorized to remote server.") - ErrFailedAuthorization = errors.New("Failed to authorize to the remote server.") + ErrInvalidWrite = errors.New("failed to write the payload correctly to remote connection") + ErrInvalidRead = errors.New("failed to read the response correctly from remote connection") + ErrInvalidChallenge = errors.New("server failed to mirror request challenge") + ErrUnauthorizedRequest = errors.New("client not authorized to remote server") + ErrFailedAuthorization = errors.New("failed to authorize to the remote server") ) type Client struct { diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index 21bd8a977da7b..d30704c42c273 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -3,7 +3,6 @@ package modbus import ( "encoding/binary" "fmt" - "log" "math" "net" "net/url" @@ -34,6 +33,7 @@ type Modbus struct { Coils []fieldContainer `toml:"coils"` HoldingRegisters []fieldContainer `toml:"holding_registers"` InputRegisters []fieldContainer `toml:"input_registers"` + Log telegraf.Logger `toml:"-"` registers []register isConnected bool tcpHandler *mb.TCPClientHandler @@ -341,9 +341,8 @@ func validateFieldContainers(t []fieldContainer, n string) error { canonical_name := item.Measurement + "." + item.Name if nameEncountered[canonical_name] { return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, n, item.Name) - } else { - nameEncountered[canonical_name] = true } + nameEncountered[canonical_name] = true if n == cInputRegisters || n == cHoldingRegisters { // search byte order @@ -696,7 +695,7 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error { if err != nil { mberr, ok := err.(*mb.ModbusError) if ok && mberr.ExceptionCode == mb.ExceptionCodeServerDeviceBusy && retry < m.Retries { - log.Printf("I! [inputs.modbus] device busy! Retrying %d more time(s)...", m.Retries-retry) + m.Log.Infof("Device busy! Retrying %d more time(s)...", m.Retries-retry) time.Sleep(m.RetriesWaitTime.Duration) continue } diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index 07af3369a66ec..4bd7e26bb3c62 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -102,6 +102,7 @@ func TestCoils(t *testing.T) { Address: []uint16{ct.address}, }, }, + Log: testutil.Logger{}, } err = modbus.Init() @@ -640,6 +641,7 @@ func TestHoldingRegisters(t *testing.T) { Address: hrt.address, }, }, + Log: testutil.Logger{}, } err = modbus.Init() @@ -694,6 +696,7 @@ func TestRetrySuccessful(t *testing.T) { Address: []uint16{0}, }, }, + Log: testutil.Logger{}, } err = modbus.Init() @@ -739,6 +742,7 @@ func TestRetryFail(t *testing.T) { Address: []uint16{0}, }, }, + Log: testutil.Logger{}, } err = modbus.Init() @@ -772,6 +776,7 @@ func TestRetryFail(t *testing.T) { Address: []uint16{0}, }, }, + Log: testutil.Logger{}, } err = modbus.Init() diff --git a/plugins/inputs/monit/monit.go b/plugins/inputs/monit/monit.go index 00b2d96f93889..606bf0d4cb9ec 100644 --- a/plugins/inputs/monit/monit.go +++ b/plugins/inputs/monit/monit.go @@ -366,9 +366,8 @@ func linkMode(s Service) string { func serviceStatus(s Service) string { if s.Status == 0 { return "running" - } else { - return "failure" } + return "failure" } func pendingAction(s Service) string { @@ -377,9 +376,8 @@ func pendingAction(s Service) string { return "unknown" } return pendingActions[s.PendingAction-1] - } else { - return "none" } + return "none" } func monitoringMode(s Service) string { diff --git a/plugins/inputs/multifile/multifile.go b/plugins/inputs/multifile/multifile.go index 9c9813d9acf5c..359036268a981 100644 --- a/plugins/inputs/multifile/multifile.go +++ b/plugins/inputs/multifile/multifile.go @@ -2,7 +2,6 @@ package multifile import ( "bytes" - "errors" "fmt" "io/ioutil" "math" @@ -130,7 +129,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error { } if value == nil { - return errors.New(fmt.Sprintf("invalid conversion %v", file.Conversion)) + return fmt.Errorf("invalid conversion %v", file.Conversion) } fields[file.Dest] = value diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 89bce5c3519c4..ca02f9889b033 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -646,9 +646,8 @@ func (m *Mysql) parseGlobalVariables(key string, value sql.RawBytes) (interface{ return v, nil } return v, fmt.Errorf("could not parse value: %q", string(value)) - } else { - return v2.ConvertGlobalVariables(key, value) } + return v2.ConvertGlobalVariables(key, value) } // gatherSlaveStatuses can be used to get replication analytics @@ -782,42 +781,42 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum case "Queries": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["queries"] = i } case "Questions": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["questions"] = i } case "Slow_queries": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["slow_queries"] = i } case "Connections": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["connections"] = i } case "Syncs": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["syncs"] = i } case "Uptime": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["uptime"] = i } @@ -965,7 +964,7 @@ func (m *Mysql) GatherUserStatisticsStatuses(db *sql.DB, serv string, acc telegr case *string: fields[cols[i]] = *v default: - return fmt.Errorf("Unknown column type - %T", v) + return fmt.Errorf("unknown column type - %T", v) } } acc.AddFields("mysql_user_stats", fields, tags) @@ -1129,7 +1128,7 @@ func getColSlice(l int) ([]interface{}, error) { }, nil } - return nil, fmt.Errorf("Not Supported - %d columns", l) + return nil, fmt.Errorf("not Supported - %d columns", l) } // gatherPerfTableIOWaits can be used to get total count and time @@ -1855,9 +1854,8 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula func (m *Mysql) parseValue(value sql.RawBytes) (interface{}, bool) { if m.MetricVersion < 2 { return v1.ParseValue(value) - } else { - return parseValue(value) } + return parseValue(value) } // parseValue can be used to convert values such as "ON","OFF","Yes","No" to 0,1 diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index 0481a3b08241e..f213826f8fa13 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -3,7 +3,6 @@ package opcua_client import ( "context" "fmt" - "log" "net/url" "sort" "strings" @@ -198,7 +197,10 @@ func (o *OpcUA) Init() error { return err } - o.setupOptions() + err = o.setupOptions() + if err != nil { + return err + } tags := map[string]string{ "endpoint": o.Endpoint, @@ -207,7 +209,6 @@ func (o *OpcUA) Init() error { o.ReadSuccess = selfstat.Register("opcua", "read_success", tags) return nil - } func (o *OpcUA) validateEndpoint() error { @@ -353,10 +354,11 @@ func (o *OpcUA) validateOPCTags() error { if _, ok := nameEncountered[mp]; ok { return fmt.Errorf("name '%s' is duplicated (metric name '%s', tags '%s')", mp.fieldName, mp.metricName, mp.tags) - } else { - //add it to the set - nameEncountered[mp] = struct{}{} } + + //add it to the set + nameEncountered[mp] = struct{}{} + //search identifier type switch node.tag.IdentifierType { case "s", "i", "g", "b": @@ -402,14 +404,14 @@ func Connect(o *OpcUA) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.ConnectTimeout)) defer cancel() if err := o.client.Connect(ctx); err != nil { - return fmt.Errorf("Error in Client Connection: %s", err) + return fmt.Errorf("error in Client Connection: %s", err) } regResp, err := o.client.RegisterNodes(&ua.RegisterNodesRequest{ NodesToRegister: o.nodeIDs, }) if err != nil { - return fmt.Errorf("RegisterNodes failed: %v", err) + return fmt.Errorf("registerNodes failed: %v", err) } o.req = &ua.ReadRequest{ @@ -420,7 +422,7 @@ func Connect(o *OpcUA) error { err = o.getData() if err != nil { - return fmt.Errorf("Get Data Failed: %v", err) + return fmt.Errorf("get Data Failed: %v", err) } default: @@ -430,11 +432,10 @@ func Connect(o *OpcUA) error { } func (o *OpcUA) setupOptions() error { - // Get a list of the endpoints for our target server endpoints, err := opcua.GetEndpoints(o.Endpoint) if err != nil { - log.Fatal(err) + return err } if o.Certificate == "" && o.PrivateKey == "" { @@ -457,7 +458,7 @@ func (o *OpcUA) getData() error { o.ReadSuccess.Incr(1) for i, d := range resp.Results { if d.Status != ua.StatusOK { - return fmt.Errorf("Status not OK: %v", d.Status) + return fmt.Errorf("status not OK: %v", d.Status) } o.nodeData[i].TagName = o.nodes[i].tag.FieldName if d.Value != nil { diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index d5ed7e4cc1c3f..af9a11e4b24bb 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -128,7 +128,7 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error { return nil } } else { - acc.AddError(fmt.Errorf("Invalid setting for ssl: %s", o.TLS)) + acc.AddError(fmt.Errorf("invalid setting for ssl: %s", o.TLS)) return nil } } else { @@ -208,15 +208,15 @@ func dnToMetric(dn string, o *Openldap) string { metricParts[i], metricParts[j] = metricParts[j], metricParts[i] } return strings.Join(metricParts[1:], "_") - } else { - metricName := strings.Trim(dn, " ") - metricName = strings.Replace(metricName, " ", "_", -1) - metricName = strings.ToLower(metricName) - metricName = strings.TrimPrefix(metricName, "cn=") - metricName = strings.Replace(metricName, strings.ToLower("cn=Monitor"), "", -1) - metricName = strings.Replace(metricName, "cn=", "_", -1) - return strings.Replace(metricName, ",", "", -1) } + + metricName := strings.Trim(dn, " ") + metricName = strings.Replace(metricName, " ", "_", -1) + metricName = strings.ToLower(metricName) + metricName = strings.TrimPrefix(metricName, "cn=") + metricName = strings.Replace(metricName, strings.ToLower("cn=Monitor"), "", -1) + metricName = strings.Replace(metricName, "cn=", "_", -1) + return strings.Replace(metricName, ",", "", -1) } func init() { diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index c3f76f2efa850..bfff00562ac19 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -112,8 +112,7 @@ func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { fields[field], err = strconv.ParseFloat(value, 64) if err != nil { - acc.AddError(fmt.Errorf("Expected a numerical value for %s = %v\n", - stat, value)) + acc.AddError(fmt.Errorf("expected a numerical value for %s = %v", stat, value)) } } diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go index f00bfc824de28..0e54164c64620 100644 --- a/plugins/inputs/passenger/passenger.go +++ b/plugins/inputs/passenger/passenger.go @@ -170,7 +170,7 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { decoder := xml.NewDecoder(bytes.NewReader(stat)) decoder.CharsetReader = charset.NewReaderLabel if err := decoder.Decode(&p); err != nil { - return fmt.Errorf("Cannot parse input with error: %v\n", err) + return fmt.Errorf("cannot parse input with error: %v", err) } tags := map[string]string{ diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index ce1ebe462cfbe..fc03f235b8082 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -61,7 +61,7 @@ func Test_Invalid_Xml(t *testing.T) { err := r.Gather(&acc) require.Error(t, err) - assert.Equal(t, "Cannot parse input with error: EOF\n", err.Error()) + assert.Equal(t, "cannot parse input with error: EOF", err.Error()) } // We test this by ensure that the error message match the path of default cli diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index 9b42d91bd961a..d23dc526dda8d 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -24,7 +24,7 @@ func newFcgiClient(h string, args ...interface{}) (*conn, error) { laddr := net.UnixAddr{Name: args[0].(string), Net: h} con, err = net.DialUnix(h, nil, &laddr) default: - err = errors.New("fcgi: we only accept int (port) or string (socket) params.") + err = errors.New("fcgi: we only accept int (port) or string (socket) params") } fcgi := &conn{ rwc: con, diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index dd7d6a63074a3..e0f21176ae21f 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -144,7 +144,7 @@ func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") { u, err := url.Parse(addr) if err != nil { - return fmt.Errorf("Unable parse server address '%s': %s", addr, err) + return fmt.Errorf("unable parse server address '%s': %s", addr, err) } socketAddr := strings.Split(u.Host, ":") fcgiIp := socketAddr[0] @@ -188,9 +188,8 @@ func (p *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumula if len(fpmErr) == 0 && err == nil { importMetric(bytes.NewReader(fpmOutput), acc, addr) return nil - } else { - return fmt.Errorf("Unable parse phpfpm status. Error: %v %v", string(fpmErr), err) } + return fmt.Errorf("unable parse phpfpm status, error: %v %v", string(fpmErr), err) } // Gather stat using http protocol diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 1bec73f4ea585..44a32de8bfa38 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -162,7 +162,7 @@ func (p *Ping) nativePing(destination string) (*pingStats, error) { pinger, err := ping.NewPinger(destination) if err != nil { - return nil, fmt.Errorf("Failed to create new pinger: %w", err) + return nil, fmt.Errorf("failed to create new pinger: %w", err) } // Required for windows. Despite the method name, this should work without the need to elevate privileges and has been tested on Windows 10 @@ -197,7 +197,7 @@ func (p *Ping) nativePing(destination string) (*pingStats, error) { pinger.Count = p.Count err = pinger.Run() if err != nil { - return nil, fmt.Errorf("Failed to run pinger: %w", err) + return nil, fmt.Errorf("failed to run pinger: %w", err) } ps.Statistics = *pinger.Statistics() @@ -287,11 +287,11 @@ func percentile(values durationSlice, perc int) time.Duration { if rankInteger >= count-1 { return values[count-1] - } else { - upper := values[rankInteger+1] - lower := values[rankInteger] - return lower + time.Duration(rankFraction*float64(upper-lower)) } + + upper := values[rankInteger+1] + lower := values[rankInteger] + return lower + time.Duration(rankFraction*float64(upper-lower)) } // Init ensures the plugin is configured correctly. @@ -321,11 +321,11 @@ func (p *Ping) Init() error { } else { i, err := net.InterfaceByName(p.Interface) if err != nil { - return fmt.Errorf("Failed to get interface: %w", err) + return fmt.Errorf("failed to get interface: %w", err) } addrs, err := i.Addrs() if err != nil { - return fmt.Errorf("Failed to get the address of interface: %w", err) + return fmt.Errorf("failed to get the address of interface: %w", err) } p.sourceAddress = addrs[0].(*net.IPNet).IP.String() } diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 044ba1fc4a8ca..000e12a8ad2c8 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -8,7 +8,7 @@ import ( "strings" "time" - _ "github.com/jackc/pgx/stdlib" + _ "github.com/jackc/pgx/stdlib" //to register stdlib from PostgreSQL Driver and Toolkit "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go index ca74bd0f59442..de04fecb56fc1 100644 --- a/plugins/inputs/processes/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -189,7 +189,7 @@ func (t *tester) testProcFile2(_ string) ([]byte, error) { } func testExecPSError() ([]byte, error) { - return []byte("\nSTAT\nD\nI\nL\nR\nR+\nS\nS+\nSNs\nSs\nU\nZ\n"), fmt.Errorf("ERROR!") + return []byte("\nSTAT\nD\nI\nL\nR\nR+\nS\nS+\nSNs\nSs\nU\nZ\n"), fmt.Errorf("error") } const testProcStat = `10 (rcuob/0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index 042929f0864cf..c10624fedcbbe 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -71,7 +71,7 @@ func (p *Proc) Percent(interval time.Duration) (float64, error) { cpu_perc, err := p.Process.Percent(time.Duration(0)) if !p.hasCPUTimes && err == nil { p.hasCPUTimes = true - return 0, fmt.Errorf("Must call Percent twice to compute percent cpu.") + return 0, fmt.Errorf("must call Percent twice to compute percent cpu") } return cpu_perc, err } diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index c2235c6929d3d..c4b3cb3406f15 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -12,7 +12,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - . "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" + "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" "github.com/matttproud/golang_protobuf_extensions/pbutil" dto "github.com/prometheus/client_model/go" @@ -55,7 +55,7 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { for metricName, mf := range metricFamilies { for _, m := range mf.Metric { // reading tags - tags := MakeLabels(m, nil) + tags := common.MakeLabels(m, nil) // reading fields var fields map[string]interface{} @@ -82,7 +82,7 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { } else { t = now } - metric, err := metric.New(metricName, tags, fields, t, ValueType(mf.GetType())) + metric, err := metric.New(metricName, tags, fields, t, common.ValueType(mf.GetType())) if err == nil { metrics = append(metrics, metric) } diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go index dc6b03620b153..9bf595761bb24 100644 --- a/plugins/inputs/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -55,7 +55,7 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { for _, serv := range r.Servers { u, err := url.Parse(serv) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err)) + acc.AddError(fmt.Errorf("unable to parse to address '%s': %s", serv, err)) continue } else if u.Scheme == "" { // fallback to simple string based address (i.e. "10.0.0.1:10000") @@ -97,7 +97,7 @@ func (r *RethinkDB) gatherServer(server *Server, acc telegraf.Accumulator) error server.session, err = gorethink.Connect(connectOpts) if err != nil { - return fmt.Errorf("Unable to connect to RethinkDB, %s\n", err.Error()) + return fmt.Errorf("unable to connect to RethinkDB, %s", err.Error()) } defer server.session.Close() diff --git a/plugins/inputs/rethinkdb/rethinkdb_server.go b/plugins/inputs/rethinkdb/rethinkdb_server.go index c10605aa6d83e..521f2b7e53d53 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server.go @@ -22,24 +22,24 @@ type Server struct { func (s *Server) gatherData(acc telegraf.Accumulator) error { if err := s.getServerStatus(); err != nil { - return fmt.Errorf("Failed to get server_status, %s\n", err) + return fmt.Errorf("failed to get server_status, %s", err) } if err := s.validateVersion(); err != nil { - return fmt.Errorf("Failed version validation, %s\n", err.Error()) + return fmt.Errorf("failed version validation, %s", err.Error()) } if err := s.addClusterStats(acc); err != nil { fmt.Printf("error adding cluster stats, %s\n", err.Error()) - return fmt.Errorf("Error adding cluster stats, %s\n", err.Error()) + return fmt.Errorf("error adding cluster stats, %s", err.Error()) } if err := s.addMemberStats(acc); err != nil { - return fmt.Errorf("Error adding member stats, %s\n", err.Error()) + return fmt.Errorf("error adding member stats, %s", err.Error()) } if err := s.addTableStats(acc); err != nil { - return fmt.Errorf("Error adding table stats, %s\n", err.Error()) + return fmt.Errorf("error adding table stats, %s", err.Error()) } return nil @@ -58,7 +58,7 @@ func (s *Server) validateVersion() error { majorVersion, err := strconv.Atoi(strings.Split(versionString, "")[0]) if err != nil || majorVersion < 2 { - return fmt.Errorf("unsupported major version %s\n", versionString) + return fmt.Errorf("unsupported major version %s", versionString) } return nil } @@ -80,7 +80,7 @@ func (s *Server) getServerStatus() error { } host, port, err := net.SplitHostPort(s.Url.Host) if err != nil { - return fmt.Errorf("unable to determine provided hostname from %s\n", s.Url.Host) + return fmt.Errorf("unable to determine provided hostname from %s", s.Url.Host) } driverPort, _ := strconv.Atoi(port) for _, ss := range serverStatuses { @@ -113,12 +113,12 @@ var ClusterTracking = []string{ func (s *Server) addClusterStats(acc telegraf.Accumulator) error { cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"cluster"}).Run(s.session) if err != nil { - return fmt.Errorf("cluster stats query error, %s\n", err.Error()) + return fmt.Errorf("cluster stats query error, %s", err.Error()) } defer cursor.Close() var clusterStats stats if err := cursor.One(&clusterStats); err != nil { - return fmt.Errorf("failure to parse cluster stats, %s\n", err.Error()) + return fmt.Errorf("failure to parse cluster stats, %s", err.Error()) } tags := s.getDefaultTags() @@ -141,12 +141,12 @@ var MemberTracking = []string{ func (s *Server) addMemberStats(acc telegraf.Accumulator) error { cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"server", s.serverStatus.Id}).Run(s.session) if err != nil { - return fmt.Errorf("member stats query error, %s\n", err.Error()) + return fmt.Errorf("member stats query error, %s", err.Error()) } defer cursor.Close() var memberStats stats if err := cursor.One(&memberStats); err != nil { - return fmt.Errorf("failure to parse member stats, %s\n", err.Error()) + return fmt.Errorf("failure to parse member stats, %s", err.Error()) } tags := s.getDefaultTags() @@ -165,7 +165,7 @@ var TableTracking = []string{ func (s *Server) addTableStats(acc telegraf.Accumulator) error { tablesCursor, err := gorethink.DB("rethinkdb").Table("table_status").Run(s.session) if err != nil { - return fmt.Errorf("table stats query error, %s\n", err.Error()) + return fmt.Errorf("table stats query error, %s", err.Error()) } defer tablesCursor.Close() @@ -179,12 +179,12 @@ func (s *Server) addTableStats(acc telegraf.Accumulator) error { Get([]string{"table_server", table.Id, s.serverStatus.Id}). Run(s.session) if err != nil { - return fmt.Errorf("table stats query error, %s\n", err.Error()) + return fmt.Errorf("table stats query error, %s", err.Error()) } defer cursor.Close() var ts tableStats if err := cursor.One(&ts); err != nil { - return fmt.Errorf("failure to parse table stats, %s\n", err.Error()) + return fmt.Errorf("failure to parse table stats, %s", err.Error()) } tags := s.getDefaultTags() diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index 62a3966fa451a..7e37fc32b8e3e 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -300,15 +300,15 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { if err != nil { s.Log.Errorf("Reading SNMPtranslate file error: %s", err.Error()) return err - } else { - for _, line := range strings.Split(string(data), "\n") { - oids := strings.Fields(string(line)) - if len(oids) == 2 && oids[1] != "" { - oid_name := oids[0] - oid := oids[1] - fillnode(s.initNode, oid_name, strings.Split(string(oid), ".")) - s.nameToOid[oid_name] = oid - } + } + + for _, line := range strings.Split(string(data), "\n") { + oids := strings.Fields(line) + if len(oids) == 2 && oids[1] != "" { + oid_name := oids[0] + oid := oids[1] + fillnode(s.initNode, oid_name, strings.Split(oid, ".")) + s.nameToOid[oid_name] = oid } } } diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 824dbe446d5be..abda443152359 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -155,7 +155,7 @@ func (s *SystemPS) NetConnections() ([]net.ConnectionStat, error) { func (s *SystemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) { m, err := disk.IOCounters(names...) - if err == internal.NotImplementedError { + if err == internal.ErrorNotImplemented { return nil, nil } diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index fdb5b40cc3abd..557885e1b26a0 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -290,17 +290,17 @@ func parseLine(parser parsers.Parser, line string, firstLine bool) ([]telegraf.M // line from the file. if firstLine { return parser.Parse([]byte(line)) - } else { - m, err := parser.ParseLine(line) - if err != nil { - return nil, err - } + } - if m != nil { - return []telegraf.Metric{m}, nil - } - return []telegraf.Metric{}, nil + m, err := parser.ParseLine(line) + if err != nil { + return nil, err + } + + if m != nil { + return []telegraf.Metric{m}, nil } + return []telegraf.Metric{}, nil default: return parser.Parse([]byte(line)) } diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index 893f00c0a8cdd..c2dcce699d55b 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -149,8 +149,7 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error { sectionMap[section][field], err = strconv.ParseUint(value, 10, 64) if err != nil { - acc.AddError(fmt.Errorf("Expected a numeric value for %s = %v\n", - stat, value)) + acc.AddError(fmt.Errorf("expected a numeric value for %s = %v", stat, value)) } } diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index dd8ff7ea4e3b9..0cf54f3027180 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -72,9 +72,8 @@ func (z *Zookeeper) dial(ctx context.Context, addr string) (net.Conn, error) { dialer.Deadline = deadline } return tls.DialWithDialer(&dialer, "tcp", addr, z.tlsConfig) - } else { - return dialer.DialContext(ctx, "tcp", addr) } + return dialer.DialContext(ctx, "tcp", addr) } // Gather reads stats from all configured servers accumulates stats @@ -132,7 +131,7 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr service := strings.Split(address, ":") if len(service) != 2 { - return fmt.Errorf("Invalid service address: %s", address) + return fmt.Errorf("invalid service address: %s", address) } fields := make(map[string]interface{}) diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index a5f8438670093..279bbda3bdd89 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -1,6 +1,7 @@ package all import ( + //Blank imports for plugins to register themselves _ "github.com/influxdata/telegraf/plugins/outputs/amon" _ "github.com/influxdata/telegraf/plugins/outputs/amqp" _ "github.com/influxdata/telegraf/plugins/outputs/application_insights" diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go index 10298173f66fb..52104eaf45e2b 100644 --- a/plugins/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "fmt" - "log" "net/http" "strings" @@ -14,9 +13,10 @@ import ( ) type Amon struct { - ServerKey string - AmonInstance string - Timeout internal.Duration + ServerKey string `toml:"server_key"` + AmonInstance string `toml:"amon_instance"` + Timeout internal.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` client *http.Client } @@ -76,7 +76,7 @@ func (a *Amon) Write(metrics []telegraf.Metric) error { metricCounter++ } } else { - log.Printf("I! unable to build Metric for %s, skipping\n", m.Name()) + a.Log.Infof("Unable to build Metric for %s, skipping", m.Name()) } } @@ -84,22 +84,22 @@ func (a *Amon) Write(metrics []telegraf.Metric) error { copy(ts.Series, tempSeries[0:]) tsBytes, err := json.Marshal(ts) if err != nil { - return fmt.Errorf("unable to marshal TimeSeries, %s\n", err.Error()) + return fmt.Errorf("unable to marshal TimeSeries, %s", err.Error()) } req, err := http.NewRequest("POST", a.authenticatedUrl(), bytes.NewBuffer(tsBytes)) if err != nil { - return fmt.Errorf("unable to create http.Request, %s\n", err.Error()) + return fmt.Errorf("unable to create http.Request, %s", err.Error()) } req.Header.Add("Content-Type", "application/json") resp, err := a.client.Do(req) if err != nil { - return fmt.Errorf("error POSTing metrics, %s\n", err.Error()) + return fmt.Errorf("error POSTing metrics, %s", err.Error()) } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 209 { - return fmt.Errorf("received bad status code, %d\n", resp.StatusCode) + return fmt.Errorf("received bad status code, %d", resp.StatusCode) } return nil diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index 0c7e04da7e14d..96e0970b27c52 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -3,7 +3,6 @@ package amqp import ( "bytes" "fmt" - "log" "strings" "time" @@ -55,6 +54,7 @@ type AMQP struct { Timeout internal.Duration `toml:"timeout"` UseBatchFormat bool `toml:"use_batch_format"` ContentEncoding string `toml:"content_encoding"` + Log telegraf.Logger `toml:"-"` tls.ClientConfig serializer serializers.Serializer @@ -267,7 +267,7 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error { } if q.sentMessages >= q.MaxMessages && q.MaxMessages > 0 { - log.Printf("D! Output [amqp] sent MaxMessages; closing connection") + q.Log.Debug("Sent MaxMessages; closing connection") q.client.Close() q.client = nil } @@ -296,22 +296,22 @@ func (q *AMQP) publish(key string, body []byte) error { func (q *AMQP) serialize(metrics []telegraf.Metric) ([]byte, error) { if q.UseBatchFormat { return q.serializer.SerializeBatch(metrics) - } else { - var buf bytes.Buffer - for _, metric := range metrics { - octets, err := q.serializer.Serialize(metric) - if err != nil { - log.Printf("D! [outputs.amqp] Could not serialize metric: %v", err) - continue - } - _, err = buf.Write(octets) - if err != nil { - return nil, err - } + } + + var buf bytes.Buffer + for _, metric := range metrics { + octets, err := q.serializer.Serialize(metric) + if err != nil { + q.Log.Debugf("Could not serialize metric: %v", err) + continue + } + _, err = buf.Write(octets) + if err != nil { + return nil, err } - body := buf.Bytes() - return body, nil } + body := buf.Bytes() + return body, nil } func (q *AMQP) makeClientConfig() (*ClientConfig, error) { diff --git a/plugins/outputs/application_insights/application_insights.go b/plugins/outputs/application_insights/application_insights.go index 3ab16af6fc313..5e41d629a7e2f 100644 --- a/plugins/outputs/application_insights/application_insights.go +++ b/plugins/outputs/application_insights/application_insights.go @@ -2,7 +2,6 @@ package application_insights import ( "fmt" - "log" "math" "time" "unsafe" @@ -23,23 +22,18 @@ type DiagnosticsMessageSubscriber interface { } type ApplicationInsights struct { - InstrumentationKey string - EndpointURL string - Timeout internal.Duration - EnableDiagnosticLogging bool - ContextTagSources map[string]string - diagMsgSubscriber DiagnosticsMessageSubscriber - transmitter TelemetryTransmitter - diagMsgListener appinsights.DiagnosticsMessageListener + InstrumentationKey string `toml:"instrumentation_key"` + EndpointURL string `toml:"endpoint_url"` + Timeout internal.Duration `toml:"timeout"` + EnableDiagnosticLogging bool `toml:"enable_diagnostic_logging"` + ContextTagSources map[string]string `toml:"context_tag_sources"` + Log telegraf.Logger `toml:"-"` + + diagMsgSubscriber DiagnosticsMessageSubscriber + transmitter TelemetryTransmitter + diagMsgListener appinsights.DiagnosticsMessageListener } -const ( - Error = "E! " - Warning = "W! " - Info = "I! " - Debug = "D! " -) - var ( sampleConfig = ` ## Instrumentation key of the Application Insights resource. @@ -76,7 +70,7 @@ func (a *ApplicationInsights) Description() string { func (a *ApplicationInsights) Connect() error { if a.InstrumentationKey == "" { - return fmt.Errorf("Instrumentation key is required") + return fmt.Errorf("instrumentation key is required") } if a.transmitter == nil { @@ -85,7 +79,7 @@ func (a *ApplicationInsights) Connect() error { if a.EnableDiagnosticLogging && a.diagMsgSubscriber != nil { a.diagMsgListener = a.diagMsgSubscriber.Subscribe(func(msg string) error { - logOutputMsg(Info, "%s", msg) + a.Log.Info(msg) return nil }) } @@ -117,9 +111,9 @@ func (a *ApplicationInsights) Close() error { select { case <-a.transmitter.Close(): - logOutputMsg(Info, "Closed") + a.Log.Info("Closed") case <-time.After(a.Timeout.Duration): - logOutputMsg(Warning, "Close operation timed out after %v", a.Timeout.Duration) + a.Log.Warnf("Close operation timed out after %v", a.Timeout.Duration) } return nil @@ -139,15 +133,12 @@ func (a *ApplicationInsights) createTelemetry(metric telegraf.Metric) []appinsig telemetry := a.createSimpleMetricTelemetry(metric, "value", false) if telemetry != nil { return []appinsights.Telemetry{telemetry} - } else { - return nil } - } else { - // AppInsights does not support multi-dimensional metrics at the moment, so we need to disambiguate resulting telemetry - // by adding field name as the telemetry name suffix - retval := a.createTelemetryForUnusedFields(metric, nil) - return retval + return nil } + // AppInsights does not support multi-dimensional metrics at the moment, so we need to disambiguate resulting telemetry + // by adding field name as the telemetry name suffix + return a.createTelemetryForUnusedFields(metric, nil) } func (a *ApplicationInsights) createSimpleMetricTelemetry(metric telegraf.Metric, fieldName string, useFieldNameInTelemetryName bool) *appinsights.MetricTelemetry { @@ -251,7 +242,7 @@ func getFloat64TelemetryPropertyValue( return metricValue, nil } - return 0.0, fmt.Errorf("No field from the candidate list was found in the metric") + return 0.0, fmt.Errorf("no field from the candidate list was found in the metric") } func getIntTelemetryPropertyValue( @@ -277,7 +268,7 @@ func getIntTelemetryPropertyValue( return metricValue, nil } - return 0, fmt.Errorf("No field from the candidate list was found in the metric") + return 0, fmt.Errorf("no field from the candidate list was found in the metric") } func contains(set []string, val string) bool { @@ -320,11 +311,11 @@ func toInt(value interface{}) (int, error) { case uint64: if is32Bit { if v > math.MaxInt32 { - return 0, fmt.Errorf("Value [%d] out of range of 32-bit integers", v) + return 0, fmt.Errorf("value [%d] out of range of 32-bit integers", v) } } else { if v > math.MaxInt64 { - return 0, fmt.Errorf("Value [%d] out of range of 64-bit integers", v) + return 0, fmt.Errorf("value [%d] out of range of 64-bit integers", v) } } @@ -333,7 +324,7 @@ func toInt(value interface{}) (int, error) { case int64: if is32Bit { if v > math.MaxInt32 || v < math.MinInt32 { - return 0, fmt.Errorf("Value [%d] out of range of 32-bit integers", v) + return 0, fmt.Errorf("value [%d] out of range of 32-bit integers", v) } } @@ -343,10 +334,6 @@ func toInt(value interface{}) (int, error) { return 0.0, fmt.Errorf("[%s] cannot be converted to an int value", value) } -func logOutputMsg(level string, format string, v ...interface{}) { - log.Printf(level+"[outputs.application_insights] "+format, v...) -} - func init() { outputs.Add("application_insights", func() telegraf.Output { return &ApplicationInsights{ diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index 5a017823c02db..4553444244dae 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -1,6 +1,7 @@ package application_insights import ( + "github.com/influxdata/telegraf/testutil" "math" "testing" "time" @@ -25,6 +26,7 @@ func TestConnectFailsIfNoIkey(t *testing.T) { transmitter: transmitter, // Very long timeout to ensure we do not rely on timeouts for closing the transmitter Timeout: internal.Duration{Duration: time.Hour}, + Log: testutil.Logger{}, } err := ai.Connect() @@ -40,6 +42,7 @@ func TestOutputCloseTimesOut(t *testing.T) { ai := ApplicationInsights{ transmitter: transmitter, Timeout: internal.Duration{Duration: time.Millisecond * 50}, + Log: testutil.Logger{}, } err := ai.Close() @@ -67,6 +70,7 @@ func TestCloseRemovesDiagMsgListener(t *testing.T) { EnableDiagnosticLogging: true, diagMsgSubscriber: diagMsgSubscriber, InstrumentationKey: "1234", // Fake, but necessary to enable tracking + Log: testutil.Logger{}, } err := ai.Connect() @@ -150,6 +154,7 @@ func TestAggregateMetricCreated(t *testing.T) { ai := ApplicationInsights{ transmitter: transmitter, InstrumentationKey: "1234", // Fake, but necessary to enable tracking + Log: testutil.Logger{}, } err = ai.Connect() @@ -208,6 +213,7 @@ func TestSimpleMetricCreated(t *testing.T) { ai := ApplicationInsights{ transmitter: transmitter, InstrumentationKey: "1234", // Fake, but necessary to enable tracking + Log: testutil.Logger{}, } err = ai.Connect() @@ -278,6 +284,7 @@ func TestTagsAppliedToTelemetry(t *testing.T) { ai := ApplicationInsights{ transmitter: transmitter, InstrumentationKey: "1234", // Fake, but necessary to enable tracking + Log: testutil.Logger{}, } err = ai.Connect() @@ -319,6 +326,7 @@ func TestContextTagsSetOnSimpleTelemetry(t *testing.T) { "ai.cloud.roleInstance": "kubernetes_pod_name", "ai.user.id": "nonexistent", }, + Log: testutil.Logger{}, } err = ai.Connect() @@ -356,6 +364,7 @@ func TestContextTagsSetOnAggregateTelemetry(t *testing.T) { "ai.cloud.roleInstance": "kubernetes_pod_name", "ai.user.id": "nonexistent", }, + Log: testutil.Logger{}, } err = ai.Connect() diff --git a/plugins/outputs/application_insights/transmitter.go b/plugins/outputs/application_insights/transmitter.go index 024ea32809fb0..d66f069783048 100644 --- a/plugins/outputs/application_insights/transmitter.go +++ b/plugins/outputs/application_insights/transmitter.go @@ -11,11 +11,11 @@ type Transmitter struct { func NewTransmitter(ikey string, endpointURL string) *Transmitter { if len(endpointURL) == 0 { return &Transmitter{client: appinsights.NewTelemetryClient(ikey)} - } else { - telemetryConfig := appinsights.NewTelemetryConfiguration(ikey) - telemetryConfig.EndpointUrl = endpointURL - return &Transmitter{client: appinsights.NewTelemetryClientFromConfig(telemetryConfig)} } + + telemetryConfig := appinsights.NewTelemetryConfiguration(ikey) + telemetryConfig.EndpointUrl = endpointURL + return &Transmitter{client: appinsights.NewTelemetryClientFromConfig(telemetryConfig)} } func (t *Transmitter) Track(telemetry appinsights.Telemetry) { diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index a90dac049d6eb..cd57805e172cf 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -8,7 +8,6 @@ import ( "fmt" "hash/fnv" "io/ioutil" - "log" "net/http" "regexp" "strings" @@ -27,11 +26,12 @@ import ( // service type AzureMonitor struct { Timeout internal.Duration - NamespacePrefix string `toml:"namespace_prefix"` - StringsAsDimensions bool `toml:"strings_as_dimensions"` - Region string - ResourceID string `toml:"resource_id"` - EndpointUrl string `toml:"endpoint_url"` + NamespacePrefix string `toml:"namespace_prefix"` + StringsAsDimensions bool `toml:"strings_as_dimensions"` + Region string `toml:"region"` + ResourceID string `toml:"resource_id"` + EndpointUrl string `toml:"endpoint_url"` + Log telegraf.Logger `toml:"-"` url string auth autorest.Authorizer @@ -62,14 +62,14 @@ func (m *virtualMachineMetadata) ResourceID() string { m.Compute.ResourceGroupName, m.Compute.VMScaleSetName, ) - } else { - return fmt.Sprintf( - resourceIDTemplate, - m.Compute.SubscriptionID, - m.Compute.ResourceGroupName, - m.Compute.Name, - ) } + + return fmt.Sprintf( + resourceIDTemplate, + m.Compute.SubscriptionID, + m.Compute.ResourceGroupName, + m.Compute.Name, + ) } type dimension struct { @@ -189,7 +189,7 @@ func (a *AzureMonitor) Connect() error { a.url = fmt.Sprintf(urlOverrideTemplate, endpointUrl, resourceID) } - log.Printf("D! Writing to Azure Monitor URL: %s", a.url) + a.Log.Debugf("Writing to Azure Monitor URL: %s", a.url) a.auth, err = auth.NewAuthorizerFromEnvironmentWithResource(defaultAuthResource) if err != nil { @@ -279,14 +279,14 @@ func (a *AzureMonitor) Write(metrics []telegraf.Metric) error { if azm, ok := azmetrics[id]; !ok { amm, err := translate(m, a.NamespacePrefix) if err != nil { - log.Printf("E! [outputs.azure_monitor]: could not create azure metric for %q; discarding point", m.Name()) + a.Log.Errorf("Could not create azure metric for %q; discarding point", m.Name()) continue } azmetrics[id] = amm } else { amm, err := translate(m, a.NamespacePrefix) if err != nil { - log.Printf("E! [outputs.azure_monitor]: could not create azure metric for %q; discarding point", m.Name()) + a.Log.Errorf("Could not create azure metric for %q; discarding point", m.Name()) continue } @@ -611,7 +611,7 @@ func (a *AzureMonitor) Push() []telegraf.Metric { ) if err != nil { - log.Printf("E! [outputs.azure_monitor]: could not create metric for aggregation %q; discarding point", agg.name) + a.Log.Errorf("Could not create metric for aggregation %q; discarding point", agg.name) } metrics = append(metrics, m) diff --git a/plugins/outputs/azure_monitor/azure_monitor_test.go b/plugins/outputs/azure_monitor/azure_monitor_test.go index 6fb40805ecd3e..c702f46b0e0b5 100644 --- a/plugins/outputs/azure_monitor/azure_monitor_test.go +++ b/plugins/outputs/azure_monitor/azure_monitor_test.go @@ -29,6 +29,7 @@ func TestAggregate(t *testing.T) { plugin: &AzureMonitor{ Region: "test", ResourceID: "/test", + Log: testutil.Logger{}, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -52,6 +53,7 @@ func TestAggregate(t *testing.T) { plugin: &AzureMonitor{ Region: "test", ResourceID: "/test", + Log: testutil.Logger{}, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -75,6 +77,7 @@ func TestAggregate(t *testing.T) { Region: "test", ResourceID: "/test", StringsAsDimensions: true, + Log: testutil.Logger{}, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -116,6 +119,7 @@ func TestAggregate(t *testing.T) { plugin: &AzureMonitor{ Region: "test", ResourceID: "/test", + Log: testutil.Logger{}, cache: make(map[time.Time]map[uint64]*aggregate, 36), }, metrics: []telegraf.Metric{ @@ -153,6 +157,7 @@ func TestAggregate(t *testing.T) { plugin: &AzureMonitor{ Region: "test", ResourceID: "/test", + Log: testutil.Logger{}, cache: make(map[time.Time]map[uint64]*aggregate, 36), }, metrics: []telegraf.Metric{ @@ -262,6 +267,7 @@ func TestWrite(t *testing.T) { plugin: &AzureMonitor{ Region: "test", ResourceID: "/test", + Log: testutil.Logger{}, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -282,6 +288,7 @@ func TestWrite(t *testing.T) { plugin: &AzureMonitor{ Region: "test", ResourceID: "/test", + Log: testutil.Logger{}, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -308,6 +315,7 @@ func TestWrite(t *testing.T) { plugin: &AzureMonitor{ Region: "test", ResourceID: "/test", + Log: testutil.Logger{}, }, metrics: []telegraf.Metric{ testutil.MustMetric( diff --git a/plugins/outputs/cloud_pubsub/pubsub.go b/plugins/outputs/cloud_pubsub/pubsub.go index 826a75e1c1c68..9fd89eadf0897 100644 --- a/plugins/outputs/cloud_pubsub/pubsub.go +++ b/plugins/outputs/cloud_pubsub/pubsub.go @@ -4,7 +4,6 @@ import ( "context" "encoding/base64" "fmt" - "log" "sync" "cloud.google.com/go/pubsub" @@ -79,6 +78,8 @@ type PubSub struct { PublishTimeout internal.Duration `toml:"publish_timeout"` Base64Data bool `toml:"base64_data"` + Log telegraf.Logger `toml:"-"` + t topic c *pubsub.Client @@ -111,9 +112,8 @@ func (ps *PubSub) Connect() error { if ps.stubTopic == nil { return ps.initPubSubClient() - } else { - return nil } + return nil } func (ps *PubSub) Close() error { @@ -230,7 +230,7 @@ func (ps *PubSub) toMessages(metrics []telegraf.Metric) ([]*pubsub.Message, erro for i, m := range metrics { b, err := ps.serializer.Serialize(m) if err != nil { - log.Printf("D! [outputs.cloud_pubsub] Could not serialize metric: %v", err) + ps.Log.Debugf("Could not serialize metric: %v", err) continue } diff --git a/plugins/outputs/cratedb/cratedb.go b/plugins/outputs/cratedb/cratedb.go index f6840cc38958b..c520ee3d86e7f 100644 --- a/plugins/outputs/cratedb/cratedb.go +++ b/plugins/outputs/cratedb/cratedb.go @@ -14,7 +14,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" - _ "github.com/jackc/pgx/stdlib" + _ "github.com/jackc/pgx/stdlib" //to register stdlib from PostgreSQL Driver and Toolkit ) const MaxInt64 = int64(^uint64(0) >> 1) @@ -126,9 +126,8 @@ func escapeValue(val interface{}) (string, error) { // possible value. if t <= uint64(MaxInt64) { return strconv.FormatInt(int64(t), 10), nil - } else { - return strconv.FormatInt(MaxInt64, 10), nil } + return strconv.FormatInt(MaxInt64, 10), nil case bool: return strconv.FormatBool(t), nil case time.Time: diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 2d1a937883655..3e3e5ac9141fa 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "fmt" - "log" "math" "net/http" "net/url" @@ -16,10 +15,11 @@ import ( ) type Datadog struct { - Apikey string - Timeout internal.Duration + Apikey string `toml:"apikey"` + Timeout internal.Duration `toml:"timeout"` + URL string `toml:"url"` + Log telegraf.Logger `toml:"-"` - URL string `toml:"url"` client *http.Client } @@ -96,7 +96,7 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { metricCounter++ } } else { - log.Printf("I! unable to build Metric for %s due to error '%v', skipping\n", m.Name(), err) + d.Log.Infof("Unable to build Metric for %s due to error '%v', skipping", m.Name(), err) } } @@ -109,22 +109,22 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { copy(ts.Series, tempSeries[0:]) tsBytes, err := json.Marshal(ts) if err != nil { - return fmt.Errorf("unable to marshal TimeSeries, %s\n", err.Error()) + return fmt.Errorf("unable to marshal TimeSeries, %s", err.Error()) } req, err := http.NewRequest("POST", d.authenticatedUrl(), bytes.NewBuffer(tsBytes)) if err != nil { - return fmt.Errorf("unable to create http.Request, %s\n", strings.Replace(err.Error(), d.Apikey, redactedApiKey, -1)) + return fmt.Errorf("unable to create http.Request, %s", strings.Replace(err.Error(), d.Apikey, redactedApiKey, -1)) } req.Header.Add("Content-Type", "application/json") resp, err := d.client.Do(req) if err != nil { - return fmt.Errorf("error POSTing metrics, %s\n", strings.Replace(err.Error(), d.Apikey, redactedApiKey, -1)) + return fmt.Errorf("error POSTing metrics, %s", strings.Replace(err.Error(), d.Apikey, redactedApiKey, -1)) } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 209 { - return fmt.Errorf("received bad status code, %d\n", resp.StatusCode) + return fmt.Errorf("received bad status code, %d", resp.StatusCode) } return nil diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go index be8541ee8a92d..ff88e093dc616 100644 --- a/plugins/outputs/datadog/datadog_test.go +++ b/plugins/outputs/datadog/datadog_test.go @@ -67,7 +67,7 @@ func TestBadStatusCode(t *testing.T) { if err == nil { t.Errorf("error expected but none returned") } else { - require.EqualError(t, fmt.Errorf("received bad status code, 500\n"), err.Error()) + require.EqualError(t, fmt.Errorf("received bad status code, 500"), err.Error()) } } diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index d3697627e5f92..813b6bb9f54a7 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -101,7 +101,7 @@ func (c *CommandRunner) Run(timeout time.Duration, command []string, buffer io.R s := stderr if err != nil { - if err == internal.TimeoutErr { + if err == internal.ErrTimeout { return fmt.Errorf("%q timed out and was killed", command) } diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index 6c871ae174580..ff26b24ba422c 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -4,7 +4,6 @@ import ( "crypto/tls" "errors" "io" - "log" "math/rand" "net" "time" @@ -16,15 +15,17 @@ import ( ) type Graphite struct { - GraphiteTagSupport bool - GraphiteSeparator string + GraphiteTagSupport bool `toml:"graphite_tag_support"` + GraphiteSeparator string `toml:"graphite_separator"` // URL is only for backwards compatibility - Servers []string - Prefix string - Template string - Templates []string - Timeout int - conns []net.Conn + Servers []string `toml:"servers"` + Prefix string `toml:"prefix"` + Template string `toml:"template"` + Templates []string `toml:"templates"` + Timeout int `toml:"timeout"` + Log telegraf.Logger `toml:"-"` + + conns []net.Conn tlsint.ClientConfig } @@ -124,22 +125,22 @@ func (g *Graphite) Description() string { // We can detect that by finding an eof // if not for this, we can happily write and flush without getting errors (in Go) but getting RST tcp packets back (!) // props to Tv via the authors of carbon-relay-ng` for this trick. -func checkEOF(conn net.Conn) { +func (g *Graphite) checkEOF(conn net.Conn) { b := make([]byte, 1024) conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) num, err := conn.Read(b) if err == io.EOF { - log.Printf("E! Conn %s is closed. closing conn explicitly", conn) + g.Log.Errorf("Conn %s is closed. closing conn explicitly", conn) conn.Close() return } // just in case i misunderstand something or the remote behaves badly if num != 0 { - log.Printf("I! conn %s .conn.Read data? did not expect that. data: %s\n", conn, b[:num]) + g.Log.Infof("conn %s .conn.Read data? did not expect that. data: %s", conn, b[:num]) } // Log non-timeout errors or close. if e, ok := err.(net.Error); !(ok && e.Timeout()) { - log.Printf("E! conn %s checkEOF .conn.Read returned err != EOF, which is unexpected. closing conn. error: %s\n", conn, err) + g.Log.Errorf("conn %s checkEOF .conn.Read returned err != EOF, which is unexpected. closing conn. error: %s", conn, err) conn.Close() } } @@ -157,7 +158,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { buf, err := s.Serialize(metric) if err != nil { - log.Printf("E! Error serializing some metrics to graphite: %s", err.Error()) + g.Log.Errorf("Error serializing some metrics to graphite: %s", err.Error()) } batch = append(batch, buf...) } @@ -166,7 +167,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error { // try to reconnect and retry to send if err != nil { - log.Println("E! Graphite: Reconnecting and retrying: ") + g.Log.Error("Graphite: Reconnecting and retrying...") g.Connect() err = g.send(batch) } @@ -176,7 +177,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error { func (g *Graphite) send(batch []byte) error { // This will get set to nil if a successful write occurs - err := errors.New("Could not write to any Graphite server in cluster\n") + err := errors.New("could not write to any Graphite server in cluster") // Send data to a random server p := rand.Perm(len(g.conns)) @@ -184,10 +185,10 @@ func (g *Graphite) send(batch []byte) error { if g.Timeout > 0 { g.conns[n].SetWriteDeadline(time.Now().Add(time.Duration(g.Timeout) * time.Second)) } - checkEOF(g.conns[n]) + g.checkEOF(g.conns[n]) if _, e := g.conns[n].Write(batch); e != nil { // Error - log.Println("E! Graphite Error: " + e.Error()) + g.Log.Errorf("Graphite Error: " + e.Error()) // Close explicitly g.conns[n].Close() // Let's try the next one diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go index 025ee23ec1679..38a9691e0b73a 100644 --- a/plugins/outputs/graphite/graphite_test.go +++ b/plugins/outputs/graphite/graphite_test.go @@ -2,6 +2,7 @@ package graphite import ( "bufio" + "github.com/influxdata/telegraf/testutil" "net" "net/textproto" "sync" @@ -20,6 +21,7 @@ func TestGraphiteError(t *testing.T) { g := Graphite{ Servers: []string{"127.0.0.1:12004", "127.0.0.1:12003"}, Prefix: "my.prefix", + Log: testutil.Logger{}, } // Init metrics m1, _ := metric.New( @@ -36,7 +38,7 @@ func TestGraphiteError(t *testing.T) { require.NoError(t, err1) err2 := g.Write(metrics) require.Error(t, err2) - assert.Equal(t, "Could not write to any Graphite server in cluster\n", err2.Error()) + assert.Equal(t, "could not write to any Graphite server in cluster", err2.Error()) } func TestGraphiteOK(t *testing.T) { @@ -50,6 +52,7 @@ func TestGraphiteOK(t *testing.T) { g := Graphite{ Prefix: "my.prefix", Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics @@ -111,6 +114,7 @@ func TestGraphiteOkWithSeparatorDot(t *testing.T) { Prefix: "my.prefix", GraphiteSeparator: ".", Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics @@ -172,6 +176,7 @@ func TestGraphiteOkWithSeparatorUnderscore(t *testing.T) { Prefix: "my.prefix", GraphiteSeparator: "_", Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics @@ -237,6 +242,7 @@ func TestGraphiteOKWithMultipleTemplates(t *testing.T) { "measurement.tags.host.field", }, Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics @@ -298,6 +304,7 @@ func TestGraphiteOkWithTags(t *testing.T) { Prefix: "my.prefix", GraphiteTagSupport: true, Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics @@ -360,6 +367,7 @@ func TestGraphiteOkWithTagsAndSeparatorDot(t *testing.T) { GraphiteTagSupport: true, GraphiteSeparator: ".", Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics @@ -422,6 +430,7 @@ func TestGraphiteOkWithTagsAndSeparatorUnderscore(t *testing.T) { GraphiteTagSupport: true, GraphiteSeparator: "_", Servers: []string{"localhost:12003"}, + Log: testutil.Logger{}, } // Init metrics diff --git a/plugins/outputs/health/compares.go b/plugins/outputs/health/compares.go index 9228bd2df7187..ff19da76b0ff6 100644 --- a/plugins/outputs/health/compares.go +++ b/plugins/outputs/health/compares.go @@ -68,9 +68,8 @@ func asFloat(fv interface{}) (float64, bool) { case bool: if v { return 1.0, true - } else { - return 0.0, true } + return 0.0, true default: return 0.0, false } diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go index f411305616954..e271b0b14c26a 100644 --- a/plugins/outputs/health/health.go +++ b/plugins/outputs/health/health.go @@ -4,7 +4,6 @@ import ( "context" "crypto/tls" "errors" - "log" "net" "net/http" "net/url" @@ -75,8 +74,9 @@ type Health struct { BasicPassword string `toml:"basic_password"` tlsint.ServerConfig - Compares []*Compares `toml:"compares"` - Contains []*Contains `toml:"contains"` + Compares []*Compares `toml:"compares"` + Contains []*Contains `toml:"contains"` + Log telegraf.Logger `toml:"-"` checkers []Checker wg sync.WaitGroup @@ -153,14 +153,14 @@ func (h *Health) Connect() error { h.origin = h.getOrigin(listener) - log.Printf("I! [outputs.health] Listening on %s", h.origin) + h.Log.Infof("Listening on %s", h.origin) h.wg.Add(1) go func() { defer h.wg.Done() err := h.server.Serve(listener) if err != http.ErrServerClosed { - log.Printf("E! [outputs.health] Serve error on %s: %v", h.origin, err) + h.Log.Errorf("Serve error on %s: %v", h.origin, err) } h.origin = "" }() @@ -174,9 +174,8 @@ func onAuthError(_ http.ResponseWriter) { func (h *Health) listen() (net.Listener, error) { if h.tlsConf != nil { return tls.Listen(h.network, h.address, h.tlsConf) - } else { - return net.Listen(h.network, h.address) } + return net.Listen(h.network, h.address) } func (h *Health) ServeHTTP(rw http.ResponseWriter, req *http.Request) { diff --git a/plugins/outputs/health/health_test.go b/plugins/outputs/health/health_test.go index 5bf35ad8320e4..f03cfcacba7a6 100644 --- a/plugins/outputs/health/health_test.go +++ b/plugins/outputs/health/health_test.go @@ -106,6 +106,7 @@ func TestHealth(t *testing.T) { output.ServiceAddress = "tcp://127.0.0.1:0" output.Compares = tt.options.Compares output.Contains = tt.options.Contains + output.Log = testutil.Logger{} err := output.Init() require.NoError(t, err) @@ -140,6 +141,7 @@ func TestInitServiceAddress(t *testing.T) { name: "port without scheme is not allowed", plugin: &health.Health{ ServiceAddress: ":8080", + Log: testutil.Logger{}, }, err: true, }, @@ -147,6 +149,7 @@ func TestInitServiceAddress(t *testing.T) { name: "path without scheme is not allowed", plugin: &health.Health{ ServiceAddress: "/tmp/telegraf", + Log: testutil.Logger{}, }, err: true, }, @@ -154,6 +157,7 @@ func TestInitServiceAddress(t *testing.T) { name: "tcp with port maps to http", plugin: &health.Health{ ServiceAddress: "tcp://:8080", + Log: testutil.Logger{}, }, }, { @@ -161,30 +165,35 @@ func TestInitServiceAddress(t *testing.T) { plugin: &health.Health{ ServiceAddress: "tcp://:8080", ServerConfig: *pki.TLSServerConfig(), + Log: testutil.Logger{}, }, }, { name: "tcp4 is allowed", plugin: &health.Health{ ServiceAddress: "tcp4://:8080", + Log: testutil.Logger{}, }, }, { name: "tcp6 is allowed", plugin: &health.Health{ ServiceAddress: "tcp6://:8080", + Log: testutil.Logger{}, }, }, { name: "http scheme", plugin: &health.Health{ ServiceAddress: "http://:8080", + Log: testutil.Logger{}, }, }, { name: "https scheme", plugin: &health.Health{ ServiceAddress: "https://:8080", + Log: testutil.Logger{}, }, }, } @@ -192,6 +201,7 @@ func TestInitServiceAddress(t *testing.T) { t.Run(tt.name, func(t *testing.T) { output := health.NewHealth() output.ServiceAddress = tt.plugin.ServiceAddress + output.Log = testutil.Logger{} err := output.Init() if tt.err { diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index e5decbf7f065f..a8c68499ab323 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "log" "net" "regexp" "strings" @@ -31,6 +30,8 @@ type Instrumental struct { Timeout internal.Duration Debug bool + Log telegraf.Logger `toml:"-"` + conn net.Conn } @@ -82,7 +83,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { if i.conn == nil { err := i.Connect() if err != nil { - return fmt.Errorf("FAILED to (re)connect to Instrumental. Error: %s\n", err) + return fmt.Errorf("failed to (re)connect to Instrumental. Error: %s", err) } } @@ -111,7 +112,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { buf, err := s.Serialize(m) if err != nil { - log.Printf("D! [outputs.instrumental] Could not serialize metric: %v", err) + i.Log.Debugf("Could not serialize metric: %v", err) continue } @@ -187,7 +188,7 @@ func (i *Instrumental) authenticate(conn net.Conn) error { } if string(responses)[:6] != "ok\nok\n" { - return fmt.Errorf("Authentication failed: %s", responses) + return fmt.Errorf("authentication failed: %s", responses) } i.conn = conn diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 53bb8c1249188..9f390046c74c7 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "log" "net/http" "regexp" @@ -17,12 +16,13 @@ import ( // Librato structure for configuration and client type Librato struct { - APIUser string `toml:"api_user"` - APIToken string `toml:"api_token"` - Debug bool - SourceTag string // Deprecated, keeping for backward-compatibility - Timeout internal.Duration - Template string + APIUser string `toml:"api_user"` + APIToken string `toml:"api_token"` + Debug bool `toml:"debug"` + SourceTag string `toml:"source_tag"` // Deprecated, keeping for backward-compatibility + Timeout internal.Duration `toml:"timeout"` + Template string `toml:"template"` + Log telegraf.Logger `toml:"-"` APIUrl string client *http.Client @@ -89,7 +89,6 @@ func (l *Librato) Connect() error { } func (l *Librato) Write(metrics []telegraf.Metric) error { - if len(metrics) == 0 { return nil } @@ -106,11 +105,11 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { if gauges, err := l.buildGauges(m); err == nil { for _, gauge := range gauges { tempGauges = append(tempGauges, gauge) - log.Printf("D! Got a gauge: %v\n", gauge) + l.Log.Debugf("Got a gauge: %v", gauge) } } else { - log.Printf("I! unable to build Gauge for %s, skipping\n", m.Name()) - log.Printf("D! Couldn't build gauge: %v\n", err) + l.Log.Infof("Unable to build Gauge for %s, skipping", m.Name()) + l.Log.Debugf("Couldn't build gauge: %v", err) } } @@ -129,34 +128,32 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { copy(lmetrics.Gauges, tempGauges[start:end]) metricsBytes, err := json.Marshal(lmetrics) if err != nil { - return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error()) + return fmt.Errorf("unable to marshal Metrics, %s", err.Error()) } - log.Printf("D! Librato request: %v\n", string(metricsBytes)) + l.Log.Debugf("Librato request: %v", string(metricsBytes)) req, err := http.NewRequest( "POST", l.APIUrl, bytes.NewBuffer(metricsBytes)) if err != nil { - return fmt.Errorf( - "unable to create http.Request, %s\n", - err.Error()) + return fmt.Errorf("unable to create http.Request, %s", err.Error()) } req.Header.Add("Content-Type", "application/json") req.SetBasicAuth(l.APIUser, l.APIToken) resp, err := l.client.Do(req) if err != nil { - log.Printf("D! Error POSTing metrics: %v\n", err.Error()) - return fmt.Errorf("error POSTing metrics, %s\n", err.Error()) + l.Log.Debugf("Error POSTing metrics: %v", err.Error()) + return fmt.Errorf("error POSTing metrics, %s", err.Error()) } defer resp.Body.Close() if resp.StatusCode != 200 || l.Debug { htmlData, err := ioutil.ReadAll(resp.Body) if err != nil { - log.Printf("D! Couldn't get response! (%v)\n", err) + l.Log.Debugf("Couldn't get response! (%v)", err) } if resp.StatusCode != 200 { return fmt.Errorf( @@ -164,7 +161,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { resp.StatusCode, string(htmlData)) } - log.Printf("D! Librato response: %v\n", string(htmlData)) + l.Log.Debugf("Librato response: %v", string(htmlData)) } } @@ -183,7 +180,6 @@ func (l *Librato) Description() string { } func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { - gauges := []*Gauge{} if m.Time().Unix() == 0 { return gauges, fmt.Errorf("time was zero %s", m.Name()) @@ -193,8 +189,7 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { "value") if metricSource == "" { return gauges, - fmt.Errorf("undeterminable Source type from Field, %s\n", - l.Template) + fmt.Errorf("undeterminable Source type from Field, %s", l.Template) } for fieldName, value := range m.Fields() { @@ -212,14 +207,12 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { continue } if err := gauge.setValue(value); err != nil { - return gauges, fmt.Errorf( - "unable to extract value from Fields, %s\n", - err.Error()) + return gauges, fmt.Errorf("unable to extract value from Fields, %s", err.Error()) } gauges = append(gauges, gauge) } - log.Printf("D! Built gauges: %v\n", gauges) + l.Log.Debugf("Built gauges: %v", gauges) return gauges, nil } diff --git a/plugins/outputs/librato/librato_test.go b/plugins/outputs/librato/librato_test.go index fe39313742751..5e78d9645ab6d 100644 --- a/plugins/outputs/librato/librato_test.go +++ b/plugins/outputs/librato/librato_test.go @@ -10,19 +10,17 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) var ( - fakeURL = "http://test.librato.com" - fakeUser = "telegraf@influxdb.com" - fakeToken = "123456" + fakeURL = "http://test.librato.com" ) -func fakeLibrato() *Librato { - l := NewLibrato(fakeURL) - l.APIUser = fakeUser - l.APIToken = fakeToken +func newTestLibrato(testURL string) *Librato { + l := NewLibrato(testURL) + l.Log = testutil.Logger{} return l } @@ -34,7 +32,7 @@ func TestUriOverride(t *testing.T) { })) defer ts.Close() - l := NewLibrato(ts.URL) + l := newTestLibrato(ts.URL) l.APIUser = "telegraf@influxdb.com" l.APIToken = "123456" err := l.Connect() @@ -50,7 +48,7 @@ func TestBadStatusCode(t *testing.T) { })) defer ts.Close() - l := NewLibrato(ts.URL) + l := newTestLibrato(ts.URL) l.APIUser = "telegraf@influxdb.com" l.APIToken = "123456" err := l.Connect() @@ -140,7 +138,7 @@ func TestBuildGauge(t *testing.T) { }, } - l := NewLibrato(fakeURL) + l := newTestLibrato(fakeURL) for _, gt := range gaugeTests { gauges, err := l.buildGauges(gt.ptIn) if err != nil && gt.err == nil { @@ -257,7 +255,7 @@ func TestBuildGaugeWithSource(t *testing.T) { }, } - l := NewLibrato(fakeURL) + l := newTestLibrato(fakeURL) for _, gt := range gaugeTests { l.Template = gt.template gauges, err := l.buildGauges(gt.ptIn) diff --git a/plugins/outputs/logzio/logzio.go b/plugins/outputs/logzio/logzio.go index e46e9bf821320..a174ba60bd1fc 100644 --- a/plugins/outputs/logzio/logzio.go +++ b/plugins/outputs/logzio/logzio.go @@ -112,18 +112,18 @@ func (l *Logzio) Write(metrics []telegraf.Metric) error { serialized, err := json.Marshal(m) if err != nil { - return fmt.Errorf("unable to marshal metric, %s\n", err.Error()) + return fmt.Errorf("unable to marshal metric, %s", err.Error()) } _, err = gz.Write(append(serialized, '\n')) if err != nil { - return fmt.Errorf("unable to write gzip meric, %s\n", err.Error()) + return fmt.Errorf("unable to write gzip meric, %s", err.Error()) } } err := gz.Close() if err != nil { - return fmt.Errorf("unable to close gzip, %s\n", err.Error()) + return fmt.Errorf("unable to close gzip, %s", err.Error()) } return l.send(buff.Bytes()) @@ -132,19 +132,19 @@ func (l *Logzio) Write(metrics []telegraf.Metric) error { func (l *Logzio) send(metrics []byte) error { req, err := http.NewRequest("POST", l.authUrl(), bytes.NewBuffer(metrics)) if err != nil { - return fmt.Errorf("unable to create http.Request, %s\n", err.Error()) + return fmt.Errorf("unable to create http.Request, %s", err.Error()) } req.Header.Add("Content-Type", "application/json") req.Header.Set("Content-Encoding", "gzip") resp, err := l.client.Do(req) if err != nil { - return fmt.Errorf("error POSTing metrics, %s\n", err.Error()) + return fmt.Errorf("error POSTing metrics, %s", err.Error()) } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode > 209 { - return fmt.Errorf("received bad status code, %d\n", resp.StatusCode) + return fmt.Errorf("received bad status code, %d", resp.StatusCode) } return nil diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index ae1e2a5362bc5..42eb824fc698a 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -2,7 +2,6 @@ package opentsdb import ( "fmt" - "log" "math" "net" "net/url" @@ -28,17 +27,19 @@ var ( ) type OpenTSDB struct { - Prefix string + Prefix string `toml:"prefix"` - Host string - Port int + Host string `toml:"host"` + Port int `toml:"port"` - HttpBatchSize int // deprecated httpBatchSize form in 1.8 - HttpPath string + HttpBatchSize int `toml:"http_batch_size"` // deprecated httpBatchSize form in 1.8 + HttpPath string `toml:"http_path"` - Debug bool + Debug bool `toml:"debug"` - Separator string + Separator string `toml:"separator"` + + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -86,7 +87,7 @@ func (o *OpenTSDB) Connect() error { // Test Connection to OpenTSDB Server u, err := url.Parse(o.Host) if err != nil { - return fmt.Errorf("Error in parsing host url: %s", err.Error()) + return fmt.Errorf("error in parsing host url: %s", err.Error()) } uri := fmt.Sprintf("%s:%d", u.Host, o.Port) @@ -109,7 +110,7 @@ func (o *OpenTSDB) Write(metrics []telegraf.Metric) error { u, err := url.Parse(o.Host) if err != nil { - return fmt.Errorf("Error in parsing host url: %s", err.Error()) + return fmt.Errorf("error in parsing host url: %s", err.Error()) } if u.Scheme == "" || u.Scheme == "tcp" { @@ -117,7 +118,7 @@ func (o *OpenTSDB) Write(metrics []telegraf.Metric) error { } else if u.Scheme == "http" || u.Scheme == "https" { return o.WriteHttp(metrics, u) } else { - return fmt.Errorf("Unknown scheme in host parameter.") + return fmt.Errorf("unknown scheme in host parameter") } } @@ -146,7 +147,7 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error { continue } default: - log.Printf("D! OpenTSDB does not support metric value: [%s] of type [%T].\n", value, value) + o.Log.Debugf("OpenTSDB does not support metric value: [%s] of type [%T].", value, value) continue } @@ -195,13 +196,13 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error { continue } default: - log.Printf("D! OpenTSDB does not support metric value: [%s] of type [%T].\n", value, value) + o.Log.Debugf("OpenTSDB does not support metric value: [%s] of type [%T].", value, value) continue } metricValue, buildError := buildValue(value) if buildError != nil { - log.Printf("E! OpenTSDB: %s\n", buildError.Error()) + o.Log.Errorf("OpenTSDB: %s", buildError.Error()) continue } diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 53713a02ba4e6..b9ef7c3a6eb47 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -183,9 +183,8 @@ func (p *PrometheusClient) Init() error { func (p *PrometheusClient) listen() (net.Listener, error) { if p.server.TLSConfig != nil { return tls.Listen("tcp", p.Listen, p.server.TLSConfig) - } else { - return net.Listen("tcp", p.Listen) } + return net.Listen("tcp", p.Listen) } func (p *PrometheusClient) Connect() error { diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go index 1738ca537bab0..3f3f1b54b18af 100644 --- a/plugins/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -2,7 +2,6 @@ package riemann import ( "fmt" - "log" "net/url" "os" "sort" @@ -16,15 +15,16 @@ import ( ) type Riemann struct { - URL string - TTL float32 - Separator string - MeasurementAsAttribute bool - StringAsState bool - TagKeys []string - Tags []string - DescriptionText string - Timeout internal.Duration + URL string `toml:"url"` + TTL float32 `toml:"ttl"` + Separator string `toml:"separator"` + MeasurementAsAttribute bool `toml:"measurement_as_attribute"` + StringAsState bool `toml:"string_as_state"` + TagKeys []string `toml:"tag_keys"` + Tags []string `toml:"tags"` + DescriptionText string `toml:"description_text"` + Timeout internal.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` client *raidman.Client } @@ -149,14 +149,14 @@ func (r *Riemann) buildRiemannEvents(m telegraf.Metric) []*raidman.Event { case string: // only send string metrics if explicitly enabled, skip otherwise if !r.StringAsState { - log.Printf("D! Riemann event states disabled, skipping metric value [%s]\n", value) + r.Log.Debugf("Riemann event states disabled, skipping metric value [%s]", value) continue } event.State = value.(string) case int, int64, uint64, float32, float64: event.Metric = value default: - log.Printf("D! Riemann does not support metric value [%s]\n", value) + r.Log.Debugf("Riemann does not support metric value [%s]", value) continue } diff --git a/plugins/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go index 61b7b37965e71..e53258c1bff7a 100644 --- a/plugins/outputs/riemann/riemann_test.go +++ b/plugins/outputs/riemann/riemann_test.go @@ -1,6 +1,7 @@ package riemann import ( + "github.com/influxdata/telegraf/testutil" "testing" "time" @@ -12,7 +13,9 @@ import ( func TestAttributes(t *testing.T) { tags := map[string]string{"tag1": "value1", "tag2": "value2"} - r := &Riemann{} + r := &Riemann{ + Log: testutil.Logger{}, + } require.Equal(t, map[string]string{"tag1": "value1", "tag2": "value2"}, r.attributes("test", tags)) @@ -27,6 +30,7 @@ func TestAttributes(t *testing.T) { func TestService(t *testing.T) { r := &Riemann{ Separator: "/", + Log: testutil.Logger{}, } require.Equal(t, "test/value", r.service("test", "value")) @@ -41,6 +45,7 @@ func TestTags(t *testing.T) { // all tag values plus additional tag should be present r := &Riemann{ Tags: []string{"test"}, + Log: testutil.Logger{}, } require.Equal(t, []string{"test", "value1", "value2"}, @@ -67,6 +72,7 @@ func TestMetricEvents(t *testing.T) { MeasurementAsAttribute: false, DescriptionText: "metrics from telegraf", Tags: []string{"telegraf"}, + Log: testutil.Logger{}, } // build a single event @@ -126,6 +132,7 @@ func TestMetricEvents(t *testing.T) { func TestStateEvents(t *testing.T) { r := &Riemann{ MeasurementAsAttribute: true, + Log: testutil.Logger{}, } // string metrics will be skipped unless explicitly enabled diff --git a/plugins/outputs/riemann_legacy/riemann.go b/plugins/outputs/riemann_legacy/riemann.go index a1b140436430a..64d9f997061e7 100644 --- a/plugins/outputs/riemann_legacy/riemann.go +++ b/plugins/outputs/riemann_legacy/riemann.go @@ -70,7 +70,7 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error { if r.client == nil { err := r.Connect() if err != nil { - return fmt.Errorf("FAILED to (re)connect to Riemann. Error: %s\n", err) + return fmt.Errorf("failed to (re)connect to Riemann, error: %s", err) } } @@ -85,8 +85,7 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error { var senderr = r.client.SendMulti(events) if senderr != nil { r.Close() // always returns nil - return fmt.Errorf("FAILED to send riemann message (will try to reconnect). Error: %s\n", - senderr) + return fmt.Errorf("failed to send riemann message (will try to reconnect), error: %s", senderr) } return nil diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index ef5a6418fe868..f3f1fc94bbdb7 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -142,7 +142,7 @@ func (w *Wavefront) Connect() error { FlushIntervalSeconds: flushSeconds, }) if err != nil { - return fmt.Errorf("Wavefront: Could not create Wavefront Sender for Url: %s", w.Url) + return fmt.Errorf("could not create Wavefront Sender for Url: %s", w.Url) } w.sender = sender } else { @@ -153,7 +153,7 @@ func (w *Wavefront) Connect() error { FlushIntervalSeconds: flushSeconds, }) if err != nil { - return fmt.Errorf("Wavefront: Could not create Wavefront Sender for Host: %q and Port: %d", w.Host, w.Port) + return fmt.Errorf("could not create Wavefront Sender for Host: %q and Port: %d", w.Host, w.Port) } w.sender = sender } @@ -174,7 +174,7 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { err := w.sender.SendMetric(point.Metric, point.Value, point.Timestamp, point.Source, point.Tags) if err != nil { if isRetryable(err) { - return fmt.Errorf("Wavefront sending error: %v", err) + return fmt.Errorf("wavefront sending error: %v", err) } w.Log.Errorf("non-retryable error during Wavefront.Write: %v", err) w.Log.Debugf("Non-retryable metric data: Name: %v, Value: %v, Timestamp: %v, Source: %v, PointTags: %v ", point.Metric, point.Value, point.Timestamp, point.Source, point.Tags) @@ -306,9 +306,8 @@ func buildValue(v interface{}, name string, w *Wavefront) (float64, error) { if w.ConvertBool { if p { return 1, nil - } else { - return 0, nil } + return 0, nil } case int64: return float64(v.(int64)), nil diff --git a/plugins/parsers/influx/escape.go b/plugins/parsers/influx/escape.go index 01e42a8d51cb5..211963d8abc35 100644 --- a/plugins/parsers/influx/escape.go +++ b/plugins/parsers/influx/escape.go @@ -36,25 +36,22 @@ var ( func unescape(b []byte) string { if bytes.ContainsAny(b, escapes) { return unescaper.Replace(unsafeBytesToString(b)) - } else { - return string(b) } + return string(b) } func nameUnescape(b []byte) string { if bytes.ContainsAny(b, nameEscapes) { return nameUnescaper.Replace(unsafeBytesToString(b)) - } else { - return string(b) } + return string(b) } func stringFieldUnescape(b []byte) string { if bytes.ContainsAny(b, stringFieldEscapes) { return stringFieldUnescaper.Replace(unsafeBytesToString(b)) - } else { - return string(b) } + return string(b) } // parseIntBytes is a zero-alloc wrapper around strconv.ParseInt. diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go index e512d1c9934d5..14f0eef90ca40 100644 --- a/plugins/parsers/prometheus/parser.go +++ b/plugins/parsers/prometheus/parser.go @@ -13,7 +13,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - . "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" + "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" @@ -63,7 +63,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { for metricName, mf := range metricFamilies { for _, m := range mf.Metric { // reading tags - tags := MakeLabels(m, p.DefaultTags) + tags := common.MakeLabels(m, p.DefaultTags) if mf.GetType() == dto.MetricType_SUMMARY { // summary metric @@ -81,7 +81,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { // converting to telegraf metric if len(fields) > 0 { t := getTimestamp(m, now) - metric, err := metric.New("prometheus", tags, fields, t, ValueType(mf.GetType())) + metric, err := metric.New("prometheus", tags, fields, t, common.ValueType(mf.GetType())) if err == nil { metrics = append(metrics, metric) } @@ -100,11 +100,11 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } if len(metrics) < 1 { - return nil, fmt.Errorf("No metrics in line") + return nil, fmt.Errorf("no metrics in line") } if len(metrics) > 1 { - return nil, fmt.Errorf("More than one metric in line") + return nil, fmt.Errorf("more than one metric in line") } return metrics[0], nil @@ -122,7 +122,7 @@ func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, met fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) - met, err := metric.New("prometheus", tags, fields, t, ValueType(metricType)) + met, err := metric.New("prometheus", tags, fields, t, common.ValueType(metricType)) if err == nil { metrics = append(metrics, met) } @@ -134,7 +134,7 @@ func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, met newTags["quantile"] = fmt.Sprint(q.GetQuantile()) fields[metricName] = float64(q.GetValue()) - quantileMetric, err := metric.New("prometheus", newTags, fields, t, ValueType(metricType)) + quantileMetric, err := metric.New("prometheus", newTags, fields, t, common.ValueType(metricType)) if err == nil { metrics = append(metrics, quantileMetric) } @@ -151,7 +151,7 @@ func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metri fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) - met, err := metric.New("prometheus", tags, fields, t, ValueType(metricType)) + met, err := metric.New("prometheus", tags, fields, t, common.ValueType(metricType)) if err == nil { metrics = append(metrics, met) } @@ -162,7 +162,7 @@ func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metri newTags["le"] = fmt.Sprint(b.GetUpperBound()) fields[metricName+"_bucket"] = float64(b.GetCumulativeCount()) - histogramMetric, err := metric.New("prometheus", newTags, fields, t, ValueType(metricType)) + histogramMetric, err := metric.New("prometheus", newTags, fields, t, common.ValueType(metricType)) if err == nil { metrics = append(metrics, histogramMetric) } diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index face81ad39241..faf6de1e25661 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -1,6 +1,7 @@ package all import ( + //Blank imports for plugins to register themselves _ "github.com/influxdata/telegraf/plugins/processors/aws/ec2" _ "github.com/influxdata/telegraf/plugins/processors/clone" _ "github.com/influxdata/telegraf/plugins/processors/converter" diff --git a/plugins/processors/converter/converter.go b/plugins/processors/converter/converter.go index 55a2a2d0965dc..6f69d2eb6714e 100644 --- a/plugins/processors/converter/converter.go +++ b/plugins/processors/converter/converter.go @@ -349,9 +349,8 @@ func toInteger(v interface{}) (int64, bool) { case uint64: if value <= uint64(math.MaxInt64) { return int64(value), true - } else { - return math.MaxInt64, true } + return math.MaxInt64, true case float64: if value < float64(math.MinInt64) { return math.MinInt64, true @@ -363,9 +362,8 @@ func toInteger(v interface{}) (int64, bool) { case bool: if value { return 1, true - } else { - return 0, true } + return 0, true case string: result, err := strconv.ParseInt(value, 0, 64) @@ -388,9 +386,8 @@ func toUnsigned(v interface{}) (uint64, bool) { case int64: if value < 0 { return 0, true - } else { - return uint64(value), true } + return uint64(value), true case float64: if value < 0.0 { return 0, true @@ -402,9 +399,8 @@ func toUnsigned(v interface{}) (uint64, bool) { case bool: if value { return 1, true - } else { - return 0, true } + return 0, true case string: result, err := strconv.ParseUint(value, 0, 64) @@ -431,9 +427,8 @@ func toFloat(v interface{}) (float64, bool) { case bool: if value { return 1.0, true - } else { - return 0.0, true } + return 0.0, true case string: result, err := strconv.ParseFloat(value, 64) return result, err == nil diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index 68b41e9f4baf5..fb16a78dda2a5 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -268,9 +268,8 @@ func (d *IfName) getMap(agent string) (entry nameMap, age time.Duration, err err d.rwLock.RUnlock() if ok { return m, age, nil - } else { - return nil, 0, fmt.Errorf("getting remote table from cache") } + return nil, 0, fmt.Errorf("getting remote table from cache") } // The cache missed and this is the first request for this diff --git a/plugins/processors/ifname/ttl_cache.go b/plugins/processors/ifname/ttl_cache.go index 99cbab1d5a9f1..e65a8ec7b182e 100644 --- a/plugins/processors/ifname/ttl_cache.go +++ b/plugins/processors/ifname/ttl_cache.go @@ -43,10 +43,10 @@ func (c *TTLCache) Get(key keyType) (valType, bool, time.Duration) { age := c.now().Sub(v.time) if age < c.validDuration { return v.val, ok, age - } else { - c.lru.Delete(key) - return valType{}, false, 0 } + + c.lru.Delete(key) + return valType{}, false, 0 } func (c *TTLCache) Put(key keyType, value valType) { diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go index 1ac6c61019c6f..92ce560988384 100644 --- a/plugins/processors/strings/strings.go +++ b/plugins/processors/strings/strings.go @@ -287,9 +287,9 @@ func (s *Strings) initOnce() { newString := strings.Replace(s, c.Old, c.New, -1) if newString == "" { return s - } else { - return newString } + + return newString } s.converters = append(s.converters, c) } @@ -298,9 +298,9 @@ func (s *Strings) initOnce() { c.fn = func(s string) string { if len(s) < c.Width { return s - } else { - return s[:c.Width] } + + return s[:c.Width] } s.converters = append(s.converters, c) } diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index 907ec1cc41fc6..ff20039f3a3e2 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -2,7 +2,6 @@ package topk import ( "fmt" - "log" "math" "sort" "time" @@ -15,15 +14,16 @@ import ( ) type TopK struct { - Period internal.Duration - K int - GroupBy []string `toml:"group_by"` - Fields []string - Aggregation string - Bottomk bool - AddGroupByTag string `toml:"add_groupby_tag"` - AddRankFields []string `toml:"add_rank_fields"` - AddAggregateFields []string `toml:"add_aggregate_fields"` + Period internal.Duration `toml:"period"` + K int `toml:"k"` + GroupBy []string `toml:"group_by"` + Fields []string `toml:"fields"` + Aggregation string `toml:"aggregation"` + Bottomk bool `toml:"bottomk"` + AddGroupByTag string `toml:"add_groupby_tag"` + AddRankFields []string `toml:"add_rank_fields"` + AddAggregateFields []string `toml:"add_aggregate_fields"` + Log telegraf.Logger `toml:"-"` cache map[string][]telegraf.Metric tagsGlobs filter.Filter @@ -112,9 +112,8 @@ func sortMetrics(metrics []MetricAggregation, field string, reverse bool) { jv := metrics[j].values[field] if iv < jv { return true - } else { - return false } + return false } if reverse { @@ -174,7 +173,7 @@ func (t *TopK) groupBy(m telegraf.Metric) { if err != nil { // If we could not generate the groupkey, fail hard // by dropping this and all subsequent metrics - log.Printf("E! [processors.topk]: could not generate group key: %v", err) + t.Log.Errorf("Could not generate group key: %v", err) return } @@ -269,7 +268,7 @@ func (t *TopK) push() []telegraf.Metric { if err != nil { // If we could not generate the aggregation // function, fail hard by dropping all metrics - log.Printf("E! [processors.topk]: %v", err) + t.Log.Errorf("%v", err) return []telegraf.Metric{} } for k, ms := range t.cache { @@ -342,7 +341,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr } val, ok := convert(fieldVal) if !ok { - log.Printf("Cannot convert value '%s' from metric '%s' with tags '%s'", + t.Log.Infof("Cannot convert value '%s' from metric '%s' with tags '%s'", m.Fields()[field], m.Name(), m.Tags()) continue } @@ -408,7 +407,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr } val, ok := convert(fieldVal) if !ok { - log.Printf("Cannot convert value '%s' from metric '%s' with tags '%s'", + t.Log.Infof("Cannot convert value '%s' from metric '%s' with tags '%s'", m.Fields()[field], m.Name(), m.Tags()) continue } @@ -434,7 +433,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr }, nil default: - return nil, fmt.Errorf("Unknown aggregation function '%s'. No metrics will be processed", t.Aggregation) + return nil, fmt.Errorf("unknown aggregation function '%s', no metrics will be processed", t.Aggregation) } } diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index e580409fe2b9f..e3eee9da3d07e 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -119,9 +119,8 @@ func formatValue(value interface{}) string { case bool: if v { return "1" - } else { - return "0" } + return "0" case uint64: return strconv.FormatUint(v, 10) case int64: @@ -214,11 +213,11 @@ func InitGraphiteTemplates(templates []string) ([]*GraphiteTemplate, string, err if len(parts) == 1 { if parts[0] == "" { return nil, "", fmt.Errorf("missing template at position: %d", i) - } else { - // Override default template - defaultTemplate = t - continue } + + // Override default template + defaultTemplate = t + continue } if len(parts) > 2 { diff --git a/plugins/serializers/influx/escape.go b/plugins/serializers/influx/escape.go index 9320eb7fa5057..0f9fb5edf2add 100644 --- a/plugins/serializers/influx/escape.go +++ b/plugins/serializers/influx/escape.go @@ -38,25 +38,22 @@ var ( func escape(s string) string { if strings.ContainsAny(s, escapes) { return escaper.Replace(s) - } else { - return s } + return s } // Escape a measurement name func nameEscape(s string) string { if strings.ContainsAny(s, nameEscapes) { return nameEscaper.Replace(s) - } else { - return s } + return s } // Escape a string field func stringFieldEscape(s string) string { if strings.ContainsAny(s, stringFieldEscapes) { return stringFieldEscaper.Replace(s) - } else { - return s } + return s } diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go index aa76b8accb8e1..048d3afd8b328 100644 --- a/plugins/serializers/influx/influx.go +++ b/plugins/serializers/influx/influx.go @@ -302,13 +302,11 @@ func (s *Serializer) appendFieldValue(buf []byte, value interface{}) ([]byte, er case uint64: if s.fieldTypeSupport&UintSupport != 0 { return appendUintField(buf, v), nil - } else { - if v <= uint64(MaxInt64) { - return appendIntField(buf, int64(v)), nil - } else { - return appendIntField(buf, int64(MaxInt64)), nil - } } + if v <= uint64(MaxInt64) { + return appendIntField(buf, int64(v)), nil + } + return appendIntField(buf, MaxInt64), nil case int64: return appendIntField(buf, v), nil case float64: From aa6dc79fc6d7ee95e22d8110ae43db7b13811e3d Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 9 Feb 2021 10:50:57 -0600 Subject: [PATCH 210/761] Sort and timeout is deadline (#8839) --- plugins/inputs/ping/ping.go | 9 +++------ plugins/inputs/ping/ping_test.go | 7 +++++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 44a32de8bfa38..203d9c481d54b 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -7,6 +7,7 @@ import ( "net" "os/exec" "runtime" + "sort" "strings" "sync" "time" @@ -176,14 +177,9 @@ func (p *Ping) nativePing(destination string) (*pingStats, error) { pinger.Source = p.sourceAddress pinger.Interval = p.calcInterval - pinger.Timeout = p.calcTimeout if p.Deadline > 0 { - // If deadline is set ping exits regardless of how many packets have been sent or received - timer := time.AfterFunc(time.Duration(p.Deadline)*time.Second, func() { - pinger.Stop() - }) - defer timer.Stop() + pinger.Timeout = time.Duration(p.Deadline) * time.Second } // Get Time to live (TTL) of first response, matching original implementation @@ -240,6 +236,7 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { return } + sort.Sort(durationSlice(stats.Rtts)) for _, perc := range p.Percentiles { var value = percentile(durationSlice(stats.Rtts), perc) var field = fmt.Sprintf("percentile%v_ms", perc) diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 9f88cc17da844..752714a868ad7 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -432,11 +432,11 @@ func TestPingGatherNative(t *testing.T) { PacketsSent: 5, PacketsRecv: 5, Rtts: []time.Duration{ - 1 * time.Millisecond, - 2 * time.Millisecond, 3 * time.Millisecond, 4 * time.Millisecond, + 1 * time.Millisecond, 5 * time.Millisecond, + 2 * time.Millisecond, }, }, ttl: 1, @@ -475,8 +475,11 @@ func TestPingGatherNative(t *testing.T) { assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) assert.True(t, acc.HasField("ping", "percentile50_ms")) + assert.Equal(t, float64(3), acc.Metrics[0].Fields["percentile50_ms"]) assert.True(t, acc.HasField("ping", "percentile95_ms")) + assert.Equal(t, float64(4.799999), acc.Metrics[0].Fields["percentile95_ms"]) assert.True(t, acc.HasField("ping", "percentile99_ms")) + assert.Equal(t, float64(4.96), acc.Metrics[0].Fields["percentile99_ms"]) assert.True(t, acc.HasField("ping", "percent_packet_loss")) assert.True(t, acc.HasField("ping", "minimum_response_ms")) assert.True(t, acc.HasField("ping", "average_response_ms")) From 6804cfcfef86a8f7799d8844f9dec701cd6b2daa Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 9 Feb 2021 14:12:49 -0500 Subject: [PATCH 211/761] adds missing & to flush_jitter output ref (#8838) --- config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/config.go b/config/config.go index b395f7df1c725..03a5c4f0b2e25 100644 --- a/config/config.go +++ b/config/config.go @@ -1392,7 +1392,7 @@ func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, // TODO: support FieldPass/FieldDrop on outputs c.getFieldDuration(tbl, "flush_interval", &oc.FlushInterval) - c.getFieldDuration(tbl, "flush_jitter", oc.FlushJitter) + c.getFieldDuration(tbl, "flush_jitter", &oc.FlushJitter) c.getFieldInt(tbl, "metric_buffer_limit", &oc.MetricBufferLimit) c.getFieldInt(tbl, "metric_batch_size", &oc.MetricBatchSize) From 198bcc8f36647b384311e5c209c5adc678261684 Mon Sep 17 00:00:00 2001 From: Johannes Deger Date: Wed, 10 Feb 2021 17:28:37 +0100 Subject: [PATCH 212/761] Expose v4/v6-only connection-schemes through GosnmpWrapper (#8804) --- internal/snmp/wrapper.go | 11 +++++++---- plugins/inputs/snmp/README.md | 5 +++++ plugins/inputs/snmp/snmp.go | 8 +++++++- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/internal/snmp/wrapper.go b/internal/snmp/wrapper.go index 0655285060d37..c1dad9fe77f4d 100644 --- a/internal/snmp/wrapper.go +++ b/internal/snmp/wrapper.go @@ -164,11 +164,14 @@ func (gs *GosnmpWrapper) SetAgent(agent string) error { return err } + // Only allow udp{4,6} and tcp{4,6}. + // Allowing ip{4,6} does not make sense as specifying a port + // requires the specification of a protocol. + // gosnmp does not handle these errors well, which is why + // they can result in cryptic errors by net.Dial. switch u.Scheme { - case "tcp": - gs.Transport = "tcp" - case "", "udp": - gs.Transport = "udp" + case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6": + gs.Transport = u.Scheme default: return fmt.Errorf("unsupported scheme: %v", u.Scheme) } diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index c4aa3367f787c..fa96150b94b4c 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -22,8 +22,13 @@ information. ```toml [[inputs.snmp]] ## Agent addresses to retrieve values from. + ## format: agents = [":"] + ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. + ## default is udp + ## port: optional ## example: agents = ["udp://127.0.0.1:161"] ## agents = ["tcp://127.0.0.1:161"] + ## agents = ["udp4://v4only-snmp-agent"] agents = ["udp://127.0.0.1:161"] ## Timeout for each request. diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 9aac89b8d70e9..c1dda901b7736 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -25,8 +25,13 @@ import ( const description = `Retrieves SNMP values from remote agents` const sampleConfig = ` ## Agent addresses to retrieve values from. + ## format: agents = [":"] + ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. + ## default is udp + ## port: optional ## example: agents = ["udp://127.0.0.1:161"] ## agents = ["tcp://127.0.0.1:161"] + ## agents = ["udp4://v4only-snmp-agent"] agents = ["udp://127.0.0.1:161"] ## Timeout for each request. @@ -560,7 +565,8 @@ func (s *Snmp) getConnection(idx int) (snmpConnection, error) { if err != nil { return nil, err } - gs.SetAgent(agent) + + err = gs.SetAgent(agent) if err != nil { return nil, err } From 3a66b57d2cf4656bde8f2361ac99a4a8abe869cb Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 10 Feb 2021 11:28:57 -0600 Subject: [PATCH 213/761] Move windows tests to circle-ci, remove appveyor (#8844) * windows tests to circleci * Add as requirement to package/release * Use latest windows orb * Add to nightly --- .circleci/config.yml | 20 ++++++++++++++++++++ appveyor.yml | 35 ----------------------------------- 2 files changed, 20 insertions(+), 35 deletions(-) delete mode 100644 appveyor.yml diff --git a/.circleci/config.yml b/.circleci/config.yml index 5e44cfe59f4c2..1c8b8da82a7cd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,4 +1,6 @@ version: 2.1 +orbs: + win: circleci/windows@2.4.0 executors: go-1_14: @@ -129,6 +131,16 @@ jobs: executor: mac steps: - test-go-mac + test-go-windows: + executor: + name: win/default + shell: powershell.exe + steps: + - checkout + - run: choco upgrade golang --version=1.15.5 + - run: choco install make + - run: git config --system core.longpaths true + - run: make test-windows package: executor: go-1_15 @@ -186,8 +198,13 @@ workflows: filters: tags: # only runs on tags if you specify this filter only: /.*/ + - 'test-go-windows': + filters: + tags: + only: /.*/ - 'package': requires: + - 'test-go-windows' - 'test-go-darwin' - 'test-go-1_14' - 'test-go-1_14-386' @@ -195,6 +212,7 @@ workflows: - 'test-go-1_15-386' - 'release': requires: + - 'test-go-windows' - 'test-go-darwin' - 'test-go-1_14' - 'test-go-1_14-386' @@ -224,8 +242,10 @@ workflows: - 'test-go-darwin': requires: - 'macdeps' + - 'test-go-windows' - 'nightly': requires: + - 'test-go-windows' - 'test-go-darwin' - 'test-go-1_14' - 'test-go-1_14-386' diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 6f5f6e94828c5..0000000000000 --- a/appveyor.yml +++ /dev/null @@ -1,35 +0,0 @@ -version: "{build}" - -image: Visual Studio 2019 - -cache: - - C:\gopath\pkg\mod -> go.sum - - C:\ProgramData\chocolatey\bin -> appveyor.yml - - C:\ProgramData\chocolatey\lib -> appveyor.yml - -clone_folder: C:\gopath\src\github.com\influxdata\telegraf - -environment: - GOPATH: C:\gopath - -stack: go 1.15 - -platform: x64 - -install: - - choco install make - - cd "%GOPATH%\src\github.com\influxdata\telegraf" - - git config --system core.longpaths true - - go version - - go env - -build_script: - - make deps - - make telegraf - -test_script: - - make check - - make test-windows - -artifacts: - - path: telegraf.exe From c25ae5295bf3beaba82363b6df5fa6afa00c3e12 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Wed, 10 Feb 2021 14:45:07 -0500 Subject: [PATCH 214/761] Validate the response from InfluxDB after writing/creating a database to avoid json parsing panics/errors (#8775) * Validate the response from InfluxDB after writing/creating a database to avoid json parsing panics. * Testing windows signing - ignore * Update config.yml * adding signing to workflow - test * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * revert circleci test changes * Various updates --- plugins/outputs/influxdb/http.go | 49 +++++++++++++++++++++++++-- plugins/outputs/influxdb/http_test.go | 20 +++++++++++ 2 files changed, 67 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 87c5a89b014cf..21265ba44def8 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -1,9 +1,11 @@ package influxdb import ( + "bytes" "context" "crypto/tls" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -216,8 +218,19 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error } defer resp.Body.Close() + body, err := c.validateResponse(resp.Body) + + // Check for poorly formatted response (can't be decoded) + if err != nil { + return &APIError{ + StatusCode: resp.StatusCode, + Title: resp.Status, + Description: "An error response was received while attempting to create the following database: " + database + ". Error: " + err.Error(), + } + } + queryResp := &QueryResponse{} - dec := json.NewDecoder(resp.Body) + dec := json.NewDecoder(body) err = dec.Decode(queryResp) if err != nil { @@ -341,8 +354,19 @@ func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []te return nil } + body, err := c.validateResponse(resp.Body) + + // Check for poorly formatted response (can't be decoded) + if err != nil { + return &APIError{ + StatusCode: resp.StatusCode, + Title: resp.Status, + Description: "An error response was received while attempting to write metrics. Error: " + err.Error(), + } + } + writeResp := &WriteResponse{} - dec := json.NewDecoder(resp.Body) + dec := json.NewDecoder(body) var desc string err = dec.Decode(writeResp) @@ -466,6 +490,27 @@ func (c *httpClient) addHeaders(req *http.Request) { } } +func (c *httpClient) validateResponse(response io.ReadCloser) (io.ReadCloser, error) { + bodyBytes, err := ioutil.ReadAll(response) + if err != nil { + return nil, err + } + defer response.Close() + + originalResponse := ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) + + // Empty response is valid. + if response == http.NoBody || len(bodyBytes) == 0 || bodyBytes == nil { + return originalResponse, nil + } + + if valid := json.Valid(bodyBytes); !valid { + err = errors.New(string(bodyBytes)) + } + + return originalResponse, err +} + func makeWriteURL(loc *url.URL, db, rp, consistency string) (string, error) { params := url.Values{} params.Set("db", db) diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 1d030d36cd583..2115ad5918a65 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -212,6 +212,26 @@ func TestHTTP_CreateDatabase(t *testing.T) { w.WriteHeader(http.StatusOK) }, }, + { + name: "invalid json response is handled", + config: influxdb.HTTPConfig{ + URL: u, + Database: `database`, + }, + queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`invalid response`)) + }, + errFunc: func(t *testing.T, err error) { + expected := &influxdb.APIError{ + StatusCode: 400, + Title: "400 Bad Request", + Description: "An error response was received while attempting to create the following database: database. Error: invalid response", + } + + require.Equal(t, expected, err) + }, + }, } for _, tt := range tests { From f3a208ee2877608aab670becab2ab9b840fcb87f Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Thu, 11 Feb 2021 11:45:13 -0500 Subject: [PATCH 215/761] Fix reconnection issues mqtt (#8821) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 2 +- go.sum | 7 +++++-- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 4 +--- plugins/inputs/mqtt_consumer/mqtt_consumer_test.go | 5 +++++ 5 files changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 154f13a88bde0..543d59c3e7e1a 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -80,6 +80,7 @@ following works: - github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) - github.com/gopcua/opcua [MIT License](https://github.com/gopcua/opcua/blob/master/LICENSE) - github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE) +- github.com/gorilla/websocket [BSD 2-Clause "Simplified" License](https://github.com/gorilla/websocket/blob/master/LICENSE) - github.com/gosnmp/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/gosnmp/gosnmp/blob/master/LICENSE) - github.com/grpc-ecosystem/grpc-gateway [BSD 3-Clause "New" or "Revised" License](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/LICENSE.txt) - github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) diff --git a/go.mod b/go.mod index f65401c5a141c..15a296424566a 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/docker/go-connections v0.3.0 // indirect github.com/docker/go-units v0.3.3 // indirect github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 - github.com/eclipse/paho.mqtt.golang v1.2.0 + github.com/eclipse/paho.mqtt.golang v1.3.0 github.com/ericchiang/k8s v1.2.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.4.0 diff --git a/go.sum b/go.sum index 220a706d09e11..611ab4e49f6b1 100644 --- a/go.sum +++ b/go.sum @@ -204,8 +204,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0= -github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/eclipse/paho.mqtt.golang v1.3.0 h1:MU79lqr3FKNKbSrGN7d7bNYqh8MwWW7Zcx0iG+VIw9I= +github.com/eclipse/paho.mqtt.golang v1.3.0/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -336,6 +336,8 @@ github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosnmp/gosnmp v1.29.0 h1:fEkud7oiYVzR64L+/BQA7uvp+7COI9+XkrUQi8JunYM= github.com/gosnmp/gosnmp v1.29.0/go.mod h1:Ux0YzU4nV5yDET7dNIijd0VST0BCy8ijBf+gTVFQeaM= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= @@ -729,6 +731,7 @@ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 73d41a32f0f9e..006aaac2538a2 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -184,6 +184,7 @@ func (m *MQTTConsumer) Init() error { } m.opts = opts + m.messages = map[telegraf.TrackingID]bool{} return nil } @@ -221,9 +222,6 @@ func (m *MQTTConsumer) connect() error { m.Log.Infof("Connected %v", m.Servers) m.state = Connected - m.messagesMutex.Lock() - m.messages = make(map[telegraf.TrackingID]bool) - m.messagesMutex.Unlock() // Persistent sessions should skip subscription if a session is present, as // the subscriptions are stored by the server. diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index 2d9db2c23872a..efa921cb1dd49 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -63,6 +63,7 @@ func (p *FakeParser) SetDefaultTags(tags map[string]string) { type FakeToken struct { sessionPresent bool + complete chan struct{} } // FakeToken satisfies mqtt.Token @@ -84,6 +85,10 @@ func (t *FakeToken) SessionPresent() bool { return t.sessionPresent } +func (t *FakeToken) Done() <-chan struct{} { + return t.complete +} + // Test the basic lifecycle transitions of the plugin. func TestLifecycleSanity(t *testing.T) { var acc testutil.Accumulator From 211868195846c189c079edfffd39e9c9b9ca42c8 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Thu, 11 Feb 2021 16:24:06 -0500 Subject: [PATCH 216/761] Code Signing for Windows (#8816) * Draft config * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Create sign-windows.ps1 * Updated config.yml * Updated config.yml * Delete sign-windows.ps1 * Updated config.yml * Updated config.yml * updating config * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Create windows-signing.ps1 * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml --- .circleci/config.yml | 27 ++++++++++++++++++++++++--- scripts/windows-signing.ps1 | 29 +++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 3 deletions(-) create mode 100644 scripts/windows-signing.ps1 diff --git a/.circleci/config.yml b/.circleci/config.yml index 1c8b8da82a7cd..6eff53ee8d5bb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -66,6 +66,10 @@ commands: - store_artifacts: path: './build/dist' destination: 'build/dist' + - persist_to_workspace: + root: './build' + paths: + - 'dist' jobs: deps: executor: go-1_15 @@ -108,7 +112,6 @@ jobs: - 'usr/local/Cellar/go' - 'usr/local/bin/gofmt' - 'Users/distiller/go' - test-go-1_14: executor: go-1_14 steps: @@ -155,7 +158,22 @@ jobs: steps: - package: nightly: true - + package-sign-windows: + executor: + name: win/default + shell: powershell.exe + steps: + - checkout + - attach_workspace: + at: '/build' + - run: + name: "Sign Windows Executables" + shell: powershell.exe + command: | + ./scripts/windows-signing.ps1 + - store_artifacts: + path: './build/dist' + destination: 'build/dist' workflows: version: 2 check: @@ -223,6 +241,9 @@ workflows: only: /.*/ branches: ignore: /.*/ + - 'package-sign-windows': + requires: + - 'release' nightly: jobs: - 'deps' @@ -257,4 +278,4 @@ workflows: filters: branches: only: - - master + - master \ No newline at end of file diff --git a/scripts/windows-signing.ps1 b/scripts/windows-signing.ps1 new file mode 100644 index 0000000000000..d7fca9ee1f234 --- /dev/null +++ b/scripts/windows-signing.ps1 @@ -0,0 +1,29 @@ +$tempCertFile = New-TemporaryFile + +# Retrieve environment variables for cert/password. +$certText = $env:windowsCert +$CertPass = $env:windowsCertPassword + +# Create a Cert object by converting the cert string to bytes. +$finalFileName = $tempCertFile.FullName +$certBytes = [Convert]::FromBase64String($certText) +[System.IO.File]::WriteAllBytes($finalFileName, $certBytes) +$CertPath = $finalFileName +$Cert = New-Object System.Security.Cryptography.X509Certificates.X509Certificate2($CertPath, $CertPass) + +# Go through the artifacts directory and sign the 'windows' artifacts. +$artifactDirectory = "./build/dist" +$extractDirectory = $artifactDirectory + "\" + "extracted" +foreach ($file in get-ChildItem $artifactDirectory | where {$_.name -like "*windows*"} | select name) +{ + $artifact = $artifactDirectory + "\" + $file.Name + Expand-Archive -LiteralPath $artifact -DestinationPath $extractDirectory -Force + + $subDirectoryPath = $extractDirectory + "\" + (Get-ChildItem -Path $extractDirectory | Select-Object -First 1).Name + $telegrafExePath = $subDirectoryPath + "\" + "telegraf.exe" + Set-AuthenticodeSignature -Certificate $Cert -FilePath $telegrafExePath -TimestampServer http://timestamp.digicert.com + Compress-Archive -Path $subDirectoryPath -DestinationPath $artifact -Force + Remove-Item $extractDirectory -Force -Recurse +} + +Remove-Item $finalFileName -Force From 71a3a3cf20182c4537ce832bc7ca212870e7254f Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Fri, 12 Feb 2021 11:38:40 -0500 Subject: [PATCH 217/761] Add default retry for load config via url (#8803) --- config/config.go | 27 +++++++++++++++++++-------- config/config_test.go | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 8 deletions(-) diff --git a/config/config.go b/config/config.go index 03a5c4f0b2e25..e086eebffa376 100644 --- a/config/config.go +++ b/config/config.go @@ -49,6 +49,7 @@ var ( `"`, `\"`, `\`, `\\`, ) + httpLoadConfigRetryInterval = 10 * time.Second ) // Config specifies the URL/user/password for the database that telegraf @@ -921,17 +922,27 @@ func fetchConfig(u *url.URL) ([]byte, error) { } req.Header.Add("Accept", "application/toml") req.Header.Set("User-Agent", internal.ProductToken()) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("failed to retrieve remote config: %s", resp.Status) + retries := 3 + for i := 0; i <= retries; i++ { + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("Retry %d of %d failed connecting to HTTP config server %s", i, retries, err) + } + + if resp.StatusCode != http.StatusOK { + if i < retries { + log.Printf("Error getting HTTP config. Retry %d of %d in %s. Status=%d", i, retries, httpLoadConfigRetryInterval, resp.StatusCode) + time.Sleep(httpLoadConfigRetryInterval) + continue + } + return nil, fmt.Errorf("Retry %d of %d failed to retrieve remote config: %s", i, retries, resp.Status) + } + defer resp.Body.Close() + return ioutil.ReadAll(resp.Body) } - defer resp.Body.Close() - return ioutil.ReadAll(resp.Body) + return nil, nil } // parseConfig loads a TOML configuration from a provided path and diff --git a/config/config_test.go b/config/config_test.go index 79d74e83b5a43..e238dbade1e82 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,6 +1,8 @@ package config import ( + "net/http" + "net/http/httptest" "os" "strings" "testing" @@ -278,3 +280,37 @@ func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { assert.Equal(t, "", azureMonitor.NamespacePrefix) assert.Equal(t, true, ok) } + +func TestConfig_URLRetries3Fails(t *testing.T) { + httpLoadConfigRetryInterval = 0 * time.Second + responseCounter := 0 + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + responseCounter++ + })) + defer ts.Close() + + c := NewConfig() + err := c.LoadConfig(ts.URL) + require.Error(t, err) + require.Equal(t, 4, responseCounter) +} + +func TestConfig_URLRetries3FailsThenPasses(t *testing.T) { + httpLoadConfigRetryInterval = 0 * time.Second + responseCounter := 0 + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if responseCounter <= 2 { + w.WriteHeader(http.StatusNotFound) + } else { + w.WriteHeader(http.StatusOK) + } + responseCounter++ + })) + defer ts.Close() + + c := NewConfig() + err := c.LoadConfig(ts.URL) + require.NoError(t, err) + require.Equal(t, 4, responseCounter) +} From a790529bdd9c0f097ce6e7889d8b01bdeb85a4a2 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Fri, 12 Feb 2021 10:09:17 -0800 Subject: [PATCH 218/761] update min Go version in Telegraf readme (#8846) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1999f635fba9d..726e6e74fd289 100644 --- a/README.md +++ b/README.md @@ -52,9 +52,9 @@ Ansible role: https://github.com/rossmcdonald/telegraf ### From Source: -Telegraf requires Go version 1.13 or newer, the Makefile requires GNU make. +Telegraf requires Go version 1.14 or newer, the Makefile requires GNU make. -1. [Install Go](https://golang.org/doc/install) >=1.13 (1.15 recommended) +1. [Install Go](https://golang.org/doc/install) >=1.14 (1.15 recommended) 2. Clone the Telegraf repository: ``` cd ~/src From fe16d56a3ebac49eeaeb3047206bd59a4e1183c8 Mon Sep 17 00:00:00 2001 From: viperstars Date: Wed, 17 Feb 2021 01:11:53 +0800 Subject: [PATCH 219/761] inputs.x509_cert: Fix timeout issue (#8824) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * reproduce timeout issue #8809 * fix timeout issue #8809 * set default timeout to 5s * closes #8809 Co-authored-by: 彭浩 --- plugins/inputs/x509_cert/x509_cert.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 529b4c76dfc2d..6ad87a9e0fdda 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -280,7 +280,7 @@ func init() { inputs.Add("x509_cert", func() telegraf.Input { return &X509Cert{ Sources: []string{}, - Timeout: internal.Duration{Duration: 5}, + Timeout: internal.Duration{Duration: 5 * time.Second}, // set default timeout to 5s } }) } From f09e551cbd01c13fb20c4f873387d53256d3fef1 Mon Sep 17 00:00:00 2001 From: Arnaud Lefebvre Date: Tue, 16 Feb 2021 18:15:48 +0100 Subject: [PATCH 220/761] outputs/warp10: url encode comma in tags value (#8657) --- plugins/outputs/warp10/warp10.go | 5 ++++- plugins/outputs/warp10/warp10_test.go | 16 ++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index b5996f6380a40..1b2a31934c605 100644 --- a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -7,6 +7,7 @@ import ( "log" "math" "net/http" + "net/url" "sort" "strconv" "strings" @@ -174,7 +175,9 @@ func buildTags(tags []*telegraf.Tag) []string { tagsString := make([]string, len(tags)+1) indexSource := 0 for index, tag := range tags { - tagsString[index] = fmt.Sprintf("%s=%s", tag.Key, tag.Value) + key := url.QueryEscape(tag.Key) + value := url.QueryEscape(tag.Value) + tagsString[index] = fmt.Sprintf("%s=%s", key, value) indexSource = index } indexSource++ diff --git a/plugins/outputs/warp10/warp10_test.go b/plugins/outputs/warp10/warp10_test.go index 5b543b34c0d8b..0b32ce4f158b2 100644 --- a/plugins/outputs/warp10/warp10_test.go +++ b/plugins/outputs/warp10/warp10_test.go @@ -24,6 +24,22 @@ func TestWriteWarp10(t *testing.T) { require.Exactly(t, "1257894000000000// unit.testtest1.value{source=telegraf,tag1=value1} 1.000000\n", payload) } +func TestWriteWarp10EncodedTags(t *testing.T) { + w := Warp10{ + Prefix: "unit.test", + WarpURL: "http://localhost:8090", + Token: "WRITE", + } + + metrics := testutil.MockMetrics() + for _, metric := range metrics { + metric.AddTag("encoded{tag", "value1,value2") + } + + payload := w.GenWarp10Payload(metrics) + require.Exactly(t, "1257894000000000// unit.testtest1.value{encoded%7Btag=value1%2Cvalue2,source=telegraf,tag1=value1} 1.000000\n", payload) +} + func TestHandleWarp10Error(t *testing.T) { w := Warp10{ Prefix: "unit.test", From f88813633344ca4a3710dba0b1645b39cf8470b2 Mon Sep 17 00:00:00 2001 From: Sreejith Pp <1743700+ppsreejith@users.noreply.github.com> Date: Tue, 16 Feb 2021 23:20:01 +0530 Subject: [PATCH 221/761] Add support for datadog distributions metric (#8179) * Add support for datadog distributions in statsd * Parse metric distribution correctly * Add tests to check distributions are parsed correctly * Update Statsd plugin Readme with details about Distributions metric * Refactor metric distribution initialization code * Update distribution metric interface to replace fields with value * Refactor statsd distribution metric test code * Fix go formatting errors * Add tests to parse only when DataDog Distributions config is enabled * Add config to enable parsing DataDog Statsd Distributions * Document use of datadog_distributions config in Readme --- etc/telegraf.conf | 3 ++ plugins/inputs/statsd/README.md | 12 ++++++ plugins/inputs/statsd/statsd.go | 49 ++++++++++++++++++++--- plugins/inputs/statsd/statsd_test.go | 60 +++++++++++++++++++++++++++- 4 files changed, 117 insertions(+), 7 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 1fe44afa3cdac..d6e0b165cb25b 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -7328,6 +7328,9 @@ # ## Parses datadog extensions to the statsd format # datadog_extensions = false # +# ## Parses distributions metric from datadog's extension to the statsd format +# datadog_distributions = false +# # ## Statsd data translation templates, more info can be read here: # ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # # templates = [ diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 26cbe26289615..a302f4095e63f 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -50,6 +50,10 @@ ## http://docs.datadoghq.com/guides/dogstatsd/ datadog_extensions = false + ## Parses distributions metric as specified in the datadog statsd format + ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition + datadog_distributions = false + ## Statsd data translation templates, more info can be read here: ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # templates = [ @@ -98,6 +102,10 @@ implementation. In short, the telegraf statsd listener will accept: - `load.time:320|ms` - `load.time.nanoseconds:1|h` - `load.time:200|ms|@0.1` <- sampled 1/10 of the time +- Distributions + - `load.time:320|d` + - `load.time.nanoseconds:1|d` + - `load.time:200|d|@0.1` <- sampled 1/10 of the time It is possible to omit repetitive names and merge individual stats into a single line by separating them with additional colons: @@ -172,6 +180,9 @@ metric type: that `P%` of all the values statsd saw for that stat during that time period are below x. The most common value that people use for `P` is the `90`, this is a great number to try to optimize. +- Distributions + - The Distribution metric represents the global statistical distribution of a set of values calculated across your entire distributed infrastructure in one time interval. A Distribution can be used to instrument logical objects, like services, independently from the underlying hosts. + - Unlike the Histogram metric type, which aggregates on the Agent during a given time interval, a Distribution metric sends all the raw data during a time interval. ### Plugin arguments @@ -195,6 +206,7 @@ the accuracy of percentiles but also increases the memory usage and cpu time. measurements and tags. - **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) - **datadog_extensions** boolean: Enable parsing of DataDog's extensions to dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) +- **datadog_distributions** boolean: Enable parsing of the Distribution metric in DataDog's dogstatsd format (https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition) - **max_ttl** config.Duration: Max duration (TTL) for each metric to stay cached/reported without being updated. ### Statsd bucket -> InfluxDB line-protocol Templates diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index f74eb0ef4dc38..a88fe847c445b 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -70,6 +70,11 @@ type Statsd struct { // http://docs.datadoghq.com/guides/dogstatsd/ DataDogExtensions bool `toml:"datadog_extensions"` + // Parses distribution metrics in the datadog statsd format. + // Requires the DataDogExtension flag to be enabled. + // https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition + DataDogDistributions bool `toml:"datadog_distributions"` + // UDPPacketSize is deprecated, it's only here for legacy support // we now always create 1 max size buffer and then copy only what we need // into the in channel @@ -98,10 +103,12 @@ type Statsd struct { // Cache gauges, counters & sets so they can be aggregated as they arrive // gauges and counters map measurement/tags hash -> field name -> metrics // sets and timings map measurement/tags hash -> metrics - gauges map[string]cachedgauge - counters map[string]cachedcounter - sets map[string]cachedset - timings map[string]cachedtimings + // distributions aggregate measurement/tags and are published directly + gauges map[string]cachedgauge + counters map[string]cachedcounter + sets map[string]cachedset + timings map[string]cachedtimings + distributions []cacheddistributions // bucket -> influx templates Templates []string @@ -190,6 +197,12 @@ type cachedtimings struct { expiresAt time.Time } +type cacheddistributions struct { + name string + value float64 + tags map[string]string +} + func (_ *Statsd) Description() string { return "Statsd UDP/TCP Server" } @@ -237,6 +250,10 @@ const sampleConfig = ` ## Parses datadog extensions to the statsd format datadog_extensions = false + ## Parses distributions metric as specified in the datadog statsd format + ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition + datadog_distributions = false + ## Statsd data translation templates, more info can be read here: ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # templates = [ @@ -265,6 +282,14 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { defer s.Unlock() now := time.Now() + for _, m := range s.distributions { + fields := map[string]interface{}{ + defaultFieldName: m.value, + } + acc.AddFields(m.name, fields, m.tags, now) + } + s.distributions = make([]cacheddistributions, 0) + for _, m := range s.timings { // Defining a template to parse field names for timers allows us to split // out multiple fields per timer. In this case we prefix each stat with the @@ -336,6 +361,7 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) s.timings = make(map[string]cachedtimings) + s.distributions = make([]cacheddistributions, 0) s.Lock() defer s.Unlock() @@ -601,7 +627,7 @@ func (s *Statsd) parseStatsdLine(line string) error { // Validate metric type switch pipesplit[1] { - case "g", "c", "s", "ms", "h": + case "g", "c", "s", "ms", "h", "d": m.mtype = pipesplit[1] default: s.Log.Errorf("Metric type %q unsupported", pipesplit[1]) @@ -618,7 +644,7 @@ func (s *Statsd) parseStatsdLine(line string) error { } switch m.mtype { - case "g", "ms", "h": + case "g", "ms", "h", "d": v, err := strconv.ParseFloat(pipesplit[0], 64) if err != nil { s.Log.Errorf("Parsing value to float64, unable to parse metric: %s", line) @@ -658,6 +684,8 @@ func (s *Statsd) parseStatsdLine(line string) error { m.tags["metric_type"] = "timing" case "h": m.tags["metric_type"] = "histogram" + case "d": + m.tags["metric_type"] = "distribution" } if len(lineTags) > 0 { for k, v := range lineTags { @@ -749,6 +777,15 @@ func (s *Statsd) aggregate(m metric) { defer s.Unlock() switch m.mtype { + case "d": + if s.DataDogExtensions && s.DataDogDistributions { + cached := cacheddistributions{ + name: m.name, + value: m.floatvalue, + tags: m.tags, + } + s.distributions = append(s.distributions, cached) + } case "ms", "h": // Check if the measurement exists cached, ok := s.timings[m.hash] diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 4a129266deebc..7e6a7822359e5 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -31,6 +31,7 @@ func NewTestStatsd() *Statsd { s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) s.timings = make(map[string]cachedtimings) + s.distributions = make([]cacheddistributions, 0) s.MetricSeparator = "_" @@ -430,7 +431,7 @@ func TestParse_Timings(t *testing.T) { s.Percentiles = []internal.Number{{Value: 90.0}} acc := &testutil.Accumulator{} - // Test that counters work + // Test that timings work validLines := []string{ "test.timing:1|ms", "test.timing:11|ms", @@ -461,6 +462,63 @@ func TestParse_Timings(t *testing.T) { acc.AssertContainsFields(t, "test_timing", valid) } +// Tests low-level functionality of distributions +func TestParse_Distributions(t *testing.T) { + s := NewTestStatsd() + acc := &testutil.Accumulator{} + + parseMetrics := func() { + // Test that distributions work + validLines := []string{ + "test.distribution:1|d", + "test.distribution2:2|d", + "test.distribution3:3|d", + "test.distribution4:1|d", + "test.distribution5:1|d", + } + + for _, line := range validLines { + err := s.parseStatsdLine(line) + if err != nil { + t.Errorf("Parsing line %s should not have resulted in an error\n", line) + } + } + + s.Gather(acc) + } + + validMeasurementMap := map[string]float64{ + "test_distribution": 1, + "test_distribution2": 2, + "test_distribution3": 3, + "test_distribution4": 1, + "test_distribution5": 1, + } + + // Test parsing when DataDogExtensions and DataDogDistributions aren't enabled + parseMetrics() + for key := range validMeasurementMap { + acc.AssertDoesNotContainMeasurement(t, key) + } + + // Test parsing when DataDogDistributions is enabled but not DataDogExtensions + s.DataDogDistributions = true + parseMetrics() + for key := range validMeasurementMap { + acc.AssertDoesNotContainMeasurement(t, key) + } + + // Test parsing when DataDogExtensions and DataDogDistributions are enabled + s.DataDogExtensions = true + parseMetrics() + for key, value := range validMeasurementMap { + field := map[string]interface{}{ + "value": float64(value), + } + acc.AssertContainsFields(t, key, field) + } +} + func TestParseScientificNotation(t *testing.T) { s := NewTestStatsd() sciNotationLines := []string{ From 71be90d992da6e30e5a18ecf8b9e3acb6138047a Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 16 Feb 2021 15:53:50 -0600 Subject: [PATCH 222/761] Support exclamation mark to create non-matching list in tail plugin (#8613) * Replace exclamation mark with caret * Update README and use table driven tests * Use ReplaceAll instead * Use doublestar package instead to glob filepath * Add license * Fix order of dependencies * Doc improvement, maybe better then str replace? * Forgot to remove nil from test * Use regex instead of library * Revert unnecessary change * Go back to using library replace string twice to handle edge case --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 1 + go.sum | 2 + internal/globpath/globpath.go | 43 ++++---------------- internal/globpath/globpath_test.go | 61 ++++++++++++++++------------ internal/globpath/testdata/log[!.log | 0 plugins/inputs/phpfpm/phpfpm.go | 2 +- plugins/inputs/phpfpm/phpfpm_test.go | 2 +- plugins/inputs/tail/README.md | 3 +- plugins/inputs/tail/tail.go | 3 +- 10 files changed, 52 insertions(+), 66 deletions(-) create mode 100644 internal/globpath/testdata/log[!.log diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 543d59c3e7e1a..f68d85e7bed54 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -36,6 +36,7 @@ following works: - github.com/aws/smithy-go [Apache License 2.0](https://github.com/aws/smithy-go/blob/main/LICENSE) - github.com/benbjohnson/clock [MIT License](https://github.com/benbjohnson/clock/blob/master/LICENSE) - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) +- github.com/bmatcuk/doublestar [MIT License](https://github.com/bmatcuk/doublestar/blob/master/LICENSE) - github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) - github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) diff --git a/go.mod b/go.mod index 15a296424566a..f6fd3df03a936 100644 --- a/go.mod +++ b/go.mod @@ -34,6 +34,7 @@ require ( github.com/aws/smithy-go v1.0.0 github.com/benbjohnson/clock v1.0.3 github.com/bitly/go-hostpool v0.1.0 // indirect + github.com/bmatcuk/doublestar/v3 v3.0.0 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 github.com/caio/go-tdigest v2.3.0+incompatible // indirect github.com/cenkalti/backoff v2.0.0+incompatible // indirect diff --git a/go.sum b/go.sum index 611ab4e49f6b1..5ff6de3a3d04e 100644 --- a/go.sum +++ b/go.sum @@ -142,6 +142,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= +github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= +github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/caio/go-tdigest v2.3.0+incompatible h1:zP6nR0nTSUzlSqqr7F/LhslPlSZX/fZeGmgmwj2cxxY= diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go index d4e7ffd8743bd..f69f5bfb60900 100644 --- a/internal/globpath/globpath.go +++ b/internal/globpath/globpath.go @@ -5,8 +5,8 @@ import ( "path/filepath" "strings" + "github.com/bmatcuk/doublestar/v3" "github.com/gobwas/glob" - "github.com/karrick/godirwalk" ) type GlobPath struct { @@ -45,42 +45,13 @@ func Compile(path string) (*GlobPath, error) { // If it's a static path, returns path. // All returned path will have the host platform separator. func (g *GlobPath) Match() []string { - if !g.hasMeta { - return []string{g.path} - } - if !g.HasSuperMeta { - files, _ := filepath.Glob(g.path) - return files - } - roots, err := filepath.Glob(g.rootGlob) - if err != nil { - return []string{} - } - out := []string{} - walkfn := func(path string, _ *godirwalk.Dirent) error { - if g.g.Match(path) { - out = append(out, path) - } - return nil + // This string replacement is for backwards compatibility support + // The original implemention allowed **.txt but the double star package requires **/**.txt + g.path = strings.ReplaceAll(g.path, "**/**", "**") + g.path = strings.ReplaceAll(g.path, "**", "**/**") - } - for _, root := range roots { - fileinfo, err := os.Stat(root) - if err != nil { - continue - } - if !fileinfo.IsDir() { - if g.MatchString(root) { - out = append(out, root) - } - continue - } - godirwalk.Walk(root, &godirwalk.Options{ - Callback: walkfn, - Unsorted: true, - }) - } - return out + files, _ := doublestar.Glob(g.path) + return files } // MatchString tests the path string against the glob. The path should contain diff --git a/internal/globpath/globpath_test.go b/internal/globpath/globpath_test.go index 92af2d20b88f1..4897ab2f8f879 100644 --- a/internal/globpath/globpath_test.go +++ b/internal/globpath/globpath_test.go @@ -19,32 +19,41 @@ var ( ) func TestCompileAndMatch(t *testing.T) { - // test super asterisk - g1, err := Compile(filepath.Join(testdataDir, "**")) - require.NoError(t, err) - // test single asterisk - g2, err := Compile(filepath.Join(testdataDir, "*.log")) - require.NoError(t, err) - // test no meta characters (file exists) - g3, err := Compile(filepath.Join(testdataDir, "log1.log")) - require.NoError(t, err) - // test file that doesn't exist - g4, err := Compile(filepath.Join(testdataDir, "i_dont_exist.log")) - require.NoError(t, err) - // test super asterisk that doesn't exist - g5, err := Compile(filepath.Join(testdataDir, "dir_doesnt_exist", "**")) - require.NoError(t, err) - matches := g1.Match() - require.Len(t, matches, 6) - matches = g2.Match() - require.Len(t, matches, 2) - matches = g3.Match() - require.Len(t, matches, 1) - matches = g4.Match() - require.Len(t, matches, 1) - matches = g5.Match() - require.Len(t, matches, 0) + type test struct { + path string + matches int + } + + tests := []test{ + //test super asterisk + {path: filepath.Join(testdataDir, "**"), matches: 7}, + // test single asterisk + {path: filepath.Join(testdataDir, "*.log"), matches: 3}, + // test no meta characters (file exists) + {path: filepath.Join(testdataDir, "log1.log"), matches: 1}, + // test file that doesn't exist + {path: filepath.Join(testdataDir, "i_dont_exist.log"), matches: 0}, + // test super asterisk that doesn't exist + {path: filepath.Join(testdataDir, "dir_doesnt_exist", "**"), matches: 0}, + // test exclamation mark creates non-matching list with a range + {path: filepath.Join(testdataDir, "log[!1-2]*"), matches: 1}, + // test caret creates non-matching list + {path: filepath.Join(testdataDir, "log[^1-2]*"), matches: 1}, + // test exclamation mark creates non-matching list without a range + {path: filepath.Join(testdataDir, "log[!2]*"), matches: 2}, + // test exclamation mark creates non-matching list without a range + {path: filepath.Join(testdataDir, "log\\[!*"), matches: 1}, + // test exclamation mark creates non-matching list without a range + {path: filepath.Join(testdataDir, "log\\[^*"), matches: 0}, + } + + for _, tc := range tests { + g, err := Compile(tc.path) + require.Nil(t, err) + matches := g.Match() + require.Len(t, matches, tc.matches) + } } func TestRootGlob(t *testing.T) { @@ -82,7 +91,7 @@ func TestMatch_ErrPermission(t *testing.T) { input string expected []string }{ - {"/root/foo", []string{"/root/foo"}}, + {"/root/foo", []string(nil)}, {"/root/f*", []string(nil)}, } diff --git a/internal/globpath/testdata/log[!.log b/internal/globpath/testdata/log[!.log new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index e0f21176ae21f..87eb4f649a57b 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -299,7 +299,7 @@ func globUnixSocket(url string) ([]string, error) { } paths := glob.Match() if len(paths) == 0 { - return nil, fmt.Errorf("socket doesn't exist %q: %v", pattern, err) + return nil, fmt.Errorf("socket doesn't exist %q", pattern) } addresses := make([]string, 0, len(paths)) diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 6db740df45e66..645782289e008 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -327,7 +327,7 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testi err = acc.GatherError(r.Gather) require.Error(t, err) - assert.Equal(t, `dial unix /tmp/invalid.sock: connect: no such file or directory`, err.Error()) + assert.Equal(t, `socket doesn't exist "/tmp/invalid.sock"`, err.Error()) } diff --git a/plugins/inputs/tail/README.md b/plugins/inputs/tail/README.md index 7f5315038a2ea..5664f8704eec3 100644 --- a/plugins/inputs/tail/README.md +++ b/plugins/inputs/tail/README.md @@ -29,7 +29,8 @@ The plugin expects messages in one of the ## "/var/log/**.log" -> recursively find all .log files in /var/log ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log ## "/var/log/apache.log" -> just tail the apache log file - ## + ## "/var/log/log[!1-2]* -> tail files without 1-2 + ## "/var/log/log[^1-2]* -> identical behavior as above ## See https://github.com/gobwas/glob for more examples ## files = ["/var/mymetrics.out"] diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 557885e1b26a0..54d42e44ada59 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -81,7 +81,8 @@ const sampleConfig = ` ## "/var/log/**.log" -> recursively find all .log files in /var/log ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log ## "/var/log/apache.log" -> just tail the apache log file - ## + ## "/var/log/log[!1-2]* -> tail files without 1-2 + ## "/var/log/log[^1-2]* -> identical behavior as above ## See https://github.com/gobwas/glob for more examples ## files = ["/var/mymetrics.out"] From 5606a9531af174f52ad6e71a7c05ed786dfeda2e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Feb 2021 17:08:22 -0600 Subject: [PATCH 223/761] Bump github.com/gopcua/opcua from 0.1.12 to 0.1.13 (#8744) Bumps [github.com/gopcua/opcua](https://github.com/gopcua/opcua) from 0.1.12 to 0.1.13. - [Release notes](https://github.com/gopcua/opcua/releases) - [Changelog](https://github.com/gopcua/opcua/blob/master/.goreleaser.yml) - [Commits](https://github.com/gopcua/opcua/compare/v0.1.12...v0.1.13) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f6fd3df03a936..ad020ecc1c5e0 100644 --- a/go.mod +++ b/go.mod @@ -70,7 +70,7 @@ require ( github.com/golang/snappy v0.0.1 github.com/google/go-cmp v0.5.4 github.com/google/go-github/v32 v32.1.0 - github.com/gopcua/opcua v0.1.12 + github.com/gopcua/opcua v0.1.13 github.com/gorilla/mux v1.6.2 github.com/gosnmp/gosnmp v1.29.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect diff --git a/go.sum b/go.sum index 5ff6de3a3d04e..05917cdcb92c3 100644 --- a/go.sum +++ b/go.sum @@ -332,8 +332,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gopcua/opcua v0.1.12 h1:TenluCr1CPB1NHjb9tX6yprc0eUmthznXxSc5mnJPBo= -github.com/gopcua/opcua v0.1.12/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= +github.com/gopcua/opcua v0.1.13 h1:UP746MKRFNbv+CQGfrPwgH7rGxOlSGzVu9ieZdcox4E= +github.com/gopcua/opcua v0.1.13/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= From d9736d543fc52e311738e0c8f9b5fa2620b65948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 17 Feb 2021 00:19:50 +0100 Subject: [PATCH 224/761] Revive fixes - part 2 (#8835) * Revive fixes regarding following set of rules: [rule.if-return] [rule.increment-decrement] [rule.var-declaration] [rule.package-comments] [rule.receiver-naming] [rule.unexported-return] --- agent/tick_test.go | 4 +- internal/rotate/file_writer.go | 11 +- internal/snmp/wrapper.go | 20 +- models/running_output.go | 86 +++--- models/running_processor.go | 32 +-- plugins/common/kafka/config.go | 6 +- plugins/common/shim/config.go | 6 +- plugins/inputs/aerospike/aerospike_test.go | 2 +- plugins/inputs/bond/bond.go | 6 +- plugins/inputs/cassandra/cassandra.go | 8 +- plugins/inputs/cloudwatch/cloudwatch.go | 7 +- plugins/inputs/cpu/cpu.go | 24 +- plugins/inputs/disk/disk.go | 18 +- plugins/inputs/diskio/diskio.go | 58 ++-- plugins/inputs/diskio/diskio_linux.go | 10 +- plugins/inputs/diskio/diskio_other.go | 2 +- plugins/inputs/disque/disque.go | 32 +-- plugins/inputs/docker/stats_helpers.go | 2 +- plugins/inputs/elasticsearch/elasticsearch.go | 8 +- plugins/inputs/exec/exec.go | 8 +- plugins/inputs/filecount/filecount.go | 4 +- plugins/inputs/haproxy/haproxy.go | 46 ++-- plugins/inputs/hddtemp/hddtemp.go | 4 +- plugins/inputs/infiniband/infiniband.go | 4 +- plugins/inputs/infiniband/infiniband_linux.go | 4 +- .../influxdb_listener/influxdb_listener.go | 4 +- plugins/inputs/ipmi_sensor/connection.go | 22 +- plugins/inputs/ipset/ipset.go | 20 +- plugins/inputs/ipset/ipset_test.go | 2 +- plugins/inputs/jenkins/client.go | 12 +- .../inputs/linux_sysctl_fs/linux_sysctl_fs.go | 4 +- plugins/inputs/logstash/logstash.go | 4 +- plugins/inputs/lustre2/lustre2.go | 68 ++--- plugins/inputs/lustre2/lustre2_test.go | 50 ++-- plugins/inputs/marklogic/marklogic.go | 8 +- plugins/inputs/mem/memory.go | 14 +- plugins/inputs/mesos/mesos_test.go | 12 +- plugins/inputs/minecraft/client.go | 4 +- plugins/inputs/minecraft/client_test.go | 4 +- .../inputs/minecraft/internal/rcon/rcon.go | 2 +- plugins/inputs/minecraft/minecraft.go | 4 +- plugins/inputs/modbus/modbus.go | 46 +--- plugins/inputs/modbus/modbus_test.go | 4 +- plugins/inputs/multifile/multifile.go | 2 +- plugins/inputs/mysql/mysql.go | 249 +++++++++--------- plugins/inputs/net/net.go | 22 +- plugins/inputs/net/netstat.go | 10 +- plugins/inputs/ntpq/ntpq.go | 12 +- plugins/inputs/passenger/passenger.go | 138 +++++----- plugins/inputs/phpfpm/fcgi.go | 6 +- plugins/inputs/phpfpm/fcgi_client.go | 12 +- plugins/inputs/procstat/procstat.go | 48 ++-- plugins/inputs/procstat/procstat_test.go | 32 ++- plugins/inputs/redis/redis.go | 34 +-- plugins/inputs/snmp/snmp.go | 26 +- plugins/inputs/solr/solr.go | 10 +- plugins/inputs/stackdriver/stackdriver.go | 24 +- plugins/inputs/statsd/statsd.go | 11 +- plugins/inputs/swap/swap.go | 8 +- plugins/inputs/webhooks/webhooks.go | 30 +-- .../mocks/diagnostics_message_subscriber.go | 2 +- .../application_insights/mocks/transmitter.go | 2 +- plugins/outputs/cloudwatch/cloudwatch.go | 7 +- plugins/outputs/cloudwatch/cloudwatch_test.go | 3 +- plugins/outputs/datadog/datadog.go | 6 +- plugins/outputs/dynatrace/dynatrace.go | 8 +- plugins/outputs/file/file.go | 4 +- plugins/outputs/http/http.go | 6 +- plugins/outputs/influxdb_v2/http.go | 3 +- plugins/outputs/influxdb_v2/influxdb.go | 11 +- plugins/outputs/opentsdb/opentsdb.go | 22 +- plugins/outputs/riemann_legacy/riemann.go | 16 +- plugins/parsers/graphite/config.go | 6 +- plugins/parsers/influx/parser.go | 48 ++-- plugins/parsers/wavefront/parser.go | 14 +- plugins/processors/port_name/port_name.go | 44 ++-- plugins/processors/topk/topk.go | 11 +- plugins/serializers/influx/influx.go | 2 +- plugins/serializers/influx/reader.go | 2 +- .../prometheusremotewrite.go | 4 +- selfstat/selfstat.go | 2 +- 81 files changed, 732 insertions(+), 861 deletions(-) diff --git a/agent/tick_test.go b/agent/tick_test.go index 5b8db7e93d4c6..69bf0c2affa39 100644 --- a/agent/tick_test.go +++ b/agent/tick_test.go @@ -10,8 +10,6 @@ import ( "github.com/stretchr/testify/require" ) -var format = "2006-01-02T15:04:05.999Z07:00" - func TestAlignedTicker(t *testing.T) { interval := 10 * time.Second jitter := 0 * time.Second @@ -249,7 +247,7 @@ func simulatedDist(ticker Ticker, clock *clock.Mock) Distribution { for !clock.Now().After(until) { select { case tm := <-ticker.Elapsed(): - dist.Buckets[tm.Second()] += 1 + dist.Buckets[tm.Second()]++ dist.Count++ dist.Waittime += tm.Sub(last).Seconds() last = tm diff --git a/internal/rotate/file_writer.go b/internal/rotate/file_writer.go index a167b7cb78f7e..7cfde02692cd4 100644 --- a/internal/rotate/file_writer.go +++ b/internal/rotate/file_writer.go @@ -123,10 +123,7 @@ func (w *FileWriter) openCurrent() (err error) { w.bytesWritten = fileInfo.Size() } - if err = w.rotateIfNeeded(); err != nil { - return err - } - return nil + return w.rotateIfNeeded() } func (w *FileWriter) rotateIfNeeded() error { @@ -153,11 +150,7 @@ func (w *FileWriter) rotate() (err error) { return err } - if err = w.purgeArchivesIfNeeded(); err != nil { - return err - } - - return nil + return w.purgeArchivesIfNeeded() } func (w *FileWriter) purgeArchivesIfNeeded() (err error) { diff --git a/internal/snmp/wrapper.go b/internal/snmp/wrapper.go index c1dad9fe77f4d..92c3442bb0189 100644 --- a/internal/snmp/wrapper.go +++ b/internal/snmp/wrapper.go @@ -15,27 +15,27 @@ type GosnmpWrapper struct { } // Host returns the value of GoSNMP.Target. -func (gsw GosnmpWrapper) Host() string { - return gsw.Target +func (gs GosnmpWrapper) Host() string { + return gs.Target } // Walk wraps GoSNMP.Walk() or GoSNMP.BulkWalk(), depending on whether the // connection is using SNMPv1 or newer. // Also, if any error is encountered, it will just once reconnect and try again. -func (gsw GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error { +func (gs GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error { var err error // On error, retry once. // Unfortunately we can't distinguish between an error returned by gosnmp, and one returned by the walk function. for i := 0; i < 2; i++ { - if gsw.Version == gosnmp.Version1 { - err = gsw.GoSNMP.Walk(oid, fn) + if gs.Version == gosnmp.Version1 { + err = gs.GoSNMP.Walk(oid, fn) } else { - err = gsw.GoSNMP.BulkWalk(oid, fn) + err = gs.GoSNMP.BulkWalk(oid, fn) } if err == nil { return nil } - if err := gsw.GoSNMP.Connect(); err != nil { + if err := gs.GoSNMP.Connect(); err != nil { return fmt.Errorf("reconnecting: %w", err) } } @@ -44,15 +44,15 @@ func (gsw GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error { // Get wraps GoSNMP.GET(). // If any error is encountered, it will just once reconnect and try again. -func (gsw GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) { +func (gs GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) { var err error var pkt *gosnmp.SnmpPacket for i := 0; i < 2; i++ { - pkt, err = gsw.GoSNMP.Get(oids) + pkt, err = gs.GoSNMP.Get(oids) if err == nil { return pkt, nil } - if err := gsw.GoSNMP.Connect(); err != nil { + if err := gs.GoSNMP.Connect(); err != nil { return nil, fmt.Errorf("reconnecting: %w", err) } } diff --git a/models/running_output.go b/models/running_output.go index 894ae011c986d..fd048df6f0d4d 100644 --- a/models/running_output.go +++ b/models/running_output.go @@ -11,10 +11,10 @@ import ( const ( // Default size of metrics batch size. - DEFAULT_METRIC_BATCH_SIZE = 1000 + DefaultMetricBatchSize = 1000 // Default number of metrics kept. It should be a multiple of batch size. - DEFAULT_METRIC_BUFFER_LIMIT = 10000 + DefaultMetricBufferLimit = 10000 ) // OutputConfig containing name and filter @@ -78,13 +78,13 @@ func NewRunningOutput( bufferLimit = config.MetricBufferLimit } if bufferLimit == 0 { - bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT + bufferLimit = DefaultMetricBufferLimit } if config.MetricBatchSize > 0 { batchSize = config.MetricBatchSize } if batchSize == 0 { - batchSize = DEFAULT_METRIC_BATCH_SIZE + batchSize = DefaultMetricBatchSize } ro := &RunningOutput{ @@ -114,8 +114,8 @@ func (r *RunningOutput) LogName() string { return logName("outputs", r.Config.Name, r.Config.Alias) } -func (ro *RunningOutput) metricFiltered(metric telegraf.Metric) { - ro.MetricsFiltered.Incr(1) +func (r *RunningOutput) metricFiltered(metric telegraf.Metric) { + r.MetricsFiltered.Incr(1) metric.Drop() } @@ -133,45 +133,45 @@ func (r *RunningOutput) Init() error { // AddMetric adds a metric to the output. // // Takes ownership of metric -func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { - if ok := ro.Config.Filter.Select(metric); !ok { - ro.metricFiltered(metric) +func (r *RunningOutput) AddMetric(metric telegraf.Metric) { + if ok := r.Config.Filter.Select(metric); !ok { + r.metricFiltered(metric) return } - ro.Config.Filter.Modify(metric) + r.Config.Filter.Modify(metric) if len(metric.FieldList()) == 0 { - ro.metricFiltered(metric) + r.metricFiltered(metric) return } - if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { - ro.aggMutex.Lock() + if output, ok := r.Output.(telegraf.AggregatingOutput); ok { + r.aggMutex.Lock() output.Add(metric) - ro.aggMutex.Unlock() + r.aggMutex.Unlock() return } - if len(ro.Config.NameOverride) > 0 { - metric.SetName(ro.Config.NameOverride) + if len(r.Config.NameOverride) > 0 { + metric.SetName(r.Config.NameOverride) } - if len(ro.Config.NamePrefix) > 0 { - metric.AddPrefix(ro.Config.NamePrefix) + if len(r.Config.NamePrefix) > 0 { + metric.AddPrefix(r.Config.NamePrefix) } - if len(ro.Config.NameSuffix) > 0 { - metric.AddSuffix(ro.Config.NameSuffix) + if len(r.Config.NameSuffix) > 0 { + metric.AddSuffix(r.Config.NameSuffix) } - dropped := ro.buffer.Add(metric) - atomic.AddInt64(&ro.droppedMetrics, int64(dropped)) + dropped := r.buffer.Add(metric) + atomic.AddInt64(&r.droppedMetrics, int64(dropped)) - count := atomic.AddInt64(&ro.newMetricsCount, 1) - if count == int64(ro.MetricBatchSize) { - atomic.StoreInt64(&ro.newMetricsCount, 0) + count := atomic.AddInt64(&r.newMetricsCount, 1) + if count == int64(r.MetricBatchSize) { + atomic.StoreInt64(&r.newMetricsCount, 0) select { - case ro.BatchReady <- time.Now(): + case r.BatchReady <- time.Now(): default: } } @@ -179,50 +179,50 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { // Write writes all metrics to the output, stopping when all have been sent on // or error. -func (ro *RunningOutput) Write() error { - if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { - ro.aggMutex.Lock() +func (r *RunningOutput) Write() error { + if output, ok := r.Output.(telegraf.AggregatingOutput); ok { + r.aggMutex.Lock() metrics := output.Push() - ro.buffer.Add(metrics...) + r.buffer.Add(metrics...) output.Reset() - ro.aggMutex.Unlock() + r.aggMutex.Unlock() } - atomic.StoreInt64(&ro.newMetricsCount, 0) + atomic.StoreInt64(&r.newMetricsCount, 0) // Only process the metrics in the buffer now. Metrics added while we are // writing will be sent on the next call. - nBuffer := ro.buffer.Len() - nBatches := nBuffer/ro.MetricBatchSize + 1 + nBuffer := r.buffer.Len() + nBatches := nBuffer/r.MetricBatchSize + 1 for i := 0; i < nBatches; i++ { - batch := ro.buffer.Batch(ro.MetricBatchSize) + batch := r.buffer.Batch(r.MetricBatchSize) if len(batch) == 0 { break } - err := ro.write(batch) + err := r.write(batch) if err != nil { - ro.buffer.Reject(batch) + r.buffer.Reject(batch) return err } - ro.buffer.Accept(batch) + r.buffer.Accept(batch) } return nil } // WriteBatch writes a single batch of metrics to the output. -func (ro *RunningOutput) WriteBatch() error { - batch := ro.buffer.Batch(ro.MetricBatchSize) +func (r *RunningOutput) WriteBatch() error { + batch := r.buffer.Batch(r.MetricBatchSize) if len(batch) == 0 { return nil } - err := ro.write(batch) + err := r.write(batch) if err != nil { - ro.buffer.Reject(batch) + r.buffer.Reject(batch) return err } - ro.buffer.Accept(batch) + r.buffer.Accept(batch) return nil } diff --git a/models/running_processor.go b/models/running_processor.go index 1bd2d0f6ed0c7..5201fb27f19c0 100644 --- a/models/running_processor.go +++ b/models/running_processor.go @@ -52,8 +52,8 @@ func (rp *RunningProcessor) metricFiltered(metric telegraf.Metric) { metric.Drop() } -func (r *RunningProcessor) Init() error { - if p, ok := r.Processor.(telegraf.Initializer); ok { +func (rp *RunningProcessor) Init() error { + if p, ok := rp.Processor.(telegraf.Initializer); ok { err := p.Init() if err != nil { return err @@ -62,39 +62,39 @@ func (r *RunningProcessor) Init() error { return nil } -func (r *RunningProcessor) Log() telegraf.Logger { - return r.log +func (rp *RunningProcessor) Log() telegraf.Logger { + return rp.log } -func (r *RunningProcessor) LogName() string { - return logName("processors", r.Config.Name, r.Config.Alias) +func (rp *RunningProcessor) LogName() string { + return logName("processors", rp.Config.Name, rp.Config.Alias) } -func (r *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric { +func (rp *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric { return metric } -func (r *RunningProcessor) Start(acc telegraf.Accumulator) error { - return r.Processor.Start(acc) +func (rp *RunningProcessor) Start(acc telegraf.Accumulator) error { + return rp.Processor.Start(acc) } -func (r *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error { - if ok := r.Config.Filter.Select(m); !ok { +func (rp *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error { + if ok := rp.Config.Filter.Select(m); !ok { // pass downstream acc.AddMetric(m) return nil } - r.Config.Filter.Modify(m) + rp.Config.Filter.Modify(m) if len(m.FieldList()) == 0 { // drop metric - r.metricFiltered(m) + rp.metricFiltered(m) return nil } - return r.Processor.Add(m, acc) + return rp.Processor.Add(m, acc) } -func (r *RunningProcessor) Stop() { - r.Processor.Stop() +func (rp *RunningProcessor) Stop() { + rp.Processor.Stop() } diff --git a/plugins/common/kafka/config.go b/plugins/common/kafka/config.go index f68030403b3c3..1ed01d95b78a5 100644 --- a/plugins/common/kafka/config.go +++ b/plugins/common/kafka/config.go @@ -86,9 +86,5 @@ func (k *Config) SetConfig(config *sarama.Config) error { config.Net.TLS.Enable = true } - if err := k.SetSASLConfig(config); err != nil { - return err - } - - return nil + return k.SetSASLConfig(config) } diff --git a/plugins/common/shim/config.go b/plugins/common/shim/config.go index 439ec90a16283..07888752707da 100644 --- a/plugins/common/shim/config.go +++ b/plugins/common/shim/config.go @@ -34,15 +34,15 @@ func (s *Shim) LoadConfig(filePath *string) error { } if conf.Input != nil { if err = s.AddInput(conf.Input); err != nil { - return fmt.Errorf("Failed to add Input: %w", err) + return fmt.Errorf("failed to add Input: %w", err) } } else if conf.Processor != nil { if err = s.AddStreamingProcessor(conf.Processor); err != nil { - return fmt.Errorf("Failed to add Processor: %w", err) + return fmt.Errorf("failed to add Processor: %w", err) } } else if conf.Output != nil { if err = s.AddOutput(conf.Output); err != nil { - return fmt.Errorf("Failed to add Output: %w", err) + return fmt.Errorf("failed to add Output: %w", err) } } return nil diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index e48e2d7f23de4..efc10b5d99bae 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -83,7 +83,7 @@ func TestSelectNamepsacesIntegration(t *testing.T) { count := 0 for _, p := range acc.Metrics { if p.Measurement == "aerospike_namespace" { - count += 1 + count++ } } assert.Equal(t, count, 1) diff --git a/plugins/inputs/bond/bond.go b/plugins/inputs/bond/bond.go index 01f6f251be776..b71f36e629feb 100644 --- a/plugins/inputs/bond/bond.go +++ b/plugins/inputs/bond/bond.go @@ -157,10 +157,8 @@ func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf. acc.AddFields("bond_slave", fields, tags) } } - if err := scanner.Err(); err != nil { - return err - } - return nil + + return scanner.Err() } // loadPath can be used to read path firstly from config diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index cfb077bd64963..7858d3f4bf56e 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -170,7 +170,7 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) { } } -func (j *Cassandra) SampleConfig() string { +func (c *Cassandra) SampleConfig() string { return ` ## DEPRECATED: The cassandra plugin has been deprecated. Please use the ## jolokia2 plugin instead. @@ -193,18 +193,18 @@ func (j *Cassandra) SampleConfig() string { ` } -func (j *Cassandra) Description() string { +func (c *Cassandra) Description() string { return "Read Cassandra metrics through Jolokia" } -func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) { +func (c *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) { // Create + send request req, err := http.NewRequest("GET", requestUrl.String(), nil) if err != nil { return nil, err } - resp, err := j.jClient.MakeRequest(req) + resp, err := c.jClient.MakeRequest(req) if err != nil { return nil, err } diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index d1f5661a03eba..10f34a41f07ee 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -378,7 +378,7 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) { var token *string var params *cloudwatch.ListMetricsInput - var recentlyActive *string = nil + var recentlyActive *string switch c.RecentlyActive { case "PT3H": @@ -597,11 +597,6 @@ func snakeCase(s string) string { return s } -type dimension struct { - name string - value string -} - // ctod converts cloudwatch dimensions to regular dimensions. func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string { dimensions := map[string]string{} diff --git a/plugins/inputs/cpu/cpu.go b/plugins/inputs/cpu/cpu.go index 3b6c1b8816b38..b1de9a5a06f85 100644 --- a/plugins/inputs/cpu/cpu.go +++ b/plugins/inputs/cpu/cpu.go @@ -28,7 +28,7 @@ func NewCPUStats(ps system.PS) *CPUStats { } } -func (_ *CPUStats) Description() string { +func (c *CPUStats) Description() string { return "Read metrics about cpu usage" } @@ -43,12 +43,12 @@ var sampleConfig = ` report_active = false ` -func (_ *CPUStats) SampleConfig() string { +func (c *CPUStats) SampleConfig() string { return sampleConfig } -func (s *CPUStats) Gather(acc telegraf.Accumulator) error { - times, err := s.ps.CPUTimes(s.PerCPU, s.TotalCPU) +func (c *CPUStats) Gather(acc telegraf.Accumulator) error { + times, err := c.ps.CPUTimes(c.PerCPU, c.TotalCPU) if err != nil { return fmt.Errorf("error getting CPU info: %s", err) } @@ -62,7 +62,7 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { total := totalCpuTime(cts) active := activeCpuTime(cts) - if s.CollectCPUTime { + if c.CollectCPUTime { // Add cpu time metrics fieldsC := map[string]interface{}{ "time_user": cts.User, @@ -76,19 +76,19 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { "time_guest": cts.Guest, "time_guest_nice": cts.GuestNice, } - if s.ReportActive { + if c.ReportActive { fieldsC["time_active"] = activeCpuTime(cts) } acc.AddCounter("cpu", fieldsC, tags, now) } // Add in percentage - if len(s.lastStats) == 0 { + if len(c.lastStats) == 0 { // If it's the 1st gather, can't get CPU Usage stats yet continue } - lastCts, ok := s.lastStats[cts.CPU] + lastCts, ok := c.lastStats[cts.CPU] if !ok { continue } @@ -97,7 +97,7 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { totalDelta := total - lastTotal if totalDelta < 0 { - err = fmt.Errorf("Error: current total CPU time is less than previous total CPU time") + err = fmt.Errorf("current total CPU time is less than previous total CPU time") break } @@ -117,15 +117,15 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { "usage_guest": 100 * (cts.Guest - lastCts.Guest) / totalDelta, "usage_guest_nice": 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta, } - if s.ReportActive { + if c.ReportActive { fieldsG["usage_active"] = 100 * (active - lastActive) / totalDelta } acc.AddGauge("cpu", fieldsG, tags, now) } - s.lastStats = make(map[string]cpu.TimesStat) + c.lastStats = make(map[string]cpu.TimesStat) for _, cts := range times { - s.lastStats[cts.CPU] = cts + c.lastStats[cts.CPU] = cts } return err diff --git a/plugins/inputs/disk/disk.go b/plugins/inputs/disk/disk.go index b2c7e540038bb..0ceea27167389 100644 --- a/plugins/inputs/disk/disk.go +++ b/plugins/inputs/disk/disk.go @@ -19,7 +19,7 @@ type DiskStats struct { IgnoreFS []string `toml:"ignore_fs"` } -func (_ *DiskStats) Description() string { +func (ds *DiskStats) Description() string { return "Read metrics about disk usage by mount point" } @@ -32,17 +32,17 @@ var diskSampleConfig = ` ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] ` -func (_ *DiskStats) SampleConfig() string { +func (ds *DiskStats) SampleConfig() string { return diskSampleConfig } -func (s *DiskStats) Gather(acc telegraf.Accumulator) error { +func (ds *DiskStats) Gather(acc telegraf.Accumulator) error { // Legacy support: - if len(s.Mountpoints) != 0 { - s.MountPoints = s.Mountpoints + if len(ds.Mountpoints) != 0 { + ds.MountPoints = ds.Mountpoints } - disks, partitions, err := s.ps.DiskUsage(s.MountPoints, s.IgnoreFS) + disks, partitions, err := ds.ps.DiskUsage(ds.MountPoints, ds.IgnoreFS) if err != nil { return fmt.Errorf("error getting disk usage info: %s", err) } @@ -59,9 +59,9 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error { "fstype": du.Fstype, "mode": mountOpts.Mode(), } - var used_percent float64 + var usedPercent float64 if du.Used+du.Free > 0 { - used_percent = float64(du.Used) / + usedPercent = float64(du.Used) / (float64(du.Used) + float64(du.Free)) * 100 } @@ -69,7 +69,7 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error { "total": du.Total, "free": du.Free, "used": du.Used, - "used_percent": used_percent, + "used_percent": usedPercent, "inodes_total": du.InodesTotal, "inodes_free": du.InodesFree, "inodes_used": du.InodesUsed, diff --git a/plugins/inputs/diskio/diskio.go b/plugins/inputs/diskio/diskio.go index 9c1e20ebdc5de..5250b704a5370 100644 --- a/plugins/inputs/diskio/diskio.go +++ b/plugins/inputs/diskio/diskio.go @@ -30,7 +30,7 @@ type DiskIO struct { initialized bool } -func (_ *DiskIO) Description() string { +func (d *DiskIO) Description() string { return "Read metrics about disk IO by device" } @@ -62,7 +62,7 @@ var diskIOsampleConfig = ` # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] ` -func (_ *DiskIO) SampleConfig() string { +func (d *DiskIO) SampleConfig() string { return diskIOsampleConfig } @@ -71,34 +71,34 @@ func hasMeta(s string) bool { return strings.IndexAny(s, "*?[") >= 0 } -func (s *DiskIO) init() error { - for _, device := range s.Devices { +func (d *DiskIO) init() error { + for _, device := range d.Devices { if hasMeta(device) { - filter, err := filter.Compile(s.Devices) + filter, err := filter.Compile(d.Devices) if err != nil { return fmt.Errorf("error compiling device pattern: %s", err.Error()) } - s.deviceFilter = filter + d.deviceFilter = filter } } - s.initialized = true + d.initialized = true return nil } -func (s *DiskIO) Gather(acc telegraf.Accumulator) error { - if !s.initialized { - err := s.init() +func (d *DiskIO) Gather(acc telegraf.Accumulator) error { + if !d.initialized { + err := d.init() if err != nil { return err } } devices := []string{} - if s.deviceFilter == nil { - devices = s.Devices + if d.deviceFilter == nil { + devices = d.Devices } - diskio, err := s.ps.DiskIO(devices) + diskio, err := d.ps.DiskIO(devices) if err != nil { return fmt.Errorf("error getting disk io info: %s", err.Error()) } @@ -106,17 +106,17 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { for _, io := range diskio { match := false - if s.deviceFilter != nil && s.deviceFilter.Match(io.Name) { + if d.deviceFilter != nil && d.deviceFilter.Match(io.Name) { match = true } tags := map[string]string{} var devLinks []string - tags["name"], devLinks = s.diskName(io.Name) + tags["name"], devLinks = d.diskName(io.Name) - if s.deviceFilter != nil && !match { + if d.deviceFilter != nil && !match { for _, devLink := range devLinks { - if s.deviceFilter.Match(devLink) { + if d.deviceFilter.Match(devLink) { match = true break } @@ -126,11 +126,11 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { } } - for t, v := range s.diskTags(io.Name) { + for t, v := range d.diskTags(io.Name) { tags[t] = v } - if !s.SkipSerialNumber { + if !d.SkipSerialNumber { if len(io.SerialNumber) != 0 { tags["serial"] = io.SerialNumber } else { @@ -157,23 +157,23 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { return nil } -func (s *DiskIO) diskName(devName string) (string, []string) { - di, err := s.diskInfo(devName) +func (d *DiskIO) diskName(devName string) (string, []string) { + di, err := d.diskInfo(devName) devLinks := strings.Split(di["DEVLINKS"], " ") for i, devLink := range devLinks { devLinks[i] = strings.TrimPrefix(devLink, "/dev/") } - if len(s.NameTemplates) == 0 { + if len(d.NameTemplates) == 0 { return devName, devLinks } if err != nil { - s.Log.Warnf("Error gathering disk info: %s", err) + d.Log.Warnf("Error gathering disk info: %s", err) return devName, devLinks } - for _, nt := range s.NameTemplates { + for _, nt := range d.NameTemplates { miss := false name := varRegex.ReplaceAllStringFunc(nt, func(sub string) string { sub = sub[1:] // strip leading '$' @@ -195,19 +195,19 @@ func (s *DiskIO) diskName(devName string) (string, []string) { return devName, devLinks } -func (s *DiskIO) diskTags(devName string) map[string]string { - if len(s.DeviceTags) == 0 { +func (d *DiskIO) diskTags(devName string) map[string]string { + if len(d.DeviceTags) == 0 { return nil } - di, err := s.diskInfo(devName) + di, err := d.diskInfo(devName) if err != nil { - s.Log.Warnf("Error gathering disk info: %s", err) + d.Log.Warnf("Error gathering disk info: %s", err) return nil } tags := map[string]string{} - for _, dt := range s.DeviceTags { + for _, dt := range d.DeviceTags { if v, ok := di[dt]; ok { tags[dt] = v } diff --git a/plugins/inputs/diskio/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go index f2499ca17c1c2..59822a2778a9b 100644 --- a/plugins/inputs/diskio/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -18,7 +18,7 @@ type diskInfoCache struct { var udevPath = "/run/udev/data" -func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { +func (d *DiskIO) diskInfo(devName string) (map[string]string, error) { var err error var stat unix.Stat_t @@ -28,10 +28,10 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { return nil, err } - if s.infoCache == nil { - s.infoCache = map[string]diskInfoCache{} + if d.infoCache == nil { + d.infoCache = map[string]diskInfoCache{} } - ic, ok := s.infoCache[devName] + ic, ok := d.infoCache[devName] if ok && stat.Mtim.Nano() == ic.modifiedAt { return ic.values, nil @@ -43,7 +43,7 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { di := map[string]string{} - s.infoCache[devName] = diskInfoCache{ + d.infoCache[devName] = diskInfoCache{ modifiedAt: stat.Mtim.Nano(), udevDataPath: udevDataPath, values: di, diff --git a/plugins/inputs/diskio/diskio_other.go b/plugins/inputs/diskio/diskio_other.go index 07fb8c3b87faa..1c883e904f92c 100644 --- a/plugins/inputs/diskio/diskio_other.go +++ b/plugins/inputs/diskio/diskio_other.go @@ -4,6 +4,6 @@ package diskio type diskInfoCache struct{} -func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { +func (d *DiskIO) diskInfo(devName string) (map[string]string, error) { return nil, nil } diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index 6585ab88eb587..021e865bfe4df 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -32,11 +32,11 @@ var sampleConfig = ` var defaultTimeout = 5 * time.Second -func (r *Disque) SampleConfig() string { +func (d *Disque) SampleConfig() string { return sampleConfig } -func (r *Disque) Description() string { +func (d *Disque) Description() string { return "Read metrics from one or many disque servers" } @@ -64,21 +64,21 @@ var ErrProtocolError = errors.New("disque protocol error") // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). -func (g *Disque) Gather(acc telegraf.Accumulator) error { - if len(g.Servers) == 0 { +func (d *Disque) Gather(acc telegraf.Accumulator) error { + if len(d.Servers) == 0 { url := &url.URL{ Host: ":7711", } - g.gatherServer(url, acc) + d.gatherServer(url, acc) return nil } var wg sync.WaitGroup - for _, serv := range g.Servers { + for _, serv := range d.Servers { u, err := url.Parse(serv) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err)) + acc.AddError(fmt.Errorf("unable to parse to address '%s': %s", serv, err)) continue } else if u.Scheme == "" { // fallback to simple string based address (i.e. "10.0.0.1:10000") @@ -89,7 +89,7 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(serv string) { defer wg.Done() - acc.AddError(g.gatherServer(u, acc)) + acc.AddError(d.gatherServer(u, acc)) }(serv) } @@ -100,8 +100,8 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error { const defaultPort = "7711" -func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { - if g.c == nil { +func (d *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { + if d.c == nil { _, _, err := net.SplitHostPort(addr.Host) if err != nil { @@ -110,7 +110,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout) if err != nil { - return fmt.Errorf("Unable to connect to disque server '%s': %s", addr.Host, err) + return fmt.Errorf("unable to connect to disque server '%s': %s", addr.Host, err) } if addr.User != nil { @@ -130,15 +130,15 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { } } - g.c = c + d.c = c } // Extend connection - g.c.SetDeadline(time.Now().Add(defaultTimeout)) + d.c.SetDeadline(time.Now().Add(defaultTimeout)) - g.c.Write([]byte("info\r\n")) + d.c.Write([]byte("info\r\n")) - r := bufio.NewReader(g.c) + r := bufio.NewReader(d.c) line, err := r.ReadString('\n') if err != nil { @@ -176,7 +176,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { parts := strings.SplitN(line, ":", 2) - name := string(parts[0]) + name := parts[0] metric, ok := Tracking[name] if !ok { diff --git a/plugins/inputs/docker/stats_helpers.go b/plugins/inputs/docker/stats_helpers.go index 93ea2f2196baf..982f131d6d8d3 100644 --- a/plugins/inputs/docker/stats_helpers.go +++ b/plugins/inputs/docker/stats_helpers.go @@ -1,4 +1,4 @@ -// Helper functions copied from +// Package docker contains few helper functions copied from // https://github.com/docker/cli/blob/master/cli/command/container/stats_helpers.go package docker diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 75a04e49c28bb..33755c5ce5a28 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -664,7 +664,7 @@ func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now shardTags := map[string]string{ "index_name": name, "node_id": routingNode, - "shard_name": string(shardNumber), + "shard_name": shardNumber, "type": shardType, } @@ -741,11 +741,7 @@ func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error { r.StatusCode, http.StatusOK) } - if err = json.NewDecoder(r.Body).Decode(v); err != nil { - return err - } - - return nil + return json.NewDecoder(r.Body).Decode(v) } func (e *Elasticsearch) compileIndexMatchers() (map[string]filter.Filter, error) { diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index cb4420b0f246f..adb16c953ed8c 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -69,12 +69,12 @@ func (c CommandRunner) Run( command string, timeout time.Duration, ) ([]byte, []byte, error) { - split_cmd, err := shellquote.Split(command) - if err != nil || len(split_cmd) == 0 { + splitCmd, err := shellquote.Split(command) + if err != nil || len(splitCmd) == 0 { return nil, nil, fmt.Errorf("exec: unable to parse command, %s", err) } - cmd := exec.Command(split_cmd[0], split_cmd[1:]...) + cmd := exec.Command(splitCmd[0], splitCmd[1:]...) var ( out bytes.Buffer @@ -123,7 +123,7 @@ func removeCarriageReturns(b bytes.Buffer) bytes.Buffer { byt, er := b.ReadBytes(0x0D) end := len(byt) if nil == er { - end -= 1 + end-- } if nil != byt { buf.Write(byt[:end]) diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index 30815541c8448..c2b572c12d52f 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -65,11 +65,11 @@ type FileCount struct { Log telegraf.Logger } -func (_ *FileCount) Description() string { +func (fc *FileCount) Description() string { return "Count files in a directory" } -func (_ *FileCount) SampleConfig() string { return sampleConfig } +func (fc *FileCount) SampleConfig() string { return sampleConfig } type fileFilterFunc func(os.FileInfo) (bool, error) diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 0abc90dbbf3f8..73cf9d3345dc1 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -61,24 +61,24 @@ var sampleConfig = ` # insecure_skip_verify = false ` -func (r *haproxy) SampleConfig() string { +func (h *haproxy) SampleConfig() string { return sampleConfig } -func (r *haproxy) Description() string { +func (h *haproxy) Description() string { return "Read metrics of haproxy, via socket or csv stats page" } // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). -func (g *haproxy) Gather(acc telegraf.Accumulator) error { - if len(g.Servers) == 0 { - return g.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc) +func (h *haproxy) Gather(acc telegraf.Accumulator) error { + if len(h.Servers) == 0 { + return h.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc) } - endpoints := make([]string, 0, len(g.Servers)) + endpoints := make([]string, 0, len(h.Servers)) - for _, endpoint := range g.Servers { + for _, endpoint := range h.Servers { if strings.HasPrefix(endpoint, "http") { endpoints = append(endpoints, endpoint) @@ -107,7 +107,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error { for _, server := range endpoints { go func(serv string) { defer wg.Done() - if err := g.gatherServer(serv, acc); err != nil { + if err := h.gatherServer(serv, acc); err != nil { acc.AddError(err) } }(server) @@ -117,7 +117,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error { return nil } -func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error { +func (h *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error { socketPath := getSocketAddr(addr) c, err := net.Dial("unix", socketPath) @@ -132,28 +132,28 @@ func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) erro return fmt.Errorf("could not write to socket '%s': %s", addr, errw) } - return g.importCsvResult(c, acc, socketPath) + return h.importCsvResult(c, acc, socketPath) } -func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { +func (h *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { if !strings.HasPrefix(addr, "http") { - return g.gatherServerSocket(addr, acc) + return h.gatherServerSocket(addr, acc) } - if g.client == nil { - tlsCfg, err := g.ClientConfig.TLSConfig() + if h.client == nil { + tlsCfg, err := h.ClientConfig.TLSConfig() if err != nil { return err } tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } - g.client = client + h.client = client } if !strings.HasSuffix(addr, ";csv") { @@ -176,11 +176,11 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { addr = u.String() } - if g.Username != "" || g.Password != "" { - req.SetBasicAuth(g.Username, g.Password) + if h.Username != "" || h.Password != "" { + req.SetBasicAuth(h.Username, h.Password) } - res, err := g.client.Do(req) + res, err := h.client.Do(req) if err != nil { return fmt.Errorf("unable to connect to haproxy server '%s': %s", addr, err) } @@ -190,7 +190,7 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { return fmt.Errorf("unable to get valid stat result from '%s', http response code : %d", addr, res.StatusCode) } - if err := g.importCsvResult(res.Body, acc, u.Host); err != nil { + if err := h.importCsvResult(res.Body, acc, u.Host); err != nil { return fmt.Errorf("unable to parse stat result from '%s': %s", addr, err) } @@ -222,7 +222,7 @@ var fieldRenames = map[string]string{ "hrsp_other": "http_response.other", } -func (g *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error { +func (h *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error { csvr := csv.NewReader(r) now := time.Now() @@ -259,7 +259,7 @@ func (g *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host st colName := headers[i] fieldName := colName - if !g.KeepFieldNames { + if !h.KeepFieldNames { if fieldRename, ok := fieldRenames[colName]; ok { fieldName = fieldRename } diff --git a/plugins/inputs/hddtemp/hddtemp.go b/plugins/inputs/hddtemp/hddtemp.go index 0f084ac219bff..2e6d3a53c00cd 100644 --- a/plugins/inputs/hddtemp/hddtemp.go +++ b/plugins/inputs/hddtemp/hddtemp.go @@ -20,7 +20,7 @@ type Fetcher interface { Fetch(address string) ([]gohddtemp.Disk, error) } -func (_ *HDDTemp) Description() string { +func (h *HDDTemp) Description() string { return "Monitor disks' temperatures using hddtemp" } @@ -36,7 +36,7 @@ var hddtempSampleConfig = ` # devices = ["sda", "*"] ` -func (_ *HDDTemp) SampleConfig() string { +func (h *HDDTemp) SampleConfig() string { return hddtempSampleConfig } diff --git a/plugins/inputs/infiniband/infiniband.go b/plugins/inputs/infiniband/infiniband.go index 65e1d6c712998..8a99bb0e469b6 100644 --- a/plugins/inputs/infiniband/infiniband.go +++ b/plugins/inputs/infiniband/infiniband.go @@ -13,10 +13,10 @@ type Infiniband struct { // Sample configuration for plugin var InfinibandConfig = `` -func (_ *Infiniband) SampleConfig() string { +func (i *Infiniband) SampleConfig() string { return InfinibandConfig } -func (_ *Infiniband) Description() string { +func (i *Infiniband) Description() string { return "Gets counters from all InfiniBand cards and ports installed" } diff --git a/plugins/inputs/infiniband/infiniband_linux.go b/plugins/inputs/infiniband/infiniband_linux.go index 48cd8a428900d..224d35bc2fce0 100644 --- a/plugins/inputs/infiniband/infiniband_linux.go +++ b/plugins/inputs/infiniband/infiniband_linux.go @@ -11,8 +11,7 @@ import ( ) // Gather statistics from our infiniband cards -func (_ *Infiniband) Gather(acc telegraf.Accumulator) error { - +func (i *Infiniband) Gather(acc telegraf.Accumulator) error { rdmaDevices := rdmamap.GetRdmaDeviceList() if len(rdmaDevices) == 0 { @@ -41,7 +40,6 @@ func (_ *Infiniband) Gather(acc telegraf.Accumulator) error { // Add the statistics to the accumulator func addStats(dev string, port string, stats []rdmamap.RdmaStatEntry, acc telegraf.Accumulator) { - // Allow users to filter by card and port tags := map[string]string{"device": dev, "port": port} fields := make(map[string]interface{}) diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go index 07d27ebbd934d..8d87b38f83d65 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -288,7 +288,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { var m telegraf.Metric var err error var parseErrorCount int - var lastPos int = 0 + var lastPos int var firstParseErrorStr string for { select { @@ -306,7 +306,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { // Continue parsing metrics even if some are malformed if parseErr, ok := err.(*influx.ParseError); ok { - parseErrorCount += 1 + parseErrorCount++ errStr := parseErr.Error() if firstParseErrorStr == "" { firstParseErrorStr = errStr diff --git a/plugins/inputs/ipmi_sensor/connection.go b/plugins/inputs/ipmi_sensor/connection.go index 69ae04b78cf9f..7a1fb71df359a 100644 --- a/plugins/inputs/ipmi_sensor/connection.go +++ b/plugins/inputs/ipmi_sensor/connection.go @@ -47,27 +47,27 @@ func NewConnection(server, privilege, hexKey string) *Connection { return conn } -func (t *Connection) options() []string { - intf := t.Interface +func (c *Connection) options() []string { + intf := c.Interface if intf == "" { intf = "lan" } options := []string{ - "-H", t.Hostname, - "-U", t.Username, - "-P", t.Password, + "-H", c.Hostname, + "-U", c.Username, + "-P", c.Password, "-I", intf, } - if t.HexKey != "" { - options = append(options, "-y", t.HexKey) + if c.HexKey != "" { + options = append(options, "-y", c.HexKey) } - if t.Port != 0 { - options = append(options, "-p", strconv.Itoa(t.Port)) + if c.Port != 0 { + options = append(options, "-p", strconv.Itoa(c.Port)) } - if t.Privilege != "" { - options = append(options, "-L", t.Privilege) + if c.Privilege != "" { + options = append(options, "-L", c.Privilege) } return options } diff --git a/plugins/inputs/ipset/ipset.go b/plugins/inputs/ipset/ipset.go index c459ebf4cfe26..e9f3ccabe1241 100644 --- a/plugins/inputs/ipset/ipset.go +++ b/plugins/inputs/ipset/ipset.go @@ -29,12 +29,12 @@ const measurement = "ipset" var defaultTimeout = internal.Duration{Duration: time.Second} // Description returns a short description of the plugin -func (ipset *Ipset) Description() string { +func (i *Ipset) Description() string { return "Gather packets and bytes counters from Linux ipsets" } // SampleConfig returns sample configuration options. -func (ipset *Ipset) SampleConfig() string { +func (i *Ipset) SampleConfig() string { return ` ## By default, we only show sets which have already matched at least 1 packet. ## set include_unmatched_sets = true to gather them all. @@ -46,8 +46,8 @@ func (ipset *Ipset) SampleConfig() string { ` } -func (ips *Ipset) Gather(acc telegraf.Accumulator) error { - out, e := ips.lister(ips.Timeout, ips.UseSudo) +func (i *Ipset) Gather(acc telegraf.Accumulator) error { + out, e := i.lister(i.Timeout, i.UseSudo) if e != nil { acc.AddError(e) } @@ -64,25 +64,25 @@ func (ips *Ipset) Gather(acc telegraf.Accumulator) error { data := strings.Fields(line) if len(data) < 7 { - acc.AddError(fmt.Errorf("Error parsing line (expected at least 7 fields): %s", line)) + acc.AddError(fmt.Errorf("error parsing line (expected at least 7 fields): %s", line)) continue } - if data[0] == "add" && (data[4] != "0" || ips.IncludeUnmatchedSets) { + if data[0] == "add" && (data[4] != "0" || i.IncludeUnmatchedSets) { tags := map[string]string{ "set": data[1], "rule": data[2], } - packets_total, err := strconv.ParseUint(data[4], 10, 64) + packetsTotal, err := strconv.ParseUint(data[4], 10, 64) if err != nil { acc.AddError(err) } - bytes_total, err := strconv.ParseUint(data[6], 10, 64) + bytesTotal, err := strconv.ParseUint(data[6], 10, 64) if err != nil { acc.AddError(err) } fields := map[string]interface{}{ - "packets_total": packets_total, - "bytes_total": bytes_total, + "packets_total": packetsTotal, + "bytes_total": bytesTotal, } acc.AddCounter(measurement, fields, tags) } diff --git a/plugins/inputs/ipset/ipset_test.go b/plugins/inputs/ipset/ipset_test.go index 31a9f3cfc113d..0480debe1bb4f 100644 --- a/plugins/inputs/ipset/ipset_test.go +++ b/plugins/inputs/ipset/ipset_test.go @@ -40,7 +40,7 @@ func TestIpset(t *testing.T) { value: `create hash:net family inet hashsize 1024 maxelem 65536 counters add myset 4.5.6.7 packets 123 bytes `, - err: fmt.Errorf("Error parsing line (expected at least 7 fields): \t\t\t\tadd myset 4.5.6.7 packets 123 bytes"), + err: fmt.Errorf("error parsing line (expected at least 7 fields): \t\t\t\tadd myset 4.5.6.7 packets 123 bytes"), }, { name: "Non-empty sets, counters, no comment", diff --git a/plugins/inputs/jenkins/client.go b/plugins/inputs/jenkins/client.go index 6c0a125aaaf56..9cc8e073bfa48 100644 --- a/plugins/inputs/jenkins/client.go +++ b/plugins/inputs/jenkins/client.go @@ -47,11 +47,9 @@ func (c *client) init() error { break } } + // first api fetch - if err := c.doGet(context.Background(), jobPath, new(jobResponse)); err != nil { - return err - } - return nil + return c.doGet(context.Background(), jobPath, new(jobResponse)) } func (c *client) doGet(ctx context.Context, url string, v interface{}) error { @@ -97,10 +95,8 @@ func (c *client) doGet(ctx context.Context, url string, v interface{}) error { Title: resp.Status, } } - if err = json.NewDecoder(resp.Body).Decode(v); err != nil { - return err - } - return nil + + return json.NewDecoder(resp.Body).Decode(v) } type APIError struct { diff --git a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go index ed24963404fc2..adb111836c683 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go @@ -20,10 +20,10 @@ type SysctlFS struct { var sysctlFSDescription = `Provides Linux sysctl fs metrics` var sysctlFSSampleConfig = `` -func (_ SysctlFS) Description() string { +func (sfs SysctlFS) Description() string { return sysctlFSDescription } -func (_ SysctlFS) SampleConfig() string { +func (sfs SysctlFS) SampleConfig() string { return sysctlFSSampleConfig } diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index e360ba032ff35..65f326b76968c 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -149,8 +149,8 @@ const processStats = "/_node/stats/process" const pipelinesStats = "/_node/stats/pipelines" const pipelineStats = "/_node/stats/pipeline" -func (i *Logstash) Init() error { - err := choice.CheckSlice(i.Collect, []string{"pipelines", "process", "jvm"}) +func (logstash *Logstash) Init() error { + err := choice.CheckSlice(logstash.Collect, []string{"pipelines", "process", "jvm"}) if err != nil { return fmt.Errorf(`cannot verify "collect" setting: %v`, err) } diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 06c70de78d51d..ecaafb50f86b3 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -1,15 +1,9 @@ // +build !windows -// lustre2 doesn't aim for Windows - -/* -Lustre 2.x Telegraf plugin - -Lustre (http://lustre.org/) is an open-source, parallel file system -for HPC environments. It stores statistics about its activity in -/proc - -*/ +// Package lustre2 (doesn't aim for Windows) +// Lustre 2.x Telegraf plugin +// Lustre (http://lustre.org/) is an open-source, parallel file system +// for HPC environments. It stores statistics about its activity in /proc package lustre2 import ( @@ -30,8 +24,8 @@ type tags struct { // Lustre proc files can change between versions, so we want to future-proof // by letting people choose what to look at. type Lustre2 struct { - Ost_procfiles []string `toml:"ost_procfiles"` - Mds_procfiles []string `toml:"mds_procfiles"` + OstProcfiles []string `toml:"ost_procfiles"` + MdsProcfiles []string `toml:"mds_procfiles"` // allFields maps and OST name to the metric fields associated with that OST allFields map[tags]map[string]interface{} @@ -63,7 +57,7 @@ type mapping struct { tag string // Additional tag to add for this metric } -var wanted_ost_fields = []*mapping{ +var wantedOstFields = []*mapping{ { inProc: "write_bytes", field: 6, @@ -95,7 +89,7 @@ var wanted_ost_fields = []*mapping{ }, } -var wanted_ost_jobstats_fields = []*mapping{ +var wantedOstJobstatsFields = []*mapping{ { // The read line has several fields, so we need to differentiate what they are inProc: "read", field: 3, @@ -228,7 +222,7 @@ var wanted_ost_jobstats_fields = []*mapping{ }, } -var wanted_mds_fields = []*mapping{ +var wantedMdsFields = []*mapping{ { inProc: "open", }, @@ -279,7 +273,7 @@ var wanted_mds_fields = []*mapping{ }, } -var wanted_mdt_jobstats_fields = []*mapping{ +var wantedMdtJobstatsFields = []*mapping{ { inProc: "open", field: 3, @@ -362,7 +356,7 @@ var wanted_mdt_jobstats_fields = []*mapping{ }, } -func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, acc telegraf.Accumulator) error { +func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping) error { files, err := filepath.Glob(fileglob) if err != nil { return err @@ -386,7 +380,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a } jobs := strings.Split(string(wholeFile), "- ") for _, job := range jobs { - lines := strings.Split(string(job), "\n") + lines := strings.Split(job, "\n") jobid := "" // figure out if the data should be tagged with job_id here @@ -422,7 +416,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a if wantedField == 0 { wantedField = 1 } - data, err = strconv.ParseUint(strings.TrimSuffix((parts[wantedField]), ","), 10, 64) + data, err = strconv.ParseUint(strings.TrimSuffix(parts[wantedField], ","), 10, 64) if err != nil { return err } @@ -454,66 +448,60 @@ func (l *Lustre2) Gather(acc telegraf.Accumulator) error { //l.allFields = make(map[string]map[string]interface{}) l.allFields = make(map[tags]map[string]interface{}) - if len(l.Ost_procfiles) == 0 { + if len(l.OstProcfiles) == 0 { // read/write bytes are in obdfilter//stats - err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", - wanted_ost_fields, acc) + err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", wantedOstFields) if err != nil { return err } // cache counters are in osd-ldiskfs//stats - err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", - wanted_ost_fields, acc) + err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", wantedOstFields) if err != nil { return err } // per job statistics are in obdfilter//job_stats - err = l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/job_stats", - wanted_ost_jobstats_fields, acc) + err = l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/job_stats", wantedOstJobstatsFields) if err != nil { return err } } - if len(l.Mds_procfiles) == 0 { + if len(l.MdsProcfiles) == 0 { // Metadata server stats - err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", - wanted_mds_fields, acc) + err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", wantedMdsFields) if err != nil { return err } // Metadata target job stats - err = l.GetLustreProcStats("/proc/fs/lustre/mdt/*/job_stats", - wanted_mdt_jobstats_fields, acc) + err = l.GetLustreProcStats("/proc/fs/lustre/mdt/*/job_stats", wantedMdtJobstatsFields) if err != nil { return err } } - for _, procfile := range l.Ost_procfiles { - ost_fields := wanted_ost_fields + for _, procfile := range l.OstProcfiles { + ostFields := wantedOstFields if strings.HasSuffix(procfile, "job_stats") { - ost_fields = wanted_ost_jobstats_fields + ostFields = wantedOstJobstatsFields } - err := l.GetLustreProcStats(procfile, ost_fields, acc) + err := l.GetLustreProcStats(procfile, ostFields) if err != nil { return err } } - for _, procfile := range l.Mds_procfiles { - mdt_fields := wanted_mds_fields + for _, procfile := range l.MdsProcfiles { + mdtFields := wantedMdsFields if strings.HasSuffix(procfile, "job_stats") { - mdt_fields = wanted_mdt_jobstats_fields + mdtFields = wantedMdtJobstatsFields } - err := l.GetLustreProcStats(procfile, mdt_fields, acc) + err := l.GetLustreProcStats(procfile, mdtFields) if err != nil { return err } } for tgs, fields := range l.allFields { - tags := map[string]string{ "name": tgs.name, } diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 7741c83ac530a..9614eb0597f38 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -135,33 +135,33 @@ const mdtJobStatsContents = `job_stats: func TestLustre2GeneratesMetrics(t *testing.T) { tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" - ost_name := "OST0001" + ostName := "OST0001" mdtdir := tempdir + "/mdt/" - err := os.MkdirAll(mdtdir+"/"+ost_name, 0755) + err := os.MkdirAll(mdtdir+"/"+ostName, 0755) require.NoError(t, err) osddir := tempdir + "/osd-ldiskfs/" - err = os.MkdirAll(osddir+"/"+ost_name, 0755) + err = os.MkdirAll(osddir+"/"+ostName, 0755) require.NoError(t, err) obddir := tempdir + "/obdfilter/" - err = os.MkdirAll(obddir+"/"+ost_name, 0755) + err = os.MkdirAll(obddir+"/"+ostName, 0755) require.NoError(t, err) - err = ioutil.WriteFile(mdtdir+"/"+ost_name+"/md_stats", []byte(mdtProcContents), 0644) + err = ioutil.WriteFile(mdtdir+"/"+ostName+"/md_stats", []byte(mdtProcContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(osddir+"/"+ost_name+"/stats", []byte(osdldiskfsProcContents), 0644) + err = ioutil.WriteFile(osddir+"/"+ostName+"/stats", []byte(osdldiskfsProcContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(obddir+"/"+ost_name+"/stats", []byte(obdfilterProcContents), 0644) + err = ioutil.WriteFile(obddir+"/"+ostName+"/stats", []byte(obdfilterProcContents), 0644) require.NoError(t, err) // Begin by testing standard Lustre stats m := &Lustre2{ - Ost_procfiles: []string{obddir + "/*/stats", osddir + "/*/stats"}, - Mds_procfiles: []string{mdtdir + "/*/md_stats"}, + OstProcfiles: []string{obddir + "/*/stats", osddir + "/*/stats"}, + MdsProcfiles: []string{mdtdir + "/*/md_stats"}, } var acc testutil.Accumulator @@ -170,7 +170,7 @@ func TestLustre2GeneratesMetrics(t *testing.T) { require.NoError(t, err) tags := map[string]string{ - "name": ost_name, + "name": ostName, } fields := map[string]interface{}{ @@ -208,27 +208,27 @@ func TestLustre2GeneratesMetrics(t *testing.T) { func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" - ost_name := "OST0001" - job_names := []string{"cluster-testjob1", "testjob2"} + ostName := "OST0001" + jobNames := []string{"cluster-testjob1", "testjob2"} mdtdir := tempdir + "/mdt/" - err := os.MkdirAll(mdtdir+"/"+ost_name, 0755) + err := os.MkdirAll(mdtdir+"/"+ostName, 0755) require.NoError(t, err) obddir := tempdir + "/obdfilter/" - err = os.MkdirAll(obddir+"/"+ost_name, 0755) + err = os.MkdirAll(obddir+"/"+ostName, 0755) require.NoError(t, err) - err = ioutil.WriteFile(mdtdir+"/"+ost_name+"/job_stats", []byte(mdtJobStatsContents), 0644) + err = ioutil.WriteFile(mdtdir+"/"+ostName+"/job_stats", []byte(mdtJobStatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(obddir+"/"+ost_name+"/job_stats", []byte(obdfilterJobStatsContents), 0644) + err = ioutil.WriteFile(obddir+"/"+ostName+"/job_stats", []byte(obdfilterJobStatsContents), 0644) require.NoError(t, err) // Test Lustre Jobstats m := &Lustre2{ - Ost_procfiles: []string{obddir + "/*/job_stats"}, - Mds_procfiles: []string{mdtdir + "/*/job_stats"}, + OstProcfiles: []string{obddir + "/*/job_stats"}, + MdsProcfiles: []string{mdtdir + "/*/job_stats"}, } var acc testutil.Accumulator @@ -240,12 +240,12 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { // and even further make this dependent on summing per OST tags := []map[string]string{ { - "name": ost_name, - "jobid": job_names[0], + "name": ostName, + "jobid": jobNames[0], }, { - "name": ost_name, - "jobid": job_names[1], + "name": ostName, + "jobid": jobNames[1], }, } @@ -347,7 +347,7 @@ func TestLustre2CanParseConfiguration(t *testing.T) { "/proc/fs/lustre/mdt/*/md_stats", ]`) - table, err := toml.Parse([]byte(config)) + table, err := toml.Parse(config) require.NoError(t, err) inputs, ok := table.Fields["inputs"] @@ -361,11 +361,11 @@ func TestLustre2CanParseConfiguration(t *testing.T) { require.NoError(t, toml.UnmarshalTable(lustre2.([]*ast.Table)[0], &plugin)) assert.Equal(t, Lustre2{ - Ost_procfiles: []string{ + OstProcfiles: []string{ "/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats", }, - Mds_procfiles: []string{ + MdsProcfiles: []string{ "/proc/fs/lustre/mdt/*/md_stats", }, }, plugin) diff --git a/plugins/inputs/marklogic/marklogic.go b/plugins/inputs/marklogic/marklogic.go index b350466122dc7..b79908caab618 100644 --- a/plugins/inputs/marklogic/marklogic.go +++ b/plugins/inputs/marklogic/marklogic.go @@ -220,7 +220,7 @@ func (c *Marklogic) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: time.Duration(5 * time.Second), + Timeout: 5 * time.Second, } return client, nil @@ -246,11 +246,7 @@ func (c *Marklogic) gatherJSONData(url string, v interface{}) error { response.StatusCode, http.StatusOK) } - if err = json.NewDecoder(response.Body).Decode(v); err != nil { - return err - } - - return nil + return json.NewDecoder(response.Body).Decode(v) } func init() { diff --git a/plugins/inputs/mem/memory.go b/plugins/inputs/mem/memory.go index c8dbd0c2a43b5..d01bf2a0fa156 100644 --- a/plugins/inputs/mem/memory.go +++ b/plugins/inputs/mem/memory.go @@ -14,19 +14,19 @@ type MemStats struct { platform string } -func (_ *MemStats) Description() string { +func (ms *MemStats) Description() string { return "Read metrics about memory usage" } -func (_ *MemStats) SampleConfig() string { return "" } +func (ms *MemStats) SampleConfig() string { return "" } -func (m *MemStats) Init() error { - m.platform = runtime.GOOS +func (ms *MemStats) Init() error { + ms.platform = runtime.GOOS return nil } -func (s *MemStats) Gather(acc telegraf.Accumulator) error { - vm, err := s.ps.VMStat() +func (ms *MemStats) Gather(acc telegraf.Accumulator) error { + vm, err := ms.ps.VMStat() if err != nil { return fmt.Errorf("error getting virtual memory info: %s", err) } @@ -39,7 +39,7 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error { "available_percent": 100 * float64(vm.Available) / float64(vm.Total), } - switch s.platform { + switch ms.platform { case "darwin": fields["active"] = vm.Active fields["free"] = vm.Free diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index e25f250c8f8d4..cdc5eada1792f 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -2,7 +2,6 @@ package mesos import ( "encoding/json" - "fmt" "math/rand" "net/http" "net/http/httptest" @@ -19,17 +18,10 @@ var masterMetrics map[string]interface{} var masterTestServer *httptest.Server var slaveMetrics map[string]interface{} -// var slaveTaskMetrics map[string]interface{} var slaveTestServer *httptest.Server -func randUUID() string { - b := make([]byte, 16) - rand.Read(b) - return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) -} - // master metrics that will be returned by generateMetrics() -var masterMetricNames []string = []string{ +var masterMetricNames = []string{ // resources "master/cpus_percent", "master/cpus_used", @@ -214,7 +206,7 @@ var masterMetricNames []string = []string{ } // slave metrics that will be returned by generateMetrics() -var slaveMetricNames []string = []string{ +var slaveMetricNames = []string{ // resources "slave/cpus_percent", "slave/cpus_used", diff --git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go index 30f56213af345..3f3f54c17d1cb 100644 --- a/plugins/inputs/minecraft/client.go +++ b/plugins/inputs/minecraft/client.go @@ -25,7 +25,7 @@ type Connector interface { Connect() (Connection, error) } -func NewConnector(hostname, port, password string) (*connector, error) { +func newConnector(hostname, port, password string) (*connector, error) { return &connector{ hostname: hostname, port: port, @@ -58,7 +58,7 @@ func (c *connector) Connect() (Connection, error) { return &connection{rcon: rcon}, nil } -func NewClient(connector Connector) (*client, error) { +func newClient(connector Connector) (*client, error) { return &client{connector: connector}, nil } diff --git a/plugins/inputs/minecraft/client_test.go b/plugins/inputs/minecraft/client_test.go index 767a0c30ef5d3..4a5ceb9db9cb7 100644 --- a/plugins/inputs/minecraft/client_test.go +++ b/plugins/inputs/minecraft/client_test.go @@ -98,7 +98,7 @@ func TestClient_Player(t *testing.T) { conn: &MockConnection{commands: tt.commands}, } - client, err := NewClient(connector) + client, err := newClient(connector) require.NoError(t, err) actual, err := client.Players() @@ -183,7 +183,7 @@ func TestClient_Scores(t *testing.T) { conn: &MockConnection{commands: tt.commands}, } - client, err := NewClient(connector) + client, err := newClient(connector) require.NoError(t, err) actual, err := client.Scores(tt.player) diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go index e36a46bb07163..12d76a366c231 100644 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ b/plugins/inputs/minecraft/internal/rcon/rcon.go @@ -62,7 +62,7 @@ type Packet struct { // Write method fails to write the header bytes in their little // endian byte order. func (p Packet) Compile() (payload []byte, err error) { - var size int32 = p.Header.Size + var size = p.Header.Size var buffer bytes.Buffer var padding [PacketPaddingSize]byte diff --git a/plugins/inputs/minecraft/minecraft.go b/plugins/inputs/minecraft/minecraft.go index 0de79d94a3c77..939cc2c42a7a3 100644 --- a/plugins/inputs/minecraft/minecraft.go +++ b/plugins/inputs/minecraft/minecraft.go @@ -50,12 +50,12 @@ func (s *Minecraft) SampleConfig() string { func (s *Minecraft) Gather(acc telegraf.Accumulator) error { if s.client == nil { - connector, err := NewConnector(s.Server, s.Port, s.Password) + connector, err := newConnector(s.Server, s.Port, s.Password) if err != nil { return err } - client, err := NewClient(connector) + client, err := newClient(connector) if err != nil { return err } diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index d30704c42c273..eda29095325af 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -338,11 +338,11 @@ func validateFieldContainers(t []fieldContainer, n string) error { } //search name duplicate - canonical_name := item.Measurement + "." + item.Name - if nameEncountered[canonical_name] { + canonicalName := item.Measurement + "." + item.Name + if nameEncountered[canonicalName] { return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, n, item.Name) } - nameEncountered[canonical_name] = true + nameEncountered[canonicalName] = true if n == cInputRegisters || n == cHoldingRegisters { // search byte order @@ -405,13 +405,13 @@ func removeDuplicates(elements []uint16) []uint16 { func readRegisterValues(m *Modbus, rt string, rr registerRange) ([]byte, error) { if rt == cDiscreteInputs { - return m.client.ReadDiscreteInputs(uint16(rr.address), uint16(rr.length)) + return m.client.ReadDiscreteInputs(rr.address, rr.length) } else if rt == cCoils { - return m.client.ReadCoils(uint16(rr.address), uint16(rr.length)) + return m.client.ReadCoils(rr.address, rr.length) } else if rt == cInputRegisters { - return m.client.ReadInputRegisters(uint16(rr.address), uint16(rr.length)) + return m.client.ReadInputRegisters(rr.address, rr.length) } else if rt == cHoldingRegisters { - return m.client.ReadHoldingRegisters(uint16(rr.address), uint16(rr.length)) + return m.client.ReadHoldingRegisters(rr.address, rr.length) } else { return []byte{}, fmt.Errorf("not Valid function") } @@ -462,16 +462,16 @@ func (m *Modbus) getFields() error { if register.Type == cInputRegisters || register.Type == cHoldingRegisters { for i := 0; i < len(register.Fields); i++ { - var values_t []byte + var valuesT []byte for j := 0; j < len(register.Fields[i].Address); j++ { tempArray := rawValues[register.Fields[i].Address[j]] for x := 0; x < len(tempArray); x++ { - values_t = append(values_t, tempArray[x]) + valuesT = append(valuesT, tempArray[x]) } } - register.Fields[i].value = convertDataType(register.Fields[i], values_t) + register.Fields[i].value = convertDataType(register.Fields[i], valuesT) } } @@ -587,30 +587,6 @@ func convertEndianness64(o string, b []byte) uint64 { } } -func format16(f string, r uint16) interface{} { - switch f { - case "UINT16": - return r - case "INT16": - return int16(r) - default: - return r - } -} - -func format32(f string, r uint32) interface{} { - switch f { - case "UINT32": - return r - case "INT32": - return int32(r) - case "FLOAT32-IEEE": - return math.Float32frombits(r) - default: - return r - } -} - func format64(f string, r uint64) interface{} { switch f { case "UINT64": @@ -689,7 +665,7 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error { } timestamp := time.Now() - for retry := 0; retry <= m.Retries; retry += 1 { + for retry := 0; retry <= m.Retries; retry++ { timestamp = time.Now() err := m.getFields() if err != nil { diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index 4bd7e26bb3c62..99fa7bb7da7da 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -679,7 +679,7 @@ func TestRetrySuccessful(t *testing.T) { if retries >= maxretries { except = &mbserver.Success } - retries += 1 + retries++ return data, except }) @@ -756,7 +756,7 @@ func TestRetryFail(t *testing.T) { counter := 0 serv.RegisterFunctionHandler(1, func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) { - counter += 1 + counter++ data := make([]byte, 2) data[0] = byte(1) data[1] = byte(0) diff --git a/plugins/inputs/multifile/multifile.go b/plugins/inputs/multifile/multifile.go index 359036268a981..838b1dd764d2f 100644 --- a/plugins/inputs/multifile/multifile.go +++ b/plugins/inputs/multifile/multifile.go @@ -102,7 +102,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error { var value interface{} - var d int = 0 + var d int if _, errfmt := fmt.Sscanf(file.Conversion, "float(%d)", &d); errfmt == nil || file.Conversion == "float" { var v float64 v, err = strconv.ParseFloat(vStr, 64) diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index ca02f9889b033..8e09a357f47e6 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -143,7 +143,6 @@ const sampleConfig = ` ` const ( - defaultTimeout = 5 * time.Second defaultPerfEventsStatementsDigestTextLimit = 120 defaultPerfEventsStatementsLimit = 250 defaultPerfEventsStatementsTimeLimit = 86400 @@ -712,8 +711,8 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat servtag := getDSNTag(serv) tags := map[string]string{"server": servtag} var ( - size uint64 = 0 - count uint64 = 0 + size uint64 + count uint64 fileSize uint64 fileName string ) @@ -893,16 +892,16 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. } // get count of connections from each user - conn_rows, err := db.Query("SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user") + connRows, err := db.Query("SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user") if err != nil { return err } - for conn_rows.Next() { + for connRows.Next() { var user string var connections int64 - err = conn_rows.Scan(&user, &connections) + err = connRows.Scan(&user, &connections) if err != nil { return err } @@ -989,142 +988,142 @@ func columnsToLower(s []string, e error) ([]string, error) { func getColSlice(l int) ([]interface{}, error) { // list of all possible column names var ( - user string - total_connections int64 - concurrent_connections int64 - connected_time int64 - busy_time int64 - cpu_time int64 - bytes_received int64 - bytes_sent int64 - binlog_bytes_written int64 - rows_read int64 - rows_sent int64 - rows_deleted int64 - rows_inserted int64 - rows_updated int64 - select_commands int64 - update_commands int64 - other_commands int64 - commit_transactions int64 - rollback_transactions int64 - denied_connections int64 - lost_connections int64 - access_denied int64 - empty_queries int64 - total_ssl_connections int64 - max_statement_time_exceeded int64 + user string + totalConnections int64 + concurrentConnections int64 + connectedTime int64 + busyTime int64 + cpuTime int64 + bytesReceived int64 + bytesSent int64 + binlogBytesWritten int64 + rowsRead int64 + rowsSent int64 + rowsDeleted int64 + rowsInserted int64 + rowsUpdated int64 + selectCommands int64 + updateCommands int64 + otherCommands int64 + commitTransactions int64 + rollbackTransactions int64 + deniedConnections int64 + lostConnections int64 + accessDenied int64 + emptyQueries int64 + totalSslConnections int64 + maxStatementTimeExceeded int64 // maria specific - fbusy_time float64 - fcpu_time float64 + fbusyTime float64 + fcpuTime float64 // percona specific - rows_fetched int64 - table_rows_read int64 + rowsFetched int64 + tableRowsRead int64 ) switch l { case 23: // maria5 return []interface{}{ &user, - &total_connections, - &concurrent_connections, - &connected_time, - &fbusy_time, - &fcpu_time, - &bytes_received, - &bytes_sent, - &binlog_bytes_written, - &rows_read, - &rows_sent, - &rows_deleted, - &rows_inserted, - &rows_updated, - &select_commands, - &update_commands, - &other_commands, - &commit_transactions, - &rollback_transactions, - &denied_connections, - &lost_connections, - &access_denied, - &empty_queries, + &totalConnections, + &concurrentConnections, + &connectedTime, + &fbusyTime, + &fcpuTime, + &bytesReceived, + &bytesSent, + &binlogBytesWritten, + &rowsRead, + &rowsSent, + &rowsDeleted, + &rowsInserted, + &rowsUpdated, + &selectCommands, + &updateCommands, + &otherCommands, + &commitTransactions, + &rollbackTransactions, + &deniedConnections, + &lostConnections, + &accessDenied, + &emptyQueries, }, nil case 25: // maria10 return []interface{}{ &user, - &total_connections, - &concurrent_connections, - &connected_time, - &fbusy_time, - &fcpu_time, - &bytes_received, - &bytes_sent, - &binlog_bytes_written, - &rows_read, - &rows_sent, - &rows_deleted, - &rows_inserted, - &rows_updated, - &select_commands, - &update_commands, - &other_commands, - &commit_transactions, - &rollback_transactions, - &denied_connections, - &lost_connections, - &access_denied, - &empty_queries, - &total_ssl_connections, - &max_statement_time_exceeded, + &totalConnections, + &concurrentConnections, + &connectedTime, + &fbusyTime, + &fcpuTime, + &bytesReceived, + &bytesSent, + &binlogBytesWritten, + &rowsRead, + &rowsSent, + &rowsDeleted, + &rowsInserted, + &rowsUpdated, + &selectCommands, + &updateCommands, + &otherCommands, + &commitTransactions, + &rollbackTransactions, + &deniedConnections, + &lostConnections, + &accessDenied, + &emptyQueries, + &totalSslConnections, + &maxStatementTimeExceeded, }, nil case 21: // mysql 5.5 return []interface{}{ &user, - &total_connections, - &concurrent_connections, - &connected_time, - &busy_time, - &cpu_time, - &bytes_received, - &bytes_sent, - &binlog_bytes_written, - &rows_fetched, - &rows_updated, - &table_rows_read, - &select_commands, - &update_commands, - &other_commands, - &commit_transactions, - &rollback_transactions, - &denied_connections, - &lost_connections, - &access_denied, - &empty_queries, + &totalConnections, + &concurrentConnections, + &connectedTime, + &busyTime, + &cpuTime, + &bytesReceived, + &bytesSent, + &binlogBytesWritten, + &rowsFetched, + &rowsUpdated, + &tableRowsRead, + &selectCommands, + &updateCommands, + &otherCommands, + &commitTransactions, + &rollbackTransactions, + &deniedConnections, + &lostConnections, + &accessDenied, + &emptyQueries, }, nil case 22: // percona return []interface{}{ &user, - &total_connections, - &concurrent_connections, - &connected_time, - &busy_time, - &cpu_time, - &bytes_received, - &bytes_sent, - &binlog_bytes_written, - &rows_fetched, - &rows_updated, - &table_rows_read, - &select_commands, - &update_commands, - &other_commands, - &commit_transactions, - &rollback_transactions, - &denied_connections, - &lost_connections, - &access_denied, - &empty_queries, - &total_ssl_connections, + &totalConnections, + &concurrentConnections, + &connectedTime, + &busyTime, + &cpuTime, + &bytesReceived, + &bytesSent, + &binlogBytesWritten, + &rowsFetched, + &rowsUpdated, + &tableRowsRead, + &selectCommands, + &updateCommands, + &otherCommands, + &commitTransactions, + &rollbackTransactions, + &deniedConnections, + &lostConnections, + &accessDenied, + &emptyQueries, + &totalSslConnections, }, nil } @@ -1685,7 +1684,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf defer rows.Close() var ( - schemaName, digest, digest_text string + schemaName, digest, digestText string count, queryTime, errors, warnings float64 rowsAffected, rowsSent, rowsExamined float64 tmpTables, tmpDiskTables float64 @@ -1700,7 +1699,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf for rows.Next() { err = rows.Scan( - &schemaName, &digest, &digest_text, + &schemaName, &digest, &digestText, &count, &queryTime, &errors, &warnings, &rowsAffected, &rowsSent, &rowsExamined, &tmpTables, &tmpDiskTables, @@ -1713,7 +1712,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf } tags["schema"] = schemaName tags["digest"] = digest - tags["digest_text"] = digest_text + tags["digest_text"] = digestText fields := map[string]interface{}{ "events_statements_total": count, diff --git a/plugins/inputs/net/net.go b/plugins/inputs/net/net.go index f91501860e749..bb1621061ae9b 100644 --- a/plugins/inputs/net/net.go +++ b/plugins/inputs/net/net.go @@ -20,7 +20,7 @@ type NetIOStats struct { Interfaces []string } -func (_ *NetIOStats) Description() string { +func (n *NetIOStats) Description() string { return "Read metrics about network interface usage" } @@ -38,18 +38,18 @@ var netSampleConfig = ` ## ` -func (_ *NetIOStats) SampleConfig() string { +func (n *NetIOStats) SampleConfig() string { return netSampleConfig } -func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { - netio, err := s.ps.NetIO() +func (n *NetIOStats) Gather(acc telegraf.Accumulator) error { + netio, err := n.ps.NetIO() if err != nil { return fmt.Errorf("error getting net io info: %s", err) } - if s.filter == nil { - if s.filter, err = filter.Compile(s.Interfaces); err != nil { + if n.filter == nil { + if n.filter, err = filter.Compile(n.Interfaces); err != nil { return fmt.Errorf("error compiling filter: %s", err) } } @@ -64,17 +64,17 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { } for _, io := range netio { - if len(s.Interfaces) != 0 { + if len(n.Interfaces) != 0 { var found bool - if s.filter.Match(io.Name) { + if n.filter.Match(io.Name) { found = true } if !found { continue } - } else if !s.skipChecks { + } else if !n.skipChecks { iface, ok := interfacesByName[io.Name] if !ok { continue @@ -108,8 +108,8 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { // Get system wide stats for different network protocols // (ignore these stats if the call fails) - if !s.IgnoreProtocolStats { - netprotos, _ := s.ps.NetProto() + if !n.IgnoreProtocolStats { + netprotos, _ := n.ps.NetProto() fields := make(map[string]interface{}) for _, proto := range netprotos { for stat, value := range proto.Stats { diff --git a/plugins/inputs/net/netstat.go b/plugins/inputs/net/netstat.go index 555b396afd357..150f271a31b53 100644 --- a/plugins/inputs/net/netstat.go +++ b/plugins/inputs/net/netstat.go @@ -13,18 +13,18 @@ type NetStats struct { ps system.PS } -func (_ *NetStats) Description() string { +func (ns *NetStats) Description() string { return "Read TCP metrics such as established, time wait and sockets counts." } var tcpstatSampleConfig = "" -func (_ *NetStats) SampleConfig() string { +func (ns *NetStats) SampleConfig() string { return tcpstatSampleConfig } -func (s *NetStats) Gather(acc telegraf.Accumulator) error { - netconns, err := s.ps.NetConnections() +func (ns *NetStats) Gather(acc telegraf.Accumulator) error { + netconns, err := ns.ps.NetConnections() if err != nil { return fmt.Errorf("error getting net connections info: %s", err) } @@ -35,7 +35,7 @@ func (s *NetStats) Gather(acc telegraf.Accumulator) error { tags := map[string]string{} for _, netcon := range netconns { if netcon.Type == syscall.SOCK_DGRAM { - counts["UDP"] += 1 + counts["UDP"]++ continue // UDP has no status } c, ok := counts[netcon.Status] diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index 80b5dcd0f16be..a952783a344a6 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -14,7 +14,7 @@ import ( ) // Mapping of ntpq header names to tag keys -var tagHeaders map[string]string = map[string]string{ +var tagHeaders = map[string]string{ "remote": "remote", "refid": "refid", "st": "stratum", @@ -128,7 +128,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "h"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h")) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index])) continue } // seconds in an hour @@ -137,7 +137,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "d"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d")) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index])) continue } // seconds in a day @@ -146,7 +146,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "m"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m")) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index])) continue } // seconds in a day @@ -157,7 +157,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { m, err := strconv.Atoi(fields[index]) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing int: %s", fields[index])) continue } mFields[key] = int64(m) @@ -174,7 +174,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { m, err := strconv.ParseFloat(fields[index], 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing float: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing float: %s", fields[index])) continue } mFields[key] = m diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go index 0e54164c64620..2602161a8e9a9 100644 --- a/plugins/inputs/passenger/passenger.go +++ b/plugins/inputs/passenger/passenger.go @@ -32,27 +32,27 @@ func (p *passenger) parseCommand() (string, []string) { } type info struct { - Passenger_version string `xml:"passenger_version"` - Process_count int `xml:"process_count"` - Capacity_used int `xml:"capacity_used"` - Get_wait_list_size int `xml:"get_wait_list_size"` - Max int `xml:"max"` - Supergroups struct { + PassengerVersion string `xml:"passenger_version"` + ProcessCount int `xml:"process_count"` + CapacityUsed int `xml:"capacity_used"` + GetWaitListSize int `xml:"get_wait_list_size"` + Max int `xml:"max"` + Supergroups struct { Supergroup []struct { - Name string `xml:"name"` - Get_wait_list_size int `xml:"get_wait_list_size"` - Capacity_used int `xml:"capacity_used"` - Group []struct { - Name string `xml:"name"` - AppRoot string `xml:"app_root"` - AppType string `xml:"app_type"` - Enabled_process_count int `xml:"enabled_process_count"` - Disabling_process_count int `xml:"disabling_process_count"` - Disabled_process_count int `xml:"disabled_process_count"` - Capacity_used int `xml:"capacity_used"` - Get_wait_list_size int `xml:"get_wait_list_size"` - Processes_being_spawned int `xml:"processes_being_spawned"` - Processes struct { + Name string `xml:"name"` + GetWaitListSize int `xml:"get_wait_list_size"` + CapacityUsed int `xml:"capacity_used"` + Group []struct { + Name string `xml:"name"` + AppRoot string `xml:"app_root"` + AppType string `xml:"app_type"` + EnabledProcessCount int `xml:"enabled_process_count"` + DisablingProcessCount int `xml:"disabling_process_count"` + DisabledProcessCount int `xml:"disabled_process_count"` + CapacityUsed int `xml:"capacity_used"` + GetWaitListSize int `xml:"get_wait_list_size"` + ProcessesBeingSpawned int `xml:"processes_being_spawned"` + Processes struct { Process []*process `xml:"process"` } `xml:"processes"` } `xml:"group"` @@ -61,28 +61,28 @@ type info struct { } type process struct { - Pid int `xml:"pid"` - Concurrency int `xml:"concurrency"` - Sessions int `xml:"sessions"` - Busyness int `xml:"busyness"` - Processed int `xml:"processed"` - Spawner_creation_time int64 `xml:"spawner_creation_time"` - Spawn_start_time int64 `xml:"spawn_start_time"` - Spawn_end_time int64 `xml:"spawn_end_time"` - Last_used int64 `xml:"last_used"` - Uptime string `xml:"uptime"` - Code_revision string `xml:"code_revision"` - Life_status string `xml:"life_status"` - Enabled string `xml:"enabled"` - Has_metrics bool `xml:"has_metrics"` - Cpu int64 `xml:"cpu"` - Rss int64 `xml:"rss"` - Pss int64 `xml:"pss"` - Private_dirty int64 `xml:"private_dirty"` - Swap int64 `xml:"swap"` - Real_memory int64 `xml:"real_memory"` - Vmsize int64 `xml:"vmsize"` - Process_group_id string `xml:"process_group_id"` + Pid int `xml:"pid"` + Concurrency int `xml:"concurrency"` + Sessions int `xml:"sessions"` + Busyness int `xml:"busyness"` + Processed int `xml:"processed"` + SpawnerCreationTime int64 `xml:"spawner_creation_time"` + SpawnStartTime int64 `xml:"spawn_start_time"` + SpawnEndTime int64 `xml:"spawn_end_time"` + LastUsed int64 `xml:"last_used"` + Uptime string `xml:"uptime"` + CodeRevision string `xml:"code_revision"` + LifeStatus string `xml:"life_status"` + Enabled string `xml:"enabled"` + HasMetrics bool `xml:"has_metrics"` + Cpu int64 `xml:"cpu"` + Rss int64 `xml:"rss"` + Pss int64 `xml:"pss"` + PrivateDirty int64 `xml:"private_dirty"` + Swap int64 `xml:"swap"` + RealMemory int64 `xml:"real_memory"` + Vmsize int64 `xml:"vmsize"` + ProcessGroupId string `xml:"process_group_id"` } func (p *process) getUptime() int64 { @@ -137,31 +137,27 @@ var sampleConfig = ` command = "passenger-status -v --show=xml" ` -func (r *passenger) SampleConfig() string { +func (p *passenger) SampleConfig() string { return sampleConfig } -func (r *passenger) Description() string { +func (p *passenger) Description() string { return "Read metrics of passenger using passenger-status" } -func (g *passenger) Gather(acc telegraf.Accumulator) error { - if g.Command == "" { - g.Command = "passenger-status -v --show=xml" +func (p *passenger) Gather(acc telegraf.Accumulator) error { + if p.Command == "" { + p.Command = "passenger-status -v --show=xml" } - cmd, args := g.parseCommand() + cmd, args := p.parseCommand() out, err := exec.Command(cmd, args...).Output() if err != nil { return err } - if err = importMetric(out, acc); err != nil { - return err - } - - return nil + return importMetric(out, acc) } func importMetric(stat []byte, acc telegraf.Accumulator) error { @@ -174,13 +170,13 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { } tags := map[string]string{ - "passenger_version": p.Passenger_version, + "passenger_version": p.PassengerVersion, } fields := map[string]interface{}{ - "process_count": p.Process_count, + "process_count": p.ProcessCount, "max": p.Max, - "capacity_used": p.Capacity_used, - "get_wait_list_size": p.Get_wait_list_size, + "capacity_used": p.CapacityUsed, + "get_wait_list_size": p.GetWaitListSize, } acc.AddFields("passenger", fields, tags) @@ -189,8 +185,8 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { "name": sg.Name, } fields := map[string]interface{}{ - "get_wait_list_size": sg.Get_wait_list_size, - "capacity_used": sg.Capacity_used, + "get_wait_list_size": sg.GetWaitListSize, + "capacity_used": sg.CapacityUsed, } acc.AddFields("passenger_supergroup", fields, tags) @@ -201,9 +197,9 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { "app_type": group.AppType, } fields := map[string]interface{}{ - "get_wait_list_size": group.Get_wait_list_size, - "capacity_used": group.Capacity_used, - "processes_being_spawned": group.Processes_being_spawned, + "get_wait_list_size": group.GetWaitListSize, + "capacity_used": group.CapacityUsed, + "processes_being_spawned": group.ProcessesBeingSpawned, } acc.AddFields("passenger_group", fields, tags) @@ -213,26 +209,26 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { "app_root": group.AppRoot, "supergroup_name": sg.Name, "pid": fmt.Sprintf("%d", process.Pid), - "code_revision": process.Code_revision, - "life_status": process.Life_status, - "process_group_id": process.Process_group_id, + "code_revision": process.CodeRevision, + "life_status": process.LifeStatus, + "process_group_id": process.ProcessGroupId, } fields := map[string]interface{}{ "concurrency": process.Concurrency, "sessions": process.Sessions, "busyness": process.Busyness, "processed": process.Processed, - "spawner_creation_time": process.Spawner_creation_time, - "spawn_start_time": process.Spawn_start_time, - "spawn_end_time": process.Spawn_end_time, - "last_used": process.Last_used, + "spawner_creation_time": process.SpawnerCreationTime, + "spawn_start_time": process.SpawnStartTime, + "spawn_end_time": process.SpawnEndTime, + "last_used": process.LastUsed, "uptime": process.getUptime(), "cpu": process.Cpu, "rss": process.Rss, "pss": process.Pss, - "private_dirty": process.Private_dirty, + "private_dirty": process.PrivateDirty, "swap": process.Swap, - "real_memory": process.Real_memory, + "real_memory": process.RealMemory, "vmsize": process.Vmsize, } acc.AddFields("passenger_process", fields, tags) diff --git a/plugins/inputs/phpfpm/fcgi.go b/plugins/inputs/phpfpm/fcgi.go index 689660ea093c3..83bbf09cc73a3 100644 --- a/plugins/inputs/phpfpm/fcgi.go +++ b/plugins/inputs/phpfpm/fcgi.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package fcgi implements the FastCGI protocol. +// Package phpfpm implements the FastCGI protocol. // Currently only the responder role is supported. // The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22 package phpfpm @@ -135,8 +135,8 @@ func (rec *record) read(r io.Reader) (err error) { return nil } -func (r *record) content() []byte { - return r.buf[:r.h.ContentLength] +func (rec *record) content() []byte { + return rec.buf[:rec.h.ContentLength] } // writeRecord writes and sends a single record. diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index d23dc526dda8d..7f6c93e50ecca 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -33,25 +33,25 @@ func newFcgiClient(h string, args ...interface{}) (*conn, error) { return fcgi, err } -func (client *conn) Request( +func (c *conn) Request( env map[string]string, requestData string, ) (retout []byte, reterr []byte, err error) { - defer client.rwc.Close() + defer c.rwc.Close() var reqId uint16 = 1 - err = client.writeBeginRequest(reqId, uint16(roleResponder), 0) + err = c.writeBeginRequest(reqId, uint16(roleResponder), 0) if err != nil { return } - err = client.writePairs(typeParams, reqId, env) + err = c.writePairs(typeParams, reqId, env) if err != nil { return } if len(requestData) > 0 { - if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil { + if err = c.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil { return } } @@ -62,7 +62,7 @@ func (client *conn) Request( // receive until EOF or FCGI_END_REQUEST READ_LOOP: for { - err1 = rec.read(client.rwc) + err1 = rec.read(c.rwc) if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") { if err1 != io.EOF { err = err1 diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 35f60342270dd..6a562f9ee231d 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -92,11 +92,11 @@ var sampleConfig = ` # pid_finder = "pgrep" ` -func (_ *Procstat) SampleConfig() string { +func (p *Procstat) SampleConfig() string { return sampleConfig } -func (_ *Procstat) Description() string { +func (p *Procstat) Description() string { return "Monitor process cpu and memory usage" } @@ -117,7 +117,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { p.createProcess = defaultProcess } - pids, tags, err := p.findPids(acc) + pids, tags, err := p.findPids() now := time.Now() if err != nil { @@ -136,7 +136,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { procs, err := p.updateProcesses(pids, tags, p.procs) if err != nil { - acc.AddError(fmt.Errorf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", + acc.AddError(fmt.Errorf("procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", p.Exe, p.PidFile, p.Pattern, p.User, err.Error())) } p.procs = procs @@ -234,26 +234,26 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time fields[prefix+"created_at"] = createdAt * 1000000 //Convert ms to ns } - cpu_time, err := proc.Times() + cpuTime, err := proc.Times() if err == nil { - fields[prefix+"cpu_time_user"] = cpu_time.User - fields[prefix+"cpu_time_system"] = cpu_time.System - fields[prefix+"cpu_time_idle"] = cpu_time.Idle - fields[prefix+"cpu_time_nice"] = cpu_time.Nice - fields[prefix+"cpu_time_iowait"] = cpu_time.Iowait - fields[prefix+"cpu_time_irq"] = cpu_time.Irq - fields[prefix+"cpu_time_soft_irq"] = cpu_time.Softirq - fields[prefix+"cpu_time_steal"] = cpu_time.Steal - fields[prefix+"cpu_time_guest"] = cpu_time.Guest - fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice - } - - cpu_perc, err := proc.Percent(time.Duration(0)) + fields[prefix+"cpu_time_user"] = cpuTime.User + fields[prefix+"cpu_time_system"] = cpuTime.System + fields[prefix+"cpu_time_idle"] = cpuTime.Idle + fields[prefix+"cpu_time_nice"] = cpuTime.Nice + fields[prefix+"cpu_time_iowait"] = cpuTime.Iowait + fields[prefix+"cpu_time_irq"] = cpuTime.Irq + fields[prefix+"cpu_time_soft_irq"] = cpuTime.Softirq + fields[prefix+"cpu_time_steal"] = cpuTime.Steal + fields[prefix+"cpu_time_guest"] = cpuTime.Guest + fields[prefix+"cpu_time_guest_nice"] = cpuTime.GuestNice + } + + cpuPerc, err := proc.Percent(time.Duration(0)) if err == nil { if p.solarisMode { - fields[prefix+"cpu_usage"] = cpu_perc / float64(runtime.NumCPU()) + fields[prefix+"cpu_usage"] = cpuPerc / float64(runtime.NumCPU()) } else { - fields[prefix+"cpu_usage"] = cpu_perc + fields[prefix+"cpu_usage"] = cpuPerc } } @@ -267,9 +267,9 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time fields[prefix+"memory_locked"] = mem.Locked } - mem_perc, err := proc.MemoryPercent() + memPerc, err := proc.MemoryPercent() if err == nil { - fields[prefix+"memory_usage"] = mem_perc + fields[prefix+"memory_usage"] = memPerc } rlims, err := proc.RlimitUsage(true) @@ -368,7 +368,7 @@ func (p *Procstat) getPIDFinder() (PIDFinder, error) { } // Get matching PIDs and their initial tags -func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, error) { +func (p *Procstat) findPids() ([]PID, map[string]string, error) { var pids []PID tags := make(map[string]string) var err error @@ -400,7 +400,7 @@ func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, pids, err = p.winServicePIDs() tags = map[string]string{"win_service": p.WinService} } else { - err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified") + err = fmt.Errorf("either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified") } return pids, tags, err diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 9836feaec8b89..401df08916d91 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -30,14 +30,14 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd { func TestMockExecCommand(t *testing.T) { var cmd []string for _, arg := range os.Args { - if string(arg) == "--" { + if arg == "--" { cmd = []string{} continue } if cmd == nil { continue } - cmd = append(cmd, string(arg)) + cmd = append(cmd, arg) } if cmd == nil { return @@ -72,7 +72,7 @@ func pidFinder(pids []PID, err error) func() (PIDFinder, error) { } } -func (pg *testPgrep) PidFile(path string) ([]PID, error) { +func (pg *testPgrep) PidFile(_ string) ([]PID, error) { return pg.pids, pg.err } @@ -80,15 +80,15 @@ func (p *testProc) Cmdline() (string, error) { return "test_proc", nil } -func (pg *testPgrep) Pattern(pattern string) ([]PID, error) { +func (pg *testPgrep) Pattern(_ string) ([]PID, error) { return pg.pids, pg.err } -func (pg *testPgrep) Uid(user string) ([]PID, error) { +func (pg *testPgrep) Uid(_ string) ([]PID, error) { return pg.pids, pg.err } -func (pg *testPgrep) FullPattern(pattern string) ([]PID, error) { +func (pg *testPgrep) FullPattern(_ string) ([]PID, error) { return pg.pids, pg.err } @@ -97,7 +97,7 @@ type testProc struct { tags map[string]string } -func newTestProc(pid PID) (Process, error) { +func newTestProc(_ PID) (Process, error) { proc := &testProc{ tags: make(map[string]string), } @@ -144,7 +144,7 @@ func (p *testProc) NumThreads() (int32, error) { return 0, nil } -func (p *testProc) Percent(interval time.Duration) (float64, error) { +func (p *testProc) Percent(_ time.Duration) (float64, error) { return 0, nil } @@ -160,12 +160,12 @@ func (p *testProc) Times() (*cpu.TimesStat, error) { return &cpu.TimesStat{}, nil } -func (p *testProc) RlimitUsage(gatherUsage bool) ([]process.RlimitStat, error) { +func (p *testProc) RlimitUsage(_ bool) ([]process.RlimitStat, error) { return []process.RlimitStat{}, nil } -var pid PID = PID(42) -var exe string = "foo" +var pid = PID(42) +var exe = "foo" func TestGather_CreateProcessErrorOk(t *testing.T) { var acc testutil.Accumulator @@ -363,8 +363,7 @@ func TestGather_systemdUnitPIDs(t *testing.T) { createPIDFinder: pidFinder([]PID{}, nil), SystemdUnit: "TestGather_systemdUnitPIDs", } - var acc testutil.Accumulator - pids, tags, err := p.findPids(&acc) + pids, tags, err := p.findPids() require.NoError(t, err) assert.Equal(t, []PID{11408}, pids) assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) @@ -385,8 +384,7 @@ func TestGather_cgroupPIDs(t *testing.T) { createPIDFinder: pidFinder([]PID{}, nil), CGroup: td, } - var acc testutil.Accumulator - pids, tags, err := p.findPids(&acc) + pids, tags, err := p.findPids() require.NoError(t, err) assert.Equal(t, []PID{1234, 5678}, pids) assert.Equal(t, td, tags["cgroup"]) @@ -415,7 +413,7 @@ func TestGather_SameTimestamps(t *testing.T) { require.NoError(t, acc.GatherError(p.Gather)) procstat, _ := acc.Get("procstat") - procstat_lookup, _ := acc.Get("procstat_lookup") + procstatLookup, _ := acc.Get("procstat_lookup") - require.Equal(t, procstat.Time, procstat_lookup.Time) + require.Equal(t, procstat.Time, procstatLookup.Time) } diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 72b85dddaa8d3..fdc5dcd14cb12 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -230,7 +230,7 @@ var Tracking = map[string]string{ "role": "replication_role", } -func (r *Redis) init(acc telegraf.Accumulator) error { +func (r *Redis) init() error { if r.initialized { return nil } @@ -307,7 +307,7 @@ func (r *Redis) init(acc telegraf.Accumulator) error { // Returns one of the errors encountered while gather stats (if any). func (r *Redis) Gather(acc telegraf.Accumulator) error { if !r.initialized { - err := r.init(acc) + err := r.init() if err != nil { return err } @@ -361,7 +361,7 @@ func gatherInfoOutput( tags map[string]string, ) error { var section string - var keyspace_hits, keyspace_misses int64 + var keyspaceHits, keyspaceMisses int64 scanner := bufio.NewScanner(rdr) fields := make(map[string]interface{}) @@ -383,7 +383,7 @@ func gatherInfoOutput( if len(parts) < 2 { continue } - name := string(parts[0]) + name := parts[0] if section == "Server" { if name != "lru_clock" && name != "uptime_in_seconds" && name != "redis_version" { @@ -406,7 +406,7 @@ func gatherInfoOutput( metric, ok := Tracking[name] if !ok { if section == "Keyspace" { - kline := strings.TrimSpace(string(parts[1])) + kline := strings.TrimSpace(parts[1]) gatherKeyspaceLine(name, kline, acc, tags) continue } @@ -433,9 +433,9 @@ func gatherInfoOutput( if ival, err := strconv.ParseInt(val, 10, 64); err == nil { switch name { case "keyspace_hits": - keyspace_hits = ival + keyspaceHits = ival case "keyspace_misses": - keyspace_misses = ival + keyspaceMisses = ival case "rdb_last_save_time": // influxdb can't calculate this, so we have to do it fields["rdb_last_save_time_elapsed"] = time.Now().Unix() - ival @@ -459,11 +459,11 @@ func gatherInfoOutput( fields[metric] = val } - var keyspace_hitrate float64 = 0.0 - if keyspace_hits != 0 || keyspace_misses != 0 { - keyspace_hitrate = float64(keyspace_hits) / float64(keyspace_hits+keyspace_misses) + var keyspaceHitrate float64 + if keyspaceHits != 0 || keyspaceMisses != 0 { + keyspaceHitrate = float64(keyspaceHits) / float64(keyspaceHits+keyspaceMisses) } - fields["keyspace_hitrate"] = keyspace_hitrate + fields["keyspace_hitrate"] = keyspaceHitrate o := RedisFieldTypes{} @@ -482,12 +482,12 @@ func gatherKeyspaceLine( name string, line string, acc telegraf.Accumulator, - global_tags map[string]string, + globalTags map[string]string, ) { if strings.Contains(line, "keys=") { fields := make(map[string]interface{}) tags := make(map[string]string) - for k, v := range global_tags { + for k, v := range globalTags { tags[k] = v } tags["database"] = name @@ -511,7 +511,7 @@ func gatherCommandstateLine( name string, line string, acc telegraf.Accumulator, - global_tags map[string]string, + globalTags map[string]string, ) { if !strings.HasPrefix(name, "cmdstat") { return @@ -519,7 +519,7 @@ func gatherCommandstateLine( fields := make(map[string]interface{}) tags := make(map[string]string) - for k, v := range global_tags { + for k, v := range globalTags { tags[k] = v } tags["command"] = strings.TrimPrefix(name, "cmdstat_") @@ -556,11 +556,11 @@ func gatherReplicationLine( name string, line string, acc telegraf.Accumulator, - global_tags map[string]string, + globalTags map[string]string, ) { fields := make(map[string]interface{}) tags := make(map[string]string) - for k, v := range global_tags { + for k, v := range globalTags { tags[k] = v } diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index c1dda901b7736..ee642a50ec380 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -459,7 +459,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { i := f.OidIndexLength + 1 // leading separator idx = strings.Map(func(r rune) rune { if r == '.' { - i -= 1 + i-- } if i < 1 { return -1 @@ -641,7 +641,7 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { case int32: v = int64(vt) case int64: - v = int64(vt) + v = vt case uint: v = int64(vt) case uint8: @@ -864,28 +864,6 @@ func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, c return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err } -func SnmpTranslateForce(oid string, mibName string, oidNum string, oidText string, conversion string) { - snmpTranslateCachesLock.Lock() - defer snmpTranslateCachesLock.Unlock() - if snmpTranslateCaches == nil { - snmpTranslateCaches = map[string]snmpTranslateCache{} - } - - var stc snmpTranslateCache - stc.mibName = mibName - stc.oidNum = oidNum - stc.oidText = oidText - stc.conversion = conversion - stc.err = nil - snmpTranslateCaches[oid] = stc -} - -func SnmpTranslateClear() { - snmpTranslateCachesLock.Lock() - defer snmpTranslateCachesLock.Unlock() - snmpTranslateCaches = map[string]snmpTranslateCache{} -} - func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { var out []byte if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { diff --git a/plugins/inputs/solr/solr.go b/plugins/inputs/solr/solr.go index ce44fa0869c20..78652c6aab618 100644 --- a/plugins/inputs/solr/solr.go +++ b/plugins/inputs/solr/solr.go @@ -18,10 +18,6 @@ import ( const mbeansPath = "/admin/mbeans?stats=true&wt=json&cat=CORE&cat=QUERYHANDLER&cat=UPDATEHANDLER&cat=CACHE" const adminCoresPath = "/solr/admin/cores?action=STATUS&wt=json" -type node struct { - Host string `json:"host"` -} - const sampleConfig = ` ## specify a list of one or more Solr servers servers = ["http://localhost:8983"] @@ -497,10 +493,8 @@ func (s *Solr) gatherData(url string, v interface{}) error { return fmt.Errorf("solr: API responded with status-code %d, expected %d, url %s", r.StatusCode, http.StatusOK, url) } - if err = json.NewDecoder(r.Body).Decode(v); err != nil { - return err - } - return nil + + return json.NewDecoder(r.Body).Decode(v) } func init() { diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index 431076743101a..5e652148d3a27 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -201,24 +201,24 @@ func (g *lockedSeriesGrouper) Add( } // ListMetricDescriptors implements metricClient interface -func (c *stackdriverMetricClient) ListMetricDescriptors( +func (smc *stackdriverMetricClient) ListMetricDescriptors( ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, ) (<-chan *metricpb.MetricDescriptor, error) { mdChan := make(chan *metricpb.MetricDescriptor, 1000) go func() { - c.log.Debugf("List metric descriptor request filter: %s", req.Filter) + smc.log.Debugf("List metric descriptor request filter: %s", req.Filter) defer close(mdChan) // Iterate over metric descriptors and send them to buffered channel - mdResp := c.conn.ListMetricDescriptors(ctx, req) - c.listMetricDescriptorsCalls.Incr(1) + mdResp := smc.conn.ListMetricDescriptors(ctx, req) + smc.listMetricDescriptorsCalls.Incr(1) for { mdDesc, mdErr := mdResp.Next() if mdErr != nil { if mdErr != iterator.Done { - c.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr) + smc.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr) } break } @@ -230,24 +230,24 @@ func (c *stackdriverMetricClient) ListMetricDescriptors( } // ListTimeSeries implements metricClient interface -func (c *stackdriverMetricClient) ListTimeSeries( +func (smc *stackdriverMetricClient) ListTimeSeries( ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, ) (<-chan *monitoringpb.TimeSeries, error) { tsChan := make(chan *monitoringpb.TimeSeries, 1000) go func() { - c.log.Debugf("List time series request filter: %s", req.Filter) + smc.log.Debugf("List time series request filter: %s", req.Filter) defer close(tsChan) // Iterate over timeseries and send them to buffered channel - tsResp := c.conn.ListTimeSeries(ctx, req) - c.listTimeSeriesCalls.Incr(1) + tsResp := smc.conn.ListTimeSeries(ctx, req) + smc.listTimeSeriesCalls.Incr(1) for { tsDesc, tsErr := tsResp.Next() if tsErr != nil { if tsErr != iterator.Done { - c.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr) + smc.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr) } break } @@ -259,8 +259,8 @@ func (c *stackdriverMetricClient) ListTimeSeries( } // Close implements metricClient interface -func (s *stackdriverMetricClient) Close() error { - return s.conn.Close() +func (smc *stackdriverMetricClient) Close() error { + return smc.conn.Close() } // Description implements telegraf.Input interface diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index a88fe847c445b..e1b6e837c3847 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -21,9 +21,9 @@ import ( ) const ( - // UDP_MAX_PACKET_SIZE is the UDP packet limit, see + // UdpMaxPacketSize is the UDP packet limit, see // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure - UDP_MAX_PACKET_SIZE int = 64 * 1024 + UdpMaxPacketSize int = 64 * 1024 defaultFieldName = "value" @@ -31,7 +31,6 @@ const ( defaultSeparator = "_" defaultAllowPendingMessage = 10000 - MaxTCPConnections = 250 parserGoRoutines = 5 ) @@ -203,7 +202,7 @@ type cacheddistributions struct { tags map[string]string } -func (_ *Statsd) Description() string { +func (s *Statsd) Description() string { return "Statsd UDP/TCP Server" } @@ -273,7 +272,7 @@ const sampleConfig = ` #max_ttl = "1000h" ` -func (_ *Statsd) SampleConfig() string { +func (s *Statsd) SampleConfig() string { return sampleConfig } @@ -499,7 +498,7 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { s.UDPlistener.SetReadBuffer(s.ReadBufferSize) } - buf := make([]byte, UDP_MAX_PACKET_SIZE) + buf := make([]byte, UdpMaxPacketSize) for { select { case <-s.done: diff --git a/plugins/inputs/swap/swap.go b/plugins/inputs/swap/swap.go index eabb40a038e7d..c7c614c1ba83b 100644 --- a/plugins/inputs/swap/swap.go +++ b/plugins/inputs/swap/swap.go @@ -12,14 +12,14 @@ type SwapStats struct { ps system.PS } -func (_ *SwapStats) Description() string { +func (ss *SwapStats) Description() string { return "Read metrics about swap memory usage" } -func (_ *SwapStats) SampleConfig() string { return "" } +func (ss *SwapStats) SampleConfig() string { return "" } -func (s *SwapStats) Gather(acc telegraf.Accumulator) error { - swap, err := s.ps.SwapStat() +func (ss *SwapStats) Gather(acc telegraf.Accumulator) error { + swap, err := ss.ps.SwapStat() if err != nil { return fmt.Errorf("error getting swap memory info: %s", err) } diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go index 4baaf6ffb0463..5c39903dbce7f 100644 --- a/plugins/inputs/webhooks/webhooks.go +++ b/plugins/inputs/webhooks/webhooks.go @@ -2,7 +2,6 @@ package webhooks import ( "fmt" - "log" "net" "net/http" "reflect" @@ -28,14 +27,16 @@ func init() { } type Webhooks struct { - ServiceAddress string + ServiceAddress string `toml:"service_address"` - Github *github.GithubWebhook - Filestack *filestack.FilestackWebhook - Mandrill *mandrill.MandrillWebhook - Rollbar *rollbar.RollbarWebhook - Papertrail *papertrail.PapertrailWebhook - Particle *particle.ParticleWebhook + Github *github.GithubWebhook `toml:"github"` + Filestack *filestack.FilestackWebhook `toml:"filestack"` + Mandrill *mandrill.MandrillWebhook `toml:"mandrill"` + Rollbar *rollbar.RollbarWebhook `toml:"rollbar"` + Papertrail *papertrail.PapertrailWebhook `toml:"papertrail"` + Particle *particle.ParticleWebhook `toml:"particle"` + + Log telegraf.Logger `toml:"-"` srv *http.Server } @@ -110,25 +111,24 @@ func (wb *Webhooks) Start(acc telegraf.Accumulator) error { ln, err := net.Listen("tcp", fmt.Sprintf("%s", wb.ServiceAddress)) if err != nil { - log.Fatalf("E! Error starting server: %v", err) - return err + return fmt.Errorf("error starting server: %v", err) } go func() { if err := wb.srv.Serve(ln); err != nil { if err != http.ErrServerClosed { - acc.AddError(fmt.Errorf("E! Error listening: %v", err)) + acc.AddError(fmt.Errorf("error listening: %v", err)) } } }() - log.Printf("I! Started the webhooks service on %s\n", wb.ServiceAddress) + wb.Log.Infof("Started the webhooks service on %s", wb.ServiceAddress) return nil } -func (rb *Webhooks) Stop() { - rb.srv.Close() - log.Println("I! Stopping the Webhooks service") +func (wb *Webhooks) Stop() { + wb.srv.Close() + wb.Log.Infof("Stopping the Webhooks service") } diff --git a/plugins/outputs/application_insights/mocks/diagnostics_message_subscriber.go b/plugins/outputs/application_insights/mocks/diagnostics_message_subscriber.go index ba7007d4061d5..841de1ac87728 100644 --- a/plugins/outputs/application_insights/mocks/diagnostics_message_subscriber.go +++ b/plugins/outputs/application_insights/mocks/diagnostics_message_subscriber.go @@ -1,4 +1,4 @@ -// Code generated by mockery v1.0.0 +// Code generated by mockery v1.0.0. DO NOT EDIT. package mocks import appinsights "github.com/Microsoft/ApplicationInsights-Go/appinsights" diff --git a/plugins/outputs/application_insights/mocks/transmitter.go b/plugins/outputs/application_insights/mocks/transmitter.go index 5cc56fbb1ee1f..4faa715f78836 100644 --- a/plugins/outputs/application_insights/mocks/transmitter.go +++ b/plugins/outputs/application_insights/mocks/transmitter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v1.0.0 +// Code generated by mockery v1.0.0. DO NOT EDIT. package mocks import appinsights "github.com/Microsoft/ApplicationInsights-Go/appinsights" diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 5e59ba2aaec1d..82aebbdcca233 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -1,7 +1,6 @@ package cloudwatch import ( - "log" "math" "sort" "strings" @@ -30,6 +29,8 @@ type CloudWatch struct { svc *cloudwatch.CloudWatch WriteStatistics bool `toml:"write_statistics"` + + Log telegraf.Logger `toml:"-"` } type statisticType int @@ -253,7 +254,7 @@ func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error { _, err := c.svc.PutMetricData(params) if err != nil { - log.Printf("E! CloudWatch: Unable to write to CloudWatch : %+v \n", err.Error()) + c.Log.Errorf("Unable to write to CloudWatch : %+v", err.Error()) } return err @@ -265,7 +266,7 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch numberOfPartitions := len(datums) / size if len(datums)%size != 0 { - numberOfPartitions += 1 + numberOfPartitions++ } partitions := make([][]*cloudwatch.MetricDatum, numberOfPartitions) diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index b2466e4d046d4..f0956689a5685 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -31,7 +31,7 @@ func TestBuildDimensions(t *testing.T) { i := 0 for k := range testPoint.Tags() { tagKeys[i] = k - i += 1 + i++ } sort.Strings(tagKeys) @@ -151,7 +151,6 @@ func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) { } func TestPartitionDatums(t *testing.T) { - assert := assert.New(t) testDatum := cloudwatch.MetricDatum{ diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 3e3e5ac9141fa..52978539260bd 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -47,7 +47,7 @@ type Metric struct { type Point [2]float64 -const datadog_api = "https://app.datadoghq.com/api/v1/series" +const datadogApi = "https://app.datadoghq.com/api/v1/series" func (d *Datadog) Connect() error { if d.Apikey == "" { @@ -166,7 +166,7 @@ func buildTags(tagList []*telegraf.Tag) []string { index := 0 for _, tag := range tagList { tags[index] = fmt.Sprintf("%s:%s", tag.Key, tag.Value) - index += 1 + index++ } return tags } @@ -208,7 +208,7 @@ func (d *Datadog) Close() error { func init() { outputs.Add("datadog", func() telegraf.Output { return &Datadog{ - URL: datadog_api, + URL: datadogApi, } }) } diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 522512f7f6c42..afb97efb16a65 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -196,7 +196,7 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { // write metric id,tags and value switch metric.Type() { case telegraf.Counter: - var delta float64 = 0 + var delta float64 // Check if LastValue exists if lastvalue, ok := d.State[metricID+tagb.String()]; ok { @@ -236,7 +236,7 @@ func (d *Dynatrace) send(msg []byte) error { req, err := http.NewRequest("POST", d.URL, bytes.NewBuffer(msg)) if err != nil { d.Log.Errorf("Dynatrace error: %s", err.Error()) - return fmt.Errorf("Dynatrace error while creating HTTP request:, %s", err.Error()) + return fmt.Errorf("error while creating HTTP request:, %s", err.Error()) } req.Header.Add("Content-Type", "text/plain; charset=UTF-8") @@ -250,7 +250,7 @@ func (d *Dynatrace) send(msg []byte) error { if err != nil { d.Log.Errorf("Dynatrace error: %s", err.Error()) fmt.Println(req) - return fmt.Errorf("Dynatrace error while sending HTTP request:, %s", err.Error()) + return fmt.Errorf("error while sending HTTP request:, %s", err.Error()) } defer resp.Body.Close() @@ -263,7 +263,7 @@ func (d *Dynatrace) send(msg []byte) error { bodyString := string(bodyBytes) d.Log.Debugf("Dynatrace returned: %s", bodyString) } else { - return fmt.Errorf("Dynatrace request failed with response code:, %d", resp.StatusCode) + return fmt.Errorf("request failed with response code:, %d", resp.StatusCode) } return nil diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 3798f107aa157..16cb923ee2db5 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -102,7 +102,7 @@ func (f *File) Description() string { } func (f *File) Write(metrics []telegraf.Metric) error { - var writeErr error = nil + var writeErr error if f.UseBatchFormat { octets, err := f.serializer.SerializeBatch(metrics) @@ -123,7 +123,7 @@ func (f *File) Write(metrics []telegraf.Metric) error { _, err = f.writer.Write(b) if err != nil { - writeErr = fmt.Errorf("E! [outputs.file] failed to write message: %v", err) + writeErr = fmt.Errorf("failed to write message: %v", err) } } } diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 95d3fcf71a096..68e0a135a9a3b 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -171,11 +171,7 @@ func (h *HTTP) Write(metrics []telegraf.Metric) error { return err } - if err := h.write(reqBody); err != nil { - return err - } - - return nil + return h.write(reqBody) } func (h *HTTP) write(reqBody []byte) error { diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index 0d94452389269..e62919cf43b13 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -38,7 +38,6 @@ func (e APIError) Error() string { const ( defaultRequestTimeout = time.Second * 5 defaultMaxWait = 60 // seconds - defaultDatabase = "telegraf" ) type HTTPConfig struct { @@ -171,7 +170,7 @@ func (g genericRespError) Error() string { func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error { if c.retryTime.After(time.Now()) { - return errors.New("Retry time has not elapsed") + return errors.New("retry time has not elapsed") } batches := make(map[string][]telegraf.Metric) diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go index 6076297f8c83a..c7dd20a5ca0db 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "log" "math/rand" "net/url" "time" @@ -96,12 +95,12 @@ type InfluxDB struct { UintSupport bool `toml:"influx_uint_support"` tls.ClientConfig + Log telegraf.Logger `toml:"-"` + clients []Client } func (i *InfluxDB) Connect() error { - ctx := context.Background() - if len(i.URLs) == 0 { i.URLs = append(i.URLs, defaultURL) } @@ -122,7 +121,7 @@ func (i *InfluxDB) Connect() error { switch parts.Scheme { case "http", "https", "unix": - c, err := i.getHTTPClient(ctx, parts, proxy) + c, err := i.getHTTPClient(parts, proxy) if err != nil { return err } @@ -165,13 +164,13 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { return nil } - log.Printf("E! [outputs.influxdb_v2] when writing to [%s]: %v", client.URL(), err) + i.Log.Errorf("When writing to [%s]: %v", client.URL(), err) } return err } -func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.URL) (Client, error) { +func (i *InfluxDB) getHTTPClient(url *url.URL, proxy *url.URL) (Client, error) { tlsConfig, err := i.ClientConfig.TLSConfig() if err != nil { return nil, err diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index 42eb824fc698a..d41cc94d5a2aa 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -165,11 +165,7 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error { } } - if err := http.flush(); err != nil { - return err - } - - return nil + return http.flush() } func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error { @@ -235,9 +231,9 @@ func buildValue(v interface{}) (string, error) { var retv string switch p := v.(type) { case int64: - retv = IntToString(int64(p)) + retv = IntToString(p) case uint64: - retv = UIntToString(uint64(p)) + retv = UIntToString(p) case float64: retv = FloatToString(float64(p)) default: @@ -246,16 +242,16 @@ func buildValue(v interface{}) (string, error) { return retv, nil } -func IntToString(input_num int64) string { - return strconv.FormatInt(input_num, 10) +func IntToString(inputNum int64) string { + return strconv.FormatInt(inputNum, 10) } -func UIntToString(input_num uint64) string { - return strconv.FormatUint(input_num, 10) +func UIntToString(inputNum uint64) string { + return strconv.FormatUint(inputNum, 10) } -func FloatToString(input_num float64) string { - return strconv.FormatFloat(input_num, 'f', 6, 64) +func FloatToString(inputNum float64) string { + return strconv.FormatFloat(inputNum, 'f', 6, 64) } func (o *OpenTSDB) SampleConfig() string { diff --git a/plugins/outputs/riemann_legacy/riemann.go b/plugins/outputs/riemann_legacy/riemann.go index 64d9f997061e7..a123bd7d0578b 100644 --- a/plugins/outputs/riemann_legacy/riemann.go +++ b/plugins/outputs/riemann_legacy/riemann.go @@ -2,7 +2,6 @@ package riemann_legacy import ( "fmt" - "log" "os" "sort" "strings" @@ -12,12 +11,13 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" ) -const deprecationMsg = "E! Error: this Riemann output plugin will be deprecated in a future release, see https://github.com/influxdata/telegraf/issues/1878 for more details & discussion." +const deprecationMsg = "Error: this Riemann output plugin will be deprecated in a future release, see https://github.com/influxdata/telegraf/issues/1878 for more details & discussion." type Riemann struct { - URL string - Transport string - Separator string + URL string `toml:"url"` + Transport string `toml:"transport"` + Separator string `toml:"separator"` + Log telegraf.Logger `toml:"-"` client *raidman.Client } @@ -32,7 +32,7 @@ var sampleConfig = ` ` func (r *Riemann) Connect() error { - log.Printf(deprecationMsg) + r.Log.Error(deprecationMsg) c, err := raidman.Dial(r.Transport, r.URL) if err != nil { @@ -62,7 +62,7 @@ func (r *Riemann) Description() string { } func (r *Riemann) Write(metrics []telegraf.Metric) error { - log.Printf(deprecationMsg) + r.Log.Error(deprecationMsg) if len(metrics) == 0 { return nil } @@ -140,7 +140,7 @@ func serviceName(s string, n string, t map[string]string, f string) string { tagStrings = append(tagStrings, t[tagName]) } } - var tagString string = strings.Join(tagStrings, s) + var tagString = strings.Join(tagStrings, s) if tagString != "" { serviceStrings = append(serviceStrings, tagString) } diff --git a/plugins/parsers/graphite/config.go b/plugins/parsers/graphite/config.go index 915077c06b299..43c7058693b33 100644 --- a/plugins/parsers/graphite/config.go +++ b/plugins/parsers/graphite/config.go @@ -19,11 +19,7 @@ type Config struct { // Validate validates the config's templates and tags. func (c *Config) Validate() error { - if err := c.validateTemplates(); err != nil { - return err - } - - return nil + return c.validateTemplates() } func (c *Config) validateTemplates() error { diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index f85435ed54644..adc89f407f4d3 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -82,8 +82,8 @@ func NewSeriesParser(handler *MetricHandler) *Parser { } } -func (h *Parser) SetTimeFunc(f TimeFunc) { - h.handler.SetTimeFunc(f) +func (p *Parser) SetTimeFunc(f TimeFunc) { + p.handler.SetTimeFunc(f) } func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { @@ -178,18 +178,18 @@ func NewStreamParser(r io.Reader) *StreamParser { // SetTimeFunc changes the function used to determine the time of metrics // without a timestamp. The default TimeFunc is time.Now. Useful mostly for // testing, or perhaps if you want all metrics to have the same timestamp. -func (h *StreamParser) SetTimeFunc(f TimeFunc) { - h.handler.SetTimeFunc(f) +func (sp *StreamParser) SetTimeFunc(f TimeFunc) { + sp.handler.SetTimeFunc(f) } -func (h *StreamParser) SetTimePrecision(u time.Duration) { - h.handler.SetTimePrecision(u) +func (sp *StreamParser) SetTimePrecision(u time.Duration) { + sp.handler.SetTimePrecision(u) } // Next parses the next item from the stream. You can repeat calls to this // function if it returns ParseError to get the next metric or error. -func (p *StreamParser) Next() (telegraf.Metric, error) { - err := p.machine.Next() +func (sp *StreamParser) Next() (telegraf.Metric, error) { + err := sp.machine.Next() if err == EOF { return nil, err } @@ -200,16 +200,16 @@ func (p *StreamParser) Next() (telegraf.Metric, error) { if err != nil { return nil, &ParseError{ - Offset: p.machine.Position(), - LineOffset: p.machine.LineOffset(), - LineNumber: p.machine.LineNumber(), - Column: p.machine.Column(), + Offset: sp.machine.Position(), + LineOffset: sp.machine.LineOffset(), + LineNumber: sp.machine.LineNumber(), + Column: sp.machine.Column(), msg: err.Error(), - buf: p.machine.LineText(), + buf: sp.machine.LineText(), } } - metric, err := p.handler.Metric() + metric, err := sp.handler.Metric() if err != nil { return nil, err } @@ -218,27 +218,27 @@ func (p *StreamParser) Next() (telegraf.Metric, error) { } // Position returns the current byte offset into the data. -func (p *StreamParser) Position() int { - return p.machine.Position() +func (sp *StreamParser) Position() int { + return sp.machine.Position() } // LineOffset returns the byte offset of the current line. -func (p *StreamParser) LineOffset() int { - return p.machine.LineOffset() +func (sp *StreamParser) LineOffset() int { + return sp.machine.LineOffset() } // LineNumber returns the current line number. Lines are counted based on the // regular expression `\r?\n`. -func (p *StreamParser) LineNumber() int { - return p.machine.LineNumber() +func (sp *StreamParser) LineNumber() int { + return sp.machine.LineNumber() } // Column returns the current column. -func (p *StreamParser) Column() int { - return p.machine.Column() +func (sp *StreamParser) Column() int { + return sp.machine.Column() } // LineText returns the text of the current line that has been parsed so far. -func (p *StreamParser) LineText() string { - return p.machine.LineText() +func (sp *StreamParser) LineText() string { + return sp.machine.LineText() } diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go index 7ae455d47dbbd..d7984fb99b5b1 100644 --- a/plugins/parsers/wavefront/parser.go +++ b/plugins/parsers/wavefront/parser.go @@ -13,7 +13,7 @@ import ( "github.com/influxdata/telegraf/metric" ) -const MAX_BUFFER_SIZE = 2 +const MaxBufferSize = 2 type Point struct { Name string @@ -170,9 +170,9 @@ func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.M func (p *PointParser) scan() (Token, string) { // If we have a token on the buffer, then return it. if p.buf.n != 0 { - idx := p.buf.n % MAX_BUFFER_SIZE + idx := p.buf.n % MaxBufferSize tok, lit := p.buf.tok[idx], p.buf.lit[idx] - p.buf.n -= 1 + p.buf.n-- return tok, lit } @@ -188,8 +188,8 @@ func (p *PointParser) scan() (Token, string) { func (p *PointParser) buffer(tok Token, lit string) { // create the buffer if it is empty if len(p.buf.tok) == 0 { - p.buf.tok = make([]Token, MAX_BUFFER_SIZE) - p.buf.lit = make([]string, MAX_BUFFER_SIZE) + p.buf.tok = make([]Token, MaxBufferSize) + p.buf.lit = make([]string, MaxBufferSize) } // for now assume a simple circular buffer of length two @@ -203,9 +203,9 @@ func (p *PointParser) unscan() { } func (p *PointParser) unscanTokens(n int) { - if n > MAX_BUFFER_SIZE { + if n > MaxBufferSize { // just log for now - log.Printf("cannot unscan more than %d tokens", MAX_BUFFER_SIZE) + log.Printf("cannot unscan more than %d tokens", MaxBufferSize) } p.buf.n += n } diff --git a/plugins/processors/port_name/port_name.go b/plugins/processors/port_name/port_name.go index 7866952314f2a..7fc459c034ff1 100644 --- a/plugins/processors/port_name/port_name.go +++ b/plugins/processors/port_name/port_name.go @@ -46,11 +46,11 @@ type PortName struct { Log telegraf.Logger `toml:"-"` } -func (d *PortName) SampleConfig() string { +func (pn *PortName) SampleConfig() string { return sampleConfig } -func (d *PortName) Description() string { +func (pn *PortName) Description() string { return "Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file" } @@ -106,22 +106,22 @@ func readServices(r io.Reader) sMap { return services } -func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { +func (pn *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { for _, m := range metrics { var portProto string var fromField bool - if len(d.SourceTag) > 0 { - if tag, ok := m.GetTag(d.SourceTag); ok { - portProto = string([]byte(tag)) + if len(pn.SourceTag) > 0 { + if tag, ok := m.GetTag(pn.SourceTag); ok { + portProto = tag } } - if len(d.SourceField) > 0 { - if field, ok := m.GetField(d.SourceField); ok { + if len(pn.SourceField) > 0 { + if field, ok := m.GetField(pn.SourceField); ok { switch v := field.(type) { default: - d.Log.Errorf("Unexpected type %t in source field; must be string or int", v) + pn.Log.Errorf("Unexpected type %t in source field; must be string or int", v) continue case int64: portProto = strconv.FormatInt(v, 10) @@ -143,7 +143,7 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { if l == 0 { // Empty tag - d.Log.Errorf("empty port tag: %v", d.SourceTag) + pn.Log.Errorf("empty port tag: %v", pn.SourceTag) continue } @@ -154,25 +154,25 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { port, err = strconv.Atoi(val) if err != nil { // Can't convert port to string - d.Log.Errorf("error converting port to integer: %v", val) + pn.Log.Errorf("error converting port to integer: %v", val) continue } } - proto := d.DefaultProtocol + proto := pn.DefaultProtocol if l > 1 && len(portProtoSlice[1]) > 0 { proto = portProtoSlice[1] } - if len(d.ProtocolTag) > 0 { - if tag, ok := m.GetTag(d.ProtocolTag); ok { + if len(pn.ProtocolTag) > 0 { + if tag, ok := m.GetTag(pn.ProtocolTag); ok { proto = tag } } - if len(d.ProtocolField) > 0 { - if field, ok := m.GetField(d.ProtocolField); ok { + if len(pn.ProtocolField) > 0 { + if field, ok := m.GetField(pn.ProtocolField); ok { switch v := field.(type) { default: - d.Log.Errorf("Unexpected type %t in protocol field; must be string", v) + pn.Log.Errorf("Unexpected type %t in protocol field; must be string", v) continue case string: proto = v @@ -190,7 +190,7 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { // normally has entries for both, so our map does too. If // not, it's very likely the source tag or the services // file doesn't make sense. - d.Log.Errorf("protocol not found in services map: %v", proto) + pn.Log.Errorf("protocol not found in services map: %v", proto) continue } @@ -200,21 +200,21 @@ func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { // // Not all ports are named so this isn't an error, but // it's helpful to know when debugging. - d.Log.Debugf("port not found in services map: %v", port) + pn.Log.Debugf("port not found in services map: %v", port) continue } if fromField { - m.AddField(d.Dest, service) + m.AddField(pn.Dest, service) } else { - m.AddTag(d.Dest, service) + m.AddTag(pn.Dest, service) } } return metrics } -func (h *PortName) Init() error { +func (pn *PortName) Init() error { services = make(sMap) readServicesFile() return nil diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index ff20039f3a3e2..7653283724d2c 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -276,8 +276,7 @@ func (t *TopK) push() []telegraf.Metric { } // The return value that will hold the returned metrics - var ret []telegraf.Metric = make([]telegraf.Metric, 0, 0) - + var ret = make([]telegraf.Metric, 0, 0) // Get the top K metrics for each field and add them to the return value addedKeys := make(map[string]bool) for _, field := range t.Fields { @@ -317,11 +316,11 @@ func (t *TopK) push() []telegraf.Metric { result := make([]telegraf.Metric, 0, len(ret)) for _, m := range ret { - copy, err := metric.New(m.Name(), m.Tags(), m.Fields(), m.Time(), m.Type()) + newMetric, err := metric.New(m.Name(), m.Tags(), m.Fields(), m.Time(), m.Type()) if err != nil { continue } - result = append(result, copy) + result = append(result, newMetric) } return result @@ -412,7 +411,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr continue } mean[field] += val - meanCounters[field] += 1 + meanCounters[field]++ } } // Divide by the number of recorded measurements collected for every field @@ -423,7 +422,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr continue } mean[k] = mean[k] / meanCounters[k] - noMeasurementsFound = noMeasurementsFound && false + noMeasurementsFound = false } if noMeasurementsFound { diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go index 048d3afd8b328..978614376dabb 100644 --- a/plugins/serializers/influx/influx.go +++ b/plugins/serializers/influx/influx.go @@ -237,7 +237,7 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error { // Additional length needed for field separator `,` if !firstField { - bytesNeeded += 1 + bytesNeeded++ } if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes { diff --git a/plugins/serializers/influx/reader.go b/plugins/serializers/influx/reader.go index 55b6c2b4130ec..4e0b60343172b 100644 --- a/plugins/serializers/influx/reader.go +++ b/plugins/serializers/influx/reader.go @@ -50,7 +50,7 @@ func (r *reader) Read(p []byte) (int, error) { for _, metric := range r.metrics[r.offset:] { _, err := r.serializer.Write(r.buf, metric) - r.offset += 1 + r.offset++ if err != nil { r.buf.Reset() if _, ok := err.(*MetricError); ok { diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go index aca801d561425..a2dfee71f9892 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go @@ -187,7 +187,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { metrickey, promts = getPromTS(metricName, labels, value, metric.Time()) } default: - return nil, fmt.Errorf("Unknown type %v", metric.Type()) + return nil, fmt.Errorf("unknown type %v", metric.Type()) } // A batch of metrics can contain multiple values for a single @@ -205,7 +205,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { } var promTS = make([]*prompb.TimeSeries, len(entries)) - var i int64 = 0 + var i int64 for _, promts := range entries { promTS[i] = promts i++ diff --git a/selfstat/selfstat.go b/selfstat/selfstat.go index a60ee099e0438..f98821fa97c6c 100644 --- a/selfstat/selfstat.go +++ b/selfstat/selfstat.go @@ -1,4 +1,4 @@ -// selfstat is a package for tracking and collecting internal statistics +// Package selfstat is a package for tracking and collecting internal statistics // about telegraf. Metrics can be registered using this package, and then // incremented or set within your code. If the inputs.internal plugin is enabled, // then all registered stats will be collected as they would by any other input From 458f9d1e47240c62ec6fc39ae6d910f969ba9c9d Mon Sep 17 00:00:00 2001 From: sarvanikonda <61405486+sarvanikonda@users.noreply.github.com> Date: Wed, 17 Feb 2021 21:47:13 +0530 Subject: [PATCH 225/761] added member_id as tag(as it is a unique value) for redfish plugin and added address of the server when the status is other than 200 for better debugging (#8813) --- plugins/inputs/redfish/README.md | 30 ++++--- plugins/inputs/redfish/redfish.go | 13 ++- plugins/inputs/redfish/redfish_test.go | 105 ++++++++++++++++--------- 3 files changed, 95 insertions(+), 53 deletions(-) diff --git a/plugins/inputs/redfish/README.md b/plugins/inputs/redfish/README.md index a22b9d3141741..cabf7e088047b 100644 --- a/plugins/inputs/redfish/README.md +++ b/plugins/inputs/redfish/README.md @@ -34,6 +34,7 @@ Telegraf minimum version: Telegraf 1.15.0 - redfish_thermal_temperatures - tags: - source + - member_id - address - name - datacenter (available only if location data is found) @@ -53,6 +54,7 @@ Telegraf minimum version: Telegraf 1.15.0 + redfish_thermal_fans - tags: - source + - member_id - address - name - datacenter (available only if location data is found) @@ -73,6 +75,7 @@ Telegraf minimum version: Telegraf 1.15.0 - tags: - source - address + - member_id - name - datacenter (available only if location data is found) - rack (available only if location data is found) @@ -92,6 +95,7 @@ Telegraf minimum version: Telegraf 1.15.0 - tags: - source - address + - member_id - name - datacenter (available only if location data is found) - rack (available only if location data is found) @@ -110,18 +114,18 @@ Telegraf minimum version: Telegraf 1.15.0 ### Example Output ``` -redfish_thermal_temperatures,source=test-hostname,name=CPU1,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=41,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_temperatures,source=test-hostname,name=CPU2,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=51,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_temperatures,source=test-hostname,name=SystemBoardInlet,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=23,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_temperatures,source=test-hostname,name=SystemBoardExhaust,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=33,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1A,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17720,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1B,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17760,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_fans,source=test-hostname,name=SystemBoardFan2A,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17880,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_power_powersupplies,source=test-hostname,name=PS1Status,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=208,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000 -redfish_power_powersupplies,source=test-hostname,name=PS2Status,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=194,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000 -redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_power_voltages,source=test-hostname,name=CPU1MEM347,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_power_voltages,source=test-hostname,name=PS1voltage1,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=208,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_temperatures,source=test-hostname,name=CPU1,address=http://190.0.0.1,member_id="0"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=41,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_temperatures,source=test-hostname,name=CPU2,address=http://190.0.0.1,member_id="1"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=51,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_temperatures,source=test-hostname,name=SystemBoardInlet,address=http://190.0.0.1,member_id="2"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=23,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_temperatures,source=test-hostname,name=SystemBoardExhaust,address=http://190.0.0.1,member_id="3"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=33,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1A,address=http://190.0.0.1,member_id="0"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17720,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1B,address=http://190.0.0.1,member_id="1"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17760,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_fans,source=test-hostname,name=SystemBoardFan2A,address=http://190.0.0.1,member_id="2"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17880,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_power_powersupplies,source=test-hostname,name=PS1Status,address=http://190.0.0.1,member_id="0"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=208,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000 +redfish_power_powersupplies,source=test-hostname,name=PS2Status,address=http://190.0.0.1,member_id="1",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=194,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000 +redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,member_id="0"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,member_id="1"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_power_voltages,source=test-hostname,name=CPU1MEM347,address=http://190.0.0.1,member_id="2"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_power_voltages,source=test-hostname,name=PS1voltage1,address=http://190.0.0.1,member_id="12"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=208,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 ``` diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go index ca500ab6c819a..cf9f09359872e 100644 --- a/plugins/inputs/redfish/redfish.go +++ b/plugins/inputs/redfish/redfish.go @@ -73,6 +73,7 @@ type Chassis struct { type Power struct { PowerSupplies []struct { Name string + MemberId string PowerInputWatts *float64 PowerCapacityWatts *float64 PowerOutputWatts *float64 @@ -82,6 +83,7 @@ type Power struct { } Voltages []struct { Name string + MemberId string ReadingVolts *float64 UpperThresholdCritical *float64 UpperThresholdFatal *float64 @@ -94,6 +96,7 @@ type Power struct { type Thermal struct { Fans []struct { Name string + MemberId string Reading *int64 ReadingUnits *string UpperThresholdCritical *int64 @@ -104,6 +107,7 @@ type Thermal struct { } Temperatures []struct { Name string + MemberId string ReadingCelsius *float64 UpperThresholdCritical *float64 UpperThresholdFatal *float64 @@ -189,9 +193,10 @@ func (r *Redfish) getData(url string, payload interface{}) error { defer resp.Body.Close() if resp.StatusCode != 200 { - return fmt.Errorf("received status code %d (%s), expected 200", + return fmt.Errorf("received status code %d (%s) for address %s, expected 200", resp.StatusCode, - http.StatusText(resp.StatusCode)) + http.StatusText(resp.StatusCode), + r.Address) } body, err := ioutil.ReadAll(resp.Body) @@ -271,6 +276,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range thermal.Temperatures { tags := map[string]string{} + tags["member_id"] = j.MemberId tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname @@ -295,6 +301,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range thermal.Fans { tags := map[string]string{} fields := make(map[string]interface{}) + tags["member_id"] = j.MemberId tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname @@ -326,6 +333,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range power.PowerSupplies { tags := map[string]string{} + tags["member_id"] = j.MemberId tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname @@ -349,6 +357,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range power.Voltages { tags := map[string]string{} + tags["member_id"] = j.MemberId tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname diff --git a/plugins/inputs/redfish/redfish_test.go b/plugins/inputs/redfish/redfish_test.go index 6bd28214840b8..81a04aa1854ff 100644 --- a/plugins/inputs/redfish/redfish_test.go +++ b/plugins/inputs/redfish/redfish_test.go @@ -49,6 +49,7 @@ func TestDellApis(t *testing.T) { "redfish_thermal_temperatures", map[string]string{ "name": "CPU1 Temp", + "member_id": "iDRAC.Embedded.1#CPU1Temp", "source": "tpa-hostname", "address": address, "datacenter": "", @@ -72,6 +73,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan1A", + "member_id": "0x17||Fan.Embedded.1A", "address": address, "datacenter": "", "health": "OK", @@ -92,6 +94,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan1B", + "member_id": "0x17||Fan.Embedded.1B", "address": address, "datacenter": "", "health": "OK", @@ -112,6 +115,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan2A", + "member_id": "0x17||Fan.Embedded.2A", "address": address, "datacenter": "", "health": "OK", @@ -132,6 +136,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan2B", + "member_id": "0x17||Fan.Embedded.2B", "address": address, "datacenter": "", "health": "OK", @@ -152,6 +157,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan3A", + "member_id": "0x17||Fan.Embedded.3A", "address": address, "datacenter": "", "health": "OK", @@ -172,6 +178,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan3B", + "member_id": "0x17||Fan.Embedded.3B", "address": address, "datacenter": "", "health": "OK", @@ -192,6 +199,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan4A", + "member_id": "0x17||Fan.Embedded.4A", "address": address, "datacenter": "", "health": "OK", @@ -212,6 +220,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan4B", + "member_id": "0x17||Fan.Embedded.4B", "address": address, "datacenter": "", "health": "OK", @@ -232,6 +241,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan5A", + "member_id": "0x17||Fan.Embedded.5A", "address": address, "datacenter": "", "health": "OK", @@ -252,6 +262,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan5B", + "member_id": "0x17||Fan.Embedded.5B", "address": address, "datacenter": "", "health": "OK", @@ -272,6 +283,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan6A", + "member_id": "0x17||Fan.Embedded.6A", "address": address, "datacenter": "", "health": "OK", @@ -292,6 +304,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan6B", + "member_id": "0x17||Fan.Embedded.6B", "address": address, "datacenter": "", "health": "OK", @@ -312,6 +325,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan7A", + "member_id": "0x17||Fan.Embedded.7A", "address": address, "datacenter": "", "health": "OK", @@ -332,6 +346,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan7B", + "member_id": "0x17||Fan.Embedded.7B", "address": address, "datacenter": "", "health": "OK", @@ -352,6 +367,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan8A", + "member_id": "0x17||Fan.Embedded.8A", "address": address, "datacenter": "", "health": "OK", @@ -372,6 +388,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan8B", + "member_id": "0x17||Fan.Embedded.8B", "address": address, "datacenter": "", "health": "OK", @@ -392,6 +409,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "PS1 Status", + "member_id": "PSU.Slot.1", "address": address, "datacenter": "", "health": "OK", @@ -413,6 +431,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board DIMM PG", + "member_id": "iDRAC.Embedded.1#SystemBoardDIMMPG", "address": address, "datacenter": "", "health": "OK", @@ -431,6 +450,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board NDC PG", + "member_id": "iDRAC.Embedded.1#SystemBoardNDCPG", "address": address, "datacenter": "", "health": "OK", @@ -450,6 +470,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board PS1 PG FAIL", + "member_id": "iDRAC.Embedded.1#SystemBoardPS1PGFAIL", "address": address, "datacenter": "", "health": "OK", @@ -514,11 +535,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_temperatures", map[string]string{ - "name": "01-Inlet Ambient", - "source": "tpa-hostname", - "address": address, - "health": "OK", - "state": "Enabled", + "name": "01-Inlet Ambient", + "member_id": "0", + "source": "tpa-hostname", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_celsius": 19.0, @@ -530,11 +552,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_temperatures", map[string]string{ - "name": "44-P/S 2 Zone", - "source": "tpa-hostname", - "address": address, - "health": "OK", - "state": "Enabled", + "name": "44-P/S 2 Zone", + "source": "tpa-hostname", + "member_id": "42", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_celsius": 34.0, @@ -546,11 +569,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_fans", map[string]string{ - "source": "tpa-hostname", - "name": "Fan 1", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "Fan 1", + "member_id": "0", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_percent": 23, @@ -560,11 +584,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_fans", map[string]string{ - "source": "tpa-hostname", - "name": "Fan 2", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "Fan 2", + "member_id": "1", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_percent": 23, @@ -574,11 +599,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_fans", map[string]string{ - "source": "tpa-hostname", - "name": "Fan 3", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "Fan 3", + "member_id": "2", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_percent": 23, @@ -588,11 +614,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_power_powersupplies", map[string]string{ - "source": "tpa-hostname", - "name": "HpeServerPowerSupply", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "HpeServerPowerSupply", + "member_id": "0", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "power_capacity_watts": 800.0, @@ -604,11 +631,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_power_powersupplies", map[string]string{ - "source": "tpa-hostname", - "name": "HpeServerPowerSupply", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "HpeServerPowerSupply", + "member_id": "1", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "power_capacity_watts": 800.0, @@ -670,9 +698,10 @@ func TestInvalidUsernameorPassword(t *testing.T) { var acc testutil.Accumulator r.Init() - err := r.Gather(&acc) - require.Error(t, err) - require.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") + u, err := url.Parse(ts.URL) + require.NoError(t, err) + err = r.Gather(&acc) + require.EqualError(t, err, "received status code 401 (Unauthorized) for address http://"+u.Host+", expected 200") } func TestNoUsernameorPasswordConfiguration(t *testing.T) { From 0860487321a190b0f5d24ff0ff9e9e933a76754c Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 17 Feb 2021 10:21:47 -0600 Subject: [PATCH 226/761] Update to 1.15.8 (#8868) --- .circleci/config.yml | 4 ++-- Makefile | 4 ++-- scripts/alpine.docker | 2 +- scripts/buster.docker | 2 +- scripts/ci-1.15.docker | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6eff53ee8d5bb..5d4f78d5c5757 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,7 +12,7 @@ executors: go-1_15: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.15.5' + - image: 'quay.io/influxdb/telegraf-ci:1.15.8' environment: GOFLAGS: -p=8 mac: @@ -140,7 +140,7 @@ jobs: shell: powershell.exe steps: - checkout - - run: choco upgrade golang --version=1.15.5 + - run: choco upgrade golang --version=1.15.8 - run: choco install make - run: git config --system core.longpaths true - run: make test-windows diff --git a/Makefile b/Makefile index f74a3fd556b84..06b0cd7b456bf 100644 --- a/Makefile +++ b/Makefile @@ -171,8 +171,8 @@ plugin-%: .PHONY: ci-1.15 ci-1.15: - docker build -t quay.io/influxdb/telegraf-ci:1.15.5 - < scripts/ci-1.15.docker - docker push quay.io/influxdb/telegraf-ci:1.15.5 + docker build -t quay.io/influxdb/telegraf-ci:1.15.8 - < scripts/ci-1.15.docker + docker push quay.io/influxdb/telegraf-ci:1.15.8 .PHONY: ci-1.14 ci-1.14: diff --git a/scripts/alpine.docker b/scripts/alpine.docker index 39075571fa9d9..7be9a39e6b361 100644 --- a/scripts/alpine.docker +++ b/scripts/alpine.docker @@ -1,4 +1,4 @@ -FROM golang:1.15.5 as builder +FROM golang:1.15.8 as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/buster.docker b/scripts/buster.docker index e39cf63cc828c..e64f7680124ad 100644 --- a/scripts/buster.docker +++ b/scripts/buster.docker @@ -1,4 +1,4 @@ -FROM golang:1.15.5-buster as builder +FROM golang:1.15.8-buster as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/ci-1.15.docker b/scripts/ci-1.15.docker index afef08606a94d..2b87f29be4e3e 100644 --- a/scripts/ci-1.15.docker +++ b/scripts/ci-1.15.docker @@ -1,4 +1,4 @@ -FROM golang:1.15.5 +FROM golang:1.15.8 RUN chmod -R 755 "$GOPATH" From b991aab75d815f1337ed56be7d9eb0e39a863c44 Mon Sep 17 00:00:00 2001 From: Andreas Fuchs Date: Wed, 17 Feb 2021 15:50:25 -0500 Subject: [PATCH 227/761] plugins/filestat: Skip missing files (#7316) --- plugins/inputs/filestat/filestat.go | 24 +++++++++++++++++++++--- plugins/inputs/filestat/filestat_test.go | 17 +++++++++++++++++ 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/filestat/filestat.go b/plugins/inputs/filestat/filestat.go index bf8ea6c160361..9450f9a41b77c 100644 --- a/plugins/inputs/filestat/filestat.go +++ b/plugins/inputs/filestat/filestat.go @@ -35,11 +35,18 @@ type FileStat struct { // maps full file paths to globmatch obj globs map[string]*globpath.GlobPath + + // files that were missing - we only log the first time it's not found. + missingFiles map[string]bool + // files that had an error in Stat - we only log the first error. + filesWithErrors map[string]bool } func NewFileStat() *FileStat { return &FileStat{ - globs: make(map[string]*globpath.GlobPath), + globs: make(map[string]*globpath.GlobPath), + missingFiles: make(map[string]bool), + filesWithErrors: make(map[string]bool), } } @@ -85,12 +92,23 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error { fileInfo, err := os.Stat(fileName) if os.IsNotExist(err) { fields["exists"] = int64(0) + acc.AddFields("filestat", fields, tags) + if !f.missingFiles[fileName] { + f.Log.Warnf("File %q not found", fileName) + f.missingFiles[fileName] = true + } + continue } + f.missingFiles[fileName] = false if fileInfo == nil { - f.Log.Errorf("Unable to get info for file %q, possible permissions issue", - fileName) + if !f.filesWithErrors[fileName] { + f.filesWithErrors[fileName] = true + f.Log.Errorf("Unable to get info for file %q: %v", + fileName, err) + } } else { + f.filesWithErrors[fileName] = false fields["size_bytes"] = fileInfo.Size() fields["modification_time"] = fileInfo.ModTime().UnixNano() } diff --git a/plugins/inputs/filestat/filestat_test.go b/plugins/inputs/filestat/filestat_test.go index 79a111ffb849a..f0b843dcbc3b4 100644 --- a/plugins/inputs/filestat/filestat_test.go +++ b/plugins/inputs/filestat/filestat_test.go @@ -83,6 +83,23 @@ func TestGatherExplicitFiles(t *testing.T) { require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(0))) } +func TestNonExistentFile(t *testing.T) { + fs := NewFileStat() + fs.Log = testutil.Logger{} + fs.Md5 = true + fs.Files = []string{ + "/non/existant/file", + } + acc := testutil.Accumulator{} + require.NoError(t, acc.GatherError(fs.Gather)) + + acc.AssertContainsFields(t, "filestat", map[string]interface{}{"exists": int64(0)}) + assert.False(t, acc.HasField("filestat", "error")) + assert.False(t, acc.HasField("filestat", "md5_sum")) + assert.False(t, acc.HasField("filestat", "size_bytes")) + assert.False(t, acc.HasField("filestat", "modification_time")) +} + func TestGatherGlob(t *testing.T) { fs := NewFileStat() fs.Log = testutil.Logger{} From a5385a2557521ac4018bdb516b591c4998de147a Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 17 Feb 2021 17:02:04 -0500 Subject: [PATCH 228/761] Update changelog (cherry picked from commit 3e8cf10a86eadf24c713f839f9e5825542827964) --- CHANGELOG.md | 18 ++++++++++++++++++ etc/telegraf.conf | 5 +++++ 2 files changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 21fabb86a6e47..713dc5b4bc642 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,21 @@ +## v1.17.3 [2021-02-17] + +#### Bugfixes + + - [#7316](https://github.com/influxdata/telegraf/pull/7316) `inputs.filestat` plugins/filestat: Skip missing files + - [#8868](https://github.com/influxdata/telegraf/pull/8868) Update to Go 1.15.8 + - [#8744](https://github.com/influxdata/telegraf/pull/8744) Bump github.com/gopcua/opcua from 0.1.12 to 0.1.13 + - [#8657](https://github.com/influxdata/telegraf/pull/8657) `outputs.warp10` outputs/warp10: url encode comma in tags value + - [#8824](https://github.com/influxdata/telegraf/pull/8824) `inputs.x509_cert` inputs.x509_cert: Fix timeout issue + - [#8821](https://github.com/influxdata/telegraf/pull/8821) `inputs.mqtt_consumer` Fix reconnection issues mqtt + - [#8775](https://github.com/influxdata/telegraf/pull/8775) `outputs.influxdb` Validate the response from InfluxDB after writing/creating a database to avoid json parsing panics/errors + - [#8804](https://github.com/influxdata/telegraf/pull/8804) `inputs.snmp` Expose v4/v6-only connection-schemes through GosnmpWrapper + - [#8838](https://github.com/influxdata/telegraf/pull/8838) `agent` fix issue with reading flush_jitter output from config + - [#8839](https://github.com/influxdata/telegraf/pull/8839) `inputs.ping` fixes Sort and timeout around deadline + - [#8787](https://github.com/influxdata/telegraf/pull/8787) `inputs.ping` Update README for inputs.ping with correct cmd for native ping on Linux + - [#8771](https://github.com/influxdata/telegraf/pull/8771) Update go-ping to latest version + + ## v1.17.2 [2021-01-28] #### Bugfixes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index d6e0b165cb25b..425e6d758833d 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -5219,8 +5219,13 @@ # # Retrieves SNMP values from remote agents # [[inputs.snmp]] # ## Agent addresses to retrieve values from. +# ## format: agents = [":"] +# ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. +# ## default is udp +# ## port: optional # ## example: agents = ["udp://127.0.0.1:161"] # ## agents = ["tcp://127.0.0.1:161"] +# ## agents = ["udp4://v4only-snmp-agent"] # agents = ["udp://127.0.0.1:161"] # # ## Timeout for each request. From b6b5d34060e5a7658a63d19d1352b6dc4fd5ec71 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 17 Feb 2021 23:22:33 +0100 Subject: [PATCH 229/761] Add quantile aggregator plugin (#8594) --- go.mod | 8 +- go.sum | 15 +- plugins/aggregators/all/all.go | 1 + plugins/aggregators/quantile/README.md | 127 ++++ plugins/aggregators/quantile/algorithms.go | 110 +++ plugins/aggregators/quantile/quantile.go | 165 +++++ plugins/aggregators/quantile/quantile_test.go | 635 ++++++++++++++++++ 7 files changed, 1050 insertions(+), 11 deletions(-) create mode 100644 plugins/aggregators/quantile/README.md create mode 100644 plugins/aggregators/quantile/algorithms.go create mode 100644 plugins/aggregators/quantile/quantile.go create mode 100644 plugins/aggregators/quantile/quantile_test.go diff --git a/go.mod b/go.mod index ad020ecc1c5e0..816ace7e9ffdd 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmatcuk/doublestar/v3 v3.0.0 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 - github.com/caio/go-tdigest v2.3.0+incompatible // indirect + github.com/caio/go-tdigest v3.1.0+incompatible github.com/cenkalti/backoff v2.0.0+incompatible // indirect github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 github.com/cockroachdb/apd v1.1.0 // indirect @@ -92,7 +92,6 @@ require ( github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee github.com/kylelemons/godebug v1.1.0 // indirect - github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 // indirect github.com/lib/pq v1.3.0 // indirect github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 @@ -135,7 +134,7 @@ require ( github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect github.com/vjeantet/grok v1.0.1 github.com/vmware/govmomi v0.19.0 - github.com/wavefronthq/wavefront-sdk-go v0.9.2 + github.com/wavefronthq/wavefront-sdk-go v0.9.7 github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c @@ -150,7 +149,6 @@ require ( golang.org/x/text v0.3.3 golang.org/x/tools v0.0.0-20200317043434-63da46f3035e // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 - gonum.org/v1/gonum v0.6.2 // indirect google.golang.org/api v0.20.0 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 google.golang.org/grpc v1.33.1 @@ -159,7 +157,7 @@ require ( gopkg.in/ldap.v3 v3.1.0 gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce gopkg.in/olivere/elastic.v5 v5.0.70 - gopkg.in/yaml.v2 v2.2.8 + gopkg.in/yaml.v2 v2.3.0 gotest.tools v2.2.0+incompatible honnef.co/go/tools v0.0.1-2020.1.3 // indirect k8s.io/apimachinery v0.17.1 // indirect diff --git a/go.sum b/go.sum index 05917cdcb92c3..806e6e6cc185d 100644 --- a/go.sum +++ b/go.sum @@ -146,8 +146,8 @@ github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkN github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/caio/go-tdigest v2.3.0+incompatible h1:zP6nR0nTSUzlSqqr7F/LhslPlSZX/fZeGmgmwj2cxxY= -github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= +github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds= +github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY= github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -607,6 +607,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= @@ -628,8 +629,8 @@ github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY= github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk= -github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU= +github.com/wavefronthq/wavefront-sdk-go v0.9.7 h1:SrtABcXXeKCW5SerQYsnCzHo15GeggjZmL+DjtTy6CI= +github.com/wavefronthq/wavefront-sdk-go v0.9.7/go.mod h1:JTGsu+KKgxx+GitC65VVdftN2iep1nVpQi/8EGR6v4Y= github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOFra9xJfRXZcL2pLhMI8oNuDugNxg9Q= github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk= @@ -851,8 +852,8 @@ golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q= -gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.7.0 h1:Hdks0L0hgznZLG9nzXb8vZ0rRvqNvAcgAp84y7Mwkgw= +gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= @@ -958,6 +959,8 @@ gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/plugins/aggregators/all/all.go b/plugins/aggregators/all/all.go index f59e9450d3a49..4128d712bc994 100644 --- a/plugins/aggregators/all/all.go +++ b/plugins/aggregators/all/all.go @@ -7,5 +7,6 @@ import ( _ "github.com/influxdata/telegraf/plugins/aggregators/histogram" _ "github.com/influxdata/telegraf/plugins/aggregators/merge" _ "github.com/influxdata/telegraf/plugins/aggregators/minmax" + _ "github.com/influxdata/telegraf/plugins/aggregators/quantile" _ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter" ) diff --git a/plugins/aggregators/quantile/README.md b/plugins/aggregators/quantile/README.md new file mode 100644 index 0000000000000..77d0f856409ec --- /dev/null +++ b/plugins/aggregators/quantile/README.md @@ -0,0 +1,127 @@ +# Quantile Aggregator Plugin + +The quantile aggregator plugin aggregates specified quantiles for each numeric field +per metric it sees and emits the quantiles every `period`. + +### Configuration + +```toml +[[aggregators.quantile]] + ## General Aggregator Arguments: + ## The period on which to flush & clear the aggregator. + period = "30s" + + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false + + ## Quantiles to output in the range [0,1] + # quantiles = [0.25, 0.5, 0.75] + + ## Type of aggregation algorithm + ## Supported are: + ## "t-digest" -- approximation using centroids, can cope with large number of samples + ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7) + ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8) + ## NOTE: Do not use "exact" algorithms with large number of samples + ## to not impair performance or memory consumption! + # algorithm = "t-digest" + + ## Compression for approximation (t-digest). The value needs to be + ## greater or equal to 1.0. Smaller values will result in more + ## performance but less accuracy. + # compression = 100.0 +``` + +#### Algorithm types +##### t-digest +Proposed by [Dunning & Ertl (2019)][tdigest_paper] this type uses a +special data-structure to cluster data. These clusters are later used +to approximate the requested quantiles. The bounds of the approximation +can be controlled by the `compression` setting where smaller values +result in higher performance but less accuracy. + +Due to its incremental nature, this algorithm can handle large +numbers of samples efficiently. It is recommended for applications +where exact quantile calculation isn't required. + +For implementation details see the underlying [golang library][tdigest_lib]. + +##### exact R7 and R8 +These algorithms compute quantiles as described in [Hyndman & Fan (1996)][hyndman_fan]. +The R7 variant is used in Excel and NumPy. The R8 variant is recommended +by Hyndman & Fan due to its independence of the underlying sample distribution. + +These algorithms save all data for the aggregation `period`. They require +a lot of memory when used with a large number of series or a +large number of samples. They are slower than the `t-digest` +algorithm and are recommended only to be used with a small number of samples and series. + + +#### Benchmark (linux/amd64) +The benchmark was performed by adding 100 metrics with six numeric +(and two non-numeric) fields to the aggregator and the derive the aggregation +result. + +| algorithm | # quantiles | avg. runtime | +| :------------ | -------------:| -------------:| +| t-digest | 3 | 376372 ns/op | +| exact R7 | 3 | 9782946 ns/op | +| exact R8 | 3 | 9158205 ns/op | +| t-digest | 100 | 899204 ns/op | +| exact R7 | 100 | 7868816 ns/op | +| exact R8 | 100 | 8099612 ns/op | + +### Measurements +Measurement names are passed trough this aggregator. + +### Fields + +For all numeric fields (int32/64, uint32/64 and float32/64) new *quantile* +fields are aggregated in the form `_`. Other field +types (e.g. boolean, string) are ignored and dropped from the output. + +For example passing in the following metric as *input*: +- somemetric + - average_response_ms (float64) + - minimum_response_ms (float64) + - maximum_response_ms (float64) + - status (string) + - ok (boolean) + +and the default setting for `quantiles ` you get the following *output* +- somemetric + - average_response_ms_025 (float64) + - average_response_ms_050 (float64) + - average_response_ms_075 (float64) + - minimum_response_ms_025 (float64) + - minimum_response_ms_050 (float64) + - minimum_response_ms_075 (float64) + - maximum_response_ms_025 (float64) + - maximum_response_ms_050 (float64) + - maximum_response_ms_075 (float64) + +The `status` and `ok` fields are dropped because they are not numeric. Note that the +number of resulting fields scales with the number of `quantiles` specified. + +### Tags + +Tags are passed through to the output by this aggregator. + +### Example Output + +``` +cpu,cpu=cpu-total,host=Hugin usage_user=10.814851731872487,usage_system=2.1679541490155687,usage_irq=1.046598554697342,usage_steal=0,usage_guest_nice=0,usage_idle=85.79616247197244,usage_nice=0,usage_iowait=0,usage_softirq=0.1744330924495688,usage_guest=0 1608288360000000000 +cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_system=2.1601016518428664,usage_iowait=0.02541296060990694,usage_irq=1.0165184243964942,usage_softirq=0.1778907242693666,usage_steal=0,usage_guest_nice=0,usage_user=9.275730622616953,usage_idle=87.34434561626493,usage_nice=0 1608288370000000000 +cpu,cpu=cpu-total,host=Hugin usage_idle=85.78199052131747,usage_nice=0,usage_irq=1.0476428036915637,usage_guest=0,usage_guest_nice=0,usage_system=1.995510102269591,usage_iowait=0,usage_softirq=0.1995510102269662,usage_steal=0,usage_user=10.975305562484735 1608288380000000000 +cpu,cpu=cpu-total,host=Hugin usage_guest_nice_075=0,usage_user_050=10.814851731872487,usage_guest_075=0,usage_steal_025=0,usage_irq_025=1.031558489546918,usage_irq_075=1.0471206791944527,usage_iowait_025=0,usage_guest_050=0,usage_guest_nice_050=0,usage_nice_075=0,usage_iowait_050=0,usage_system_050=2.1601016518428664,usage_irq_050=1.046598554697342,usage_guest_nice_025=0,usage_idle_050=85.79616247197244,usage_softirq_075=0.1887208672481664,usage_steal_075=0,usage_system_025=2.0778058770562287,usage_system_075=2.1640279004292173,usage_softirq_050=0.1778907242693666,usage_nice_050=0,usage_iowait_075=0.01270648030495347,usage_user_075=10.895078647178611,usage_nice_025=0,usage_steal_050=0,usage_user_025=10.04529117724472,usage_idle_025=85.78907649664495,usage_idle_075=86.57025404411868,usage_softirq_025=0.1761619083594677,usage_guest_025=0 1608288390000000000 +``` + +# References +- Dunning & Ertl: "Computing Extremely Accurate Quantiles Using t-Digests", arXiv:1902.04023 (2019) [pdf][tdigest_paper] +- Hyndman & Fan: "Sample Quantiles in Statistical Packages", The American Statistician, vol. 50, pp. 361-365 (1996) [pdf][hyndman_fan] + + +[tdigest_paper]: https://arxiv.org/abs/1902.04023 +[tdigest_lib]: https://github.com/caio/go-tdigest +[hyndman_fan]: http://www.maths.usyd.edu.au/u/UG/SM/STAT3022/r/current/Misc/Sample%20Quantiles%20in%20Statistical%20Packages.pdf diff --git a/plugins/aggregators/quantile/algorithms.go b/plugins/aggregators/quantile/algorithms.go new file mode 100644 index 0000000000000..d2a5ac685397a --- /dev/null +++ b/plugins/aggregators/quantile/algorithms.go @@ -0,0 +1,110 @@ +package quantile + +import ( + "math" + "sort" + + "github.com/caio/go-tdigest" +) + +type algorithm interface { + Add(value float64) error + Quantile(q float64) float64 +} + +func newTDigest(compression float64) (algorithm, error) { + return tdigest.New(tdigest.Compression(compression)) +} + +type exactAlgorithmR7 struct { + xs []float64 + sorted bool +} + +func newExactR7(compression float64) (algorithm, error) { + return &exactAlgorithmR7{xs: make([]float64, 0, 100), sorted: false}, nil +} + +func (e *exactAlgorithmR7) Add(value float64) error { + e.xs = append(e.xs, value) + e.sorted = false + + return nil +} + +func (e *exactAlgorithmR7) Quantile(q float64) float64 { + size := len(e.xs) + + // No information + if len(e.xs) == 0 { + return math.NaN() + } + + // Sort the array if necessary + if !e.sorted { + sort.Float64s(e.xs) + e.sorted = true + } + + // Get the quantile index and the fraction to the neighbor + // Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R7 + // Same as Excel and Numpy. + N := float64(size) + n := q * (N - 1) + i, gamma := math.Modf(n) + j := int(i) + if j < 0 { + return e.xs[0] + } + if j >= size { + return e.xs[size-1] + } + // Linear interpolation + return e.xs[j] + gamma*(e.xs[j+1]-e.xs[j]) +} + +type exactAlgorithmR8 struct { + xs []float64 + sorted bool +} + +func newExactR8(compression float64) (algorithm, error) { + return &exactAlgorithmR8{xs: make([]float64, 0, 100), sorted: false}, nil +} + +func (e *exactAlgorithmR8) Add(value float64) error { + e.xs = append(e.xs, value) + e.sorted = false + + return nil +} + +func (e *exactAlgorithmR8) Quantile(q float64) float64 { + size := len(e.xs) + + // No information + if size == 0 { + return math.NaN() + } + + // Sort the array if necessary + if !e.sorted { + sort.Float64s(e.xs) + e.sorted = true + } + + // Get the quantile index and the fraction to the neighbor + // Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R8 + N := float64(size) + n := q*(N+1.0/3.0) - (2.0 / 3.0) // Indices are zero-base here but one-based in the paper + i, gamma := math.Modf(n) + j := int(i) + if j < 0 { + return e.xs[0] + } + if j >= size { + return e.xs[size-1] + } + // Linear interpolation + return e.xs[j] + gamma*(e.xs[j+1]-e.xs[j]) +} diff --git a/plugins/aggregators/quantile/quantile.go b/plugins/aggregators/quantile/quantile.go new file mode 100644 index 0000000000000..cb58ef2e826d2 --- /dev/null +++ b/plugins/aggregators/quantile/quantile.go @@ -0,0 +1,165 @@ +package quantile + +import ( + "fmt" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" +) + +type Quantile struct { + Quantiles []float64 `toml:"quantiles"` + Compression float64 `toml:"compression"` + AlgorithmType string `toml:"algorithm"` + + newAlgorithm newAlgorithmFunc + + cache map[uint64]aggregate + suffixes []string +} + +type aggregate struct { + name string + fields map[string]algorithm + tags map[string]string +} + +type newAlgorithmFunc func(compression float64) (algorithm, error) + +var sampleConfig = ` + ## General Aggregator Arguments: + ## The period on which to flush & clear the aggregator. + period = "30s" + + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false + + ## Quantiles to output in the range [0,1] + # quantiles = [0.25, 0.5, 0.75] + + ## Type of aggregation algorithm + ## Supported are: + ## "t-digest" -- approximation using centroids, can cope with large number of samples + ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7) + ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8) + ## NOTE: Do not use "exact" algorithms with large number of samples + ## to not impair performance or memory consumption! + # algorithm = "t-digest" + + ## Compression for approximation (t-digest). The value needs to be + ## greater or equal to 1.0. Smaller values will result in more + ## performance but less accuracy. + # compression = 100.0 +` + +func (q *Quantile) SampleConfig() string { + return sampleConfig +} + +func (q *Quantile) Description() string { + return "Keep the aggregate quantiles of each metric passing through." +} + +func (q *Quantile) Add(in telegraf.Metric) { + id := in.HashID() + if cached, ok := q.cache[id]; ok { + fields := in.Fields() + for k, algo := range cached.fields { + if field, ok := fields[k]; ok { + if v, isconvertible := convert(field); isconvertible { + algo.Add(v) + } + } + } + return + } + + // New metric, setup cache and init algorithm + a := aggregate{ + name: in.Name(), + tags: in.Tags(), + fields: make(map[string]algorithm), + } + for k, field := range in.Fields() { + if v, isconvertible := convert(field); isconvertible { + // This should never error out as we tested it in Init() + algo, _ := q.newAlgorithm(q.Compression) + algo.Add(v) + a.fields[k] = algo + } + } + q.cache[id] = a +} + +func (q *Quantile) Push(acc telegraf.Accumulator) { + for _, aggregate := range q.cache { + fields := map[string]interface{}{} + for k, algo := range aggregate.fields { + for i, qtl := range q.Quantiles { + fields[k+q.suffixes[i]] = algo.Quantile(qtl) + } + } + acc.AddFields(aggregate.name, fields, aggregate.tags) + } +} + +func (q *Quantile) Reset() { + q.cache = make(map[uint64]aggregate) +} + +func convert(in interface{}) (float64, bool) { + switch v := in.(type) { + case float64: + return v, true + case int64: + return float64(v), true + case uint64: + return float64(v), true + default: + return 0, false + } +} + +func (q *Quantile) Init() error { + switch q.AlgorithmType { + case "t-digest", "": + q.newAlgorithm = newTDigest + case "exact R7": + q.newAlgorithm = newExactR7 + case "exact R8": + q.newAlgorithm = newExactR8 + default: + return fmt.Errorf("unknown algorithm type %q", q.AlgorithmType) + } + if _, err := q.newAlgorithm(q.Compression); err != nil { + return fmt.Errorf("cannot create %q algorithm: %v", q.AlgorithmType, err) + } + + if len(q.Quantiles) == 0 { + q.Quantiles = []float64{0.25, 0.5, 0.75} + } + + duplicates := make(map[float64]bool) + q.suffixes = make([]string, len(q.Quantiles)) + for i, qtl := range q.Quantiles { + if qtl < 0.0 || qtl > 1.0 { + return fmt.Errorf("quantile %v out of range", qtl) + } + if _, found := duplicates[qtl]; found { + return fmt.Errorf("duplicate quantile %v", qtl) + } + duplicates[qtl] = true + q.suffixes[i] = fmt.Sprintf("_%03d", int(qtl*100.0)) + } + + q.Reset() + + return nil +} + +func init() { + aggregators.Add("quantile", func() telegraf.Aggregator { + return &Quantile{Compression: 100} + }) +} diff --git a/plugins/aggregators/quantile/quantile_test.go b/plugins/aggregators/quantile/quantile_test.go new file mode 100644 index 0000000000000..4095f0c5837be --- /dev/null +++ b/plugins/aggregators/quantile/quantile_test.go @@ -0,0 +1,635 @@ +package quantile + +import ( + "math/rand" + "testing" + "time" + + "github.com/google/go-cmp/cmp/cmpopts" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestConfigInvalidAlgorithm(t *testing.T) { + q := Quantile{AlgorithmType: "a strange one"} + err := q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "unknown algorithm type") +} + +func TestConfigInvalidCompression(t *testing.T) { + q := Quantile{Compression: 0, AlgorithmType: "t-digest"} + err := q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "cannot create \"t-digest\" algorithm") +} + +func TestConfigInvalidQuantiles(t *testing.T) { + q := Quantile{Compression: 100, Quantiles: []float64{-0.5}} + err := q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "quantile -0.5 out of range") + + q = Quantile{Compression: 100, Quantiles: []float64{1.5}} + err = q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "quantile 1.5 out of range") + + q = Quantile{Compression: 100, Quantiles: []float64{0.1, 0.2, 0.3, 0.1}} + err = q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "duplicate quantile") +} + +func TestSingleMetricTDigest(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{Compression: 100} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a_025": 24.75, + "a_050": 49.50, + "a_075": 74.25, + "b_025": 24.75, + "b_050": 49.50, + "b_075": 74.25, + "c_025": 24.75, + "c_050": 49.50, + "c_075": 74.25, + "d_025": 24.75, + "d_050": 49.50, + "d_075": 74.25, + "e_025": 24.75, + "e_050": 49.50, + "e_075": 74.25, + "f_025": 24.75, + "f_050": 49.50, + "f_075": 74.25, + "g_025": 0.2475, + "g_050": 0.4950, + "g_075": 0.7425, + }, + time.Now(), + ), + } + + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int32(i), + "b": int64(i), + "c": uint32(i), + "d": uint64(i), + "e": float32(i), + "f": float64(i), + "g": float64(i) / 100.0, + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon) +} + +func TestMultipleMetricsTDigest(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{Compression: 100} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{ + "a_025": 24.75, "a_050": 49.50, "a_075": 74.25, + "b_025": 24.75, "b_050": 49.50, "b_075": 74.25, + }, + time.Now(), + ), + testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{ + "a_025": 49.50, "a_050": 99.00, "a_075": 148.50, + "b_025": 49.50, "b_050": 99.00, "b_075": 148.50, + }, + time.Now(), + ), + } + + metricsA := make([]telegraf.Metric, 100) + metricsB := make([]telegraf.Metric, 100) + for i := range metricsA { + metricsA[i] = testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{"a": int64(i), "b": float64(i), "x1": "string", "x2": true}, + time.Now(), + ) + } + for i := range metricsB { + metricsB[i] = testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{"a": int64(2 * i), "b": float64(2 * i), "x1": "string", "x2": true}, + time.Now(), + ) + } + + for _, m := range metricsA { + q.Add(m) + } + for _, m := range metricsB { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + sort := testutil.SortMetrics() + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon, sort) +} + +func TestSingleMetricExactR7(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{AlgorithmType: "exact R7"} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a_025": 24.75, + "a_050": 49.50, + "a_075": 74.25, + "b_025": 24.75, + "b_050": 49.50, + "b_075": 74.25, + "c_025": 24.75, + "c_050": 49.50, + "c_075": 74.25, + "d_025": 24.75, + "d_050": 49.50, + "d_075": 74.25, + "e_025": 24.75, + "e_050": 49.50, + "e_075": 74.25, + "f_025": 24.75, + "f_050": 49.50, + "f_075": 74.25, + "g_025": 0.2475, + "g_050": 0.4950, + "g_075": 0.7425, + }, + time.Now(), + ), + } + + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int32(i), + "b": int64(i), + "c": uint32(i), + "d": uint64(i), + "e": float32(i), + "f": float64(i), + "g": float64(i) / 100.0, + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon) +} + +func TestMultipleMetricsExactR7(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{AlgorithmType: "exact R7"} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{ + "a_025": 24.75, "a_050": 49.50, "a_075": 74.25, + "b_025": 24.75, "b_050": 49.50, "b_075": 74.25, + }, + time.Now(), + ), + testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{ + "a_025": 49.50, "a_050": 99.00, "a_075": 148.50, + "b_025": 49.50, "b_050": 99.00, "b_075": 148.50, + }, + time.Now(), + ), + } + + metricsA := make([]telegraf.Metric, 100) + metricsB := make([]telegraf.Metric, 100) + for i := range metricsA { + metricsA[i] = testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{"a": int64(i), "b": float64(i), "x1": "string", "x2": true}, + time.Now(), + ) + } + for i := range metricsB { + metricsB[i] = testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{"a": int64(2 * i), "b": float64(2 * i), "x1": "string", "x2": true}, + time.Now(), + ) + } + + for _, m := range metricsA { + q.Add(m) + } + for _, m := range metricsB { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + sort := testutil.SortMetrics() + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon, sort) +} + +func TestSingleMetricExactR8(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{AlgorithmType: "exact R8"} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a_025": 24.417, + "a_050": 49.500, + "a_075": 74.583, + "b_025": 24.417, + "b_050": 49.500, + "b_075": 74.583, + "c_025": 24.417, + "c_050": 49.500, + "c_075": 74.583, + "d_025": 24.417, + "d_050": 49.500, + "d_075": 74.583, + "e_025": 24.417, + "e_050": 49.500, + "e_075": 74.583, + "f_025": 24.417, + "f_050": 49.500, + "f_075": 74.583, + "g_025": 0.24417, + "g_050": 0.49500, + "g_075": 0.74583, + }, + time.Now(), + ), + } + + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int32(i), + "b": int64(i), + "c": uint32(i), + "d": uint64(i), + "e": float32(i), + "f": float64(i), + "g": float64(i) / 100.0, + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon) +} + +func TestMultipleMetricsExactR8(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{AlgorithmType: "exact R8"} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{ + "a_025": 24.417, "a_050": 49.500, "a_075": 74.583, + "b_025": 24.417, "b_050": 49.500, "b_075": 74.583, + }, + time.Now(), + ), + testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{ + "a_025": 48.833, "a_050": 99.000, "a_075": 149.167, + "b_025": 48.833, "b_050": 99.000, "b_075": 149.167, + }, + time.Now(), + ), + } + + metricsA := make([]telegraf.Metric, 100) + metricsB := make([]telegraf.Metric, 100) + for i := range metricsA { + metricsA[i] = testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{"a": int64(i), "b": float64(i), "x1": "string", "x2": true}, + time.Now(), + ) + } + for i := range metricsB { + metricsB[i] = testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{"a": int64(2 * i), "b": float64(2 * i), "x1": "string", "x2": true}, + time.Now(), + ) + } + + for _, m := range metricsA { + q.Add(m) + } + for _, m := range metricsB { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + sort := testutil.SortMetrics() + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon, sort) +} + +func BenchmarkDefaultTDigest(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + q := Quantile{Compression: 100} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultTDigest100Q(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + quantiles := make([]float64, 100) + for i := range quantiles { + quantiles[i] = 0.01 * float64(i) + } + + q := Quantile{Compression: 100, Quantiles: quantiles} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultExactR7(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + q := Quantile{AlgorithmType: "exact R7"} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultExactR7100Q(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + quantiles := make([]float64, 100) + for i := range quantiles { + quantiles[i] = 0.01 * float64(i) + } + + q := Quantile{AlgorithmType: "exact R7", Quantiles: quantiles} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultExactR8(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + q := Quantile{AlgorithmType: "exact R8"} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultExactR8100Q(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + quantiles := make([]float64, 100) + for i := range quantiles { + quantiles[i] = 0.01 * float64(i) + } + + q := Quantile{AlgorithmType: "exact R8", Quantiles: quantiles} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} From 660eb5b63c31592d5376d28d0ba0b2223c41855a Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 18 Feb 2021 10:56:10 -0600 Subject: [PATCH 230/761] Run revive linter in CI (#8798) * Run revive linter in CI Just output the results, don't fail on it Removed the rule.exported rule * Move revive install to CI * new line * Use golangci-lint * Get v1.37 * increase timeout by a minute * try five minutes * newline missing * Update config --- .circleci/config.yml | 10 ++++++++++ .golangci.yml | 44 ++++++++++++++++++++++++++++++++++++++++++++ Makefile | 10 ++++++++++ 3 files changed, 64 insertions(+) create mode 100644 .golangci.yml diff --git a/.circleci/config.yml b/.circleci/config.yml index 5d4f78d5c5757..97f9319a64f79 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -71,6 +71,14 @@ commands: paths: - 'dist' jobs: + linter: + executor: go-1_15 + steps: + - checkout + - restore_cache: + key: go-mod-v1-{{ checksum "go.sum" }} + - run: wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.37.0 + - run: make lint deps: executor: go-1_15 steps: @@ -178,6 +186,7 @@ workflows: version: 2 check: jobs: + - 'linter' - 'macdeps': filters: tags: @@ -246,6 +255,7 @@ workflows: - 'release' nightly: jobs: + - 'linter' - 'deps' - 'macdeps' - 'test-go-1_14': diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000000000..fa95d656fd874 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,44 @@ +linters: + enable: + - revive + +linters-settings: + revive: + rules: + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: var-naming + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unused-parameter + - name: unreachable-code + - name: redefines-builtin-id + +run: + skip-dirs: + - scripts + - docs + - etc + skip-files: + - plugins/parsers/influx/machine.go* + +issues: + exclude: + - don't use an underscore in package name + - exported.*should have comment.*or be unexported diff --git a/Makefile b/Makefile index 06b0cd7b456bf..3c6e5f0291f8a 100644 --- a/Makefile +++ b/Makefile @@ -75,6 +75,7 @@ help: @echo ' test - run short unit tests' @echo ' fmt - format source files' @echo ' tidy - tidy go modules' + @echo ' lint - run linter' @echo ' check-deps - check docs/LICENSE_OF_DEPENDENCIES.md' @echo ' clean - delete build artifacts' @echo '' @@ -130,6 +131,15 @@ vet: exit 1; \ fi +.PHONY: lint +lint: +ifeq (, $(shell which golangci-lint)) + $(info golangci-lint can't be found, please install it: https://golangci-lint.run/usage/install/) + exit 1 +endif + + golangci-lint run --timeout 5m0s --issues-exit-code 0 + .PHONY: tidy tidy: go mod verify From 4d61935dec38a4bd23bac81b3dd30fc0f37eeead Mon Sep 17 00:00:00 2001 From: reimda Date: Fri, 19 Feb 2021 11:31:25 -0700 Subject: [PATCH 231/761] Fix mutex locking around ifname cache (#8873) --- plugins/processors/ifname/ifname.go | 43 +++++++++++++++-------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index fb16a78dda2a5..c7f6e2a74d825 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -100,8 +100,8 @@ type IfName struct { ifTable *si.Table `toml:"-"` ifXTable *si.Table `toml:"-"` - rwLock sync.RWMutex `toml:"-"` - cache *TTLCache `toml:"-"` + lock sync.Mutex `toml:"-"` + cache *TTLCache `toml:"-"` parallel parallel.Parallel `toml:"-"` acc telegraf.Accumulator `toml:"-"` @@ -187,9 +187,9 @@ func (d *IfName) addTag(metric telegraf.Metric) error { } func (d *IfName) invalidate(agent string) { - d.rwLock.RLock() + d.lock.Lock() d.cache.Delete(agent) - d.rwLock.RUnlock() + d.lock.Unlock() } func (d *IfName) Start(acc telegraf.Accumulator) error { @@ -241,31 +241,34 @@ func (d *IfName) Stop() error { func (d *IfName) getMap(agent string) (entry nameMap, age time.Duration, err error) { var sig chan struct{} + d.lock.Lock() + // Check cache - d.rwLock.RLock() m, ok, age := d.cache.Get(agent) - d.rwLock.RUnlock() if ok { + d.lock.Unlock() return m, age, nil } - // Is this the first request for this agent? - d.rwLock.Lock() + // cache miss. Is this the first request for this agent? sig, found := d.sigs[agent] if !found { + // This is the first request. Make signal for subsequent requests to wait on s := make(chan struct{}) d.sigs[agent] = s sig = s } - d.rwLock.Unlock() + + d.lock.Unlock() if found { // This is not the first request. Wait for first to finish. <-sig + // Check cache again - d.rwLock.RLock() + d.lock.Lock() m, ok, age := d.cache.Get(agent) - d.rwLock.RUnlock() + d.lock.Unlock() if ok { return m, age, nil } @@ -273,28 +276,26 @@ func (d *IfName) getMap(agent string) (entry nameMap, age time.Duration, err err } // The cache missed and this is the first request for this - // agent. - - // Make the SNMP request + // agent. Make the SNMP request m, err = d.getMapRemote(agent) + + d.lock.Lock() if err != nil { - //failure. signal without saving to cache - d.rwLock.Lock() + //snmp failure. signal without saving to cache close(sig) delete(d.sigs, agent) - d.rwLock.Unlock() + d.lock.Unlock() return nil, 0, fmt.Errorf("getting remote table: %w", err) } - // Cache it, then signal any other waiting requests for this agent - // and clean up - d.rwLock.Lock() + // snmp success. Cache response, then signal any other waiting + // requests for this agent and clean up d.cache.Put(agent, m) close(sig) delete(d.sigs, agent) - d.rwLock.Unlock() + d.lock.Unlock() return m, 0, nil } From 2372db9028dbe6c88b45254ec31fa3e85d9d5533 Mon Sep 17 00:00:00 2001 From: Avinash Nigam <56562150+avinash-nigam@users.noreply.github.com> Date: Fri, 19 Feb 2021 12:59:26 -0800 Subject: [PATCH 232/761] SQL Perfmon counters - synced queries from v2 to all db types (#8393) --- plugins/inputs/sqlserver/azuresqlqueries.go | 14 ++++++++++++++ plugins/inputs/sqlserver/sqlserverqueries.go | 18 ++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go index 2358a12c39614..03da02e879642 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqlqueries.go @@ -456,13 +456,17 @@ WITH PerfCounters AS ( ,'Readahead pages/sec' ,'Lazy writes/sec' ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' ,'Page life expectancy' ,'Log File(s) Size (KB)' ,'Log File(s) Used Size (KB)' ,'Data File(s) Size (KB)' ,'Transactions/sec' ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' ,'Active Temp Tables' + ,'Logical Connections' ,'Temp Tables Creation Rate' ,'Temp Tables For Destruction' ,'Free Space in tempdb (KB)' @@ -519,6 +523,9 @@ WITH PerfCounters AS ( ,'Mirrored Write Transactions/sec' ,'Group Commit Time' ,'Group Commits/Sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' ) OR ( spi.[object_name] LIKE '%User Settable%' OR spi.[object_name] LIKE '%SQL Errors%' @@ -988,13 +995,17 @@ WITH PerfCounters AS ( ,'Readahead pages/sec' ,'Lazy writes/sec' ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' ,'Page life expectancy' ,'Log File(s) Size (KB)' ,'Log File(s) Used Size (KB)' ,'Data File(s) Size (KB)' ,'Transactions/sec' ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' ,'Active Temp Tables' + ,'Logical Connections' ,'Temp Tables Creation Rate' ,'Temp Tables For Destruction' ,'Free Space in tempdb (KB)' @@ -1051,6 +1062,9 @@ WITH PerfCounters AS ( ,'Mirrored Write Transactions/sec' ,'Group Commit Time' ,'Group Commits/Sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' ) OR ( spi.[object_name] LIKE '%User Settable%' OR spi.[object_name] LIKE '%SQL Errors%' diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index 2af8e1eb775cf..a2ef3ca3ca123 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -282,6 +282,17 @@ FROM sys.dm_os_schedulers AS s' EXEC sp_executesql @SqlStatement ` +/* +This string defines a SQL statements to retrieve Performance Counters as documented here - + SQL Server Performance Objects - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects?view=sql-server-ver15#SQLServerPOs +Some of the specific objects used are - + MSSQL$*:Access Methods - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object?view=sql-server-ver15 + MSSQL$*:Buffer Manager - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object?view=sql-server-ver15 + MSSQL$*:Databases - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-ver15 + MSSQL$*:General Statistics - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object?view=sql-server-ver15 + MSSQL$*:Exec Statistics - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-execstatistics-object?view=sql-server-ver15 + SQLServer:Query Store - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-query-store-object?view=sql-server-ver15 +*/ const sqlServerPerformanceCounters string = ` SET DEADLOCK_PRIORITY -10; IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ @@ -331,13 +342,17 @@ SELECT DISTINCT ,'Readahead pages/sec' ,'Lazy writes/sec' ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' ,'Page life expectancy' ,'Log File(s) Size (KB)' ,'Log File(s) Used Size (KB)' ,'Data File(s) Size (KB)' ,'Transactions/sec' ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' ,'Active Temp Tables' + ,'Logical Connections' ,'Temp Tables Creation Rate' ,'Temp Tables For Destruction' ,'Free Space in tempdb (KB)' @@ -394,6 +409,9 @@ SELECT DISTINCT ,'Mirrored Write Transactions/sec' ,'Group Commit Time' ,'Group Commits/Sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' ) OR ( spi.[object_name] LIKE '%User Settable%' OR spi.[object_name] LIKE '%SQL Errors%' From 58dd50cb964f049295786b7f5dc342d1ad02d2f4 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Mon, 22 Feb 2021 11:28:22 -0800 Subject: [PATCH 233/761] add SMCIPMITool input to external plugin list (#8897) --- EXTERNAL_PLUGINS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index 0a165a412ec07..8f361c62915f9 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -10,6 +10,7 @@ Pull requests welcome. - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open Hardware Monitor](http://openhardwaremonitor.org) - [rand](https://github.com/ssoroka/rand) - Generate random numbers +- [SMCIPMITool](https://github.com/jhpope/smc_ipmi) - Python script to parse the output of [SMCIPMITool](https://www.supermicro.com/en/solutions/management-software/ipmi-utilities) into [InfluxDB line protocol](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/). - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. - [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts - [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels From 47e12d19811b36bbc7ed31aec70694356f9e4a1c Mon Sep 17 00:00:00 2001 From: Thomas Casteleyn Date: Wed, 24 Feb 2021 19:44:53 +0100 Subject: [PATCH 234/761] Support more snmpv3 authentication protocols (#8850) --- internal/snmp/wrapper.go | 8 ++++++++ plugins/inputs/snmp/README.md | 2 +- plugins/inputs/snmp/snmp.go | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/internal/snmp/wrapper.go b/internal/snmp/wrapper.go index 92c3442bb0189..9825d5677ff73 100644 --- a/internal/snmp/wrapper.go +++ b/internal/snmp/wrapper.go @@ -112,6 +112,14 @@ func NewWrapper(s ClientConfig) (GosnmpWrapper, error) { sp.AuthenticationProtocol = gosnmp.MD5 case "sha": sp.AuthenticationProtocol = gosnmp.SHA + case "sha224": + sp.AuthenticationProtocol = gosnmp.SHA224 + case "sha256": + sp.AuthenticationProtocol = gosnmp.SHA256 + case "sha384": + sp.AuthenticationProtocol = gosnmp.SHA384 + case "sha512": + sp.AuthenticationProtocol = gosnmp.SHA512 case "": sp.AuthenticationProtocol = gosnmp.NoAuth default: diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index fa96150b94b4c..0d52881a72f04 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -53,7 +53,7 @@ information. ## ## Security Name. # sec_name = "myuser" - ## Authentication protocol; one of "MD5", "SHA", or "". + ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "". # auth_protocol = "MD5" ## Authentication password. # auth_password = "pass" diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index ee642a50ec380..df23eeeb7300f 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -56,7 +56,7 @@ const sampleConfig = ` ## ## Security Name. # sec_name = "myuser" - ## Authentication protocol; one of "MD5", "SHA", or "". + ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "". # auth_protocol = "MD5" ## Authentication password. # auth_password = "pass" From 2cf4b751be06029f557914b2adb9ffa3d4b0ee6f Mon Sep 17 00:00:00 2001 From: Ben Keith Date: Thu, 25 Feb 2021 15:30:01 -0500 Subject: [PATCH 235/761] SignalFx Output (#6714) * [outputs.signalfx] Add output plugin for SignalFX This output plugin converts the `telegraf.Metrics` into signalfx `datapoint`s and then transmits them to the ingest servers using signalfx golang client lib. As of this commit, the client lib is allowed to pick sane defaults and none of its fields are overridable via telegraf config. This can be changed in the future if needed. The unit tests only test for conversion of `telegraf.Metric`s to the `datapoint` structs. All code that executes after that is assumed to be tested in the signalfx client lib itself (and not worth writing end-to-end tests for). Further enhancements: - Custom ingest urls - Better batching - More extensive tests - Support for events, sent by whitelist only Co-authored-by: Ben Keith Co-authored-by: Akshay Co-authored-by: Jay Camp --- README.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 8 + go.mod | 10 +- go.sum | 92 ++- plugins/outputs/all/all.go | 1 + plugins/outputs/signalfx/README.md | 20 + plugins/outputs/signalfx/signalfx.go | 260 ++++++++ plugins/outputs/signalfx/signalfx_test.go | 703 ++++++++++++++++++++++ 8 files changed, 1076 insertions(+), 19 deletions(-) create mode 100644 plugins/outputs/signalfx/README.md create mode 100644 plugins/outputs/signalfx/signalfx.go create mode 100644 plugins/outputs/signalfx/signalfx_test.go diff --git a/README.md b/README.md index 726e6e74fd289..9c2c65cd9f244 100644 --- a/README.md +++ b/README.md @@ -442,6 +442,7 @@ For documentation on the latest development code see the [documentation index][d * [prometheus](./plugins/outputs/prometheus_client) * [riemann](./plugins/outputs/riemann) * [riemann_legacy](./plugins/outputs/riemann_legacy) +* [signalfx](./plugins/outputs/signalfx) * [socket_writer](./plugins/outputs/socket_writer) * [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring) * [syslog](./plugins/outputs/syslog) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index f68d85e7bed54..ad499955067b4 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -66,10 +66,12 @@ following works: - github.com/go-ping/ping [MIT License](https://github.com/go-ping/ping/blob/master/LICENSE) - github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE) - github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) +- github.com/go-stack/stack [MIT License](https://github.com/go-stack/stack/blob/master/LICENSE.md) - github.com/goburrow/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/goburrow/modbus/blob/master/LICENSE) - github.com/goburrow/serial [MIT License](https://github.com/goburrow/serial/LICENSE) - github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE) - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) +- github.com/gogo/googleapis [Apache License 2.0](https://github.com/gogo/googleapis/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) - github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE) - github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE) @@ -96,6 +98,7 @@ following works: - github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE) - github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE) - github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE) +- github.com/jaegertracing/jaeger [Apache License 2.0](https://github.com/jaegertracing/jaeger/blob/master/LICENSE) - github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE) - github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) - github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE) @@ -127,6 +130,7 @@ following works: - github.com/openconfig/gnmi [Apache License 2.0](https://github.com/openconfig/gnmi/blob/master/LICENSE) - github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE) - github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) +- github.com/opentracing/opentracing-go [Apache License 2.0](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) - github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) - github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE) - github.com/pkg/errors [BSD 2-Clause "Simplified" License](https://github.com/pkg/errors/blob/master/LICENSE) @@ -141,6 +145,10 @@ following works: - github.com/safchain/ethtool [Apache License 2.0](https://github.com/safchain/ethtool/blob/master/LICENSE) - github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) - github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE) +- github.com/signalfx/com_signalfx_metrics_protobuf [Apache License 2.0](https://github.com/signalfx/com_signalfx_metrics_protobuf/blob/master/LICENSE) +- github.com/signalfx/gohistogram [MIT License](https://github.com/signalfx/gohistogram/blob/master/LICENSE) +- github.com/signalfx/golib [Apache License 2.0](https://github.com/signalfx/golib/blob/master/LICENSE) +- github.com/signalfx/sapm-proto [Apache License 2.0](https://github.com/signalfx/sapm-proto/blob/master/LICENSE) - github.com/sirupsen/logrus [MIT License](https://github.com/sirupsen/logrus/blob/master/LICENSE) - github.com/soniah/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/soniah/gosnmp/blob/master/LICENSE) - github.com/streadway/amqp [BSD 2-Clause "Simplified" License](https://github.com/streadway/amqp/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 816ace7e9ffdd..cc85b7cf41e12 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,6 @@ require ( github.com/Microsoft/ApplicationInsights-Go v0.4.2 github.com/Microsoft/go-winio v0.4.9 // indirect github.com/Shopify/sarama v1.27.2 - github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect github.com/aerospike/aerospike-client-go v1.27.0 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 @@ -56,7 +55,6 @@ require ( github.com/ericchiang/k8s v1.2.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.4.0 - github.com/go-ole/go-ole v1.2.1 // indirect github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible github.com/go-sql-driver/mysql v1.5.0 @@ -64,7 +62,7 @@ require ( github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v2.1.0+incompatible - github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d + github.com/gogo/protobuf v1.3.1 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/protobuf v1.3.5 github.com/golang/snappy v0.0.1 @@ -93,7 +91,6 @@ require ( github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee github.com/kylelemons/godebug v1.1.0 // indirect github.com/lib/pq v1.3.0 // indirect - github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b github.com/miekg/dns v1.0.14 @@ -109,7 +106,6 @@ require ( github.com/opencontainers/go-digest v1.0.0-rc1 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect - github.com/opentracing/opentracing-go v1.0.2 // indirect github.com/openzipkin/zipkin-go-opentracing v0.3.4 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.5.1 @@ -119,10 +115,10 @@ require ( github.com/prometheus/prometheus v2.5.0+incompatible github.com/riemann/riemann-go-client v0.5.0 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 - github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/shirou/gopsutil v2.20.9+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect + github.com/signalfx/golib/v3 v3.3.0 github.com/sirupsen/logrus v1.4.2 github.com/soniah/gosnmp v1.25.0 github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 @@ -155,7 +151,7 @@ require ( gopkg.in/fatih/pool.v2 v2.0.0 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/ldap.v3 v3.1.0 - gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce + gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 gopkg.in/olivere/elastic.v5 v5.0.70 gopkg.in/yaml.v2 v2.3.0 gotest.tools v2.2.0+incompatible diff --git a/go.sum b/go.sum index 806e6e6cc185d..07d28e132ffe4 100644 --- a/go.sum +++ b/go.sum @@ -90,8 +90,8 @@ github.com/Shopify/sarama v1.27.2 h1:1EyY1dsxNDUQEv0O/4TsjosHI2CgB1uo9H/v56xzTxc github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE= github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -146,6 +146,7 @@ github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkN github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds= github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY= @@ -198,6 +199,8 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 h1:KgEcrKF0NWi9GT/OvDp9ioXZIrHRbP8S5o+sot9gznQ= github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU= +github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ= github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= @@ -217,6 +220,10 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/ericchiang/k8s v1.2.0 h1:vxrMwEzY43oxu8aZyD/7b1s8tsBM+xoUoxjWECWFbPI= github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk= +github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk9aNQtiw4R4bEuzHjVmZikkUKCnO1v3lPQ21HZGk= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -232,13 +239,15 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= +github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= @@ -249,6 +258,7 @@ github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGK github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/goburrow/modbus v0.1.0 h1:DejRZY73nEM6+bt5JSP6IsFolJ9dVcqxsYbpLbeW/ro= github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBTlzMcZBg= @@ -260,16 +270,23 @@ github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQI github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.3.1 h1:CzMaKrvF6Qa7XtRii064vKBQiyvmY8H8vG1xa1/W1JA= +github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -291,6 +308,7 @@ github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgj github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26W17m+OIQgE6gQ24gC1M6pUaMBAbFrTIDtwG/E= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= @@ -298,6 +316,8 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= @@ -323,6 +343,7 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= @@ -334,6 +355,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gopcua/opcua v0.1.13 h1:UP746MKRFNbv+CQGfrPwgH7rGxOlSGzVu9ieZdcox4E= github.com/gopcua/opcua v0.1.13/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= @@ -391,6 +414,8 @@ github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGU github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q= github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jaegertracing/jaeger v1.15.1 h1:7QzNAXq+4ko9GtCjozDNAp2uonoABu+B2Rk94hjQcp4= +github.com/jaegertracing/jaeger v1.15.1/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOOxihXx0+53llXI= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -411,6 +436,14 @@ github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea h1:g2k+8WR7cHch4g0tBDhfiEvAp7fXxTNBiD1oC1Oxj3E= +github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI= +github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b h1:Rrp0ByJXEjhREMPGTt3aWYjoIsUGCbt21ekbeJcTWv0= +github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0= @@ -447,8 +480,8 @@ github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1: github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 h1:8/+Y8SKf0xCZ8cCTfnrMdY7HNzlEjPAt3bPjalNb6CA= -github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -520,14 +553,15 @@ github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVo github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g= github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -581,18 +615,33 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= -github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75 h1:cA+Ubq9qEVIQhIWvP2kNuSZ2CmnfBJFSRq+kO1pu2cc= +github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v2.20.9+incompatible h1:msXs2frUV+O/JLva9EDLpuJ84PrFsdCTCQex8PUdtkQ= github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.0-20190222193949-1fb69526e884 h1:KgLGEw137KEUtQnWBGzneCetphBj4+kKHRnhpAkXJC0= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.0-20190222193949-1fb69526e884/go.mod h1:muYA2clvwCdj7nzAJ5vJIXYpJsUumhAl4Uu1wUNpWzA= +github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 h1:WsShHmu12ZztYPfh9b+I+VjYD1o8iOHhB67WZCMEEE8= +github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083/go.mod h1:adPDS6s7WaajdFBV9mQ7i0dKfQ8xiDnF9ZNETVPpp7c= +github.com/signalfx/golib/v3 v3.3.0 h1:vSXsAb73bdrlnjk5rnZ7y3t09Qzu9qfBEbXdcyBHsmE= +github.com/signalfx/golib/v3 v3.3.0/go.mod h1:GzjWpV0skAXZn7+u9LnkOkiXAx9KKd5XZcd5r+RoF5o= +github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8KfCSyuwy/VILnROdgCvbQLA5ch0nkbG7lKT0BXw= +github.com/signalfx/sapm-proto v0.4.0 h1:5lQX++6FeIjUZEIcnSgBqhOpmSjMkRBW3y/4ZiKMo5E= +github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbPU959ZNhM0Fk= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff h1:JcVn27VGCEwd33jyNj+3IqEbOmzAX9f9LILt3SoGPHU= +github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -621,6 +670,7 @@ github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0 github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0= github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= @@ -646,6 +696,8 @@ go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -656,6 +708,7 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -690,6 +743,8 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -700,6 +755,7 @@ golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -723,6 +779,7 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -768,6 +825,7 @@ golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -814,6 +872,7 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -821,8 +880,11 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190906203814-12febf440ab1 h1:w4Q0TX3lC1NfGcWkzt5wG4ee4E5fUAPqh5myV0efeHI= +golang.org/x/tools v0.0.0-20190906203814-12febf440ab1/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -841,6 +903,7 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200317043434-63da46f3035e h1:8ogAbHWoJTPepnVbNRqXLOpzMkl0rtRsM7crbflc4XM= golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -881,6 +944,8 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -899,6 +964,8 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -941,8 +1008,8 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE= gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE= gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -973,6 +1040,7 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 279bbda3bdd89..b8d64db8f4a04 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -35,6 +35,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" _ "github.com/influxdata/telegraf/plugins/outputs/riemann" _ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy" + _ "github.com/influxdata/telegraf/plugins/outputs/signalfx" _ "github.com/influxdata/telegraf/plugins/outputs/socket_writer" _ "github.com/influxdata/telegraf/plugins/outputs/stackdriver" _ "github.com/influxdata/telegraf/plugins/outputs/sumologic" diff --git a/plugins/outputs/signalfx/README.md b/plugins/outputs/signalfx/README.md new file mode 100644 index 0000000000000..4736e4bbceb0f --- /dev/null +++ b/plugins/outputs/signalfx/README.md @@ -0,0 +1,20 @@ +# SignalFx Output Plugin + +```toml +[[outputs.signalfx]] + ## SignalFx Org Access Token + access_token = "my-secret-token" + + ## The SignalFx realm that your organization resides in + signalfx_realm = "us9" # Required if ingest_url is not set + + ## You can optionally provide a custom ingest url instead of the + ## signalfx_realm option above if you are using a gateway or proxy + ## instance. This option takes precident over signalfx_realm. + ingest_url = "https://my-custom-ingest/" + + ## Event typed metrics are omitted by default, + ## If you require an event typed metric you must specify the + ## metric name in the following list. + included_event_names = ["plugin.metric_name"] +``` diff --git a/plugins/outputs/signalfx/signalfx.go b/plugins/outputs/signalfx/signalfx.go new file mode 100644 index 0000000000000..87285750735c5 --- /dev/null +++ b/plugins/outputs/signalfx/signalfx.go @@ -0,0 +1,260 @@ +package signalfx + +import ( + "context" + "errors" + "fmt" + "strings" + + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/signalfx/golib/v3/datapoint" + "github.com/signalfx/golib/v3/datapoint/dpsink" + "github.com/signalfx/golib/v3/event" + "github.com/signalfx/golib/v3/sfxclient" +) + +//init initializes the plugin context +func init() { + outputs.Add("signalfx", func() telegraf.Output { + return NewSignalFx() + }) +} + +// SignalFx plugin context +type SignalFx struct { + AccessToken string `toml:"access_token"` + SignalFxRealm string `toml:"signalfx_realm"` + IngestURL string `toml:"ingest_url"` + IncludedEventNames []string `toml:"included_event_names"` + + Log telegraf.Logger `toml:"-"` + + includedEventSet map[string]bool + client dpsink.Sink + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup +} + +var sampleConfig = ` + ## SignalFx Org Access Token + access_token = "my-secret-token" + + ## The SignalFx realm that your organization resides in + signalfx_realm = "us9" # Required if ingest_url is not set + + ## You can optionally provide a custom ingest url instead of the + ## signalfx_realm option above if you are using a gateway or proxy + ## instance. This option takes precident over signalfx_realm. + ingest_url = "https://my-custom-ingest/" + + ## Event typed metrics are omitted by default, + ## If you require an event typed metric you must specify the + ## metric name in the following list. + included_event_names = ["plugin.metric_name"] +` + +// GetMetricType returns the equivalent telegraf ValueType for a signalfx metric type +func GetMetricType(mtype telegraf.ValueType) (metricType datapoint.MetricType) { + switch mtype { + case telegraf.Counter: + metricType = datapoint.Counter + case telegraf.Gauge: + metricType = datapoint.Gauge + case telegraf.Summary: + metricType = datapoint.Gauge + case telegraf.Histogram: + metricType = datapoint.Gauge + case telegraf.Untyped: + metricType = datapoint.Gauge + default: + metricType = datapoint.Gauge + } + return metricType +} + +// NewSignalFx - returns a new context for the SignalFx output plugin +func NewSignalFx() *SignalFx { + ctx, cancel := context.WithCancel(context.Background()) + return &SignalFx{ + AccessToken: "", + SignalFxRealm: "", + IngestURL: "", + IncludedEventNames: []string{""}, + ctx: ctx, + cancel: cancel, + client: sfxclient.NewHTTPSink(), + } +} + +// Description returns a description for the plugin +func (s *SignalFx) Description() string { + return "Send metrics and events to SignalFx" +} + +// SampleConfig returns the sample configuration for the plugin +func (s *SignalFx) SampleConfig() string { + return sampleConfig +} + +// Connect establishes a connection to SignalFx +func (s *SignalFx) Connect() error { + client := s.client.(*sfxclient.HTTPSink) + client.AuthToken = s.AccessToken + + if s.IngestURL != "" { + client.DatapointEndpoint = datapointEndpointForIngestURL(s.IngestURL) + client.EventEndpoint = eventEndpointForIngestURL(s.IngestURL) + } else if s.SignalFxRealm != "" { + client.DatapointEndpoint = datapointEndpointForRealm(s.SignalFxRealm) + client.EventEndpoint = eventEndpointForRealm(s.SignalFxRealm) + } else { + return errors.New("signalfx_realm or ingest_url must be configured") + } + + return nil +} + +// Close closes any connections to SignalFx +func (s *SignalFx) Close() error { + s.cancel() + s.client.(*sfxclient.HTTPSink).Client.CloseIdleConnections() + return nil +} + +func (s *SignalFx) ConvertToSignalFx(metrics []telegraf.Metric) ([]*datapoint.Datapoint, []*event.Event) { + var dps []*datapoint.Datapoint + var events []*event.Event + + for _, metric := range metrics { + s.Log.Debugf("Processing the following measurement: %v", metric) + var timestamp = metric.Time() + var metricType datapoint.MetricType + + metricType = GetMetricType(metric.Type()) + + for field, val := range metric.Fields() { + // Copy the metric tags because they are meant to be treated as + // immutable + var metricDims = metric.Tags() + + // Generate the metric name + metricName := getMetricName(metric.Name(), field) + + // Get the metric value as a datapoint value + if metricValue, err := datapoint.CastMetricValueWithBool(val); err == nil { + var dp = datapoint.New(metricName, + metricDims, + metricValue.(datapoint.Value), + metricType, + timestamp) + + s.Log.Debugf("Datapoint: %v", dp.String()) + + dps = append(dps, dp) + } else { + // Skip if it's not an explicitly included event + if !s.isEventIncluded(metricName) { + continue + } + + // We've already type checked field, so set property with value + metricProps := map[string]interface{}{"message": val} + var ev = event.NewWithProperties(metricName, + event.AGENT, + metricDims, + metricProps, + timestamp) + + s.Log.Debugf("Event: %v", ev.String()) + + events = append(events, ev) + } + } + } + + return dps, events +} + +// Write call back for writing metrics +func (s *SignalFx) Write(metrics []telegraf.Metric) error { + dps, events := s.ConvertToSignalFx(metrics) + + if len(dps) > 0 { + err := s.client.AddDatapoints(s.ctx, dps) + if err != nil { + return err + } + } + + if len(events) > 0 { + if err := s.client.AddEvents(s.ctx, events); err != nil { + // If events error out but we successfully sent some datapoints, + // don't return an error so that it won't ever retry -- that way we + // don't send the same datapoints twice. + if len(dps) == 0 { + return err + } + s.Log.Errorf("Failed to send SignalFx event: %v", err) + } + } + + return nil +} + +// isEventIncluded - checks whether a metric name for an event was put on the whitelist +func (s *SignalFx) isEventIncluded(name string) bool { + if s.includedEventSet == nil { + s.includedEventSet = make(map[string]bool, len(s.includedEventSet)) + for _, include := range s.IncludedEventNames { + s.includedEventSet[include] = true + } + } + return s.includedEventSet[name] +} + +// getMetricName combines telegraf fields and tags into a full metric name +func getMetricName(metric string, field string) string { + name := metric + + // Include field in metric name when it adds to the metric name + if field != "value" { + name = fmt.Sprintf("%s.%s", name, field) + } + + return name +} + +// ingestURLForRealm returns the base ingest URL for a particular SignalFx +// realm +func ingestURLForRealm(realm string) string { + return fmt.Sprintf("https://ingest.%s.signalfx.com", realm) +} + +// datapointEndpointForRealm returns the endpoint to which datapoints should be +// POSTed for a particular realm. +func datapointEndpointForRealm(realm string) string { + return datapointEndpointForIngestURL(ingestURLForRealm(realm)) +} + +// datapointEndpointForRealm returns the endpoint to which datapoints should be +// POSTed for a particular ingest base URL. +func datapointEndpointForIngestURL(ingestURL string) string { + return strings.TrimRight(ingestURL, "/") + "/v2/datapoint" +} + +// eventEndpointForRealm returns the endpoint to which events should be +// POSTed for a particular realm. +func eventEndpointForRealm(realm string) string { + return eventEndpointForIngestURL(ingestURLForRealm(realm)) +} + +// eventEndpointForRealm returns the endpoint to which events should be +// POSTed for a particular ingest base URL. +func eventEndpointForIngestURL(ingestURL string) string { + return strings.TrimRight(ingestURL, "/") + "/v2/event" +} diff --git a/plugins/outputs/signalfx/signalfx_test.go b/plugins/outputs/signalfx/signalfx_test.go new file mode 100644 index 0000000000000..3c04c1ef100e2 --- /dev/null +++ b/plugins/outputs/signalfx/signalfx_test.go @@ -0,0 +1,703 @@ +package signalfx + +import ( + "context" + "errors" + "reflect" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/testutil" + "github.com/signalfx/golib/v3/datapoint" + "github.com/signalfx/golib/v3/event" + "github.com/stretchr/testify/require" +) + +type sink struct { + dps []*datapoint.Datapoint + evs []*event.Event +} + +func (s *sink) AddDatapoints(ctx context.Context, points []*datapoint.Datapoint) error { + s.dps = append(s.dps, points...) + return nil +} +func (s *sink) AddEvents(ctx context.Context, events []*event.Event) error { + s.evs = append(s.evs, events...) + return nil +} + +type errorsink struct { + dps []*datapoint.Datapoint + evs []*event.Event +} + +func (e *errorsink) AddDatapoints(ctx context.Context, points []*datapoint.Datapoint) error { + return errors.New("not sending datapoints") +} +func (e *errorsink) AddEvents(ctx context.Context, events []*event.Event) error { + return errors.New("not sending events") +} +func TestSignalFx_SignalFx(t *testing.T) { + type measurement struct { + name string + tags map[string]string + fields map[string]interface{} + time time.Time + tp telegraf.ValueType + } + type fields struct { + IncludedEvents []string + } + type want struct { + datapoints []*datapoint.Datapoint + events []*event.Event + } + tests := []struct { + name string + fields fields + measurements []*measurement + want want + }{ + { + name: "add datapoints of all types", + fields: fields{}, + measurements: []*measurement{ + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Counter, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Summary, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Histogram, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Untyped, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"myboolmeasurement": true}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"myboolmeasurement": false}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{ + datapoint.New( + "datapoint.mymeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Counter, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.mymeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.mymeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.mymeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.mymeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.mymeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.myboolmeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewIntValue(int64(1)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + datapoint.New( + "datapoint.myboolmeasurement", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewIntValue(int64(0)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + }, + events: []*event.Event{}, + }, + }, + { + name: "add events of all types", + fields: fields{ + IncludedEvents: []string{"event.mymeasurement"}, + }, + measurements: []*measurement{ + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Counter, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Summary, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Histogram, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Untyped, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{ + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + }, + }, + }, + { + name: "exclude events by default", + fields: fields{}, + measurements: []*measurement{ + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"value": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{}, + }, + }, + { + name: "add datapoint with field named value", + fields: fields{}, + measurements: []*measurement{ + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"value": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{ + datapoint.New( + "datapoint", + map[string]string{ + "host": "192.168.0.1", + }, + datapoint.NewFloatValue(float64(3.14)), + datapoint.Gauge, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + }, + events: []*event.Event{}, + }, + }, + { + name: "add event", + fields: fields{ + IncludedEvents: []string{"event.mymeasurement"}, + }, + measurements: []*measurement{ + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Untyped, + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{ + event.NewWithProperties( + "event.mymeasurement", + event.AGENT, + map[string]string{ + "host": "192.168.0.1", + }, + map[string]interface{}{ + "message": "hello world", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)), + }, + }, + }, + { + name: "exclude events that are not explicitly included", + fields: fields{}, + measurements: []*measurement{ + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"value": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{}, + }, + }, + { + name: "malformed metadata event", + fields: fields{}, + measurements: []*measurement{ + { + name: "event", + tags: map[string]string{"host": "192.168.0.1", "sf_metric": "objects.host-meta-data"}, + fields: map[string]interface{}{"value": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := outputs.Outputs["signalfx"]().(*SignalFx) + s.IncludedEventNames = tt.fields.IncludedEvents + s.SignalFxRealm = "test" + s.Log = testutil.Logger{} + + require.Nil(t, s.Connect()) + + s.client = &sink{ + dps: []*datapoint.Datapoint{}, + evs: []*event.Event{}, + } + + measurements := []telegraf.Metric{} + + for _, measurement := range tt.measurements { + m, err := metric.New( + measurement.name, measurement.tags, measurement.fields, measurement.time, measurement.tp, + ) + if err != nil { + t.Errorf("Error creating measurement %v", measurement) + } + measurements = append(measurements, m) + } + + s.Write(measurements) + require.Eventually(t, func() bool { return len(s.client.(*sink).dps) == len(tt.want.datapoints) }, 5*time.Second, 100*time.Millisecond) + require.Eventually(t, func() bool { return len(s.client.(*sink).evs) == len(tt.want.events) }, 5*time.Second, 100*time.Millisecond) + + if !reflect.DeepEqual(s.client.(*sink).dps, tt.want.datapoints) { + t.Errorf("Collected datapoints do not match desired. Collected: %v Desired: %v", s.client.(*sink).dps, tt.want.datapoints) + } + if !reflect.DeepEqual(s.client.(*sink).evs, tt.want.events) { + t.Errorf("Collected events do not match desired. Collected: %v Desired: %v", s.client.(*sink).evs, tt.want.events) + } + }) + } +} + +func TestSignalFx_Errors(t *testing.T) { + type measurement struct { + name string + tags map[string]string + fields map[string]interface{} + time time.Time + tp telegraf.ValueType + } + type fields struct { + IncludedEvents []string + } + type want struct { + datapoints []*datapoint.Datapoint + events []*event.Event + } + tests := []struct { + name string + fields fields + measurements []*measurement + want want + }{ + { + name: "add datapoints of all types", + fields: fields{}, + measurements: []*measurement{ + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Counter, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Summary, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Histogram, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Untyped, + }, + { + name: "datapoint", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": float64(3.14)}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{}, + }, + }, + { + name: "add events of all types", + fields: fields{ + IncludedEvents: []string{"event.mymeasurement"}, + }, + measurements: []*measurement{ + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Counter, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Gauge, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Summary, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Histogram, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + tp: telegraf.Untyped, + }, + { + name: "event", + tags: map[string]string{"host": "192.168.0.1"}, + fields: map[string]interface{}{"mymeasurement": "hello world"}, + time: time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + }, + want: want{ + datapoints: []*datapoint.Datapoint{}, + events: []*event.Event{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := outputs.Outputs["signalfx"]().(*SignalFx) + // constrain the buffer to cover code that emits when batch size is met + s.IncludedEventNames = tt.fields.IncludedEvents + s.SignalFxRealm = "test" + s.Log = testutil.Logger{} + + require.Nil(t, s.Connect()) + + s.client = &errorsink{ + dps: []*datapoint.Datapoint{}, + evs: []*event.Event{}, + } + + for _, measurement := range tt.measurements { + m, err := metric.New( + measurement.name, measurement.tags, measurement.fields, measurement.time, measurement.tp, + ) + if err != nil { + t.Errorf("Error creating measurement %v", measurement) + } + s.Write([]telegraf.Metric{m}) + } + for !(len(s.client.(*errorsink).dps) == len(tt.want.datapoints) && len(s.client.(*errorsink).evs) == len(tt.want.events)) { + time.Sleep(1 * time.Second) + } + if !reflect.DeepEqual(s.client.(*errorsink).dps, tt.want.datapoints) { + t.Errorf("Collected datapoints do not match desired. Collected: %v Desired: %v", s.client.(*errorsink).dps, tt.want.datapoints) + } + if !reflect.DeepEqual(s.client.(*errorsink).evs, tt.want.events) { + t.Errorf("Collected events do not match desired. Collected: %v Desired: %v", s.client.(*errorsink).evs, tt.want.events) + } + }) + } +} + +// this is really just for complete code coverage +func TestSignalFx_Description(t *testing.T) { + tests := []struct { + name string + want string + }{ + { + name: "verify description is correct", + want: "Send metrics and events to SignalFx", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &SignalFx{} + if got := s.Description(); got != tt.want { + t.Errorf("SignalFx.Description() = %v, want %v", got, tt.want) + } + }) + } +} + +// this is also just for complete code coverage +func TestSignalFx_SampleConfig(t *testing.T) { + tests := []struct { + name string + want string + }{ + { + name: "verify sample config is returned", + want: sampleConfig, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &SignalFx{} + if got := s.SampleConfig(); got != tt.want { + t.Errorf("SignalFx.SampleConfig() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetMetricName(t *testing.T) { + type args struct { + metric string + field string + dims map[string]string + } + tests := []struct { + name string + args args + want string + wantsfx bool + }{ + { + name: "fields that equal value should not be append to metricname", + args: args{ + metric: "datapoint", + field: "value", + dims: map[string]string{ + "testDimKey": "testDimVal", + }, + }, + want: "datapoint", + }, + { + name: "fields other than 'value' with out sf_metric dim should return measurement.fieldname as metric name", + args: args{ + metric: "datapoint", + field: "test", + dims: map[string]string{ + "testDimKey": "testDimVal", + }, + }, + want: "datapoint.test", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := getMetricName(tt.args.metric, tt.args.field) + if got != tt.want { + t.Errorf("getMetricName() got = %v, want %v", got, tt.want) + } + }) + } +} From 25aa0eeb2148bafc4bab01d325c6db258f58dc6c Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Thu, 25 Feb 2021 17:17:46 -0500 Subject: [PATCH 236/761] Fix statsd concurrency bug (#8885) * Fix the issue * Remove test I was using for replication * Accidentally removed test. * Add lock only where it is necessary. * eliminate unnecessary space --- plugins/inputs/statsd/statsd.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index e1b6e837c3847..168d4b9cf4309 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -712,6 +712,8 @@ func (s *Statsd) parseStatsdLine(line string) error { // map of tags. // Return values are (, , ) func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { + s.Lock() + defer s.Unlock() tags := make(map[string]string) bucketparts := strings.Split(bucket, ",") From e1a896ca12f6f34ba6c946f93e58f80e68b50cff Mon Sep 17 00:00:00 2001 From: Niels Huylebroeck Date: Fri, 26 Feb 2021 16:42:46 +0100 Subject: [PATCH 237/761] Non systemd support with unittest (#8785) --- plugins/inputs/diskio/diskio_linux.go | 38 +++++++++++++++------- plugins/inputs/diskio/diskio_linux_test.go | 37 +++++++++++++-------- 2 files changed, 50 insertions(+), 25 deletions(-) diff --git a/plugins/inputs/diskio/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go index 59822a2778a9b..bb11429f1d387 100644 --- a/plugins/inputs/diskio/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -16,8 +16,6 @@ type diskInfoCache struct { values map[string]string } -var udevPath = "/run/udev/data" - func (d *DiskIO) diskInfo(devName string) (map[string]string, error) { var err error var stat unix.Stat_t @@ -37,9 +35,33 @@ func (d *DiskIO) diskInfo(devName string) (map[string]string, error) { return ic.values, nil } - major := unix.Major(uint64(stat.Rdev)) - minor := unix.Minor(uint64(stat.Rdev)) - udevDataPath := fmt.Sprintf("%s/b%d:%d", udevPath, major, minor) + var udevDataPath string + if ok && len(ic.udevDataPath) > 0 { + // We can reuse the udev data path from a "previous" entry. + // This allows us to also "poison" it during test scenarios + udevDataPath = ic.udevDataPath + } else { + major := unix.Major(uint64(stat.Rdev)) + minor := unix.Minor(uint64(stat.Rdev)) + udevDataPath = fmt.Sprintf("/run/udev/data/b%d:%d", major, minor) + + _, err := os.Stat(udevDataPath) + if err != nil { + // This path failed, try the fallback .udev style (non-systemd) + udevDataPath = fmt.Sprintf("/dev/.udev/db/block:%s", devName) + _, err := os.Stat(udevDataPath) + if err != nil { + // Giving up, cannot retrieve disk info + return nil, err + } + } + } + // Final open of the confirmed (or the previously detected/used) udev file + f, err := os.Open(udevDataPath) + defer f.Close() + if err != nil { + return nil, err + } di := map[string]string{} @@ -49,12 +71,6 @@ func (d *DiskIO) diskInfo(devName string) (map[string]string, error) { values: di, } - f, err := os.Open(udevDataPath) - if err != nil { - return nil, err - } - defer f.Close() - scnr := bufio.NewScanner(f) var devlinks bytes.Buffer for scnr.Scan() { diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index 1cb031bdce553..4d7dc5c821ee5 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -19,19 +19,31 @@ S:foo/bar/devlink1 `) // setupNullDisk sets up fake udev info as if /dev/null were a disk. -func setupNullDisk(t *testing.T) func() error { - td, err := ioutil.TempDir("", ".telegraf.TestDiskInfo") +func setupNullDisk(t *testing.T, s *DiskIO, devName string) func() error { + td, err := ioutil.TempFile("", ".telegraf.DiskInfoTest") require.NoError(t, err) - origUdevPath := udevPath + if s.infoCache == nil { + s.infoCache = make(map[string]diskInfoCache, 0) + } + ic, ok := s.infoCache[devName] + if !ok { + // No previous calls for the device were done, easy to poison the cache + s.infoCache[devName] = diskInfoCache{ + modifiedAt: 0, + udevDataPath: td.Name(), + values: map[string]string{}, + } + } + origUdevPath := ic.udevDataPath cleanFunc := func() error { - udevPath = origUdevPath - return os.RemoveAll(td) + ic.udevDataPath = origUdevPath + return os.Remove(td.Name()) } - udevPath = td - err = ioutil.WriteFile(td+"/b1:3", nullDiskInfo, 0644) // 1:3 is the 'null' device + ic.udevDataPath = td.Name() + _, err = td.Write(nullDiskInfo) if err != nil { cleanFunc() t.Fatal(err) @@ -41,10 +53,9 @@ func setupNullDisk(t *testing.T) func() error { } func TestDiskInfo(t *testing.T) { - clean := setupNullDisk(t) - defer clean() - s := &DiskIO{} + clean := setupNullDisk(t, s, "null") + defer clean() di, err := s.diskInfo("null") require.NoError(t, err) assert.Equal(t, "myval1", di["MY_PARAM_1"]) @@ -67,8 +78,6 @@ func TestDiskInfo(t *testing.T) { // DiskIOStats.diskName isn't a linux specific function, but dependent // functions are a no-op on non-Linux. func TestDiskIOStats_diskName(t *testing.T) { - defer setupNullDisk(t)() - tests := []struct { templates []string expected string @@ -88,6 +97,7 @@ func TestDiskIOStats_diskName(t *testing.T) { s := DiskIO{ NameTemplates: tc.templates, } + defer setupNullDisk(t, &s, "null")() name, _ := s.diskName("null") assert.Equal(t, tc.expected, name, "Templates: %#v", tc.templates) } @@ -96,11 +106,10 @@ func TestDiskIOStats_diskName(t *testing.T) { // DiskIOStats.diskTags isn't a linux specific function, but dependent // functions are a no-op on non-Linux. func TestDiskIOStats_diskTags(t *testing.T) { - defer setupNullDisk(t)() - s := &DiskIO{ DeviceTags: []string{"MY_PARAM_2"}, } + defer setupNullDisk(t, s, "null")() dt := s.diskTags("null") assert.Equal(t, map[string]string{"MY_PARAM_2": "myval2"}, dt) } From 7ed98c7e5c81c067cca98b51c2d891808d58b72b Mon Sep 17 00:00:00 2001 From: Antonio Garcia Date: Fri, 26 Feb 2021 10:45:33 -0600 Subject: [PATCH 238/761] fix: reading multiple holding registers in modbus input plugin (#8628) --- plugins/inputs/modbus/modbus.go | 28 +++++--- plugins/inputs/modbus/modbus_test.go | 97 ++++++++++++++++++++++++++++ 2 files changed, 116 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index eda29095325af..dbd952b524a85 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -217,23 +217,33 @@ func (m *Modbus) InitRegister(fields []fieldContainer, name string) error { sort.Slice(addrs, func(i, j int) bool { return addrs[i] < addrs[j] }) ii := 0 + maxQuantity := 1 var registersRange []registerRange + if name == cDiscreteInputs || name == cCoils { + maxQuantity = 2000 + } else if name == cInputRegisters || name == cHoldingRegisters { + maxQuantity = 125 + } // Get range of consecutive integers // [1, 2, 3, 5, 6, 10, 11, 12, 14] // (1, 3) , (5, 2) , (10, 3), (14 , 1) for range addrs { - if ii < len(addrs) { - start := addrs[ii] - end := start + if ii >= len(addrs) { + break + } + quantity := 1 + start := addrs[ii] + end := start - for ii < len(addrs)-1 && addrs[ii+1]-addrs[ii] == 1 { - end = addrs[ii+1] - ii++ - } + for ii < len(addrs)-1 && addrs[ii+1]-addrs[ii] == 1 && quantity < maxQuantity { + end = addrs[ii+1] ii++ - registersRange = append(registersRange, registerRange{start, end - start + 1}) + quantity++ } + ii++ + + registersRange = append(registersRange, registerRange{start, end - start + 1}) } m.registers = append(m.registers, register{name, registersRange, fields}) @@ -434,7 +444,7 @@ func (m *Modbus) getFields() error { for bitPosition := 0; bitPosition < 8; bitPosition++ { bitRawValues[address] = getBitValue(readValue, bitPosition) address = address + 1 - if address+1 > rr.length { + if address > rr.address+rr.length { break } } diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index 99fa7bb7da7da..397e6da463335 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -1,6 +1,7 @@ package modbus import ( + "fmt" "testing" m "github.com/goburrow/modbus" @@ -657,6 +658,102 @@ func TestHoldingRegisters(t *testing.T) { } } +func TestReadMultipleCoilLimit(t *testing.T) { + serv := mbserver.NewServer() + err := serv.ListenTCP("localhost:1502") + assert.NoError(t, err) + defer serv.Close() + + handler := m.NewTCPClientHandler("localhost:1502") + err = handler.Connect() + assert.NoError(t, err) + defer handler.Close() + client := m.NewClient(handler) + + fcs := []fieldContainer{} + writeValue := uint16(0) + for i := 0; i <= 4000; i++ { + fc := fieldContainer{} + fc.Name = fmt.Sprintf("coil-%v", i) + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + t.Run(fc.Name, func(t *testing.T) { + _, err = client.WriteSingleCoil(fc.Address[0], writeValue) + assert.NoError(t, err) + }) + + writeValue = 65280 - writeValue + } + + modbus := Modbus{ + Name: "TestReadCoils", + Controller: "tcp://localhost:1502", + SlaveID: 1, + Coils: fcs, + } + + err = modbus.Init() + assert.NoError(t, err) + var acc testutil.Accumulator + err = modbus.Gather(&acc) + assert.NoError(t, err) + + writeValue = 0 + for i := 0; i <= 4000; i++ { + t.Run(modbus.registers[0].Fields[i].Name, func(t *testing.T) { + assert.Equal(t, writeValue, modbus.registers[0].Fields[i].value) + writeValue = 1 - writeValue + }) + } +} + +func TestReadMultipleHoldingRegisterLimit(t *testing.T) { + serv := mbserver.NewServer() + err := serv.ListenTCP("localhost:1502") + assert.NoError(t, err) + defer serv.Close() + + handler := m.NewTCPClientHandler("localhost:1502") + err = handler.Connect() + assert.NoError(t, err) + defer handler.Close() + client := m.NewClient(handler) + + fcs := []fieldContainer{} + for i := 0; i <= 400; i++ { + fc := fieldContainer{} + fc.Name = fmt.Sprintf("HoldingRegister-%v", i) + fc.ByteOrder = "AB" + fc.DataType = "INT16" + fc.Scale = 1.0 + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + t.Run(fc.Name, func(t *testing.T) { + _, err = client.WriteSingleRegister(fc.Address[0], uint16(i)) + assert.NoError(t, err) + }) + } + + modbus := Modbus{ + Name: "TestHoldingRegister", + Controller: "tcp://localhost:1502", + SlaveID: 1, + HoldingRegisters: fcs, + } + + err = modbus.Init() + assert.NoError(t, err) + var acc testutil.Accumulator + err = modbus.Gather(&acc) + assert.NoError(t, err) + + for i := 0; i <= 400; i++ { + assert.Equal(t, int16(i), modbus.registers[0].Fields[i].value) + } +} + func TestRetrySuccessful(t *testing.T) { retries := 0 maxretries := 2 From 956350db944f7edf56be286d9c9440b91b92e3ed Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Fri, 26 Feb 2021 13:58:13 -0500 Subject: [PATCH 239/761] Display error message on badly formatted config string array (eg. namepass) (#8910) * add error when expected string array is not in string array format * add word --- config/config.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/config/config.go b/config/config.go index e086eebffa376..560d8a5cf85a6 100644 --- a/config/config.go +++ b/config/config.go @@ -1538,6 +1538,9 @@ func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string, target *[ *target = append(*target, str.Value) } } + } else { + c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format", fieldName)) + return } } } @@ -1554,6 +1557,9 @@ func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string, target *[]m tagfilter.Filter = append(tagfilter.Filter, str.Value) } } + } else { + c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format on each entry", fieldName)) + return } *target = append(*target, tagfilter) } From accf91305fc3160c7efb1693f85e3a3e81ae1f2d Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Fri, 26 Feb 2021 13:58:28 -0500 Subject: [PATCH 240/761] add proxy (#8915) --- plugins/inputs/cloudwatch/README.md | 3 +++ plugins/inputs/cloudwatch/cloudwatch.go | 22 +++++++++++++++++--- plugins/inputs/cloudwatch/cloudwatch_test.go | 15 +++++++++++++ 3 files changed, 37 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index bc7b9b50c5d80..c86e66e674c6d 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -41,6 +41,9 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## ex: endpoint_url = "http://localhost:8000" # endpoint_url = "" + ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) + # http_proxy_url = "http://localhost:8888" + # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. Some are collected at # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 10f34a41f07ee..1bc5379e56419 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -18,6 +18,7 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/limiter" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -35,6 +36,8 @@ type CloudWatch struct { StatisticInclude []string `toml:"statistic_include"` Timeout config.Duration `toml:"timeout"` + proxy.HTTPProxy + Period config.Duration `toml:"period"` Delay config.Duration `toml:"delay"` Namespace string `toml:"namespace"` @@ -107,6 +110,9 @@ func (c *CloudWatch) SampleConfig() string { ## ex: endpoint_url = "http://localhost:8000" # endpoint_url = "" + ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) + # http_proxy_url = "http://localhost:8888" + # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. Some are collected at # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. @@ -188,7 +194,10 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { } if c.client == nil { - c.initializeCloudWatch() + err := c.initializeCloudWatch() + if err != nil { + return err + } } filteredMetrics, err := getFilteredMetrics(c) @@ -249,7 +258,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { return c.aggregateMetrics(acc, results) } -func (c *CloudWatch) initializeCloudWatch() { +func (c *CloudWatch) initializeCloudWatch() error { credentialConfig := &internalaws.CredentialConfig{ Region: c.Region, AccessKey: c.AccessKey, @@ -262,11 +271,16 @@ func (c *CloudWatch) initializeCloudWatch() { } configProvider := credentialConfig.Credentials() + proxy, err := c.HTTPProxy.Proxy() + if err != nil { + return err + } + cfg := &aws.Config{ HTTPClient: &http.Client{ // use values from DefaultTransport Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, + Proxy: proxy, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, @@ -283,6 +297,8 @@ func (c *CloudWatch) initializeCloudWatch() { loglevel := aws.LogOff c.client = cloudwatch.New(configProvider, cfg.WithLogLevel(loglevel)) + + return nil } type filteredMetric struct { diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 2983773ad1bb5..798cdff1f2bed 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -1,6 +1,7 @@ package cloudwatch import ( + "net/http" "testing" "time" @@ -11,6 +12,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/testutil" ) @@ -333,3 +335,16 @@ func TestUpdateWindow(t *testing.T) { assert.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) assert.EqualValues(t, c.windowStart, newStartTime) } + +func TestProxyFunction(t *testing.T) { + c := &CloudWatch{ + HTTPProxy: proxy.HTTPProxy{HTTPProxyURL: "http://www.penguins.com"}, + } + + proxyFunction, err := c.HTTPProxy.Proxy() + require.NoError(t, err) + + proxyResult, err := proxyFunction(&http.Request{}) + require.NoError(t, err) + require.Equal(t, "www.penguins.com", proxyResult.Host) +} From b362ee466502320190eca7b40dfe548a6c904f16 Mon Sep 17 00:00:00 2001 From: Avinash Nigam <56562150+avinash-nigam@users.noreply.github.com> Date: Fri, 26 Feb 2021 10:59:29 -0800 Subject: [PATCH 241/761] Bug Fix - SQL Server HADR queries for SQL Versions (#8833) --- plugins/inputs/sqlserver/sqlserver_test.go | 90 +++++++++ plugins/inputs/sqlserver/sqlserverqueries.go | 192 +++++++++++-------- 2 files changed, 203 insertions(+), 79 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index f9306ee2f98fd..9af7003e08c84 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -169,6 +169,96 @@ func TestSqlServer_MultipleInit(t *testing.T) { assert.Equal(t, s2.isInitialized, true) } +func TestSqlServer_AGQueriesApplicableForDatabaseTypeSQLServer(t *testing.T) { + // This test case checks where Availability Group (AG / HADR) queries return an output when included for processing for DatabaseType = SQLServer + // And they should not be processed when DatabaseType = AzureSQLDB + + // Please change the connection string to connect to relevant database when executing the test case + + t.Skip("Skipping as unable to open tcp connection with host '127.0.0.1:1433") + + testServer := "Server=127.0.0.1;Port=1433;Database=testdb1;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" + + s := &SQLServer{ + Servers: []string{testServer}, + DatabaseType: "SQLServer", + IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + } + s2 := &SQLServer{ + Servers: []string{testServer}, + DatabaseType: "AzureSQLDB", + IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + } + + var acc, acc2 testutil.Accumulator + err := s.Gather(&acc) + require.NoError(t, err) + assert.Equal(t, s.isInitialized, true) + assert.Equal(t, s2.isInitialized, false) + + err = s2.Gather(&acc2) + require.NoError(t, err) + assert.Equal(t, s.isInitialized, true) + assert.Equal(t, s2.isInitialized, true) + + // acc includes size metrics, and excludes memory metrics + assert.True(t, acc.HasMeasurement("sqlserver_hadr_replica_states")) + assert.True(t, acc.HasMeasurement("sqlserver_hadr_dbreplica_states")) + + // acc2 includes memory metrics, and excludes size metrics + assert.False(t, acc2.HasMeasurement("sqlserver_hadr_replica_states")) + assert.False(t, acc2.HasMeasurement("sqlserver_hadr_dbreplica_states")) +} + +func TestSqlServer_AGQueryFieldsOutputBasedOnSQLServerVersion(t *testing.T) { + // This test case checks where Availability Group (AG / HADR) queries return specific fields supported by corresponding SQL Server version database being connected to. + + // Please change the connection strings to connect to relevant database when executing the test case + + t.Skip("Skipping as unable to open tcp connection with host '127.0.0.1:1433") + + testServer2019 := "Server=127.0.0.10;Port=1433;Database=testdb2019;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" + testServer2012 := "Server=127.0.0.20;Port=1433;Database=testdb2012;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" + + s2019 := &SQLServer{ + Servers: []string{testServer2019}, + DatabaseType: "SQLServer", + IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + } + s2012 := &SQLServer{ + Servers: []string{testServer2012}, + DatabaseType: "SQLServer", + IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + } + + var acc2019, acc2012 testutil.Accumulator + err := s2019.Gather(&acc2019) + require.NoError(t, err) + assert.Equal(t, s2019.isInitialized, true) + assert.Equal(t, s2012.isInitialized, false) + + err = s2012.Gather(&acc2012) + require.NoError(t, err) + assert.Equal(t, s2019.isInitialized, true) + assert.Equal(t, s2012.isInitialized, true) + + // acc2019 includes new HADR query fields + assert.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "basic_features")) + assert.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "is_distributed")) + assert.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "seeding_mode")) + assert.True(t, acc2019.HasTag("sqlserver_hadr_replica_states", "seeding_mode_desc")) + assert.True(t, acc2019.HasField("sqlserver_hadr_dbreplica_states", "is_primary_replica")) + assert.True(t, acc2019.HasField("sqlserver_hadr_dbreplica_states", "secondary_lag_seconds")) + + // acc2012 does not include new HADR query fields + assert.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "basic_features")) + assert.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "is_distributed")) + assert.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "seeding_mode")) + assert.False(t, acc2012.HasTag("sqlserver_hadr_replica_states", "seeding_mode_desc")) + assert.False(t, acc2012.HasField("sqlserver_hadr_dbreplica_states", "is_primary_replica")) + assert.False(t, acc2012.HasField("sqlserver_hadr_dbreplica_states", "secondary_lag_seconds")) +} + const mockPerformanceMetrics = `measurement;servername;type;Point In Time Recovery;Available physical memory (bytes);Average pending disk IO;Average runnable tasks;Average tasks;Buffer pool rate (bytes/sec);Connection memory per connection (bytes);Memory grant pending;Page File Usage (%);Page lookup per batch request;Page split per batch request;Readahead per page read;Signal wait (%);Sql compilation per batch request;Sql recompilation per batch request;Total target memory ratio Performance metrics;WIN8-DEV;Performance metrics;0;6353158144;0;0;7;2773;415061;0;25;229371;130;10;18;188;52;14` diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index a2ef3ca3ca123..f90966c4e2985 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -9,11 +9,11 @@ import ( // Variable @MajorMinorVersion: // - 1000 --> SQL Server 2008 // - 1050 --> SQL Server 2008 R2 -// - 1011 --> SQL Server 2012 -// - 1012 --> SQL Server 2014 -// - 1013 --> SQL Server 2016 -// - 1014 --> SQL Server 2017 -// - 1015 --> SQL Server 2019 +// - 1100 --> SQL Server 2012 +// - 1200 --> SQL Server 2014 +// - 1300 --> SQL Server 2016 +// - 1400 --> SQL Server 2017 +// - 1500 --> SQL Server 2019 // Thanks Bob Ward (http://aka.ms/bobwardms) // and the folks at Stack Overflow (https://github.com/opserver/Opserver/blob/9c89c7e9936b58ad237b30e6f4cc6cd59c406889/Opserver.Core/Data/SQL/SQLInstance.Memory.cs) @@ -1169,6 +1169,8 @@ FROM ( ) AS z ` +// Collects availability replica state information from `sys.dm_hadr_availability_replica_states` for a High Availability / Disaster Recovery (HADR) setup +// Certain fields are only supported on SQL Server 2016 and newer version, identified by check MajorMinorVersion >= 1300 const sqlServerAvailabilityReplicaStates string = ` IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. Check the database_type parameter in the telegraf configuration.'; @@ -1176,50 +1178,65 @@ IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterp RETURN END -IF SERVERPROPERTY('IsHadrEnabled') = 1 BEGIN - SELECT - 'sqlserver_hadr_replica_states' AS [measurement], - REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], - convert(nvarchar(36), hars.replica_id) as replica_id, - ar.replica_server_name, - convert(nvarchar(36), hars.group_id) as group_id, - ag.name AS group_name, - ag.basic_features, - ag.is_distributed, - hags.synchronization_health_desc AS ag_synchronization_health_desc, - ar.replica_metadata_id, - ar.availability_mode, - ar.availability_mode_desc, - ar.failover_mode, - ar.failover_mode_desc, - ar.session_timeout, - ar.primary_role_allow_connections, - ar.primary_role_allow_connections_desc, - ar.secondary_role_allow_connections, - ar.secondary_role_allow_connections_desc, - ar.seeding_mode, - ar.seeding_mode_desc, - hars.is_local, - hars.role, - hars.role_desc, - hars.operational_state, - hars.operational_state_desc, - hars.connected_state, - hars.connected_state_desc, - hars.recovery_health, - hars.recovery_health_desc, - hars.synchronization_health AS replica_synchronization_health, - hars.synchronization_health_desc AS replica_synchronization_health_desc, - hars.last_connect_error_number, - hars.last_connect_error_description, - hars.last_connect_error_timestamp - from sys.dm_hadr_availability_replica_states AS hars - inner join sys.availability_replicas AS ar on hars.replica_id = ar.replica_id - inner join sys.availability_groups AS ag on ar.group_id = ag.group_id - inner join sys.dm_hadr_availability_group_states AS hags ON hags.group_id = ag.group_id +DECLARE + @SqlStatement AS nvarchar(max) + ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int) + ,@Columns AS nvarchar(MAX) = '' + +IF @MajorMinorVersion >= 1300 BEGIN + SET @Columns += N' + ,ag.basic_features + ,ag.is_distributed + ,ar.seeding_mode + ,ar.seeding_mode_desc' END + +SET @SqlStatement = N' +IF SERVERPROPERTY(''IsHadrEnabled'') = 1 BEGIN + SELECT + ''sqlserver_hadr_replica_states'' AS [measurement] + ,REPLACE(@@SERVERNAME, ''\'', '':'') AS [sql_instance] + ,convert(nvarchar(36), hars.replica_id) as replica_id + ,ar.replica_server_name + ,convert(nvarchar(36), hars.group_id) as group_id + ,ag.name AS group_name + ,hags.synchronization_health_desc AS ag_synchronization_health_desc + ,ar.replica_metadata_id + ,ar.availability_mode + ,ar.availability_mode_desc + ,ar.failover_mode + ,ar.failover_mode_desc + ,ar.session_timeout + ,ar.primary_role_allow_connections + ,ar.primary_role_allow_connections_desc + ,ar.secondary_role_allow_connections + ,ar.secondary_role_allow_connections_desc + ,hars.is_local + ,hars.role + ,hars.role_desc + ,hars.operational_state + ,hars.operational_state_desc + ,hars.connected_state + ,hars.connected_state_desc + ,hars.recovery_health + ,hars.recovery_health_desc + ,hars.synchronization_health AS replica_synchronization_health + ,hars.synchronization_health_desc AS replica_synchronization_health_desc + ,hars.last_connect_error_number + ,hars.last_connect_error_description + ,hars.last_connect_error_timestamp' + + @Columns + N' + FROM sys.dm_hadr_availability_replica_states AS hars + INNER JOIN sys.availability_replicas AS ar on hars.replica_id = ar.replica_id + INNER JOIN sys.availability_groups AS ag on ar.group_id = ag.group_id + INNER JOIN sys.dm_hadr_availability_group_states AS hags ON hags.group_id = ag.group_id +END' + +EXEC sp_executesql @SqlStatement ` +// Collects database replica state information from `sys.dm_hadr_database_replica_states` for a High Availability / Disaster Recovery (HADR) setup +// Certain fields are only supported on SQL Server 2016 and newer version, or SQL Server 2014 and newer, identified by check MajorMinorVersion >= 1300 or MajorMinorVersion >= 1200 const sqlServerDatabaseReplicaStates string = ` IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. Check the database_type parameter in the telegraf configuration.'; @@ -1227,38 +1244,55 @@ IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterp RETURN END -IF SERVERPROPERTY('IsHadrEnabled') = 1 BEGIN - SELECT - 'sqlserver_hadr_dbreplica_states' AS [measurement], - REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], - database_id, - db_name(database_id) as database_name, - convert(nvarchar(36), drs.replica_id) as replica_id, - ar.replica_server_name, - convert(nvarchar(36), drs.group_database_id) as group_database_id, - is_primary_replica, - synchronization_state, - synchronization_state_desc, - is_commit_participant, - synchronization_health, - synchronization_health_desc, - database_state, - database_state_desc, - is_suspended, - suspend_reason, - suspend_reason_desc, - last_sent_time, - last_received_time, - last_hardened_time, - last_redone_time, - log_send_queue_size, - log_send_rate, - redo_queue_size, - redo_rate, - filestream_send_rate, - last_commit_time, - secondary_lag_seconds - from sys.dm_hadr_database_replica_states AS drs - inner join sys.availability_replicas AS ar on drs.replica_id = ar.replica_id +DECLARE + @SqlStatement AS nvarchar(max) + ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int) + ,@Columns AS nvarchar(MAX) = '' + +IF @MajorMinorVersion >= 1200 BEGIN + SET @Columns += N' + ,is_primary_replica' END + +IF @MajorMinorVersion >= 1300 BEGIN + SET @Columns += N' + ,secondary_lag_seconds' +END + +SET @SqlStatement = N' +IF SERVERPROPERTY(''IsHadrEnabled'') = 1 BEGIN + SELECT + ''sqlserver_hadr_dbreplica_states'' AS [measurement] + ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance] + ,database_id + ,db_name(database_id) as database_name + ,convert(nvarchar(36), drs.replica_id) as replica_id + ,ar.replica_server_name + ,convert(nvarchar(36), drs.group_database_id) as group_database_id + ,synchronization_state + ,synchronization_state_desc + ,is_commit_participant + ,synchronization_health + ,synchronization_health_desc + ,database_state + ,database_state_desc + ,is_suspended + ,suspend_reason + ,suspend_reason_desc + ,last_sent_time + ,last_received_time + ,last_hardened_time + ,last_redone_time + ,log_send_queue_size + ,log_send_rate + ,redo_queue_size + ,redo_rate + ,filestream_send_rate + ,last_commit_time' + + @Columns + N' + FROM sys.dm_hadr_database_replica_states AS drs + INNER JOIN sys.availability_replicas AS ar on drs.replica_id = ar.replica_id +END' + +EXEC sp_executesql @SqlStatement ` From a65a3052a90a04d41360457961a5f1b88d6cbcae Mon Sep 17 00:00:00 2001 From: Jeff Ashton Date: Mon, 1 Mar 2021 10:56:17 -0500 Subject: [PATCH 242/761] outputs.kinesis - log record error count (#8817) --- plugins/outputs/kinesis/kinesis.go | 30 ++-- plugins/outputs/kinesis/kinesis_test.go | 228 ++++++++++++++++++++++++ 2 files changed, 245 insertions(+), 13 deletions(-) diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 88620fa70d3f9..75f790f3318b7 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -6,6 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" "github.com/gofrs/uuid" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" @@ -29,7 +30,7 @@ type ( RandomPartitionKey bool `toml:"use_random_partitionkey"` Partition *Partition `toml:"partition"` Debug bool `toml:"debug"` - svc *kinesis.Kinesis + svc kinesisiface.KinesisAPI serializer serializers.Serializer } @@ -154,26 +155,29 @@ func (k *KinesisOutput) SetSerializer(serializer serializers.Serializer) { k.serializer = serializer } -func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Duration { +func (k *KinesisOutput) writeKinesis(r []*kinesis.PutRecordsRequestEntry) time.Duration { + start := time.Now() payload := &kinesis.PutRecordsInput{ Records: r, StreamName: aws.String(k.StreamName), } + resp, err := k.svc.PutRecords(payload) + if err != nil { + log.Printf("E! kinesis: Unable to write to Kinesis : %s", err.Error()) + return time.Since(start) + } + if k.Debug { - resp, err := k.svc.PutRecords(payload) - if err != nil { - log.Printf("E! kinesis: Unable to write to Kinesis : %s", err.Error()) - } log.Printf("I! Wrote: '%+v'", resp) + } - } else { - _, err := k.svc.PutRecords(payload) - if err != nil { - log.Printf("E! kinesis: Unable to write to Kinesis : %s", err.Error()) - } + failed := *resp.FailedRecordCount + if failed > 0 { + log.Printf("E! kinesis: Unable to write %+v of %+v record(s) to Kinesis", failed, len(r)) } + return time.Since(start) } @@ -241,7 +245,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { if sz == 500 { // Max Messages Per PutRecordRequest is 500 - elapsed := writekinesis(k, r) + elapsed := k.writeKinesis(r) log.Printf("D! Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) sz = 0 r = nil @@ -249,7 +253,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { } if sz > 0 { - elapsed := writekinesis(k, r) + elapsed := k.writeKinesis(r) log.Printf("D! Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) } diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 9d4f6729be53c..293ec86fb829e 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -1,13 +1,19 @@ package kinesis import ( + "fmt" "testing" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" "github.com/gofrs/uuid" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" ) +const zero int64 = 0 + func TestPartitionKey(t *testing.T) { assert := assert.New(t) @@ -83,3 +89,225 @@ func TestPartitionKey(t *testing.T) { assert.Nil(err, "Issue parsing UUID") assert.Equal(byte(4), u.Version(), "PartitionKey should be UUIDv4") } + +func TestWriteKinesis_WhenSuccess(t *testing.T) { + + assert := assert.New(t) + + partitionKey := "partitionKey" + shard := "shard" + sequenceNumber := "sequenceNumber" + streamName := "stream" + + records := []*kinesis.PutRecordsRequestEntry{ + { + PartitionKey: &partitionKey, + Data: []byte{0x65}, + }, + } + + svc := &mockKinesisPutRecords{} + svc.SetupResponse( + 0, + []*kinesis.PutRecordsResultEntry{ + { + ErrorCode: nil, + ErrorMessage: nil, + SequenceNumber: &sequenceNumber, + ShardId: &shard, + }, + }, + ) + + k := KinesisOutput{ + StreamName: streamName, + svc: svc, + } + + elapsed := k.writeKinesis(records) + assert.GreaterOrEqual(elapsed.Nanoseconds(), zero) + + svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + { + StreamName: &streamName, + Records: records, + }, + }) +} + +func TestWriteKinesis_WhenRecordErrors(t *testing.T) { + + assert := assert.New(t) + + errorCode := "InternalFailure" + errorMessage := "Internal Service Failure" + partitionKey := "partitionKey" + streamName := "stream" + + records := []*kinesis.PutRecordsRequestEntry{ + { + PartitionKey: &partitionKey, + Data: []byte{0x66}, + }, + } + + svc := &mockKinesisPutRecords{} + svc.SetupResponse( + 1, + []*kinesis.PutRecordsResultEntry{ + { + ErrorCode: &errorCode, + ErrorMessage: &errorMessage, + SequenceNumber: nil, + ShardId: nil, + }, + }, + ) + + k := KinesisOutput{ + StreamName: streamName, + svc: svc, + } + + elapsed := k.writeKinesis(records) + assert.GreaterOrEqual(elapsed.Nanoseconds(), zero) + + svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + { + StreamName: &streamName, + Records: records, + }, + }) +} + +func TestWriteKinesis_WhenServiceError(t *testing.T) { + + assert := assert.New(t) + + partitionKey := "partitionKey" + streamName := "stream" + + records := []*kinesis.PutRecordsRequestEntry{ + { + PartitionKey: &partitionKey, + Data: []byte{}, + }, + } + + svc := &mockKinesisPutRecords{} + svc.SetupErrorResponse( + awserr.New("InvalidArgumentException", "Invalid record", nil), + ) + + k := KinesisOutput{ + StreamName: streamName, + svc: svc, + } + + elapsed := k.writeKinesis(records) + assert.GreaterOrEqual(elapsed.Nanoseconds(), zero) + + svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + { + StreamName: &streamName, + Records: records, + }, + }) +} + +type mockKinesisPutRecordsResponse struct { + Output *kinesis.PutRecordsOutput + Err error +} + +type mockKinesisPutRecords struct { + kinesisiface.KinesisAPI + + requests []*kinesis.PutRecordsInput + responses []*mockKinesisPutRecordsResponse +} + +func (m *mockKinesisPutRecords) SetupResponse( + failedRecordCount int64, + records []*kinesis.PutRecordsResultEntry, +) { + + m.responses = append(m.responses, &mockKinesisPutRecordsResponse{ + Err: nil, + Output: &kinesis.PutRecordsOutput{ + FailedRecordCount: &failedRecordCount, + Records: records, + }, + }) +} + +func (m *mockKinesisPutRecords) SetupErrorResponse(err error) { + + m.responses = append(m.responses, &mockKinesisPutRecordsResponse{ + Err: err, + Output: nil, + }) +} + +func (m *mockKinesisPutRecords) PutRecords(input *kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) { + + reqNum := len(m.requests) + if reqNum > len(m.responses) { + return nil, fmt.Errorf("Response for request %+v not setup", reqNum) + } + + m.requests = append(m.requests, input) + + resp := m.responses[reqNum] + return resp.Output, resp.Err +} + +func (m *mockKinesisPutRecords) AssertRequests( + assert *assert.Assertions, + expected []*kinesis.PutRecordsInput, +) { + + assert.Equal( + len(expected), + len(m.requests), + fmt.Sprintf("Expected %v requests", len(expected)), + ) + + for i, expectedInput := range expected { + actualInput := m.requests[i] + + assert.Equal( + expectedInput.StreamName, + actualInput.StreamName, + fmt.Sprintf("Expected request %v to have correct StreamName", i), + ) + + assert.Equal( + len(expectedInput.Records), + len(actualInput.Records), + fmt.Sprintf("Expected request %v to have %v Records", i, len(expectedInput.Records)), + ) + + for r, expectedRecord := range expectedInput.Records { + actualRecord := actualInput.Records[r] + + assert.Equal( + &expectedRecord.PartitionKey, + &actualRecord.PartitionKey, + fmt.Sprintf("Expected (request %v, record %v) to have correct PartitionKey", i, r), + ) + + assert.Equal( + &expectedRecord.ExplicitHashKey, + &actualRecord.ExplicitHashKey, + fmt.Sprintf("Expected (request %v, record %v) to have correct ExplicitHashKey", i, r), + ) + + assert.Equal( + expectedRecord.Data, + actualRecord.Data, + fmt.Sprintf("Expected (request %v, record %v) to have correct Data", i, r), + ) + } + } +} From 4584d691a77bcc6c1dfa503c8aee8322240ec5e8 Mon Sep 17 00:00:00 2001 From: Thomas Casteleyn Date: Mon, 1 Mar 2021 21:46:49 +0100 Subject: [PATCH 243/761] Fix max_repetitions signedness issues #8823 (#8902) --- go.mod | 4 ++-- go.sum | 6 ++++-- internal/snmp/config.go | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index cc85b7cf41e12..06fd402c51079 100644 --- a/go.mod +++ b/go.mod @@ -70,7 +70,7 @@ require ( github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.13 github.com/gorilla/mux v1.6.2 - github.com/gosnmp/gosnmp v1.29.0 + github.com/gosnmp/gosnmp v1.30.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 @@ -122,7 +122,7 @@ require ( github.com/sirupsen/logrus v1.4.2 github.com/soniah/gosnmp v1.25.0 github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 - github.com/stretchr/testify v1.6.1 + github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect github.com/tidwall/gjson v1.6.0 diff --git a/go.sum b/go.sum index 07d28e132ffe4..c9bbf781149c7 100644 --- a/go.sum +++ b/go.sum @@ -363,8 +363,8 @@ github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gosnmp/gosnmp v1.29.0 h1:fEkud7oiYVzR64L+/BQA7uvp+7COI9+XkrUQi8JunYM= -github.com/gosnmp/gosnmp v1.29.0/go.mod h1:Ux0YzU4nV5yDET7dNIijd0VST0BCy8ijBf+gTVFQeaM= +github.com/gosnmp/gosnmp v1.30.0 h1:P6uUvPaoZCZh2EXvSUIgsxYZ1vdD/Sonl2BSVCGieG8= +github.com/gosnmp/gosnmp v1.30.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= @@ -659,6 +659,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg= diff --git a/internal/snmp/config.go b/internal/snmp/config.go index e616e75709737..9941f0682fe3d 100644 --- a/internal/snmp/config.go +++ b/internal/snmp/config.go @@ -15,7 +15,7 @@ type ClientConfig struct { Community string `toml:"community"` // Parameters for Version 2 & 3 - MaxRepetitions uint8 `toml:"max_repetitions"` + MaxRepetitions uint32 `toml:"max_repetitions"` // Parameters for Version 3 ContextName string `toml:"context_name"` From 8a6907a18625a760e5f0f3d95016c6198c83b76b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Mon, 1 Mar 2021 22:04:35 +0100 Subject: [PATCH 244/761] Revive fixes - part 3 (#8872) * * Revive fixes regarding following set of rules: [rule.var-naming] --- .golangci.yml | 24 +++ plugins/aggregators/basicstats/basicstats.go | 52 ++--- plugins/inputs/activemq/activemq.go | 12 +- plugins/inputs/apache/apache.go | 8 +- plugins/inputs/bcache/bcache_test.go | 62 +++--- plugins/inputs/beanstalkd/beanstalkd.go | 10 +- plugins/inputs/beat/beat.go | 8 +- plugins/inputs/bind/bind.go | 4 +- plugins/inputs/bind/json_stats.go | 10 +- plugins/inputs/bind/xml_stats_v2.go | 4 +- plugins/inputs/bind/xml_stats_v3.go | 10 +- plugins/inputs/cassandra/cassandra.go | 14 +- plugins/inputs/ceph/ceph.go | 8 +- plugins/inputs/ceph/ceph_test.go | 4 +- plugins/inputs/cloud_pubsub/pubsub.go | 4 +- plugins/inputs/cloud_pubsub/pubsub_test.go | 30 +-- plugins/inputs/cpu/cpu.go | 19 +- plugins/inputs/dns_query/dns_query.go | 18 +- plugins/inputs/dns_query/dns_query_test.go | 18 +- plugins/inputs/dovecot/dovecot_test.go | 4 +- plugins/inputs/exec/exec_test.go | 8 +- plugins/inputs/graylog/graylog.go | 14 +- plugins/inputs/http_response/http_response.go | 34 +-- plugins/inputs/httpjson/httpjson.go | 26 +-- plugins/inputs/httpjson/httpjson_test.go | 30 +-- plugins/inputs/icinga2/icinga2.go | 10 +- plugins/inputs/infiniband/infiniband_test.go | 4 +- .../inputs/intel_powerstat/unit_converter.go | 4 +- plugins/inputs/interrupts/interrupts.go | 4 +- plugins/inputs/interrupts/interrupts_test.go | 12 +- plugins/inputs/ipmi_sensor/ipmi.go | 26 +-- plugins/inputs/ipmi_sensor/ipmi_test.go | 4 +- plugins/inputs/jolokia/jolokia.go | 20 +- plugins/inputs/jolokia2/client.go | 24 +-- .../openconfig_telemetry_test.go | 12 +- plugins/inputs/kapacitor/kapacitor.go | 4 +- plugins/inputs/kernel/kernel.go | 18 +- plugins/inputs/kernel/kernel_test.go | 30 +-- .../kernel_vmstat/kernel_vmstat_test.go | 16 +- plugins/inputs/kibana/kibana.go | 12 +- plugins/inputs/kubernetes/kubernetes.go | 10 +- plugins/inputs/kubernetes/kubernetes_pods.go | 2 +- plugins/inputs/lanz/lanz.go | 20 +- plugins/inputs/lanz/lanz_test.go | 10 +- plugins/inputs/logstash/logstash.go | 34 +-- plugins/inputs/logstash/logstash_test.go | 24 +-- plugins/inputs/mailchimp/chimp_api.go | 12 +- plugins/inputs/mailchimp/mailchimp.go | 12 +- plugins/inputs/mailchimp/mailchimp_test.go | 4 +- plugins/inputs/mesos/mesos.go | 4 +- plugins/inputs/mongodb/mongodb.go | 8 +- plugins/inputs/mongodb/mongodb_server.go | 6 +- plugins/inputs/mongodb/mongodb_test.go | 12 +- plugins/inputs/nginx/nginx.go | 8 +- plugins/inputs/nginx/nginx_test.go | 20 +- plugins/inputs/nginx_plus/nginx_plus.go | 12 +- plugins/inputs/nginx_plus/nginx_plus_test.go | 4 +- .../inputs/nginx_plus_api/nginx_plus_api.go | 22 +- .../nginx_plus_api/nginx_plus_api_metrics.go | 72 +++---- .../nginx_plus_api_metrics_test.go | 48 ++--- .../nginx_plus_api/nginx_plus_api_types.go | 10 +- .../nginx_upstream_check.go | 12 +- plugins/inputs/nsd/nsd.go | 14 +- plugins/inputs/nsq/nsq.go | 4 +- plugins/inputs/nstat/nstat.go | 26 +-- plugins/inputs/opensmtpd/opensmtpd.go | 6 +- plugins/inputs/opensmtpd/opensmtpd_test.go | 4 +- .../inputs/openweathermap/openweathermap.go | 50 ++--- .../openweathermap/openweathermap_test.go | 28 +-- plugins/inputs/passenger/passenger.go | 8 +- plugins/inputs/phpfpm/child.go | 26 +-- plugins/inputs/phpfpm/fcgi.go | 32 +-- plugins/inputs/phpfpm/fcgi_client.go | 8 +- plugins/inputs/phpfpm/fcgi_test.go | 16 +- plugins/inputs/phpfpm/phpfpm.go | 58 ++--- plugins/inputs/postgresql/postgresql.go | 12 +- .../postgresql_extensible.go | 57 +++-- plugins/inputs/procstat/native_finder.go | 2 +- .../procstat/native_finder_windows_test.go | 2 +- plugins/inputs/procstat/pgrep.go | 2 +- plugins/inputs/procstat/process.go | 6 +- plugins/inputs/procstat/procstat.go | 2 +- plugins/inputs/procstat/procstat_test.go | 2 +- plugins/inputs/proxmox/proxmox.go | 102 ++++----- plugins/inputs/proxmox/proxmox_test.go | 16 +- plugins/inputs/proxmox/structs.go | 16 +- plugins/inputs/rabbitmq/rabbitmq.go | 8 +- plugins/inputs/raindrops/raindrops.go | 46 ++-- plugins/inputs/raindrops/raindrops_test.go | 2 +- plugins/inputs/redfish/redfish.go | 6 +- plugins/inputs/redfish/redfish_test.go | 30 +-- plugins/inputs/rethinkdb/rethinkdb.go | 16 +- plugins/inputs/rethinkdb/rethinkdb_data.go | 4 +- .../inputs/rethinkdb/rethinkdb_data_test.go | 4 +- plugins/inputs/rethinkdb/rethinkdb_server.go | 14 +- plugins/inputs/rethinkdb/rethinkdb_test.go | 6 +- plugins/inputs/riak/riak.go | 12 +- plugins/inputs/snmp_legacy/snmp_legacy.go | 104 ++++----- plugins/inputs/sqlserver/sqlqueriesV2.go | 2 +- plugins/inputs/sqlserver/sqlserver.go | 4 +- plugins/inputs/sqlserver/sqlserverqueries.go | 2 +- plugins/inputs/statsd/statsd.go | 6 +- .../systemd_units/systemd_units_linux.go | 26 +-- .../systemd_units/systemd_units_linux_test.go | 4 +- plugins/inputs/tcp_listener/tcp_listener.go | 28 +-- .../inputs/tcp_listener/tcp_listener_test.go | 24 +-- plugins/inputs/tengine/tengine.go | 198 +++++++++--------- plugins/inputs/tengine/tengine_test.go | 10 +- plugins/inputs/tomcat/tomcat.go | 4 +- plugins/inputs/udp_listener/udp_listener.go | 30 +-- .../inputs/udp_listener/udp_listener_test.go | 16 +- plugins/inputs/vsphere/endpoint.go | 18 +- plugins/inputs/vsphere/vsphere.go | 4 +- .../filestack/filestack_webhooks_events.go | 4 +- .../mandrill/mandrill_webhooks_events.go | 4 +- .../rollbar/rollbar_webhooks_events.go | 24 +-- plugins/inputs/zfs/zfs_linux_test.go | 8 +- plugins/inputs/zookeeper/zookeeper.go | 6 +- plugins/outputs/amon/amon.go | 5 +- .../outputs/azure_monitor/azure_monitor.go | 12 +- plugins/outputs/datadog/datadog.go | 14 +- plugins/outputs/datadog/datadog_test.go | 12 +- plugins/outputs/dynatrace/dynatrace.go | 6 +- plugins/outputs/dynatrace/dynatrace_test.go | 8 +- .../outputs/elasticsearch/elasticsearch.go | 4 +- plugins/outputs/instrumental/instrumental.go | 24 +-- .../outputs/instrumental/instrumental_test.go | 2 +- plugins/outputs/logzio/logzio.go | 4 +- plugins/outputs/newrelic/newrelic.go | 6 +- plugins/outputs/newrelic/newrelic_test.go | 2 +- plugins/outputs/opentsdb/opentsdb.go | 18 +- plugins/outputs/opentsdb/opentsdb_http.go | 6 +- plugins/outputs/opentsdb/opentsdb_test.go | 4 +- plugins/outputs/riemann/riemann.go | 4 +- .../socket_writer/socket_writer_test.go | 14 +- plugins/outputs/wavefront/wavefront.go | 40 ++-- .../yandex_cloud_monitoring.go | 26 +-- .../yandex_cloud_monitoring_test.go | 14 +- plugins/parsers/grok/influx_patterns.go | 2 +- plugins/parsers/grok/parser.go | 52 ++--- plugins/parsers/wavefront/element.go | 26 +-- plugins/parsers/wavefront/scanner.go | 28 +-- plugins/parsers/wavefront/token.go | 32 +-- plugins/processors/enum/enum_test.go | 6 +- plugins/processors/ifname/ifname.go | 12 +- plugins/processors/starlark/builtins.go | 18 +- plugins/processors/starlark/field_dict.go | 18 +- plugins/processors/starlark/tag_dict.go | 18 +- plugins/processors/tag_limit/tag_limit.go | 4 +- plugins/serializers/graphite/graphite.go | 16 +- plugins/serializers/graphite/graphite_test.go | 48 ++--- plugins/serializers/nowmetric/nowmetric.go | 4 +- plugins/serializers/registry.go | 12 +- .../serializers/splunkmetric/splunkmetric.go | 6 +- 154 files changed, 1361 insertions(+), 1340 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index fa95d656fd874..0961510f943c9 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -31,14 +31,38 @@ linters-settings: - name: redefines-builtin-id run: + # which dirs to skip: issues from them won't be reported; + # can use regexp here: generated.*, regexp is applied on full path; + # default value is empty list, but default dirs are skipped independently + # from this option's value (see skip-dirs-use-default). + # "/" will be replaced by current OS file path separator to properly work + # on Windows. skip-dirs: - scripts - docs - etc + + # which files to skip: they will be analyzed, but issues from them + # won't be reported. Default value is empty list, but there is + # no need to include all autogenerated files, we confidently recognize + # autogenerated files. If it's not please let us know. + # "/" will be replaced by current OS file path separator to properly work + # on Windows. skip-files: - plugins/parsers/influx/machine.go* issues: + # List of regexps of issue texts to exclude, empty list by default. + # But independently from this option we use default exclude patterns, + # it can be disabled by `exclude-use-default: false`. To list all + # excluded by default patterns execute `golangci-lint run --help` exclude: - don't use an underscore in package name - exported.*should have comment.*or be unexported + - comment on exported.*should be of the form + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + max-same-issues: 0 diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index 67cee50c4609b..b335ec4f0a71d 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -17,18 +17,18 @@ type BasicStats struct { } type configuredStats struct { - count bool - min bool - max bool - mean bool - variance bool - stdev bool - sum bool - diff bool - non_negative_diff bool - rate bool - non_negative_rate bool - interval bool + count bool + min bool + max bool + mean bool + variance bool + stdev bool + sum bool + diff bool + nonNegativeDiff bool + rate bool + nonNegativeRate bool + interval bool } func NewBasicStats() *BasicStats { @@ -197,13 +197,13 @@ func (b *BasicStats) Push(acc telegraf.Accumulator) { if b.statsConfig.diff { fields[k+"_diff"] = v.diff } - if b.statsConfig.non_negative_diff && v.diff >= 0 { + if b.statsConfig.nonNegativeDiff && v.diff >= 0 { fields[k+"_non_negative_diff"] = v.diff } if b.statsConfig.rate { fields[k+"_rate"] = v.rate } - if b.statsConfig.non_negative_rate && v.diff >= 0 { + if b.statsConfig.nonNegativeRate && v.diff >= 0 { fields[k+"_non_negative_rate"] = v.rate } if b.statsConfig.interval { @@ -242,11 +242,11 @@ func (b *BasicStats) parseStats() *configuredStats { case "diff": parsed.diff = true case "non_negative_diff": - parsed.non_negative_diff = true + parsed.nonNegativeDiff = true case "rate": parsed.rate = true case "non_negative_rate": - parsed.non_negative_rate = true + parsed.nonNegativeRate = true case "interval": parsed.interval = true default: @@ -260,16 +260,16 @@ func (b *BasicStats) parseStats() *configuredStats { func (b *BasicStats) getConfiguredStats() { if b.Stats == nil { b.statsConfig = &configuredStats{ - count: true, - min: true, - max: true, - mean: true, - variance: true, - stdev: true, - sum: false, - non_negative_diff: false, - rate: false, - non_negative_rate: false, + count: true, + min: true, + max: true, + mean: true, + variance: true, + stdev: true, + sum: false, + nonNegativeDiff: false, + rate: false, + nonNegativeRate: false, } } else { b.statsConfig = b.parseStats() diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go index f7847f83d8d04..cb8793619ff8f 100644 --- a/plugins/inputs/activemq/activemq.go +++ b/plugins/inputs/activemq/activemq.go @@ -49,9 +49,9 @@ type Subscribers struct { type Subscriber struct { XMLName xml.Name `xml:"subscriber"` - ClientId string `xml:"clientId,attr"` + ClientID string `xml:"clientId,attr"` SubscriptionName string `xml:"subscriptionName,attr"` - ConnectionId string `xml:"connectionId,attr"` + ConnectionID string `xml:"connectionId,attr"` DestinationName string `xml:"destinationName,attr"` Selector string `xml:"selector,attr"` Active string `xml:"active,attr"` @@ -117,7 +117,7 @@ func (a *ActiveMQ) SampleConfig() string { return sampleConfig } -func (a *ActiveMQ) createHttpClient() (*http.Client, error) { +func (a *ActiveMQ) createHTTPClient() (*http.Client, error) { tlsCfg, err := a.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -157,7 +157,7 @@ func (a *ActiveMQ) Init() error { a.baseURL = u - a.client, err = a.createHttpClient() + a.client, err = a.createHTTPClient() if err != nil { return err } @@ -228,9 +228,9 @@ func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscriber records := make(map[string]interface{}) tags := make(map[string]string) - tags["client_id"] = subscriber.ClientId + tags["client_id"] = subscriber.ClientID tags["subscription_name"] = subscriber.SubscriptionName - tags["connection_id"] = subscriber.ConnectionId + tags["connection_id"] = subscriber.ConnectionID tags["destination_name"] = subscriber.DestinationName tags["selector"] = subscriber.Selector tags["active"] = subscriber.Active diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index 0220b43530495..f6d5831702bd1 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -67,7 +67,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error { } if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -84,7 +84,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc)) + acc.AddError(n.gatherURL(addr, acc)) }(addr) } @@ -92,7 +92,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error { return nil } -func (n *Apache) createHttpClient() (*http.Client, error) { +func (n *Apache) createHTTPClient() (*http.Client, error) { tlsCfg, err := n.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -108,7 +108,7 @@ func (n *Apache) createHttpClient() (*http.Client, error) { return client, nil } -func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { +func (n *Apache) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { req, err := http.NewRequest("GET", addr.String(), nil) if err != nil { return fmt.Errorf("error on new request to %s : %s", addr.String(), err) diff --git a/plugins/inputs/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go index 4646963c4bfe1..b9d786fa91bec 100644 --- a/plugins/inputs/bcache/bcache_test.go +++ b/plugins/inputs/bcache/bcache_test.go @@ -12,26 +12,26 @@ import ( ) const ( - dirty_data = "1.5G" - bypassed = "4.7T" - cache_bypass_hits = "146155333" - cache_bypass_misses = "0" - cache_hit_ratio = "90" - cache_hits = "511469583" - cache_miss_collisions = "157567" - cache_misses = "50616331" - cache_readaheads = "2" + dirtyData = "1.5G" + bypassed = "4.7T" + cacheBypassHits = "146155333" + cacheBypassMisses = "0" + cacheHitRatio = "90" + cacheHits = "511469583" + cacheMissCollisions = "157567" + cacheMisses = "50616331" + cacheReadaheads = "2" ) var ( testBcachePath = os.TempDir() + "/telegraf/sys/fs/bcache" - testBcacheUuidPath = testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411" + testBcacheUUIDPath = testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411" testBcacheDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/bcache0" testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10" ) func TestBcacheGeneratesMetrics(t *testing.T) { - err := os.MkdirAll(testBcacheUuidPath, 0755) + err := os.MkdirAll(testBcacheUUIDPath, 0755) require.NoError(t, err) err = os.MkdirAll(testBcacheDevPath, 0755) @@ -40,49 +40,49 @@ func TestBcacheGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testBcacheBackingDevPath+"/bcache", 0755) require.NoError(t, err) - err = os.Symlink(testBcacheBackingDevPath+"/bcache", testBcacheUuidPath+"/bdev0") + err = os.Symlink(testBcacheBackingDevPath+"/bcache", testBcacheUUIDPath+"/bdev0") require.NoError(t, err) - err = os.Symlink(testBcacheDevPath, testBcacheUuidPath+"/bdev0/dev") + err = os.Symlink(testBcacheDevPath, testBcacheUUIDPath+"/bdev0/dev") require.NoError(t, err) - err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755) + err = os.MkdirAll(testBcacheUUIDPath+"/bdev0/stats_total", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data", - []byte(dirty_data), 0644) + err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/dirty_data", + []byte(dirtyData), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed", + err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/bypassed", []byte(bypassed), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits", - []byte(cache_bypass_hits), 0644) + err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_hits", + []byte(cacheBypassHits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses", - []byte(cache_bypass_misses), 0644) + err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_misses", + []byte(cacheBypassMisses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio", - []byte(cache_hit_ratio), 0644) + err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hit_ratio", + []byte(cacheHitRatio), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits", - []byte(cache_hits), 0644) + err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hits", + []byte(cacheHits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions", - []byte(cache_miss_collisions), 0644) + err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_miss_collisions", + []byte(cacheMissCollisions), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses", - []byte(cache_misses), 0644) + err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_misses", + []byte(cacheMisses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads", - []byte(cache_readaheads), 0644) + err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_readaheads", + []byte(cacheReadaheads), 0644) require.NoError(t, err) fields := map[string]interface{}{ diff --git a/plugins/inputs/beanstalkd/beanstalkd.go b/plugins/inputs/beanstalkd/beanstalkd.go index 932edd301f910..fa6075589dabf 100644 --- a/plugins/inputs/beanstalkd/beanstalkd.go +++ b/plugins/inputs/beanstalkd/beanstalkd.go @@ -128,7 +128,7 @@ func (b *Beanstalkd) gatherServerStats(connection *textproto.Conn, acc telegraf. }, map[string]string{ "hostname": stats.Hostname, - "id": stats.Id, + "id": stats.ID, "server": b.Server, "version": stats.Version, }, @@ -169,13 +169,13 @@ func (b *Beanstalkd) gatherTubeStats(connection *textproto.Conn, tube string, ac } func runQuery(connection *textproto.Conn, cmd string, result interface{}) error { - requestId, err := connection.Cmd(cmd) + requestID, err := connection.Cmd(cmd) if err != nil { return err } - connection.StartResponse(requestId) - defer connection.EndResponse(requestId) + connection.StartResponse(requestID) + defer connection.EndResponse(requestID) status, err := connection.ReadLine() if err != nil { @@ -240,7 +240,7 @@ type statsResponse struct { CurrentWaiting int `yaml:"current-waiting"` CurrentWorkers int `yaml:"current-workers"` Hostname string `yaml:"hostname"` - Id string `yaml:"id"` + ID string `yaml:"id"` JobTimeouts int `yaml:"job-timeouts"` MaxJobSize int `yaml:"max-job-size"` Pid int `yaml:"pid"` diff --git a/plugins/inputs/beat/beat.go b/plugins/inputs/beat/beat.go index 017b4c27e340a..79769df9ddbbc 100644 --- a/plugins/inputs/beat/beat.go +++ b/plugins/inputs/beat/beat.go @@ -170,16 +170,16 @@ func (beat *Beat) Gather(accumulator telegraf.Accumulator) error { beatStats := &BeatStats{} beatInfo := &BeatInfo{} - infoUrl, err := url.Parse(beat.URL + suffixInfo) + infoURL, err := url.Parse(beat.URL + suffixInfo) if err != nil { return err } - statsUrl, err := url.Parse(beat.URL + suffixStats) + statsURL, err := url.Parse(beat.URL + suffixStats) if err != nil { return err } - err = beat.gatherJSONData(infoUrl.String(), beatInfo) + err = beat.gatherJSONData(infoURL.String(), beatInfo) if err != nil { return err } @@ -191,7 +191,7 @@ func (beat *Beat) Gather(accumulator telegraf.Accumulator) error { "beat_version": beatInfo.Version, } - err = beat.gatherJSONData(statsUrl.String(), beatStats) + err = beat.gatherJSONData(statsURL.String(), beatStats) if err != nil { return err } diff --git a/plugins/inputs/bind/bind.go b/plugins/inputs/bind/bind.go index 7247b23a4d6fa..dd7b3d128c9f0 100644 --- a/plugins/inputs/bind/bind.go +++ b/plugins/inputs/bind/bind.go @@ -65,7 +65,7 @@ func (b *Bind) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(b.gatherUrl(addr, acc)) + acc.AddError(b.gatherURL(addr, acc)) }(addr) } @@ -73,7 +73,7 @@ func (b *Bind) Gather(acc telegraf.Accumulator) error { return nil } -func (b *Bind) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { +func (b *Bind) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { switch addr.Path { case "": // BIND 9.6 - 9.8 diff --git a/plugins/inputs/bind/json_stats.go b/plugins/inputs/bind/json_stats.go index 906ab21d97a69..06c21008a5364 100644 --- a/plugins/inputs/bind/json_stats.go +++ b/plugins/inputs/bind/json_stats.go @@ -31,7 +31,7 @@ type jsonMemory struct { ContextSize int64 Lost int64 Contexts []struct { - Id string + ID string Name string Total int64 InUse int64 @@ -113,7 +113,7 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st // Detailed, per-context memory stats if b.GatherMemoryContexts { for _, c := range stats.Memory.Contexts { - tags := map[string]string{"url": urlTag, "id": c.Id, "name": c.Name, "source": host, "port": port} + tags := map[string]string{"url": urlTag, "id": c.ID, "name": c.Name, "source": host, "port": port} fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} acc.AddGauge("bind_memory_context", fields, tags) @@ -153,9 +153,9 @@ func (b *Bind) readStatsJSON(addr *url.URL, acc telegraf.Accumulator) error { // Progressively build up full jsonStats struct by parsing the individual HTTP responses for _, suffix := range [...]string{"/server", "/net", "/mem"} { - scrapeUrl := addr.String() + suffix + scrapeURL := addr.String() + suffix - resp, err := b.client.Get(scrapeUrl) + resp, err := b.client.Get(scrapeURL) if err != nil { return err } @@ -163,7 +163,7 @@ func (b *Bind) readStatsJSON(addr *url.URL, acc telegraf.Accumulator) error { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status) + return fmt.Errorf("%s returned HTTP status: %s", scrapeURL, resp.Status) } if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil { diff --git a/plugins/inputs/bind/xml_stats_v2.go b/plugins/inputs/bind/xml_stats_v2.go index ce7116a199d9e..5e0d976afa8fc 100644 --- a/plugins/inputs/bind/xml_stats_v2.go +++ b/plugins/inputs/bind/xml_stats_v2.go @@ -42,7 +42,7 @@ type v2Statistics struct { Memory struct { Contexts []struct { // Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater - Id string `xml:"id"` + ID string `xml:"id"` Name string `xml:"name"` Total int64 `xml:"total"` InUse int64 `xml:"inuse"` @@ -142,7 +142,7 @@ func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error { // Detailed, per-context memory stats if b.GatherMemoryContexts { for _, c := range stats.Statistics.Memory.Contexts { - tags := map[string]string{"url": addr.Host, "id": c.Id, "name": c.Name, "source": host, "port": port} + tags := map[string]string{"url": addr.Host, "id": c.ID, "name": c.Name, "source": host, "port": port} fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} acc.AddGauge("bind_memory_context", fields, tags) diff --git a/plugins/inputs/bind/xml_stats_v3.go b/plugins/inputs/bind/xml_stats_v3.go index 7d36e000b9d95..448360caf28b0 100644 --- a/plugins/inputs/bind/xml_stats_v3.go +++ b/plugins/inputs/bind/xml_stats_v3.go @@ -25,7 +25,7 @@ type v3Stats struct { type v3Memory struct { Contexts []struct { // Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater - Id string `xml:"id"` + ID string `xml:"id"` Name string `xml:"name"` Total int64 `xml:"total"` InUse int64 `xml:"inuse"` @@ -98,7 +98,7 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s // Detailed, per-context memory stats if b.GatherMemoryContexts { for _, c := range stats.Memory.Contexts { - tags := map[string]string{"url": hostPort, "source": host, "port": port, "id": c.Id, "name": c.Name} + tags := map[string]string{"url": hostPort, "source": host, "port": port, "id": c.ID, "name": c.Name} fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} acc.AddGauge("bind_memory_context", fields, tags) @@ -138,9 +138,9 @@ func (b *Bind) readStatsXMLv3(addr *url.URL, acc telegraf.Accumulator) error { // Progressively build up full v3Stats struct by parsing the individual HTTP responses for _, suffix := range [...]string{"/server", "/net", "/mem"} { - scrapeUrl := addr.String() + suffix + scrapeURL := addr.String() + suffix - resp, err := b.client.Get(scrapeUrl) + resp, err := b.client.Get(scrapeURL) if err != nil { return err } @@ -148,7 +148,7 @@ func (b *Bind) readStatsXMLv3(addr *url.URL, acc telegraf.Accumulator) error { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status) + return fmt.Errorf("%s returned HTTP status: %s", scrapeURL, resp.Status) } if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index 7858d3f4bf56e..ea7001fe334a0 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -197,9 +197,9 @@ func (c *Cassandra) Description() string { return "Read Cassandra metrics through Jolokia" } -func (c *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) { +func (c *Cassandra) getAttr(requestURL *url.URL) (map[string]interface{}, error) { // Create + send request - req, err := http.NewRequest("GET", requestUrl.String(), nil) + req, err := http.NewRequest("GET", requestURL.String(), nil) if err != nil { return nil, err } @@ -213,7 +213,7 @@ func (c *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) // Process response if resp.StatusCode != http.StatusOK { err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", - requestUrl, + requestURL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, @@ -292,24 +292,24 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error { } // Prepare URL - requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" + + requestURL, err := url.Parse("http://" + serverTokens["host"] + ":" + serverTokens["port"] + context + metric) if err != nil { acc.AddError(err) continue } if serverTokens["user"] != "" && serverTokens["passwd"] != "" { - requestUrl.User = url.UserPassword(serverTokens["user"], + requestURL.User = url.UserPassword(serverTokens["user"], serverTokens["passwd"]) } - out, err := c.getAttr(requestUrl) + out, err := c.getAttr(requestURL) if err != nil { acc.AddError(err) continue } if out["status"] != 200.0 { - acc.AddError(fmt.Errorf("provided URL returned with status %v - %s", out["status"], requestUrl)) + acc.AddError(fmt.Errorf("provided URL returned with status %v - %s", out["status"], requestURL)) continue } m.addTagsFields(out) diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index c875de8dfaeba..5a820a2382ba6 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -120,7 +120,7 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error { for tag, metrics := range data { acc.AddFields(measurement, map[string]interface{}(metrics), - map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag}) + map[string]string{"type": s.sockType, "id": s.sockID, "collection": tag}) } } return nil @@ -226,13 +226,13 @@ var findSockets = func(c *Ceph) ([]*socket, error) { if sockType == typeOsd || sockType == typeMon || sockType == typeMds || sockType == typeRgw { path := filepath.Join(c.SocketDir, f) - sockets = append(sockets, &socket{parseSockId(f, sockPrefix, c.SocketSuffix), sockType, path}) + sockets = append(sockets, &socket{parseSockID(f, sockPrefix, c.SocketSuffix), sockType, path}) } } return sockets, nil } -func parseSockId(fname, prefix, suffix string) string { +func parseSockID(fname, prefix, suffix string) string { s := fname s = strings.TrimPrefix(s, prefix) s = strings.TrimSuffix(s, suffix) @@ -241,7 +241,7 @@ func parseSockId(fname, prefix, suffix string) string { } type socket struct { - sockId string + sockID string sockType string socket string } diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index f57cda4679ce4..f6cf8e8a946fc 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -24,7 +24,7 @@ type expectedResult struct { } func TestParseSockId(t *testing.T) { - s := parseSockId(sockFile(osdPrefix, 1), osdPrefix, sockSuffix) + s := parseSockID(sockFile(osdPrefix, 1), osdPrefix, sockSuffix) assert.Equal(t, s, "1") } @@ -170,7 +170,7 @@ func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*soc if s.socket == expected { found = true assert.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s) - assert.Equal(t, s.sockId, strconv.Itoa(i)) + assert.Equal(t, s.sockID, strconv.Itoa(i)) } } assert.True(t, found, "Did not find socket: %s", expected) diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/pubsub.go index b418274f3b34a..0ac40a2cf551d 100644 --- a/plugins/inputs/cloud_pubsub/pubsub.go +++ b/plugins/inputs/cloud_pubsub/pubsub.go @@ -269,12 +269,12 @@ func (ps *PubSub) getPubSubClient() (*pubsub.Client, error) { return client, nil } -func (ps *PubSub) getGCPSubscription(subId string) (subscription, error) { +func (ps *PubSub) getGCPSubscription(subID string) (subscription, error) { client, err := ps.getPubSubClient() if err != nil { return nil, err } - s := client.Subscription(subId) + s := client.Subscription(subID) s.ReceiveSettings = pubsub.ReceiveSettings{ NumGoroutines: ps.MaxReceiverGoRoutines, MaxExtension: ps.MaxExtension.Duration, diff --git a/plugins/inputs/cloud_pubsub/pubsub_test.go b/plugins/inputs/cloud_pubsub/pubsub_test.go index 2045cf4ccbc89..0adc024872df7 100644 --- a/plugins/inputs/cloud_pubsub/pubsub_test.go +++ b/plugins/inputs/cloud_pubsub/pubsub_test.go @@ -16,12 +16,12 @@ const ( // Test ingesting InfluxDB-format PubSub message func TestRunParse(t *testing.T) { - subId := "sub-run-parse" + subID := "sub-run-parse" testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } sub.receiver = testMessagesReceive(sub) @@ -31,7 +31,7 @@ func TestRunParse(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, } @@ -60,12 +60,12 @@ func TestRunParse(t *testing.T) { // Test ingesting InfluxDB-format PubSub message func TestRunBase64(t *testing.T) { - subId := "sub-run-base64" + subID := "sub-run-base64" testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } sub.receiver = testMessagesReceive(sub) @@ -75,7 +75,7 @@ func TestRunBase64(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, Base64Data: true, } @@ -104,12 +104,12 @@ func TestRunBase64(t *testing.T) { } func TestRunInvalidMessages(t *testing.T) { - subId := "sub-invalid-messages" + subID := "sub-invalid-messages" testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } sub.receiver = testMessagesReceive(sub) @@ -119,7 +119,7 @@ func TestRunInvalidMessages(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, } @@ -149,14 +149,14 @@ func TestRunInvalidMessages(t *testing.T) { } func TestRunOverlongMessages(t *testing.T) { - subId := "sub-message-too-long" + subID := "sub-message-too-long" acc := &testutil.Accumulator{} testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } sub.receiver = testMessagesReceive(sub) @@ -166,7 +166,7 @@ func TestRunOverlongMessages(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, // Add MaxMessageLen Param MaxMessageLen: 1, @@ -196,14 +196,14 @@ func TestRunOverlongMessages(t *testing.T) { } func TestRunErrorInSubscriber(t *testing.T) { - subId := "sub-unexpected-error" + subID := "sub-unexpected-error" acc := &testutil.Accumulator{} testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } fakeErrStr := "a fake error" @@ -214,7 +214,7 @@ func TestRunErrorInSubscriber(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, RetryReceiveDelaySeconds: 1, } diff --git a/plugins/inputs/cpu/cpu.go b/plugins/inputs/cpu/cpu.go index b1de9a5a06f85..3fcdb3db4136e 100644 --- a/plugins/inputs/cpu/cpu.go +++ b/plugins/inputs/cpu/cpu.go @@ -59,8 +59,8 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error { "cpu": cts.CPU, } - total := totalCpuTime(cts) - active := activeCpuTime(cts) + total := totalCPUTime(cts) + active := activeCPUTime(cts) if c.CollectCPUTime { // Add cpu time metrics @@ -77,7 +77,7 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error { "time_guest_nice": cts.GuestNice, } if c.ReportActive { - fieldsC["time_active"] = activeCpuTime(cts) + fieldsC["time_active"] = activeCPUTime(cts) } acc.AddCounter("cpu", fieldsC, tags, now) } @@ -92,8 +92,8 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error { if !ok { continue } - lastTotal := totalCpuTime(lastCts) - lastActive := activeCpuTime(lastCts) + lastTotal := totalCPUTime(lastCts) + lastActive := activeCPUTime(lastCts) totalDelta := total - lastTotal if totalDelta < 0 { @@ -131,14 +131,13 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error { return err } -func totalCpuTime(t cpu.TimesStat) float64 { - total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + - t.Idle +func totalCPUTime(t cpu.TimesStat) float64 { + total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + t.Idle return total } -func activeCpuTime(t cpu.TimesStat) float64 { - active := totalCpuTime(t) - t.Idle +func activeCPUTime(t cpu.TimesStat) float64 { + active := totalCPUTime(t) - t.Idle return active } diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index c5657277073c2..4c721a0964776 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -21,7 +21,7 @@ const ( Error = 2 ) -type DnsQuery struct { +type DNSQuery struct { // Domains or subdomains to query Domains []string @@ -62,14 +62,14 @@ var sampleConfig = ` # timeout = 2 ` -func (d *DnsQuery) SampleConfig() string { +func (d *DNSQuery) SampleConfig() string { return sampleConfig } -func (d *DnsQuery) Description() string { +func (d *DNSQuery) Description() string { return "Query given DNS server and gives statistics" } -func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { +func (d *DNSQuery) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup d.setDefaultValues() @@ -84,7 +84,7 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { "record_type": d.RecordType, } - dnsQueryTime, rcode, err := d.getDnsQueryTime(domain, server) + dnsQueryTime, rcode, err := d.getDNSQueryTime(domain, server) if rcode >= 0 { tags["rcode"] = dns.RcodeToString[rcode] fields["rcode_value"] = rcode @@ -110,7 +110,7 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { return nil } -func (d *DnsQuery) setDefaultValues() { +func (d *DNSQuery) setDefaultValues() { if d.Network == "" { d.Network = "udp" } @@ -133,7 +133,7 @@ func (d *DnsQuery) setDefaultValues() { } } -func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int, error) { +func (d *DNSQuery) getDNSQueryTime(domain string, server string) (float64, int, error) { dnsQueryTime := float64(0) c := new(dns.Client) @@ -159,7 +159,7 @@ func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int, return dnsQueryTime, r.Rcode, nil } -func (d *DnsQuery) parseRecordType() (uint16, error) { +func (d *DNSQuery) parseRecordType() (uint16, error) { var recordType uint16 var err error @@ -210,6 +210,6 @@ func setResult(result ResultType, fields map[string]interface{}, tags map[string func init() { inputs.Add("dns_query", func() telegraf.Input { - return &DnsQuery{} + return &DNSQuery{} }) } diff --git a/plugins/inputs/dns_query/dns_query_test.go b/plugins/inputs/dns_query/dns_query_test.go index 3fa2accbc9ec3..c1dd7abf06121 100644 --- a/plugins/inputs/dns_query/dns_query_test.go +++ b/plugins/inputs/dns_query/dns_query_test.go @@ -18,7 +18,7 @@ func TestGathering(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: domains, } @@ -37,7 +37,7 @@ func TestGatheringMxRecord(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: domains, } @@ -57,7 +57,7 @@ func TestGatheringRootDomain(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: []string{"."}, RecordType: "MX", @@ -89,7 +89,7 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: domains, } @@ -120,7 +120,7 @@ func TestGatheringTimeout(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: domains, } @@ -141,7 +141,7 @@ func TestGatheringTimeout(t *testing.T) { } func TestSettingDefaultValues(t *testing.T) { - dnsConfig := DnsQuery{} + dnsConfig := DNSQuery{} dnsConfig.setDefaultValues() @@ -150,7 +150,7 @@ func TestSettingDefaultValues(t *testing.T) { assert.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53") assert.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2") - dnsConfig = DnsQuery{Domains: []string{"."}} + dnsConfig = DNSQuery{Domains: []string{"."}} dnsConfig.setDefaultValues() @@ -158,7 +158,7 @@ func TestSettingDefaultValues(t *testing.T) { } func TestRecordTypeParser(t *testing.T) { - var dnsConfig = DnsQuery{} + var dnsConfig = DNSQuery{} var recordType uint16 dnsConfig.RecordType = "A" @@ -207,7 +207,7 @@ func TestRecordTypeParser(t *testing.T) { } func TestRecordTypeParserError(t *testing.T) { - var dnsConfig = DnsQuery{} + var dnsConfig = DNSQuery{} var err error dnsConfig.RecordType = "nil" diff --git a/plugins/inputs/dovecot/dovecot_test.go b/plugins/inputs/dovecot/dovecot_test.go index a9c799a274ecb..86efdbb4f8e1d 100644 --- a/plugins/inputs/dovecot/dovecot_test.go +++ b/plugins/inputs/dovecot/dovecot_test.go @@ -63,7 +63,7 @@ func TestDovecotIntegration(t *testing.T) { // Test type=ip tags = map[string]string{"server": "dovecot.test", "type": "ip", "ip": "192.168.0.100"} - buf = bytes.NewBufferString(sampleIp) + buf = bytes.NewBufferString(sampleIP) err = gatherStats(buf, &acc, "dovecot.test", "ip") require.NoError(t, err) @@ -112,7 +112,7 @@ const sampleGlobal = `reset_timestamp last_update num_logins num_cmds num_connec const sampleDomain = `domain reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits domain.test 1453969886 1454603963.039864 7503897 52595715 1204 100831175.372000 83849071.112000 4326001931528183.495762 763950011 1112443 4120386897 3685239306 41679480946688 1819070669176832 2368906465 2957928122981169 3545389615 1666822498251286 24396105 302845 20155768 669946617705 1557255080` -const sampleIp = `ip reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits +const sampleIP = `ip reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits 192.168.0.100 1453969886 1454603963.039864 7503897 52595715 1204 100831175.372000 83849071.112000 4326001931528183.495762 763950011 1112443 4120386897 3685239306 41679480946688 1819070669176832 2368906465 2957928122981169 3545389615 1666822498251286 24396105 302845 20155768 669946617705 1557255080` const sampleUser = `user reset_timestamp last_update num_logins num_cmds user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index 38503a7c069d8..5e614f61dd9ec 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -21,7 +21,7 @@ import ( // Midnight 9/22/2015 const baseTimeSeconds = 1442905200 -const validJson = ` +const validJSON = ` { "status": "green", "num_processes": 82, @@ -35,7 +35,7 @@ const validJson = ` "users": [0, 1, 2, 3] }` -const malformedJson = ` +const malformedJSON = ` { "status": "green", ` @@ -102,7 +102,7 @@ func TestExec(t *testing.T) { }) e := &Exec{ Log: testutil.Logger{}, - runner: newRunnerMock([]byte(validJson), nil, nil), + runner: newRunnerMock([]byte(validJSON), nil, nil), Commands: []string{"testcommand arg1"}, parser: parser, } @@ -132,7 +132,7 @@ func TestExecMalformed(t *testing.T) { }) e := &Exec{ Log: testutil.Logger{}, - runner: newRunnerMock([]byte(malformedJson), nil, nil), + runner: newRunnerMock([]byte(malformedJSON), nil, nil), Commands: []string{"badcommand arg1"}, parser: parser, } diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index be5f8fc60aaa4..bc286631fcd7f 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -178,16 +178,16 @@ func (h *GrayLog) gatherServer( if err := json.Unmarshal([]byte(resp), &dat); err != nil { return err } - for _, m_item := range dat.Metrics { + for _, mItem := range dat.Metrics { fields := make(map[string]interface{}) tags := map[string]string{ "server": host, "port": port, - "name": m_item.Name, - "type": m_item.Type, + "name": mItem.Name, + "type": mItem.Type, } - h.flatten(m_item.Fields, fields, "") - acc.AddFields(m_item.FullName, fields, tags) + h.flatten(mItem.Fields, fields, "") + acc.AddFields(mItem.FullName, fields, tags) } return nil } @@ -241,12 +241,12 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { if strings.Contains(requestURL.String(), "multiple") { m := &Messagebody{Metrics: h.Metrics} - http_body, err := json.Marshal(m) + httpBody, err := json.Marshal(m) if err != nil { return "", -1, fmt.Errorf("Invalid list of Metrics %s", h.Metrics) } method = "POST" - content = bytes.NewBuffer(http_body) + content = bytes.NewBuffer(httpBody) } req, err := http.NewRequest(method, requestURL.String(), content) if err != nil { diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 01ce81401a745..3be16a9d40dd1 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -146,11 +146,11 @@ func (h *HTTPResponse) SampleConfig() string { var ErrRedirectAttempted = errors.New("redirect") // Set the proxy. A configured proxy overwrites the system wide proxy. -func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) { - if http_proxy == "" { +func getProxyFunc(httpProxy string) func(*http.Request) (*url.URL, error) { + if httpProxy == "" { return http.ProxyFromEnvironment } - proxyURL, err := url.Parse(http_proxy) + proxyURL, err := url.Parse(httpProxy) if err != nil { return func(_ *http.Request) (*url.URL, error) { return nil, errors.New("bad proxy: " + err.Error()) @@ -161,9 +161,9 @@ func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) { } } -// createHttpClient creates an http client which will timeout at the specified +// createHTTPClient creates an http client which will timeout at the specified // timeout period and can follow redirects if specified -func (h *HTTPResponse) createHttpClient() (*http.Client, error) { +func (h *HTTPResponse) createHTTPClient() (*http.Client, error) { tlsCfg, err := h.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -217,8 +217,8 @@ func localAddress(interfaceName string) (net.Addr, error) { return nil, fmt.Errorf("cannot create local address for interface %q", interfaceName) } -func setResult(result_string string, fields map[string]interface{}, tags map[string]string) { - result_codes := map[string]int{ +func setResult(resultString string, fields map[string]interface{}, tags map[string]string) { + resultCodes := map[string]int{ "success": 0, "response_string_mismatch": 1, "body_read_error": 2, @@ -228,9 +228,9 @@ func setResult(result_string string, fields map[string]interface{}, tags map[str "response_status_code_mismatch": 6, } - tags["result"] = result_string - fields["result_type"] = result_string - fields["result_code"] = result_codes[result_string] + tags["result"] = resultString + fields["result_type"] = resultString + fields["result_code"] = resultCodes[resultString] } func setError(err error, fields map[string]interface{}, tags map[string]string) error { @@ -239,8 +239,8 @@ func setError(err error, fields map[string]interface{}, tags map[string]string) return timeoutError } - urlErr, isUrlErr := err.(*url.Error) - if !isUrlErr { + urlErr, isURLErr := err.(*url.Error) + if !isURLErr { return nil } @@ -299,7 +299,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] // Start Timer start := time.Now() resp, err := h.client.Do(request) - response_time := time.Since(start).Seconds() + responseTime := time.Since(start).Seconds() // If an error in returned, it means we are dealing with a network error, as // HTTP error codes do not generate errors in the net/http library @@ -321,7 +321,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] } if _, ok := fields["response_time"]; !ok { - fields["response_time"] = response_time + fields["response_time"] = responseTime } // This function closes the response body, as @@ -396,8 +396,8 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] } // Set result in case of a body read error -func (h *HTTPResponse) setBodyReadError(error_msg string, bodyBytes []byte, fields map[string]interface{}, tags map[string]string) { - h.Log.Debugf(error_msg) +func (h *HTTPResponse) setBodyReadError(errorMsg string, bodyBytes []byte, fields map[string]interface{}, tags map[string]string) { + h.Log.Debugf(errorMsg) setResult("body_read_error", fields, tags) fields["content_length"] = len(bodyBytes) if h.ResponseStringMatch != "" { @@ -435,7 +435,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { } if h.client == nil { - client, err := h.createHttpClient() + client, err := h.createHTTPClient() if err != nil { return err } diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index a5f5e47aad68e..502a1473b9231 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -21,8 +21,8 @@ var ( utf8BOM = []byte("\xef\xbb\xbf") ) -// HttpJson struct -type HttpJson struct { +// HTTPJSON struct +type HTTPJSON struct { Name string Servers []string Method string @@ -113,16 +113,16 @@ var sampleConfig = ` # apiVersion = "v1" ` -func (h *HttpJson) SampleConfig() string { +func (h *HTTPJSON) SampleConfig() string { return sampleConfig } -func (h *HttpJson) Description() string { +func (h *HTTPJSON) Description() string { return "Read flattened metrics from one or more JSON HTTP endpoints" } // Gathers data for all servers. -func (h *HttpJson) Gather(acc telegraf.Accumulator) error { +func (h *HTTPJSON) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup if h.client.HTTPClient() == nil { @@ -162,7 +162,7 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error { // // Returns: // error: Any error that may have occurred -func (h *HttpJson) gatherServer( +func (h *HTTPJSON) gatherServer( acc telegraf.Accumulator, serverURL string, ) error { @@ -171,11 +171,11 @@ func (h *HttpJson) gatherServer( return err } - var msrmnt_name string + var msrmntName string if h.Name == "" { - msrmnt_name = "httpjson" + msrmntName = "httpjson" } else { - msrmnt_name = "httpjson_" + h.Name + msrmntName = "httpjson_" + h.Name } tags := map[string]string{ "server": serverURL, @@ -183,7 +183,7 @@ func (h *HttpJson) gatherServer( parser, err := parsers.NewParser(&parsers.Config{ DataFormat: "json", - MetricName: msrmnt_name, + MetricName: msrmntName, TagKeys: h.TagKeys, DefaultTags: tags, }) @@ -207,7 +207,7 @@ func (h *HttpJson) gatherServer( return nil } -// Sends an HTTP request to the server using the HttpJson object's HTTPClient. +// Sends an HTTP request to the server using the HTTPJSON object's HTTPClient. // This request can be either a GET or a POST. // Parameters: // serverURL: endpoint to send request to @@ -215,7 +215,7 @@ func (h *HttpJson) gatherServer( // Returns: // string: body of the response // error : Any error that may have occurred -func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { +func (h *HTTPJSON) sendRequest(serverURL string) (string, float64, error) { // Prepare URL requestURL, err := url.Parse(serverURL) if err != nil { @@ -285,7 +285,7 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { func init() { inputs.Add("httpjson", func() telegraf.Input { - return &HttpJson{ + return &HTTPJSON{ client: &RealHTTPClient{}, ResponseTimeout: internal.Duration{ Duration: 5 * time.Second, diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 90975919959e8..9e3e95aeaa71d 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -154,15 +154,15 @@ func (c *mockHTTPClient) HTTPClient() *http.Client { return nil } -// Generates a pointer to an HttpJson object that uses a mock HTTP client. +// Generates a pointer to an HTTPJSON object that uses a mock HTTP client. // Parameters: // response : Body of the response that the mock HTTP client should return // statusCode: HTTP status code the mock HTTP client should return // // Returns: -// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client -func genMockHttpJson(response string, statusCode int) []*HttpJson { - return []*HttpJson{ +// *HTTPJSON: Pointer to an HTTPJSON object that uses the generated mock HTTP client +func genMockHTTPJSON(response string, statusCode int) []*HTTPJSON { + return []*HTTPJSON{ { client: &mockHTTPClient{responseBody: response, statusCode: statusCode}, Servers: []string{ @@ -206,7 +206,7 @@ func genMockHttpJson(response string, statusCode int) []*HttpJson { // Test that the proper values are ignored or collected func TestHttpJson200(t *testing.T) { - httpjson := genMockHttpJson(validJSON, 200) + httpjson := genMockHTTPJSON(validJSON, 200) for _, service := range httpjson { var acc testutil.Accumulator @@ -237,7 +237,7 @@ func TestHttpJsonGET_URL(t *testing.T) { })) defer ts.Close() - a := HttpJson{ + a := HTTPJSON{ Servers: []string{ts.URL + "?api_key=mykey"}, Name: "", Method: "GET", @@ -309,7 +309,7 @@ func TestHttpJsonGET(t *testing.T) { })) defer ts.Close() - a := HttpJson{ + a := HTTPJSON{ Servers: []string{ts.URL}, Name: "", Method: "GET", @@ -383,7 +383,7 @@ func TestHttpJsonPOST(t *testing.T) { })) defer ts.Close() - a := HttpJson{ + a := HTTPJSON{ Servers: []string{ts.URL}, Name: "", Method: "POST", @@ -445,7 +445,7 @@ func TestHttpJsonPOST(t *testing.T) { // Test response to HTTP 500 func TestHttpJson500(t *testing.T) { - httpjson := genMockHttpJson(validJSON, 500) + httpjson := genMockHTTPJSON(validJSON, 500) var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) @@ -456,7 +456,7 @@ func TestHttpJson500(t *testing.T) { // Test response to HTTP 405 func TestHttpJsonBadMethod(t *testing.T) { - httpjson := genMockHttpJson(validJSON, 200) + httpjson := genMockHTTPJSON(validJSON, 200) httpjson[0].Method = "NOT_A_REAL_METHOD" var acc testutil.Accumulator @@ -468,7 +468,7 @@ func TestHttpJsonBadMethod(t *testing.T) { // Test response to malformed JSON func TestHttpJsonBadJson(t *testing.T) { - httpjson := genMockHttpJson(invalidJSON, 200) + httpjson := genMockHTTPJSON(invalidJSON, 200) var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) @@ -479,7 +479,7 @@ func TestHttpJsonBadJson(t *testing.T) { // Test response to empty string as response object func TestHttpJsonEmptyResponse(t *testing.T) { - httpjson := genMockHttpJson(empty, 200) + httpjson := genMockHTTPJSON(empty, 200) var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) @@ -488,7 +488,7 @@ func TestHttpJsonEmptyResponse(t *testing.T) { // Test that the proper values are ignored or collected func TestHttpJson200Tags(t *testing.T) { - httpjson := genMockHttpJson(validJSONTags, 200) + httpjson := genMockHTTPJSON(validJSONTags, 200) for _, service := range httpjson { if service.Name == "other_webapp" { @@ -526,7 +526,7 @@ const validJSONArrayTags = ` // Test that array data is collected correctly func TestHttpJsonArray200Tags(t *testing.T) { - httpjson := genMockHttpJson(validJSONArrayTags, 200) + httpjson := genMockHTTPJSON(validJSONArrayTags, 200) for _, service := range httpjson { if service.Name == "other_webapp" { @@ -563,7 +563,7 @@ var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]") // TestHttpJsonBOM tests that UTF-8 JSON with a BOM can be parsed func TestHttpJsonBOM(t *testing.T) { - httpjson := genMockHttpJson(string(jsonBOM), 200) + httpjson := genMockHTTPJSON(string(jsonBOM), 200) for _, service := range httpjson { if service.Name == "other_webapp" { diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go index 5ec0bb43db319..3392300f9a44a 100644 --- a/plugins/inputs/icinga2/icinga2.go +++ b/plugins/inputs/icinga2/icinga2.go @@ -115,7 +115,7 @@ func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) { } } -func (i *Icinga2) createHttpClient() (*http.Client, error) { +func (i *Icinga2) createHTTPClient() (*http.Client, error) { tlsCfg, err := i.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -137,22 +137,22 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error { } if i.client == nil { - client, err := i.createHttpClient() + client, err := i.createHTTPClient() if err != nil { return err } i.client = client } - requestUrl := "%s/v1/objects/%s?attrs=name&attrs=display_name&attrs=state&attrs=check_command" + requestURL := "%s/v1/objects/%s?attrs=name&attrs=display_name&attrs=state&attrs=check_command" // Note: attrs=host_name is only valid for 'services' requests, using check.Attrs.HostName for the host // 'hosts' requests will need to use attrs=name only, using check.Attrs.Name for the host if i.ObjectType == "services" { - requestUrl += "&attrs=host_name" + requestURL += "&attrs=host_name" } - url := fmt.Sprintf(requestUrl, i.Server, i.ObjectType) + url := fmt.Sprintf(requestURL, i.Server, i.ObjectType) req, err := http.NewRequest("GET", url, nil) if err != nil { diff --git a/plugins/inputs/infiniband/infiniband_test.go b/plugins/inputs/infiniband/infiniband_test.go index 6c4bb24587f4a..57e8ad4da85c9 100644 --- a/plugins/inputs/infiniband/infiniband_test.go +++ b/plugins/inputs/infiniband/infiniband_test.go @@ -38,7 +38,7 @@ func TestInfiniband(t *testing.T) { "port": "1", } - sample_rdmastats_entries := []rdmamap.RdmaStatEntry{ + sampleRdmastatsEntries := []rdmamap.RdmaStatEntry{ { Name: "excessive_buffer_overrun_errors", Value: uint64(0), @@ -127,7 +127,7 @@ func TestInfiniband(t *testing.T) { var acc testutil.Accumulator - addStats("m1x5_0", "1", sample_rdmastats_entries, &acc) + addStats("m1x5_0", "1", sampleRdmastatsEntries, &acc) acc.AssertContainsTaggedFields(t, "infiniband", fields, tags) diff --git a/plugins/inputs/intel_powerstat/unit_converter.go b/plugins/inputs/intel_powerstat/unit_converter.go index 4c3cba6b1b83a..43dc79e6efc4a 100644 --- a/plugins/inputs/intel_powerstat/unit_converter.go +++ b/plugins/inputs/intel_powerstat/unit_converter.go @@ -23,8 +23,8 @@ func convertMicroWattToWatt(mW float64) float64 { return mW * microWattToWatt } -func convertKiloHertzToMegaHertz(kHz float64) float64 { - return kHz * kiloHertzToMegaHertz +func convertKiloHertzToMegaHertz(kiloHertz float64) float64 { + return kiloHertz * kiloHertzToMegaHertz } func convertNanoSecondsToSeconds(ns int64) float64 { diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go index 39b3020ddbd39..6d68818a8f055 100644 --- a/plugins/inputs/interrupts/interrupts.go +++ b/plugins/inputs/interrupts/interrupts.go @@ -13,7 +13,7 @@ import ( ) type Interrupts struct { - CpuAsTag bool `toml:"cpu_as_tag"` + CPUAsTag bool `toml:"cpu_as_tag"` } type IRQ struct { @@ -121,7 +121,7 @@ func (s *Interrupts) Gather(acc telegraf.Accumulator) error { acc.AddError(fmt.Errorf("Parsing %s: %s", file, err)) continue } - reportMetrics(measurement, irqs, acc, s.CpuAsTag) + reportMetrics(measurement, irqs, acc, s.CPUAsTag) } return nil } diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go index 63ff765b678dd..3ed0cd394cfdc 100644 --- a/plugins/inputs/interrupts/interrupts_test.go +++ b/plugins/inputs/interrupts/interrupts_test.go @@ -13,13 +13,13 @@ import ( // Setup and helper functions // ===================================================================================== -func expectCpuAsTags(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { +func expectCPUAsTags(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { for idx, value := range irq.Cpus { m.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"count": value}, map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device, "cpu": fmt.Sprintf("cpu%d", idx)}) } } -func expectCpuAsFields(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { +func expectCPUAsFields(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { fields := map[string]interface{}{} total := int64(0) for idx, count := range irq.Cpus { @@ -70,7 +70,7 @@ func TestCpuAsTagsSoftIrqs(t *testing.T) { reportMetrics("soft_interrupts", irqs, acc, true) for _, irq := range softIrqsExpectedArgs { - expectCpuAsTags(acc, t, "soft_interrupts", irq) + expectCPUAsTags(acc, t, "soft_interrupts", irq) } } @@ -79,7 +79,7 @@ func TestCpuAsFieldsSoftIrqs(t *testing.T) { reportMetrics("soft_interrupts", irqs, acc, false) for _, irq := range softIrqsExpectedArgs { - expectCpuAsFields(acc, t, "soft_interrupts", irq) + expectCPUAsFields(acc, t, "soft_interrupts", irq) } } @@ -142,7 +142,7 @@ func TestCpuAsTagsHwIrqs(t *testing.T) { reportMetrics("interrupts", irqs, acc, true) for _, irq := range hwIrqsExpectedArgs { - expectCpuAsTags(acc, t, "interrupts", irq) + expectCPUAsTags(acc, t, "interrupts", irq) } } @@ -151,6 +151,6 @@ func TestCpuAsFieldsHwIrqs(t *testing.T) { reportMetrics("interrupts", irqs, acc, false) for _, irq := range hwIrqsExpectedArgs { - expectCpuAsFields(acc, t, "interrupts", irq) + expectCPUAsFields(acc, t, "interrupts", irq) } } diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index eb344f539e695..89ade652fbf55 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -20,11 +20,11 @@ import ( ) var ( - execCommand = exec.Command // execCommand is used to mock commands in tests. - re_v1_parse_line = regexp.MustCompile(`^(?P[^|]*)\|(?P[^|]*)\|(?P.*)`) - re_v2_parse_line = regexp.MustCompile(`^(?P[^|]*)\|[^|]+\|(?P[^|]*)\|(?P[^|]*)\|(?:(?P[^|]+))?`) - re_v2_parse_description = regexp.MustCompile(`^(?P-?[0-9.]+)\s(?P.*)|(?P.+)|^$`) - re_v2_parse_unit = regexp.MustCompile(`^(?P[^,]+)(?:,\s*(?P.*))?`) + execCommand = exec.Command // execCommand is used to mock commands in tests. + reV1ParseLine = regexp.MustCompile(`^(?P[^|]*)\|(?P[^|]*)\|(?P.*)`) + reV2ParseLine = regexp.MustCompile(`^(?P[^|]*)\|[^|]+\|(?P[^|]*)\|(?P[^|]*)\|(?:(?P[^|]+))?`) + reV2ParseDescription = regexp.MustCompile(`^(?P-?[0-9.]+)\s(?P.*)|(?P.+)|^$`) + reV2ParseUnit = regexp.MustCompile(`^(?P[^,]+)(?:,\s*(?P.*))?`) ) // Ipmi stores the configuration values for the ipmi_sensor input plugin @@ -176,12 +176,12 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { return parseV1(acc, hostname, out, timestamp) } -func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error { +func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { // each line will look something like // Planar VBAT | 3.05 Volts | ok scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) for scanner.Scan() { - ipmiFields := extractFieldsFromRegex(re_v1_parse_line, scanner.Text()) + ipmiFields := extractFieldsFromRegex(reV1ParseLine, scanner.Text()) if len(ipmiFields) != 3 { continue } @@ -227,20 +227,20 @@ func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ fields["value"] = 0.0 } - acc.AddFields("ipmi_sensor", fields, tags, measured_at) + acc.AddFields("ipmi_sensor", fields, tags, measuredAt) } return scanner.Err() } -func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error { +func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { // each line will look something like // CMOS Battery | 65h | ok | 7.1 | // Temp | 0Eh | ok | 3.1 | 55 degrees C // Drive 0 | A0h | ok | 7.1 | Drive Present scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) for scanner.Scan() { - ipmiFields := extractFieldsFromRegex(re_v2_parse_line, scanner.Text()) + ipmiFields := extractFieldsFromRegex(reV2ParseLine, scanner.Text()) if len(ipmiFields) < 3 || len(ipmiFields) > 4 { continue } @@ -256,7 +256,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ tags["entity_id"] = transform(ipmiFields["entity_id"]) tags["status_code"] = trim(ipmiFields["status_code"]) fields := make(map[string]interface{}) - descriptionResults := extractFieldsFromRegex(re_v2_parse_description, trim(ipmiFields["description"])) + descriptionResults := extractFieldsFromRegex(reV2ParseDescription, trim(ipmiFields["description"])) // This is an analog value with a unit if descriptionResults["analogValue"] != "" && len(descriptionResults["analogUnit"]) >= 1 { var err error @@ -265,7 +265,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ continue } // Some implementations add an extra status to their analog units - unitResults := extractFieldsFromRegex(re_v2_parse_unit, descriptionResults["analogUnit"]) + unitResults := extractFieldsFromRegex(reV2ParseUnit, descriptionResults["analogUnit"]) tags["unit"] = transform(unitResults["realAnalogUnit"]) if unitResults["statusDesc"] != "" { tags["status_desc"] = transform(unitResults["statusDesc"]) @@ -281,7 +281,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ } } - acc.AddFields("ipmi_sensor", fields, tags, measured_at) + acc.AddFields("ipmi_sensor", fields, tags, measuredAt) } return scanner.Err() diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index 81139ef40ee94..55a1da6b124e2 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -611,8 +611,8 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected for i := range tests { t.Logf("Checking v%d data...", i+1) - extractFieldsFromRegex(re_v1_parse_line, tests[i]) - extractFieldsFromRegex(re_v2_parse_line, tests[i]) + extractFieldsFromRegex(reV1ParseLine, tests[i]) + extractFieldsFromRegex(reV2ParseLine, tests[i]) } } diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index db2440f4ffa16..5c72ba7133153 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -168,7 +168,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) } func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request, error) { - var jolokiaUrl *url.URL + var jolokiaURL *url.URL context := j.Context // Usually "/jolokia/" var bulkBodyContent []map[string]interface{} @@ -188,11 +188,11 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request // Add target, only in proxy mode if j.Mode == "proxy" { - serviceUrl := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi", + serviceURL := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi", server.Host, server.Port) target := map[string]string{ - "url": serviceUrl, + "url": serviceURL, } if server.Username != "" { @@ -208,26 +208,26 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request proxy := j.Proxy // Prepare ProxyURL - proxyUrl, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context) + proxyURL, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context) if err != nil { return nil, err } if proxy.Username != "" || proxy.Password != "" { - proxyUrl.User = url.UserPassword(proxy.Username, proxy.Password) + proxyURL.User = url.UserPassword(proxy.Username, proxy.Password) } - jolokiaUrl = proxyUrl + jolokiaURL = proxyURL } else { - serverUrl, err := url.Parse("http://" + server.Host + ":" + server.Port + context) + serverURL, err := url.Parse("http://" + server.Host + ":" + server.Port + context) if err != nil { return nil, err } if server.Username != "" || server.Password != "" { - serverUrl.User = url.UserPassword(server.Username, server.Password) + serverURL.User = url.UserPassword(server.Username, server.Password) } - jolokiaUrl = serverUrl + jolokiaURL = serverURL } bulkBodyContent = append(bulkBodyContent, bodyContent) @@ -238,7 +238,7 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request return nil, err } - req, err := http.NewRequest("POST", jolokiaUrl.String(), bytes.NewBuffer(requestBody)) + req, err := http.NewRequest("POST", jolokiaURL.String(), bytes.NewBuffer(requestBody)) if err != nil { return nil, err } diff --git a/plugins/inputs/jolokia2/client.go b/plugins/inputs/jolokia2/client.go index 90aa9c0db7fce..1cde65bcbe513 100644 --- a/plugins/inputs/jolokia2/client.go +++ b/plugins/inputs/jolokia2/client.go @@ -125,14 +125,14 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { return nil, err } - requestUrl, err := formatReadUrl(c.URL, c.config.Username, c.config.Password) + requestURL, err := formatReadURL(c.URL, c.config.Username, c.config.Password) if err != nil { return nil, err } - req, err := http.NewRequest("POST", requestUrl, bytes.NewBuffer(requestBody)) + req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(requestBody)) if err != nil { - return nil, fmt.Errorf("unable to create new request '%s': %s", requestUrl, err) + return nil, fmt.Errorf("unable to create new request '%s': %s", requestURL, err) } req.Header.Add("Content-type", "application/json") @@ -249,22 +249,22 @@ func makeReadResponses(jresponses []jolokiaResponse) []ReadResponse { return rresponses } -func formatReadUrl(configUrl, username, password string) (string, error) { - parsedUrl, err := url.Parse(configUrl) +func formatReadURL(configURL, username, password string) (string, error) { + parsedURL, err := url.Parse(configURL) if err != nil { return "", err } - readUrl := url.URL{ - Host: parsedUrl.Host, - Scheme: parsedUrl.Scheme, + readURL := url.URL{ + Host: parsedURL.Host, + Scheme: parsedURL.Scheme, } if username != "" || password != "" { - readUrl.User = url.UserPassword(username, password) + readURL.User = url.UserPassword(username, password) } - readUrl.Path = path.Join(parsedUrl.Path, "read") - readUrl.Query().Add("ignoreErrors", "true") - return readUrl.String(), nil + readURL.Path = path.Join(parsedURL.Path, "read") + readURL.Query().Add("ignoreErrors", "true") + return readURL.String(), nil } diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go index a3df62e1bb0c0..784b6a8c12526 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go @@ -27,20 +27,20 @@ var data = &telemetry.OpenConfigData{ Kv: []*telemetry.KeyValue{{Key: "/sensor[tag='tagValue']/intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}}, } -var data_with_prefix = &telemetry.OpenConfigData{ +var dataWithPrefix = &telemetry.OpenConfigData{ Path: "/sensor_with_prefix", Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}}, {Key: "intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}}, } -var data_with_multiple_tags = &telemetry.OpenConfigData{ +var dataWithMultipleTags = &telemetry.OpenConfigData{ Path: "/sensor_with_multiple_tags", Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}}, {Key: "tagKey[tag='tagValue']/boolKey", Value: &telemetry.KeyValue_BoolValue{BoolValue: false}}, {Key: "intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}}, } -var data_with_string_values = &telemetry.OpenConfigData{ +var dataWithStringValues = &telemetry.OpenConfigData{ Path: "/sensor_with_string_values", Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}}, {Key: "strKey[tag='tagValue']/strValue", Value: &telemetry.KeyValue_StrValue{StrValue: "10"}}}, @@ -54,11 +54,11 @@ func (s *openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.Subscripti if path == "/sensor" { stream.Send(data) } else if path == "/sensor_with_prefix" { - stream.Send(data_with_prefix) + stream.Send(dataWithPrefix) } else if path == "/sensor_with_multiple_tags" { - stream.Send(data_with_multiple_tags) + stream.Send(dataWithMultipleTags) } else if path == "/sensor_with_string_values" { - stream.Send(data_with_string_values) + stream.Send(dataWithStringValues) } return nil } diff --git a/plugins/inputs/kapacitor/kapacitor.go b/plugins/inputs/kapacitor/kapacitor.go index dd3303a7419d3..ecb99877d2846 100644 --- a/plugins/inputs/kapacitor/kapacitor.go +++ b/plugins/inputs/kapacitor/kapacitor.go @@ -51,7 +51,7 @@ func (*Kapacitor) SampleConfig() string { func (k *Kapacitor) Gather(acc telegraf.Accumulator) error { if k.client == nil { - client, err := k.createHttpClient() + client, err := k.createHTTPClient() if err != nil { return err } @@ -73,7 +73,7 @@ func (k *Kapacitor) Gather(acc telegraf.Accumulator) error { return nil } -func (k *Kapacitor) createHttpClient() (*http.Client, error) { +func (k *Kapacitor) createHTTPClient() (*http.Client, error) { tlsCfg, err := k.ClientConfig.TLSConfig() if err != nil { return nil, err diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index ea55803d6b354..cd682c779e7ed 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -16,11 +16,11 @@ import ( // /proc/stat file line prefixes to gather stats on: var ( - interrupts = []byte("intr") - context_switches = []byte("ctxt") - processes_forked = []byte("processes") - disk_pages = []byte("page") - boot_time = []byte("btime") + interrupts = []byte("intr") + contextSwitches = []byte("ctxt") + processesForked = []byte("processes") + diskPages = []byte("page") + bootTime = []byte("btime") ) type Kernel struct { @@ -65,25 +65,25 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { return err } fields["interrupts"] = int64(m) - case bytes.Equal(field, context_switches): + case bytes.Equal(field, contextSwitches): m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err } fields["context_switches"] = int64(m) - case bytes.Equal(field, processes_forked): + case bytes.Equal(field, processesForked): m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err } fields["processes_forked"] = int64(m) - case bytes.Equal(field, boot_time): + case bytes.Equal(field, bootTime): m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err } fields["boot_time"] = int64(m) - case bytes.Equal(field, disk_pages): + case bytes.Equal(field, diskPages): in, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err diff --git a/plugins/inputs/kernel/kernel_test.go b/plugins/inputs/kernel/kernel_test.go index d356f43802798..e844d24322490 100644 --- a/plugins/inputs/kernel/kernel_test.go +++ b/plugins/inputs/kernel/kernel_test.go @@ -13,8 +13,8 @@ import ( ) func TestFullProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Full)) - tmpfile2 := makeFakeStatFile([]byte(entropyStatFile_Full)) + tmpfile := makeFakeStatFile([]byte(statFileFull)) + tmpfile2 := makeFakeStatFile([]byte(entropyStatFileFull)) defer os.Remove(tmpfile) defer os.Remove(tmpfile2) @@ -40,8 +40,8 @@ func TestFullProcFile(t *testing.T) { } func TestPartialProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Partial)) - tmpfile2 := makeFakeStatFile([]byte(entropyStatFile_Partial)) + tmpfile := makeFakeStatFile([]byte(statFilePartial)) + tmpfile2 := makeFakeStatFile([]byte(entropyStatFilePartial)) defer os.Remove(tmpfile) defer os.Remove(tmpfile2) @@ -66,8 +66,8 @@ func TestPartialProcFile(t *testing.T) { } func TestInvalidProcFile1(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Invalid)) - tmpfile2 := makeFakeStatFile([]byte(entropyStatFile_Invalid)) + tmpfile := makeFakeStatFile([]byte(statFileInvalid)) + tmpfile2 := makeFakeStatFile([]byte(entropyStatFileInvalid)) defer os.Remove(tmpfile) defer os.Remove(tmpfile2) @@ -82,7 +82,7 @@ func TestInvalidProcFile1(t *testing.T) { } func TestInvalidProcFile2(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Invalid2)) + tmpfile := makeFakeStatFile([]byte(statFileInvalid2)) defer os.Remove(tmpfile) k := Kernel{ @@ -95,7 +95,7 @@ func TestInvalidProcFile2(t *testing.T) { } func TestNoProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Invalid2)) + tmpfile := makeFakeStatFile([]byte(statFileInvalid2)) os.Remove(tmpfile) k := Kernel{ @@ -108,7 +108,7 @@ func TestNoProcFile(t *testing.T) { assert.Contains(t, err.Error(), "does not exist") } -const statFile_Full = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +const statFileFull = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 cpu0 6796 252 5655 10444977 175 0 101 0 0 0 intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2626618 @@ -122,7 +122,7 @@ swap 1 0 entropy_avail 1024 ` -const statFile_Partial = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +const statFilePartial = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 cpu0 6796 252 5655 10444977 175 0 101 0 0 0 intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2626618 @@ -134,7 +134,7 @@ page 5741 1808 ` // missing btime measurement -const statFile_Invalid = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +const statFileInvalid = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 cpu0 6796 252 5655 10444977 175 0 101 0 0 0 intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2626618 @@ -149,7 +149,7 @@ entropy_avail 1024 ` // missing second page measurement -const statFile_Invalid2 = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +const statFileInvalid2 = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 cpu0 6796 252 5655 10444977 175 0 101 0 0 0 intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2626618 @@ -161,11 +161,11 @@ softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545 entropy_avail 1024 2048 ` -const entropyStatFile_Full = `1024` +const entropyStatFileFull = `1024` -const entropyStatFile_Partial = `1024` +const entropyStatFilePartial = `1024` -const entropyStatFile_Invalid = `` +const entropyStatFileInvalid = `` func makeFakeStatFile(content []byte) string { tmpfile, err := ioutil.TempFile("", "kernel_test") diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go index bba615a743e54..cb571e8a320c6 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go @@ -13,7 +13,7 @@ import ( ) func TestFullVmStatProcFile(t *testing.T) { - tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Full)) + tmpfile := makeFakeVMStatFile([]byte(vmStatFileFull)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -121,7 +121,7 @@ func TestFullVmStatProcFile(t *testing.T) { } func TestPartialVmStatProcFile(t *testing.T) { - tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Partial)) + tmpfile := makeFakeVMStatFile([]byte(vmStatFilePartial)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -151,7 +151,7 @@ func TestPartialVmStatProcFile(t *testing.T) { } func TestInvalidVmStatProcFile1(t *testing.T) { - tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Invalid)) + tmpfile := makeFakeVMStatFile([]byte(vmStatFileInvalid)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -164,7 +164,7 @@ func TestInvalidVmStatProcFile1(t *testing.T) { } func TestNoVmStatProcFile(t *testing.T) { - tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Invalid)) + tmpfile := makeFakeVMStatFile([]byte(vmStatFileInvalid)) os.Remove(tmpfile) k := KernelVmstat{ @@ -177,7 +177,7 @@ func TestNoVmStatProcFile(t *testing.T) { assert.Contains(t, err.Error(), "does not exist") } -const vmStatFile_Full = `nr_free_pages 78730 +const vmStatFileFull = `nr_free_pages 78730 nr_inactive_anon 426259 nr_active_anon 2515657 nr_inactive_file 2366791 @@ -269,7 +269,7 @@ thp_collapse_alloc 24857 thp_collapse_alloc_failed 102214 thp_split 9817` -const vmStatFile_Partial = `unevictable_pgs_culled 1531 +const vmStatFilePartial = `unevictable_pgs_culled 1531 unevictable_pgs_scanned 0 unevictable_pgs_rescued 5426 unevictable_pgs_mlocked 6988 @@ -284,7 +284,7 @@ thp_collapse_alloc_failed 102214 thp_split 9817` // invalid thp_split measurement -const vmStatFile_Invalid = `unevictable_pgs_culled 1531 +const vmStatFileInvalid = `unevictable_pgs_culled 1531 unevictable_pgs_scanned 0 unevictable_pgs_rescued 5426 unevictable_pgs_mlocked 6988 @@ -298,7 +298,7 @@ thp_collapse_alloc 24857 thp_collapse_alloc_failed 102214 thp_split abcd` -func makeFakeVmStatFile(content []byte) string { +func makeFakeVMStatFile(content []byte) string { tmpfile, err := ioutil.TempFile("", "kernel_vmstat_test") if err != nil { panic(err) diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 98b81a91f52b9..83523eb37bf27 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -141,7 +141,7 @@ func (k *Kibana) Description() string { func (k *Kibana) Gather(acc telegraf.Accumulator) error { if k.client == nil { - client, err := k.createHttpClient() + client, err := k.createHTTPClient() if err != nil { return err @@ -166,7 +166,7 @@ func (k *Kibana) Gather(acc telegraf.Accumulator) error { return nil } -func (k *Kibana) createHttpClient() (*http.Client, error) { +func (k *Kibana) createHTTPClient() (*http.Client, error) { tlsCfg, err := k.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -182,12 +182,12 @@ func (k *Kibana) createHttpClient() (*http.Client, error) { return client, nil } -func (k *Kibana) gatherKibanaStatus(baseUrl string, acc telegraf.Accumulator) error { +func (k *Kibana) gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) error { kibanaStatus := &kibanaStatus{} - url := baseUrl + statusPath + url := baseURL + statusPath - host, err := k.gatherJsonData(url, kibanaStatus) + host, err := k.gatherJSONData(url, kibanaStatus) if err != nil { return err } @@ -237,7 +237,7 @@ func (k *Kibana) gatherKibanaStatus(baseUrl string, acc telegraf.Accumulator) er return nil } -func (k *Kibana) gatherJsonData(url string, v interface{}) (host string, err error) { +func (k *Kibana) gatherJSONData(url string, v interface{}) (host string, err error) { request, err := http.NewRequest("GET", url, nil) if err != nil { return "", fmt.Errorf("unable to create new request '%s': %v", url, err) diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index a9bb6ef4850d8..7845417e03173 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -129,7 +129,7 @@ func buildURL(endpoint string, base string) (*url.URL, error) { func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error { summaryMetrics := &SummaryMetrics{} - err := k.LoadJson(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics) + err := k.LoadJSON(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics) if err != nil { return err } @@ -193,19 +193,19 @@ func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) } func (k *Kubernetes) gatherPodInfo(baseURL string) ([]Metadata, error) { - var podApi Pods - err := k.LoadJson(fmt.Sprintf("%s/pods", baseURL), &podApi) + var podAPI Pods + err := k.LoadJSON(fmt.Sprintf("%s/pods", baseURL), &podAPI) if err != nil { return nil, err } var podInfos []Metadata - for _, podMetadata := range podApi.Items { + for _, podMetadata := range podAPI.Items { podInfos = append(podInfos, podMetadata.Metadata) } return podInfos, nil } -func (k *Kubernetes) LoadJson(url string, v interface{}) error { +func (k *Kubernetes) LoadJSON(url string, v interface{}) error { var req, err = http.NewRequest("GET", url, nil) if err != nil { return err diff --git a/plugins/inputs/kubernetes/kubernetes_pods.go b/plugins/inputs/kubernetes/kubernetes_pods.go index 672608e54fe25..29d5e77895266 100644 --- a/plugins/inputs/kubernetes/kubernetes_pods.go +++ b/plugins/inputs/kubernetes/kubernetes_pods.go @@ -2,7 +2,7 @@ package kubernetes type Pods struct { Kind string `json:"kind"` - ApiVersion string `json:"apiVersion"` + APIVersion string `json:"apiVersion"` Items []Item `json:"items"` } diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go index 7553c33c777b2..b1adcd6e77e7c 100644 --- a/plugins/inputs/lanz/lanz.go +++ b/plugins/inputs/lanz/lanz.go @@ -54,12 +54,12 @@ func (l *Lanz) Start(acc telegraf.Accumulator) error { } for _, server := range l.Servers { - deviceUrl, err := url.Parse(server) + deviceURL, err := url.Parse(server) if err != nil { return err } client := lanz.New( - lanz.WithAddr(deviceUrl.Host), + lanz.WithAddr(deviceURL.Host), lanz.WithBackoff(1*time.Second), lanz.WithTimeout(10*time.Second), ) @@ -72,7 +72,7 @@ func (l *Lanz) Start(acc telegraf.Accumulator) error { l.wg.Add(1) go func() { l.wg.Done() - receive(acc, in, deviceUrl) + receive(acc, in, deviceURL) }() } return nil @@ -85,19 +85,19 @@ func (l *Lanz) Stop() { l.wg.Wait() } -func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceUrl *url.URL) { +func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceURL *url.URL) { for { select { case msg, ok := <-in: if !ok { return } - msgToAccumulator(acc, msg, deviceUrl) + msgToAccumulator(acc, msg, deviceURL) } } } -func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *url.URL) { +func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceURL *url.URL) { cr := msg.GetCongestionRecord() if cr != nil { vals := map[string]interface{}{ @@ -114,8 +114,8 @@ func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *u "entry_type": strconv.FormatInt(int64(cr.GetEntryType()), 10), "traffic_class": strconv.FormatInt(int64(cr.GetTrafficClass()), 10), "fabric_peer_intf_name": cr.GetFabricPeerIntfName(), - "source": deviceUrl.Hostname(), - "port": deviceUrl.Port(), + "source": deviceURL.Hostname(), + "port": deviceURL.Port(), } acc.AddFields("lanz_congestion_record", vals, tags) } @@ -129,8 +129,8 @@ func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *u } tags := map[string]string{ "entry_type": strconv.FormatInt(int64(gbur.GetEntryType()), 10), - "source": deviceUrl.Hostname(), - "port": deviceUrl.Port(), + "source": deviceURL.Hostname(), + "port": deviceURL.Port(), } acc.AddFields("lanz_global_buffer_usage_record", vals, tags) } diff --git a/plugins/inputs/lanz/lanz_test.go b/plugins/inputs/lanz/lanz_test.go index 5f9c7ab24cb40..26e1f52920398 100644 --- a/plugins/inputs/lanz/lanz_test.go +++ b/plugins/inputs/lanz/lanz_test.go @@ -58,16 +58,16 @@ func TestLanzGeneratesMetrics(t *testing.T) { l.Servers = append(l.Servers, "tcp://switch01.int.example.com:50001") l.Servers = append(l.Servers, "tcp://switch02.int.example.com:50001") - deviceUrl1, err := url.Parse(l.Servers[0]) + deviceURL1, err := url.Parse(l.Servers[0]) if err != nil { t.Fail() } - deviceUrl2, err := url.Parse(l.Servers[1]) + deviceURL2, err := url.Parse(l.Servers[1]) if err != nil { t.Fail() } - msgToAccumulator(&acc, testProtoBufCongestionRecord1, deviceUrl1) + msgToAccumulator(&acc, testProtoBufCongestionRecord1, deviceURL1) acc.Wait(1) vals1 := map[string]interface{}{ @@ -92,7 +92,7 @@ func TestLanzGeneratesMetrics(t *testing.T) { acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals1, tags1) acc.ClearMetrics() - msgToAccumulator(&acc, testProtoBufCongestionRecord2, deviceUrl2) + msgToAccumulator(&acc, testProtoBufCongestionRecord2, deviceURL2) acc.Wait(1) vals2 := map[string]interface{}{ @@ -117,7 +117,7 @@ func TestLanzGeneratesMetrics(t *testing.T) { acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals2, tags2) acc.ClearMetrics() - msgToAccumulator(&acc, testProtoBufGlobalBufferUsageRecord, deviceUrl1) + msgToAccumulator(&acc, testProtoBufGlobalBufferUsageRecord, deviceURL1) acc.Wait(1) gburVals1 := map[string]interface{}{ diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index 65f326b76968c..e9218278f77b8 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -157,8 +157,8 @@ func (logstash *Logstash) Init() error { return nil } -// createHttpClient create a clients to access API -func (logstash *Logstash) createHttpClient() (*http.Client, error) { +// createHTTPClient create a clients to access API +func (logstash *Logstash) createHTTPClient() (*http.Client, error) { tlsConfig, err := logstash.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -174,8 +174,8 @@ func (logstash *Logstash) createHttpClient() (*http.Client, error) { return client, nil } -// gatherJsonData query the data source and parse the response JSON -func (logstash *Logstash) gatherJsonData(url string, value interface{}) error { +// gatherJSONData query the data source and parse the response JSON +func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { request, err := http.NewRequest("GET", url, nil) if err != nil { return err @@ -217,7 +217,7 @@ func (logstash *Logstash) gatherJsonData(url string, value interface{}) error { func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumulator) error { jvmStats := &JVMStats{} - err := logstash.gatherJsonData(url, jvmStats) + err := logstash.gatherJSONData(url, jvmStats) if err != nil { return err } @@ -243,7 +243,7 @@ func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumu func (logstash *Logstash) gatherProcessStats(url string, accumulator telegraf.Accumulator) error { processStats := &ProcessStats{} - err := logstash.gatherJsonData(url, processStats) + err := logstash.gatherJSONData(url, processStats) if err != nil { return err } @@ -333,7 +333,7 @@ func (logstash *Logstash) gatherQueueStats( func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.Accumulator) error { pipelineStats := &PipelineStats{} - err := logstash.gatherJsonData(url, pipelineStats) + err := logstash.gatherJSONData(url, pipelineStats) if err != nil { return err } @@ -377,7 +377,7 @@ func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.A func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf.Accumulator) error { pipelinesStats := &PipelinesStats{} - err := logstash.gatherJsonData(url, pipelinesStats) + err := logstash.gatherJSONData(url, pipelinesStats) if err != nil { return err } @@ -423,7 +423,7 @@ func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf. // Gather ask this plugin to start gathering metrics func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error { if logstash.client == nil { - client, err := logstash.createHttpClient() + client, err := logstash.createHTTPClient() if err != nil { return err @@ -432,40 +432,40 @@ func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error { } if choice.Contains("jvm", logstash.Collect) { - jvmUrl, err := url.Parse(logstash.URL + jvmStats) + jvmURL, err := url.Parse(logstash.URL + jvmStats) if err != nil { return err } - if err := logstash.gatherJVMStats(jvmUrl.String(), accumulator); err != nil { + if err := logstash.gatherJVMStats(jvmURL.String(), accumulator); err != nil { return err } } if choice.Contains("process", logstash.Collect) { - processUrl, err := url.Parse(logstash.URL + processStats) + processURL, err := url.Parse(logstash.URL + processStats) if err != nil { return err } - if err := logstash.gatherProcessStats(processUrl.String(), accumulator); err != nil { + if err := logstash.gatherProcessStats(processURL.String(), accumulator); err != nil { return err } } if choice.Contains("pipelines", logstash.Collect) { if logstash.SinglePipeline { - pipelineUrl, err := url.Parse(logstash.URL + pipelineStats) + pipelineURL, err := url.Parse(logstash.URL + pipelineStats) if err != nil { return err } - if err := logstash.gatherPipelineStats(pipelineUrl.String(), accumulator); err != nil { + if err := logstash.gatherPipelineStats(pipelineURL.String(), accumulator); err != nil { return err } } else { - pipelinesUrl, err := url.Parse(logstash.URL + pipelinesStats) + pipelinesURL, err := url.Parse(logstash.URL + pipelinesStats) if err != nil { return err } - if err := logstash.gatherPipelinesStats(pipelinesUrl.String(), accumulator); err != nil { + if err := logstash.gatherPipelinesStats(pipelinesURL.String(), accumulator); err != nil { return err } } diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go index aeb4e46f8dbb6..e849fa57db100 100644 --- a/plugins/inputs/logstash/logstash_test.go +++ b/plugins/inputs/logstash/logstash_test.go @@ -36,10 +36,10 @@ func Test_Logstash5GatherProcessStats(test *testing.T) { defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() + client, err := logstashTest.createHTTPClient() if err != nil { - test.Logf("Can't createHttpClient") + test.Logf("Can't createHTTPClient") } logstashTest.client = client } @@ -85,10 +85,10 @@ func Test_Logstash6GatherProcessStats(test *testing.T) { defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() + client, err := logstashTest.createHTTPClient() if err != nil { - test.Logf("Can't createHttpClient") + test.Logf("Can't createHTTPClient") } logstashTest.client = client } @@ -135,10 +135,10 @@ func Test_Logstash5GatherPipelineStats(test *testing.T) { defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() + client, err := logstashTest.createHTTPClient() if err != nil { - test.Logf("Can't createHttpClient") + test.Logf("Can't createHTTPClient") } logstashTest.client = client } @@ -237,10 +237,10 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() + client, err := logstashTest.createHTTPClient() if err != nil { - test.Logf("Can't createHttpClient") + test.Logf("Can't createHTTPClient") } logstashTest.client = client } @@ -566,10 +566,10 @@ func Test_Logstash5GatherJVMStats(test *testing.T) { defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() + client, err := logstashTest.createHTTPClient() if err != nil { - test.Logf("Can't createHttpClient") + test.Logf("Can't createHTTPClient") } logstashTest.client = client } @@ -635,10 +635,10 @@ func Test_Logstash6GatherJVMStats(test *testing.T) { defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() + client, err := logstashTest.createHTTPClient() if err != nil { - test.Logf("Can't createHttpClient") + test.Logf("Can't createHTTPClient") } logstashTest.client = client } diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index a40614b1d0f7e..b36bbf322cdf7 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -15,11 +15,11 @@ import ( ) const ( - reports_endpoint string = "/3.0/reports" - reports_endpoint_campaign string = "/3.0/reports/%s" + reportsEndpoint string = "/3.0/reports" + reportsEndpointCampaign string = "/3.0/reports/%s" ) -var mailchimp_datacenter = regexp.MustCompile("[a-z]+[0-9]+$") +var mailchimpDatacenter = regexp.MustCompile("[a-z]+[0-9]+$") type ChimpAPI struct { Transport http.RoundTripper @@ -57,7 +57,7 @@ func (p *ReportsParams) String() string { func NewChimpAPI(apiKey string) *ChimpAPI { u := &url.URL{} u.Scheme = "https" - u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimp_datacenter.FindString(apiKey)) + u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimpDatacenter.FindString(apiKey)) u.User = url.UserPassword("", apiKey) return &ChimpAPI{url: u} } @@ -86,7 +86,7 @@ func chimpErrorCheck(body []byte) error { func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) { a.Lock() defer a.Unlock() - a.url.Path = reports_endpoint + a.url.Path = reportsEndpoint var response ReportsResponse rawjson, err := runChimp(a, params) @@ -105,7 +105,7 @@ func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) { func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { a.Lock() defer a.Unlock() - a.url.Path = fmt.Sprintf(reports_endpoint_campaign, campaignID) + a.url.Path = fmt.Sprintf(reportsEndpointCampaign, campaignID) var response Report rawjson, err := runChimp(a, ReportsParams{}) diff --git a/plugins/inputs/mailchimp/mailchimp.go b/plugins/inputs/mailchimp/mailchimp.go index d7255191ab724..fe6892bf48743 100644 --- a/plugins/inputs/mailchimp/mailchimp.go +++ b/plugins/inputs/mailchimp/mailchimp.go @@ -11,9 +11,9 @@ import ( type MailChimp struct { api *ChimpAPI - ApiKey string - DaysOld int - CampaignId string + APIKey string `toml:"api_key"` + DaysOld int `toml:"days_old"` + CampaignID string `toml:"campaign_id"` } var sampleConfig = ` @@ -37,11 +37,11 @@ func (m *MailChimp) Description() string { func (m *MailChimp) Gather(acc telegraf.Accumulator) error { if m.api == nil { - m.api = NewChimpAPI(m.ApiKey) + m.api = NewChimpAPI(m.APIKey) } m.api.Debug = false - if m.CampaignId == "" { + if m.CampaignID == "" { since := "" if m.DaysOld > 0 { now := time.Now() @@ -61,7 +61,7 @@ func (m *MailChimp) Gather(acc telegraf.Accumulator) error { gatherReport(acc, report, now) } } else { - report, err := m.api.GetReport(m.CampaignId) + report, err := m.api.GetReport(m.CampaignID) if err != nil { return err } diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go index 0c4dab56d5d12..ba711bf745080 100644 --- a/plugins/inputs/mailchimp/mailchimp_test.go +++ b/plugins/inputs/mailchimp/mailchimp_test.go @@ -94,7 +94,7 @@ func TestMailChimpGatherReport(t *testing.T) { } m := MailChimp{ api: api, - CampaignId: "test", + CampaignID: "test", } var acc testutil.Accumulator @@ -159,7 +159,7 @@ func TestMailChimpGatherError(t *testing.T) { } m := MailChimp{ api: api, - CampaignId: "test", + CampaignID: "test", } var acc testutil.Accumulator diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index acc836cba34bb..0b3c7d26fa5e3 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -159,7 +159,7 @@ func (m *Mesos) initialize() error { m.slaveURLs = append(m.slaveURLs, u) } - client, err := m.createHttpClient() + client, err := m.createHTTPClient() if err != nil { return err } @@ -203,7 +203,7 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { return nil } -func (m *Mesos) createHttpClient() (*http.Client, error) { +func (m *Mesos) createHTTPClient() (*http.Client, error) { tlsCfg, err := m.ClientConfig.TLSConfig() if err != nil { return nil, err diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 4ba54137383dd..77ea2744b300f 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -121,7 +121,7 @@ func (m *MongoDB) getMongoServer(url *url.URL) *Server { if _, ok := m.mongos[url.Host]; !ok { m.mongos[url.Host] = &Server{ Log: m.Log, - Url: url, + URL: url, } } return m.mongos[url.Host] @@ -130,10 +130,10 @@ func (m *MongoDB) getMongoServer(url *url.URL) *Server { func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { if server.Session == nil { var dialAddrs []string - if server.Url.User != nil { - dialAddrs = []string{server.Url.String()} + if server.URL.User != nil { + dialAddrs = []string{server.URL.String()} } else { - dialAddrs = []string{server.Url.Host} + dialAddrs = []string{server.URL.Host} } dialInfo, err := mgo.ParseURL(dialAddrs[0]) if err != nil { diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 5af48c10a6f9b..9553a578c04da 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -12,7 +12,7 @@ import ( ) type Server struct { - Url *url.URL + URL *url.URL Session *mgo.Session lastResult *MongoStatus @@ -21,7 +21,7 @@ type Server struct { func (s *Server) getDefaultTags() map[string]string { tags := make(map[string]string) - tags["hostname"] = s.Url.Host + tags["hostname"] = s.URL.Host return tags } @@ -275,7 +275,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, durationInSeconds = 1 } data := NewMongodbData( - NewStatLine(*s.lastResult, *result, s.Url.Host, true, durationInSeconds), + NewStatLine(*s.lastResult, *result, s.URL.Host, true, durationInSeconds), s.getDefaultTags(), ) data.AddDefaultStats() diff --git a/plugins/inputs/mongodb/mongodb_test.go b/plugins/inputs/mongodb/mongodb_test.go index 73e68ed376784..cd3b741e250d8 100644 --- a/plugins/inputs/mongodb/mongodb_test.go +++ b/plugins/inputs/mongodb/mongodb_test.go @@ -20,23 +20,23 @@ func init() { connect_url = os.Getenv("MONGODB_URL") if connect_url == "" { connect_url = "127.0.0.1:27017" - server = &Server{Url: &url.URL{Host: connect_url}} + server = &Server{URL: &url.URL{Host: connect_url}} } else { full_url, err := url.Parse(connect_url) if err != nil { log.Fatalf("Unable to parse URL (%s), %s\n", full_url, err.Error()) } - server = &Server{Url: full_url} + server = &Server{URL: full_url} } } func testSetup(m *testing.M) { var err error var dialAddrs []string - if server.Url.User != nil { - dialAddrs = []string{server.Url.String()} + if server.URL.User != nil { + dialAddrs = []string{server.URL.String()} } else { - dialAddrs = []string{server.Url.Host} + dialAddrs = []string{server.URL.Host} } dialInfo, err := mgo.ParseURL(dialAddrs[0]) if err != nil { @@ -49,7 +49,7 @@ func testSetup(m *testing.M) { log.Fatalf("Unable to connect to MongoDB, %s\n", err.Error()) } server.Session = sess - server.Session, _ = mgo.Dial(server.Url.Host) + server.Session, _ = mgo.Dial(server.URL.Host) if err != nil { log.Fatalln(err.Error()) } diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go index 4834137542039..a3aa3b7e5356a 100644 --- a/plugins/inputs/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -55,7 +55,7 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error { // Create an HTTP client that is re-used for each // collection interval if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -72,7 +72,7 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc)) + acc.AddError(n.gatherURL(addr, acc)) }(addr) } @@ -80,7 +80,7 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error { return nil } -func (n *Nginx) createHttpClient() (*http.Client, error) { +func (n *Nginx) createHTTPClient() (*http.Client, error) { tlsCfg, err := n.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -100,7 +100,7 @@ func (n *Nginx) createHttpClient() (*http.Client, error) { return client, nil } -func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { +func (n *Nginx) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { resp, err := n.client.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) diff --git a/plugins/inputs/nginx/nginx_test.go b/plugins/inputs/nginx/nginx_test.go index 7eb9e90b653ef..8d9f047f50c8c 100644 --- a/plugins/inputs/nginx/nginx_test.go +++ b/plugins/inputs/nginx/nginx_test.go @@ -61,16 +61,16 @@ func TestNginxGeneratesMetrics(t *testing.T) { Urls: []string{fmt.Sprintf("%s/tengine_status", ts.URL)}, } - var acc_nginx testutil.Accumulator - var acc_tengine testutil.Accumulator + var accNginx testutil.Accumulator + var accTengine testutil.Accumulator - err_nginx := acc_nginx.GatherError(n.Gather) - err_tengine := acc_tengine.GatherError(nt.Gather) + errNginx := accNginx.GatherError(n.Gather) + errTengine := accTengine.GatherError(nt.Gather) - require.NoError(t, err_nginx) - require.NoError(t, err_tengine) + require.NoError(t, errNginx) + require.NoError(t, errTengine) - fields_nginx := map[string]interface{}{ + fieldsNginx := map[string]interface{}{ "active": uint64(585), "accepts": uint64(85340), "handled": uint64(85340), @@ -80,7 +80,7 @@ func TestNginxGeneratesMetrics(t *testing.T) { "waiting": uint64(446), } - fields_tengine := map[string]interface{}{ + fieldsTengine := map[string]interface{}{ "active": uint64(403), "accepts": uint64(853), "handled": uint64(8533), @@ -108,6 +108,6 @@ func TestNginxGeneratesMetrics(t *testing.T) { } tags := map[string]string{"server": host, "port": port} - acc_nginx.AssertContainsTaggedFields(t, "nginx", fields_nginx, tags) - acc_tengine.AssertContainsTaggedFields(t, "nginx", fields_tengine, tags) + accNginx.AssertContainsTaggedFields(t, "nginx", fieldsNginx, tags) + accTengine.AssertContainsTaggedFields(t, "nginx", fieldsTengine, tags) } diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go index 5b0fb2596ebf8..80811cc8d4d07 100644 --- a/plugins/inputs/nginx_plus/nginx_plus.go +++ b/plugins/inputs/nginx_plus/nginx_plus.go @@ -56,7 +56,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { // collection interval if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -73,7 +73,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc)) + acc.AddError(n.gatherURL(addr, acc)) }(addr) } @@ -81,7 +81,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { return nil } -func (n *NginxPlus) createHttpClient() (*http.Client, error) { +func (n *NginxPlus) createHTTPClient() (*http.Client, error) { if n.ResponseTimeout.Duration < time.Second { n.ResponseTimeout.Duration = time.Second * 5 } @@ -101,7 +101,7 @@ func (n *NginxPlus) createHttpClient() (*http.Client, error) { return client, nil } -func (n *NginxPlus) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { +func (n *NginxPlus) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { resp, err := n.client.Get(addr.String()) if err != nil { @@ -114,7 +114,7 @@ func (n *NginxPlus) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] switch contentType { case "application/json": - return gatherStatusUrl(bufio.NewReader(resp.Body), getTags(addr), acc) + return gatherStatusURL(bufio.NewReader(resp.Body), getTags(addr), acc) default: return fmt.Errorf("%s returned unexpected content type %s", addr.String(), contentType) } @@ -283,7 +283,7 @@ type Status struct { } `json:"stream"` } -func gatherStatusUrl(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error { +func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error { dec := json.NewDecoder(r) status := &Status{} if err := dec.Decode(status); err != nil { diff --git a/plugins/inputs/nginx_plus/nginx_plus_test.go b/plugins/inputs/nginx_plus/nginx_plus_test.go index 6e9a8c4d97c3e..d7531de975393 100644 --- a/plugins/inputs/nginx_plus/nginx_plus_test.go +++ b/plugins/inputs/nginx_plus/nginx_plus_test.go @@ -270,9 +270,9 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator - err_nginx := n.Gather(&acc) + errNginx := n.Gather(&acc) - require.NoError(t, err_nginx) + require.NoError(t, errNginx) addr, err := url.Parse(ts.URL) if err != nil { diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api.go b/plugins/inputs/nginx_plus_api/nginx_plus_api.go index 8ec1ea0f7725f..b2ab91762ae58 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api.go @@ -13,9 +13,9 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -type NginxPlusApi struct { +type NginxPlusAPI struct { Urls []string `toml:"urls"` - ApiVersion int64 `toml:"api_version"` + APIVersion int64 `toml:"api_version"` ResponseTimeout internal.Duration `toml:"response_timeout"` tls.ClientConfig @@ -24,7 +24,7 @@ type NginxPlusApi struct { const ( // Default settings - defaultApiVersion = 3 + defaultAPIVersion = 3 // Paths processesPath = "processes" @@ -61,26 +61,26 @@ var sampleConfig = ` # insecure_skip_verify = false ` -func (n *NginxPlusApi) SampleConfig() string { +func (n *NginxPlusAPI) SampleConfig() string { return sampleConfig } -func (n *NginxPlusApi) Description() string { +func (n *NginxPlusAPI) Description() string { return "Read Nginx Plus Api documentation" } -func (n *NginxPlusApi) Gather(acc telegraf.Accumulator) error { +func (n *NginxPlusAPI) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup // Create an HTTP client that is re-used for each // collection interval - if n.ApiVersion == 0 { - n.ApiVersion = defaultApiVersion + if n.APIVersion == 0 { + n.APIVersion = defaultAPIVersion } if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -105,7 +105,7 @@ func (n *NginxPlusApi) Gather(acc telegraf.Accumulator) error { return nil } -func (n *NginxPlusApi) createHttpClient() (*http.Client, error) { +func (n *NginxPlusAPI) createHTTPClient() (*http.Client, error) { if n.ResponseTimeout.Duration < time.Second { n.ResponseTimeout.Duration = time.Second * 5 } @@ -127,6 +127,6 @@ func (n *NginxPlusApi) createHttpClient() (*http.Client, error) { func init() { inputs.Add("nginx_plus_api", func() telegraf.Input { - return &NginxPlusApi{} + return &NginxPlusAPI{} }) } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 6aaaff2d344c7..7e1e753c5ff76 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -19,19 +19,19 @@ var ( errNotFound = errors.New("not found") ) -func (n *NginxPlusApi) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) { +func (n *NginxPlusAPI) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) { addError(acc, n.gatherProcessesMetrics(addr, acc)) addError(acc, n.gatherConnectionsMetrics(addr, acc)) addError(acc, n.gatherSslMetrics(addr, acc)) - addError(acc, n.gatherHttpRequestsMetrics(addr, acc)) - addError(acc, n.gatherHttpServerZonesMetrics(addr, acc)) - addError(acc, n.gatherHttpUpstreamsMetrics(addr, acc)) - addError(acc, n.gatherHttpCachesMetrics(addr, acc)) + addError(acc, n.gatherHTTPRequestsMetrics(addr, acc)) + addError(acc, n.gatherHTTPServerZonesMetrics(addr, acc)) + addError(acc, n.gatherHTTPUpstreamsMetrics(addr, acc)) + addError(acc, n.gatherHTTPCachesMetrics(addr, acc)) addError(acc, n.gatherStreamServerZonesMetrics(addr, acc)) addError(acc, n.gatherStreamUpstreamsMetrics(addr, acc)) - if n.ApiVersion >= 5 { - addError(acc, n.gatherHttpLocationZonesMetrics(addr, acc)) + if n.APIVersion >= 5 { + addError(acc, n.gatherHTTPLocationZonesMetrics(addr, acc)) addError(acc, n.gatherResolverZonesMetrics(addr, acc)) } } @@ -48,8 +48,8 @@ func addError(acc telegraf.Accumulator, err error) { } } -func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) { - url := fmt.Sprintf("%s/%d/%s", addr.String(), n.ApiVersion, path) +func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { + url := fmt.Sprintf("%s/%d/%s", addr.String(), n.APIVersion, path) resp, err := n.client.Get(url) if err != nil { @@ -81,8 +81,8 @@ func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) { } } -func (n *NginxPlusApi) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, processesPath) +func (n *NginxPlusAPI) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, processesPath) if err != nil { return err } @@ -104,8 +104,8 @@ func (n *NginxPlusApi) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumu return nil } -func (n *NginxPlusApi) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, connectionsPath) +func (n *NginxPlusAPI) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, connectionsPath) if err != nil { return err } @@ -130,8 +130,8 @@ func (n *NginxPlusApi) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accu return nil } -func (n *NginxPlusApi) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, sslPath) +func (n *NginxPlusAPI) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, sslPath) if err != nil { return err } @@ -155,13 +155,13 @@ func (n *NginxPlusApi) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator) return nil } -func (n *NginxPlusApi) gatherHttpRequestsMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpRequestsPath) +func (n *NginxPlusAPI) gatherHTTPRequestsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpRequestsPath) if err != nil { return err } - var httpRequests = &HttpRequests{} + var httpRequests = &HTTPRequests{} if err := json.Unmarshal(body, httpRequests); err != nil { return err @@ -179,13 +179,13 @@ func (n *NginxPlusApi) gatherHttpRequestsMetrics(addr *url.URL, acc telegraf.Acc return nil } -func (n *NginxPlusApi) gatherHttpServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpServerZonesPath) +func (n *NginxPlusAPI) gatherHTTPServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpServerZonesPath) if err != nil { return err } - var httpServerZones HttpServerZones + var httpServerZones HTTPServerZones if err := json.Unmarshal(body, &httpServerZones); err != nil { return err @@ -227,13 +227,13 @@ func (n *NginxPlusApi) gatherHttpServerZonesMetrics(addr *url.URL, acc telegraf. } // Added in 5 API version -func (n *NginxPlusApi) gatherHttpLocationZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpLocationZonesPath) +func (n *NginxPlusAPI) gatherHTTPLocationZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpLocationZonesPath) if err != nil { return err } - var httpLocationZones HttpLocationZones + var httpLocationZones HTTPLocationZones if err := json.Unmarshal(body, &httpLocationZones); err != nil { return err @@ -273,13 +273,13 @@ func (n *NginxPlusApi) gatherHttpLocationZonesMetrics(addr *url.URL, acc telegra return nil } -func (n *NginxPlusApi) gatherHttpUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpUpstreamsPath) +func (n *NginxPlusAPI) gatherHTTPUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpUpstreamsPath) if err != nil { return err } - var httpUpstreams HttpUpstreams + var httpUpstreams HTTPUpstreams if err := json.Unmarshal(body, &httpUpstreams); err != nil { return err @@ -357,13 +357,13 @@ func (n *NginxPlusApi) gatherHttpUpstreamsMetrics(addr *url.URL, acc telegraf.Ac return nil } -func (n *NginxPlusApi) gatherHttpCachesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpCachesPath) +func (n *NginxPlusAPI) gatherHTTPCachesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpCachesPath) if err != nil { return err } - var httpCaches HttpCaches + var httpCaches HTTPCaches if err := json.Unmarshal(body, &httpCaches); err != nil { return err @@ -411,8 +411,8 @@ func (n *NginxPlusApi) gatherHttpCachesMetrics(addr *url.URL, acc telegraf.Accum return nil } -func (n *NginxPlusApi) gatherStreamServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, streamServerZonesPath) +func (n *NginxPlusAPI) gatherStreamServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, streamServerZonesPath) if err != nil { return err } @@ -447,8 +447,8 @@ func (n *NginxPlusApi) gatherStreamServerZonesMetrics(addr *url.URL, acc telegra } // Added in 5 API version -func (n *NginxPlusApi) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, resolverZonesPath) +func (n *NginxPlusAPI) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, resolverZonesPath) if err != nil { return err } @@ -490,8 +490,8 @@ func (n *NginxPlusApi) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Ac return nil } -func (n *NginxPlusApi) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, streamUpstreamsPath) +func (n *NginxPlusAPI) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, streamUpstreamsPath) if err != nil { return err } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go index f309886cff58e..be155f073400f 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -520,7 +520,7 @@ const streamServerZonesPayload = ` ` func TestGatherProcessesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, processesPath, defaultApiVersion, processesPayload) + ts, n := prepareEndpoint(t, processesPath, defaultAPIVersion, processesPayload) defer ts.Close() var acc testutil.Accumulator @@ -541,7 +541,7 @@ func TestGatherProcessesMetrics(t *testing.T) { } func TestGatherConnectionsMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, connectionsPath, defaultApiVersion, connectionsPayload) + ts, n := prepareEndpoint(t, connectionsPath, defaultAPIVersion, connectionsPayload) defer ts.Close() var acc testutil.Accumulator @@ -565,7 +565,7 @@ func TestGatherConnectionsMetrics(t *testing.T) { } func TestGatherSslMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, sslPath, defaultApiVersion, sslPayload) + ts, n := prepareEndpoint(t, sslPath, defaultAPIVersion, sslPayload) defer ts.Close() var acc testutil.Accumulator @@ -588,13 +588,13 @@ func TestGatherSslMetrics(t *testing.T) { } func TestGatherHttpRequestsMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpRequestsPath, defaultApiVersion, httpRequestsPayload) + ts, n := prepareEndpoint(t, httpRequestsPath, defaultAPIVersion, httpRequestsPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpRequestsMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPRequestsMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -610,13 +610,13 @@ func TestGatherHttpRequestsMetrics(t *testing.T) { } func TestGatherHttpServerZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpServerZonesPath, defaultApiVersion, httpServerZonesPayload) + ts, n := prepareEndpoint(t, httpServerZonesPath, defaultAPIVersion, httpServerZonesPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpServerZonesMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPServerZonesMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -664,13 +664,13 @@ func TestGatherHttpServerZonesMetrics(t *testing.T) { } func TestGatherHttpLocationZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpLocationZonesPath, defaultApiVersion, httpLocationZonesPayload) + ts, n := prepareEndpoint(t, httpLocationZonesPath, defaultAPIVersion, httpLocationZonesPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpLocationZonesMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPLocationZonesMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -716,13 +716,13 @@ func TestGatherHttpLocationZonesMetrics(t *testing.T) { } func TestGatherHttpUpstreamsMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload) + ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultAPIVersion, httpUpstreamsPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpUpstreamsMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPUpstreamsMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -888,13 +888,13 @@ func TestGatherHttpUpstreamsMetrics(t *testing.T) { } func TestGatherHttpCachesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpCachesPath, defaultApiVersion, httpCachesPayload) + ts, n := prepareEndpoint(t, httpCachesPath, defaultAPIVersion, httpCachesPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpCachesMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPCachesMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -966,7 +966,7 @@ func TestGatherHttpCachesMetrics(t *testing.T) { } func TestGatherResolverZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, resolverZonesPath, defaultApiVersion, resolverZonesPayload) + ts, n := prepareEndpoint(t, resolverZonesPath, defaultAPIVersion, resolverZonesPayload) defer ts.Close() var acc testutil.Accumulator @@ -1020,7 +1020,7 @@ func TestGatherResolverZonesMetrics(t *testing.T) { } func TestGatherStreamUpstreams(t *testing.T) { - ts, n := prepareEndpoint(t, streamUpstreamsPath, defaultApiVersion, streamUpstreamsPayload) + ts, n := prepareEndpoint(t, streamUpstreamsPath, defaultAPIVersion, streamUpstreamsPayload) defer ts.Close() var acc testutil.Accumulator @@ -1163,7 +1163,7 @@ func TestGatherStreamUpstreams(t *testing.T) { } func TestGatherStreamServerZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, streamServerZonesPath, defaultApiVersion, streamServerZonesPayload) + ts, n := prepareEndpoint(t, streamServerZonesPath, defaultAPIVersion, streamServerZonesPayload) defer ts.Close() var acc testutil.Accumulator @@ -1208,7 +1208,7 @@ func TestUnavailableEndpoints(t *testing.T) { })) defer ts.Close() - n := &NginxPlusApi{ + n := &NginxPlusAPI{ client: ts.Client(), } @@ -1228,7 +1228,7 @@ func TestServerError(t *testing.T) { })) defer ts.Close() - n := &NginxPlusApi{ + n := &NginxPlusAPI{ client: ts.Client(), } @@ -1249,7 +1249,7 @@ func TestMalformedJSON(t *testing.T) { })) defer ts.Close() - n := &NginxPlusApi{ + n := &NginxPlusAPI{ client: ts.Client(), } @@ -1269,7 +1269,7 @@ func TestUnknownContentType(t *testing.T) { })) defer ts.Close() - n := &NginxPlusApi{ + n := &NginxPlusAPI{ client: ts.Client(), } @@ -1306,7 +1306,7 @@ func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) { return addr, host, port } -func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusApi) { +func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusAPI) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string @@ -1320,12 +1320,12 @@ func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string fmt.Fprintln(w, rsp) })) - n := &NginxPlusApi{ + n := &NginxPlusAPI{ Urls: []string{fmt.Sprintf("%s/api", ts.URL)}, - ApiVersion: apiVersion, + APIVersion: apiVersion, } - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { t.Fatal(err) } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go index 868bc04e445eb..51ada5fd9f46f 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go @@ -35,7 +35,7 @@ type ResolverZones map[string]struct { } `json:"responses"` } -type HttpRequests struct { +type HTTPRequests struct { Total int64 `json:"total"` Current int64 `json:"current"` } @@ -49,7 +49,7 @@ type ResponseStats struct { Total int64 `json:"total"` } -type HttpServerZones map[string]struct { +type HTTPServerZones map[string]struct { Processing int `json:"processing"` Requests int64 `json:"requests"` Responses ResponseStats `json:"responses"` @@ -58,7 +58,7 @@ type HttpServerZones map[string]struct { Sent int64 `json:"sent"` } -type HttpLocationZones map[string]struct { +type HTTPLocationZones map[string]struct { Requests int64 `json:"requests"` Responses ResponseStats `json:"responses"` Discarded *int64 `json:"discarded"` // added in version 6 @@ -73,7 +73,7 @@ type HealthCheckStats struct { LastPassed *bool `json:"last_passed"` } -type HttpUpstreams map[string]struct { +type HTTPUpstreams map[string]struct { Peers []struct { ID *int `json:"id"` // added in version 3 Server string `json:"server"` @@ -145,7 +145,7 @@ type ExtendedHitStats struct { BytesWritten int64 `json:"bytes_written"` } -type HttpCaches map[string]struct { // added in version 2 +type HTTPCaches map[string]struct { // added in version 2 Size int64 `json:"size"` MaxSize int64 `json:"max_size"` Cold bool `json:"cold"` diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index 0fe2907c9a08a..34aff538cf983 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -104,8 +104,8 @@ type NginxUpstreamCheckServer struct { Port uint16 `json:"port"` } -// createHttpClient create a clients to access API -func (check *NginxUpstreamCheck) createHttpClient() (*http.Client, error) { +// createHTTPClient create a clients to access API +func (check *NginxUpstreamCheck) createHTTPClient() (*http.Client, error) { tlsConfig, err := check.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -121,8 +121,8 @@ func (check *NginxUpstreamCheck) createHttpClient() (*http.Client, error) { return client, nil } -// gatherJsonData query the data source and parse the response JSON -func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) error { +// gatherJSONData query the data source and parse the response JSON +func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) error { var method string if check.Method != "" { @@ -168,7 +168,7 @@ func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) e func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error { if check.client == nil { - client, err := check.createHttpClient() + client, err := check.createHTTPClient() if err != nil { return err @@ -193,7 +193,7 @@ func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegraf.Accumulator) error { checkData := &NginxUpstreamCheckData{} - err := check.gatherJsonData(url, checkData) + err := check.gatherJSONData(url, checkData) if err != nil { return err } diff --git a/plugins/inputs/nsd/nsd.go b/plugins/inputs/nsd/nsd.go index 3c5d2695dcb33..37a7c482020b2 100644 --- a/plugins/inputs/nsd/nsd.go +++ b/plugins/inputs/nsd/nsd.go @@ -128,14 +128,14 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { if strings.HasPrefix(stat, "server") { statTokens := strings.Split(stat, ".") if len(statTokens) > 1 { - serverId := strings.TrimPrefix(statTokens[0], "server") - if _, err := strconv.Atoi(serverId); err == nil { + serverID := strings.TrimPrefix(statTokens[0], "server") + if _, err := strconv.Atoi(serverID); err == nil { serverTokens := statTokens[1:] field := strings.Join(serverTokens[:], "_") - if fieldsServers[serverId] == nil { - fieldsServers[serverId] = make(map[string]interface{}) + if fieldsServers[serverID] == nil { + fieldsServers[serverID] = make(map[string]interface{}) } - fieldsServers[serverId][field] = fieldValue + fieldsServers[serverID][field] = fieldValue } } } else { @@ -145,8 +145,8 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { } acc.AddFields("nsd", fields, nil) - for thisServerId, thisServerFields := range fieldsServers { - thisServerTag := map[string]string{"server": thisServerId} + for thisServerID, thisServerFields := range fieldsServers { + thisServerTag := map[string]string{"server": thisServerID} acc.AddFields("nsd_servers", thisServerFields, thisServerTag) } diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index fe941982646b1..166444f857050 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -82,7 +82,7 @@ func (n *NSQ) Gather(acc telegraf.Accumulator) error { var err error if n.httpClient == nil { - n.httpClient, err = n.getHttpClient() + n.httpClient, err = n.getHTTPClient() if err != nil { return err } @@ -101,7 +101,7 @@ func (n *NSQ) Gather(acc telegraf.Accumulator) error { return nil } -func (n *NSQ) getHttpClient() (*http.Client, error) { +func (n *NSQ) getHTTPClient() (*http.Client, error) { tlsConfig, err := n.ClientConfig.TLSConfig() if err != nil { return nil, err diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index e6dcb420f30ce..4b25a44c0ab7d 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -18,18 +18,18 @@ var ( // default file paths const ( - NET_NETSTAT = "/net/netstat" - NET_SNMP = "/net/snmp" - NET_SNMP6 = "/net/snmp6" - NET_PROC = "/proc" + NetNetstat = "/net/netstat" + NetSnmp = "/net/snmp" + NetSnmp6 = "/net/snmp6" + NetProc = "/proc" ) // env variable names const ( - ENV_NETSTAT = "PROC_NET_NETSTAT" - ENV_SNMP = "PROC_NET_SNMP" - ENV_SNMP6 = "PROC_NET_SNMP6" - ENV_ROOT = "PROC_ROOT" + EnvNetstat = "PROC_NET_NETSTAT" + EnvSnmp = "PROC_NET_SNMP" + EnvSnmp6 = "PROC_NET_SNMP6" + EnvRoot = "PROC_ROOT" ) type Nstat struct { @@ -136,13 +136,13 @@ func (ns *Nstat) gatherSNMP6(data []byte, acc telegraf.Accumulator) error { // if it is empty then try read from env variables func (ns *Nstat) loadPaths() { if ns.ProcNetNetstat == "" { - ns.ProcNetNetstat = proc(ENV_NETSTAT, NET_NETSTAT) + ns.ProcNetNetstat = proc(EnvNetstat, NetNetstat) } if ns.ProcNetSNMP == "" { - ns.ProcNetSNMP = proc(ENV_SNMP, NET_SNMP) + ns.ProcNetSNMP = proc(EnvSnmp, NetSnmp) } if ns.ProcNetSNMP6 == "" { - ns.ProcNetSNMP6 = proc(ENV_SNMP6, NET_SNMP6) + ns.ProcNetSNMP6 = proc(EnvSnmp6, NetSnmp6) } } @@ -220,9 +220,9 @@ func proc(env, path string) string { return p } // try to read root path, or use default root path - root := os.Getenv(ENV_ROOT) + root := os.Getenv(EnvRoot) if root == "" { - root = NET_PROC + root = NetProc } return root + path } diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index bfff00562ac19..11660a84080db 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -77,8 +77,8 @@ func opensmtpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (* // All the dots in stat name will replaced by underscores. Histogram statistics will not be collected. func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { // Always exclude uptime.human statistics - stat_excluded := []string{"uptime.human"} - filter_excluded, err := filter.Compile(stat_excluded) + statExcluded := []string{"uptime.human"} + filterExcluded, err := filter.Compile(statExcluded) if err != nil { return err } @@ -104,7 +104,7 @@ func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { value := cols[1] // Filter value - if filter_excluded.Match(stat) { + if filterExcluded.Match(stat) { continue } diff --git a/plugins/inputs/opensmtpd/opensmtpd_test.go b/plugins/inputs/opensmtpd/opensmtpd_test.go index 42e978b6c34e7..2a3f4cdcfb970 100644 --- a/plugins/inputs/opensmtpd/opensmtpd_test.go +++ b/plugins/inputs/opensmtpd/opensmtpd_test.go @@ -12,7 +12,7 @@ import ( var TestTimeout = internal.Duration{Duration: time.Second} -func SmtpCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) { +func SMTPCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) { return func(string, internal.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } @@ -21,7 +21,7 @@ func SmtpCTL(output string, Timeout internal.Duration, useSudo bool) func(string func TestFilterSomeStats(t *testing.T) { acc := &testutil.Accumulator{} v := &Opensmtpd{ - run: SmtpCTL(fullOutput, TestTimeout, false), + run: SMTPCTL(fullOutput, TestTimeout, false), } err := v.Gather(acc) diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index 94055a6f8bb6a..bda43438dc5a2 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -21,25 +21,25 @@ const ( // https://openweathermap.org/current#severalid // Call for several city IDs // The limit of locations is 20. - owmRequestSeveralCityId int = 20 + owmRequestSeveralCityID int = 20 - defaultBaseUrl = "https://api.openweathermap.org/" + defaultBaseURL = "https://api.openweathermap.org/" defaultResponseTimeout time.Duration = time.Second * 5 defaultUnits string = "metric" defaultLang string = "en" ) type OpenWeatherMap struct { - AppId string `toml:"app_id"` - CityId []string `toml:"city_id"` + AppID string `toml:"app_id"` + CityID []string `toml:"city_id"` Lang string `toml:"lang"` Fetch []string `toml:"fetch"` - BaseUrl string `toml:"base_url"` + BaseURL string `toml:"base_url"` ResponseTimeout internal.Duration `toml:"response_timeout"` Units string `toml:"units"` client *http.Client - baseUrl *url.URL + baseURL *url.URL } var sampleConfig = ` @@ -87,12 +87,12 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { for _, fetch := range n.Fetch { if fetch == "forecast" { - for _, city := range n.CityId { + for _, city := range n.CityID { addr := n.formatURL("/data/2.5/forecast", city) wg.Add(1) go func() { defer wg.Done() - status, err := n.gatherUrl(addr) + status, err := n.gatherURL(addr) if err != nil { acc.AddError(err) return @@ -103,10 +103,10 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { } } else if fetch == "weather" { j := 0 - for j < len(n.CityId) { + for j < len(n.CityID) { strs = make([]string, 0) - for i := 0; j < len(n.CityId) && i < owmRequestSeveralCityId; i++ { - strs = append(strs, n.CityId[j]) + for i := 0; j < len(n.CityID) && i < owmRequestSeveralCityID; i++ { + strs = append(strs, n.CityID[j]) j++ } cities := strings.Join(strs, ",") @@ -115,7 +115,7 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func() { defer wg.Done() - status, err := n.gatherUrl(addr) + status, err := n.gatherURL(addr) if err != nil { acc.AddError(err) return @@ -132,7 +132,7 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { return nil } -func (n *OpenWeatherMap) createHttpClient() (*http.Client, error) { +func (n *OpenWeatherMap) createHTTPClient() (*http.Client, error) { if n.ResponseTimeout.Duration < time.Second { n.ResponseTimeout.Duration = defaultResponseTimeout } @@ -145,7 +145,7 @@ func (n *OpenWeatherMap) createHttpClient() (*http.Client, error) { return client, nil } -func (n *OpenWeatherMap) gatherUrl(addr string) (*Status, error) { +func (n *OpenWeatherMap) gatherURL(addr string) (*Status, error) { resp, err := n.client.Get(addr) if err != nil { return nil, fmt.Errorf("error making HTTP request to %s: %s", addr, err) @@ -165,7 +165,7 @@ func (n *OpenWeatherMap) gatherUrl(addr string) (*Status, error) { return nil, fmt.Errorf("%s returned unexpected content type %s", addr, mediaType) } - return gatherWeatherUrl(resp.Body) + return gatherWeatherURL(resp.Body) } type WeatherEntry struct { @@ -191,7 +191,7 @@ type WeatherEntry struct { Deg float64 `json:"deg"` Speed float64 `json:"speed"` } `json:"wind"` - Id int64 `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Coord struct { Lat float64 `json:"lat"` @@ -213,13 +213,13 @@ type Status struct { Lon float64 `json:"lon"` } `json:"coord"` Country string `json:"country"` - Id int64 `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` } `json:"city"` List []WeatherEntry `json:"list"` } -func gatherWeatherUrl(r io.Reader) (*Status, error) { +func gatherWeatherURL(r io.Reader) (*Status, error) { dec := json.NewDecoder(r) status := &Status{} if err := dec.Decode(status); err != nil { @@ -253,7 +253,7 @@ func gatherWeather(acc telegraf.Accumulator, status *Status) { } tags := map[string]string{ "city": e.Name, - "city_id": strconv.FormatInt(e.Id, 10), + "city_id": strconv.FormatInt(e.ID, 10), "country": e.Sys.Country, "forecast": "*", } @@ -271,7 +271,7 @@ func gatherWeather(acc telegraf.Accumulator, status *Status) { func gatherForecast(acc telegraf.Accumulator, status *Status) { tags := map[string]string{ - "city_id": strconv.FormatInt(status.City.Id, 10), + "city_id": strconv.FormatInt(status.City.ID, 10), "forecast": "*", "city": status.City.Name, "country": status.City.Country, @@ -305,21 +305,21 @@ func init() { } return &OpenWeatherMap{ ResponseTimeout: tmout, - BaseUrl: defaultBaseUrl, + BaseURL: defaultBaseURL, } }) } func (n *OpenWeatherMap) Init() error { var err error - n.baseUrl, err = url.Parse(n.BaseUrl) + n.baseURL, err = url.Parse(n.BaseURL) if err != nil { return err } // Create an HTTP client that is re-used for each // collection interval - n.client, err = n.createHttpClient() + n.client, err = n.createHTTPClient() if err != nil { return err } @@ -349,7 +349,7 @@ func (n *OpenWeatherMap) Init() error { func (n *OpenWeatherMap) formatURL(path string, city string) string { v := url.Values{ "id": []string{city}, - "APPID": []string{n.AppId}, + "APPID": []string{n.AppID}, "lang": []string{n.Lang}, "units": []string{n.Units}, } @@ -359,5 +359,5 @@ func (n *OpenWeatherMap) formatURL(path string, city string) string { RawQuery: v.Encode(), } - return n.baseUrl.ResolveReference(relative).String() + return n.baseURL.ResolveReference(relative).String() } diff --git a/plugins/inputs/openweathermap/openweathermap_test.go b/plugins/inputs/openweathermap/openweathermap_test.go index 9bee1d2e96199..d513f6273d07f 100644 --- a/plugins/inputs/openweathermap/openweathermap_test.go +++ b/plugins/inputs/openweathermap/openweathermap_test.go @@ -416,9 +416,9 @@ func TestForecastGeneratesMetrics(t *testing.T) { defer ts.Close() n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"2988507"}, + BaseURL: ts.URL, + AppID: "noappid", + CityID: []string{"2988507"}, Fetch: []string{"weather", "forecast"}, Units: "metric", } @@ -500,9 +500,9 @@ func TestWeatherGeneratesMetrics(t *testing.T) { defer ts.Close() n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"2988507"}, + BaseURL: ts.URL, + AppID: "noappid", + CityID: []string{"2988507"}, Fetch: []string{"weather"}, Units: "metric", } @@ -560,9 +560,9 @@ func TestRainMetrics(t *testing.T) { defer ts.Close() n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"111", "222", "333", "444"}, + BaseURL: ts.URL, + AppID: "noappid", + CityID: []string{"111", "222", "333", "444"}, Fetch: []string{"weather"}, Units: "metric", } @@ -703,9 +703,9 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { defer ts.Close() n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"524901", "703448", "2643743"}, + BaseURL: ts.URL, + AppID: "noappid", + CityID: []string{"524901", "703448", "2643743"}, Fetch: []string{"weather"}, Units: "metric", } @@ -803,10 +803,10 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { func TestFormatURL(t *testing.T) { n := &OpenWeatherMap{ - AppId: "appid", + AppID: "appid", Units: "units", Lang: "lang", - BaseUrl: "http://foo.com", + BaseURL: "http://foo.com", } n.Init() diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go index 2602161a8e9a9..fbd016af60a0e 100644 --- a/plugins/inputs/passenger/passenger.go +++ b/plugins/inputs/passenger/passenger.go @@ -75,14 +75,14 @@ type process struct { LifeStatus string `xml:"life_status"` Enabled string `xml:"enabled"` HasMetrics bool `xml:"has_metrics"` - Cpu int64 `xml:"cpu"` + CPU int64 `xml:"cpu"` Rss int64 `xml:"rss"` Pss int64 `xml:"pss"` PrivateDirty int64 `xml:"private_dirty"` Swap int64 `xml:"swap"` RealMemory int64 `xml:"real_memory"` Vmsize int64 `xml:"vmsize"` - ProcessGroupId string `xml:"process_group_id"` + ProcessGroupID string `xml:"process_group_id"` } func (p *process) getUptime() int64 { @@ -211,7 +211,7 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { "pid": fmt.Sprintf("%d", process.Pid), "code_revision": process.CodeRevision, "life_status": process.LifeStatus, - "process_group_id": process.ProcessGroupId, + "process_group_id": process.ProcessGroupID, } fields := map[string]interface{}{ "concurrency": process.Concurrency, @@ -223,7 +223,7 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { "spawn_end_time": process.SpawnEndTime, "last_used": process.LastUsed, "uptime": process.getUptime(), - "cpu": process.Cpu, + "cpu": process.CPU, "rss": process.Rss, "pss": process.Pss, "private_dirty": process.PrivateDirty, diff --git a/plugins/inputs/phpfpm/child.go b/plugins/inputs/phpfpm/child.go index 2ebdf2ffbca35..a90cf093bd8e6 100644 --- a/plugins/inputs/phpfpm/child.go +++ b/plugins/inputs/phpfpm/child.go @@ -24,16 +24,16 @@ import ( // it's converted to an http.Request. type request struct { pw *io.PipeWriter - reqId uint16 + reqID uint16 params map[string]string buf [1024]byte rawParams []byte keepConn bool } -func newRequest(reqId uint16, flags uint8) *request { +func newRequest(reqID uint16, flags uint8) *request { r := &request{ - reqId: reqId, + reqID: reqID, params: map[string]string{}, keepConn: flags&flagKeepConn != 0, } @@ -79,7 +79,7 @@ func newResponse(c *child, req *request) *response { return &response{ req: req, header: http.Header{}, - w: newWriter(c.conn, typeStdout, req.reqId), + w: newWriter(c.conn, typeStdout, req.reqID), } } @@ -173,7 +173,7 @@ var ErrConnClosed = errors.New("fcgi: connection to web server closed") func (c *child) handleRecord(rec *record) error { c.mu.Lock() - req, ok := c.requests[rec.h.Id] + req, ok := c.requests[rec.h.ID] c.mu.Unlock() if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues { // The spec says to ignore unknown request IDs. @@ -193,12 +193,12 @@ func (c *child) handleRecord(rec *record) error { return err } if br.role != roleResponder { - c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole) + c.conn.writeEndRequest(rec.h.ID, 0, statusUnknownRole) return nil } - req = newRequest(rec.h.Id, br.flags) + req = newRequest(rec.h.ID, br.flags) c.mu.Lock() - c.requests[rec.h.Id] = req + c.requests[rec.h.ID] = req c.mu.Unlock() return nil case typeParams: @@ -240,9 +240,9 @@ func (c *child) handleRecord(rec *record) error { return nil case typeAbortRequest: c.mu.Lock() - delete(c.requests, rec.h.Id) + delete(c.requests, rec.h.ID) c.mu.Unlock() - c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete) + c.conn.writeEndRequest(rec.h.ID, 0, statusRequestComplete) if req.pw != nil { req.pw.CloseWithError(ErrRequestAborted) } @@ -265,16 +265,16 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) { if err != nil { // there was an error reading the request r.WriteHeader(http.StatusInternalServerError) - c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error())) + c.conn.writeRecord(typeStderr, req.reqID, []byte(err.Error())) } else { httpReq.Body = body c.handler.ServeHTTP(r, httpReq) } r.Close() c.mu.Lock() - delete(c.requests, req.reqId) + delete(c.requests, req.reqID) c.mu.Unlock() - c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete) + c.conn.writeEndRequest(req.reqID, 0, statusRequestComplete) // Consume the entire body, so the host isn't still writing to // us when we close the socket below in the !keepConn case, diff --git a/plugins/inputs/phpfpm/fcgi.go b/plugins/inputs/phpfpm/fcgi.go index 83bbf09cc73a3..551164d15c490 100644 --- a/plugins/inputs/phpfpm/fcgi.go +++ b/plugins/inputs/phpfpm/fcgi.go @@ -63,7 +63,7 @@ const headerLen = 8 type header struct { Version uint8 Type recType - Id uint16 + ID uint16 ContentLength uint16 PaddingLength uint8 Reserved uint8 @@ -88,10 +88,10 @@ func (br *beginRequest) read(content []byte) error { // not synchronized because we don't care what the contents are var pad [maxPad]byte -func (h *header) init(recType recType, reqId uint16, contentLength int) { +func (h *header) init(recType recType, reqID uint16, contentLength int) { h.Version = 1 h.Type = recType - h.Id = reqId + h.ID = reqID h.ContentLength = uint16(contentLength) h.PaddingLength = uint8(-contentLength & 7) } @@ -140,11 +140,11 @@ func (rec *record) content() []byte { } // writeRecord writes and sends a single record. -func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error { +func (c *conn) writeRecord(recType recType, reqID uint16, b []byte) error { c.mutex.Lock() defer c.mutex.Unlock() c.buf.Reset() - c.h.init(recType, reqId, len(b)) + c.h.init(recType, reqID, len(b)) if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil { return err } @@ -158,20 +158,20 @@ func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error { return err } -func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error { +func (c *conn) writeBeginRequest(reqID uint16, role uint16, flags uint8) error { b := [8]byte{byte(role >> 8), byte(role), flags} - return c.writeRecord(typeBeginRequest, reqId, b[:]) + return c.writeRecord(typeBeginRequest, reqID, b[:]) } -func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error { +func (c *conn) writeEndRequest(reqID uint16, appStatus int, protocolStatus uint8) error { b := make([]byte, 8) binary.BigEndian.PutUint32(b, uint32(appStatus)) b[4] = protocolStatus - return c.writeRecord(typeEndRequest, reqId, b) + return c.writeRecord(typeEndRequest, reqID, b) } -func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error { - w := newWriter(c, recType, reqId) +func (c *conn) writePairs(recType recType, reqID uint16, pairs map[string]string) error { + w := newWriter(c, recType, reqID) b := make([]byte, 8) for k, v := range pairs { n := encodeSize(b, uint32(len(k))) @@ -238,8 +238,8 @@ func (w *bufWriter) Close() error { return w.closer.Close() } -func newWriter(c *conn, recType recType, reqId uint16) *bufWriter { - s := &streamWriter{c: c, recType: recType, reqId: reqId} +func newWriter(c *conn, recType recType, reqID uint16) *bufWriter { + s := &streamWriter{c: c, recType: recType, reqID: reqID} w := bufio.NewWriterSize(s, maxWrite) return &bufWriter{s, w} } @@ -249,7 +249,7 @@ func newWriter(c *conn, recType recType, reqId uint16) *bufWriter { type streamWriter struct { c *conn recType recType - reqId uint16 + reqID uint16 } func (w *streamWriter) Write(p []byte) (int, error) { @@ -259,7 +259,7 @@ func (w *streamWriter) Write(p []byte) (int, error) { if n > maxWrite { n = maxWrite } - if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil { + if err := w.c.writeRecord(w.recType, w.reqID, p[:n]); err != nil { return nn, err } nn += n @@ -270,5 +270,5 @@ func (w *streamWriter) Write(p []byte) (int, error) { func (w *streamWriter) Close() error { // send empty record to close the stream - return w.c.writeRecord(w.recType, w.reqId, nil) + return w.c.writeRecord(w.recType, w.reqID, nil) } diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index 7f6c93e50ecca..56fb38188fb75 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -38,20 +38,20 @@ func (c *conn) Request( requestData string, ) (retout []byte, reterr []byte, err error) { defer c.rwc.Close() - var reqId uint16 = 1 + var reqID uint16 = 1 - err = c.writeBeginRequest(reqId, uint16(roleResponder), 0) + err = c.writeBeginRequest(reqID, uint16(roleResponder), 0) if err != nil { return } - err = c.writePairs(typeParams, reqId, env) + err = c.writePairs(typeParams, reqID, env) if err != nil { return } if len(requestData) > 0 { - if err = c.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil { + if err = c.writeRecord(typeStdin, reqID, []byte(requestData)); err != nil { return } } diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go index 15e0030a77151..7060955e0a10f 100644 --- a/plugins/inputs/phpfpm/fcgi_test.go +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -44,7 +44,7 @@ func TestSize(t *testing.T) { var streamTests = []struct { desc string recType recType - reqId uint16 + reqID uint16 content []byte raw []byte }{ @@ -90,8 +90,8 @@ outer: t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType) continue } - if rec.h.Id != test.reqId { - t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId) + if rec.h.ID != test.reqID { + t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.ID, test.reqID) continue } if !bytes.Equal(content, test.content) { @@ -100,7 +100,7 @@ outer: } buf.Reset() c := newConn(&nilCloser{buf}) - w := newWriter(c, test.recType, test.reqId) + w := newWriter(c, test.recType, test.reqID) if _, err := w.Write(test.content); err != nil { t.Errorf("%s: error writing record: %v", test.desc, err) continue @@ -164,17 +164,17 @@ func nameValuePair11(nameData, valueData string) []byte { func makeRecord( recordType recType, - requestId uint16, + requestID uint16, contentData []byte, ) []byte { - requestIdB1 := byte(requestId >> 8) - requestIdB0 := byte(requestId) + requestIDB1 := byte(requestID >> 8) + requestIDB0 := byte(requestID) contentLength := len(contentData) contentLengthB1 := byte(contentLength >> 8) contentLengthB0 := byte(contentLength) return bytes.Join([][]byte{ - {1, byte(recordType), requestIdB1, requestIdB0, contentLengthB1, + {1, byte(recordType), requestIDB1, requestIDB0, contentLengthB1, contentLengthB0, 0, 0}, contentData, }, diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 87eb4f649a57b..c71d3290666ad 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -19,19 +19,19 @@ import ( ) const ( - PF_POOL = "pool" - PF_PROCESS_MANAGER = "process manager" - PF_START_SINCE = "start since" - PF_ACCEPTED_CONN = "accepted conn" - PF_LISTEN_QUEUE = "listen queue" - PF_MAX_LISTEN_QUEUE = "max listen queue" - PF_LISTEN_QUEUE_LEN = "listen queue len" - PF_IDLE_PROCESSES = "idle processes" - PF_ACTIVE_PROCESSES = "active processes" - PF_TOTAL_PROCESSES = "total processes" - PF_MAX_ACTIVE_PROCESSES = "max active processes" - PF_MAX_CHILDREN_REACHED = "max children reached" - PF_SLOW_REQUESTS = "slow requests" + PfPool = "pool" + PfProcessManager = "process manager" + PfStartSince = "start since" + PfAcceptedConn = "accepted conn" + PfListenQueue = "listen queue" + PfMaxListenQueue = "max listen queue" + PfListenQueueLen = "listen queue len" + PfIdleProcesses = "idle processes" + PfActiveProcesses = "active processes" + PfTotalProcesses = "total processes" + PfMaxActiveProcesses = "max active processes" + PfMaxChildrenReached = "max children reached" + PfSlowRequests = "slow requests" ) type metric map[string]int64 @@ -131,7 +131,7 @@ func (p *phpfpm) Gather(acc telegraf.Accumulator) error { // Request status page to get stat raw data and import it func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") { - return p.gatherHttp(addr, acc) + return p.gatherHTTP(addr, acc) } var ( @@ -147,9 +147,9 @@ func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { return fmt.Errorf("unable parse server address '%s': %s", addr, err) } socketAddr := strings.Split(u.Host, ":") - fcgiIp := socketAddr[0] + fcgiIP := socketAddr[0] fcgiPort, _ := strconv.Atoi(socketAddr[1]) - fcgi, err = newFcgiClient(fcgiIp, fcgiPort) + fcgi, err = newFcgiClient(fcgiIP, fcgiPort) if err != nil { return err } @@ -193,7 +193,7 @@ func (p *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumula } // Gather stat using http protocol -func (p *phpfpm) gatherHttp(addr string, acc telegraf.Accumulator) error { +func (p *phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error { u, err := url.Parse(addr) if err != nil { return fmt.Errorf("unable parse server address '%s': %v", addr, err) @@ -233,7 +233,7 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat { } fieldName := strings.Trim(keyvalue[0], " ") // We start to gather data for a new pool here - if fieldName == PF_POOL { + if fieldName == PfPool { currentPool = strings.Trim(keyvalue[1], " ") stats[currentPool] = make(metric) continue @@ -241,17 +241,17 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat { // Start to parse metric for current pool switch fieldName { - case PF_START_SINCE, - PF_ACCEPTED_CONN, - PF_LISTEN_QUEUE, - PF_MAX_LISTEN_QUEUE, - PF_LISTEN_QUEUE_LEN, - PF_IDLE_PROCESSES, - PF_ACTIVE_PROCESSES, - PF_TOTAL_PROCESSES, - PF_MAX_ACTIVE_PROCESSES, - PF_MAX_CHILDREN_REACHED, - PF_SLOW_REQUESTS: + case PfStartSince, + PfAcceptedConn, + PfListenQueue, + PfMaxListenQueue, + PfListenQueueLen, + PfIdleProcesses, + PfActiveProcesses, + PfTotalProcesses, + PfMaxActiveProcesses, + PfMaxChildrenReached, + PfSlowRequests: fieldValue, err := strconv.ParseInt(strings.Trim(keyvalue[1], " "), 10, 64) if err == nil { stats[currentPool][fieldName] = fieldValue diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 0911b20ce7184..231e864c1e2d0 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -105,26 +105,26 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { query = `SELECT * FROM pg_stat_bgwriter` - bg_writer_row, err := p.DB.Query(query) + bgWriterRow, err := p.DB.Query(query) if err != nil { return err } - defer bg_writer_row.Close() + defer bgWriterRow.Close() // grab the column information from the result - if columns, err = bg_writer_row.Columns(); err != nil { + if columns, err = bgWriterRow.Columns(); err != nil { return err } - for bg_writer_row.Next() { - err = p.accRow(bg_writer_row, acc, columns) + for bgWriterRow.Next() { + err = p.accRow(bgWriterRow, acc, columns) if err != nil { return err } } - return bg_writer_row.Err() + return bgWriterRow.Err() } type scanner interface { diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 000e12a8ad2c8..e1ad27086b312 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -156,50 +156,49 @@ func ReadQueryFromFile(filePath string) (string, error) { func (p *Postgresql) Gather(acc telegraf.Accumulator) error { var ( - err error - sql_query string - query_addon string - db_version int - query string - tag_value string - meas_name string - timestamp string - columns []string + err error + sqlQuery string + queryAddon string + dbVersion int + query string + tagValue string + measName string + timestamp string + columns []string ) // Retrieving the database version query = `SELECT setting::integer / 100 AS version FROM pg_settings WHERE name = 'server_version_num'` - if err = p.DB.QueryRow(query).Scan(&db_version); err != nil { - db_version = 0 + if err = p.DB.QueryRow(query).Scan(&dbVersion); err != nil { + dbVersion = 0 } // We loop in order to process each query // Query is not run if Database version does not match the query version. for i := range p.Query { - sql_query = p.Query[i].Sqlquery - tag_value = p.Query[i].Tagvalue + sqlQuery = p.Query[i].Sqlquery + tagValue = p.Query[i].Tagvalue timestamp = p.Query[i].Timestamp if p.Query[i].Measurement != "" { - meas_name = p.Query[i].Measurement + measName = p.Query[i].Measurement } else { - meas_name = "postgresql" + measName = "postgresql" } if p.Query[i].Withdbname { if len(p.Databases) != 0 { - query_addon = fmt.Sprintf(` IN ('%s')`, - strings.Join(p.Databases, "','")) + queryAddon = fmt.Sprintf(` IN ('%s')`, strings.Join(p.Databases, "','")) } else { - query_addon = " is not null" + queryAddon = " is not null" } } else { - query_addon = "" + queryAddon = "" } - sql_query += query_addon + sqlQuery += queryAddon - if p.Query[i].Version <= db_version { - rows, err := p.DB.Query(sql_query) + if p.Query[i].Version <= dbVersion { + rows, err := p.DB.Query(sqlQuery) if err != nil { p.Log.Error(err.Error()) continue @@ -214,17 +213,17 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { } p.AdditionalTags = nil - if tag_value != "" { - tag_list := strings.Split(tag_value, ",") - for t := range tag_list { - p.AdditionalTags = append(p.AdditionalTags, tag_list[t]) + if tagValue != "" { + tagList := strings.Split(tagValue, ",") + for t := range tagList { + p.AdditionalTags = append(p.AdditionalTags, tagList[t]) } } p.Timestamp = timestamp for rows.Next() { - err = p.accRow(meas_name, rows, acc, columns) + err = p.accRow(measName, rows, acc, columns) if err != nil { p.Log.Error(err.Error()) break @@ -239,7 +238,7 @@ type scanner interface { Scan(dest ...interface{}) error } -func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumulator, columns []string) error { +func (p *Postgresql) accRow(measName string, row scanner, acc telegraf.Accumulator, columns []string) error { var ( err error columnVars []interface{} @@ -329,7 +328,7 @@ COLUMN: fields[col] = *val } } - acc.AddFields(meas_name, fields, tags, timestamp) + acc.AddFields(measName, fields, tags, timestamp) return nil } diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index 5f286dd64a63e..799f0183854d1 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -20,7 +20,7 @@ func NewNativeFinder() (PIDFinder, error) { } //Uid will return all pids for the given user -func (pg *NativeFinder) Uid(user string) ([]PID, error) { +func (pg *NativeFinder) UID(user string) ([]PID, error) { var dst []PID procs, err := process.Processes() if err != nil { diff --git a/plugins/inputs/procstat/native_finder_windows_test.go b/plugins/inputs/procstat/native_finder_windows_test.go index 6f3067545364e..0148fdedca933 100644 --- a/plugins/inputs/procstat/native_finder_windows_test.go +++ b/plugins/inputs/procstat/native_finder_windows_test.go @@ -42,7 +42,7 @@ func TestGather_RealUserIntegration(t *testing.T) { require.NoError(t, err) pg, err := NewNativeFinder() require.NoError(t, err) - pids, err := pg.Uid(user.Username) + pids, err := pg.UID(user.Username) require.NoError(t, err) fmt.Println(pids) assert.Equal(t, len(pids) > 0, true) diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 37f9dfc3f67a9..85e8d80f83cfe 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -43,7 +43,7 @@ func (pg *Pgrep) Pattern(pattern string) ([]PID, error) { return find(pg.path, args) } -func (pg *Pgrep) Uid(user string) ([]PID, error) { +func (pg *Pgrep) UID(user string) ([]PID, error) { args := []string{"-u", user} return find(pg.path, args) } diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index c10624fedcbbe..ab2fde601f5c8 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -31,7 +31,7 @@ type Process interface { type PIDFinder interface { PidFile(path string) ([]PID, error) Pattern(pattern string) ([]PID, error) - Uid(user string) ([]PID, error) + UID(user string) ([]PID, error) FullPattern(path string) ([]PID, error) } @@ -68,10 +68,10 @@ func (p *Proc) Username() (string, error) { } func (p *Proc) Percent(interval time.Duration) (float64, error) { - cpu_perc, err := p.Process.Percent(time.Duration(0)) + cpuPerc, err := p.Process.Percent(time.Duration(0)) if !p.hasCPUTimes && err == nil { p.hasCPUTimes = true return 0, fmt.Errorf("must call Percent twice to compute percent cpu") } - return cpu_perc, err + return cpuPerc, err } diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 6a562f9ee231d..772fe77ae4f13 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -388,7 +388,7 @@ func (p *Procstat) findPids() ([]PID, map[string]string, error) { pids, err = f.FullPattern(p.Pattern) tags = map[string]string{"pattern": p.Pattern} } else if p.User != "" { - pids, err = f.Uid(p.User) + pids, err = f.UID(p.User) tags = map[string]string{"user": p.User} } else if p.SystemdUnit != "" { pids, err = p.systemdUnitPIDs() diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 401df08916d91..c86ac53b385a0 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -84,7 +84,7 @@ func (pg *testPgrep) Pattern(_ string) ([]PID, error) { return pg.pids, pg.err } -func (pg *testPgrep) Uid(_ string) ([]PID, error) { +func (pg *testPgrep) UID(_ string) ([]PID, error) { return pg.pids, pg.err } diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index 810c45c58c454..423070a357c23 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -81,28 +81,28 @@ func init() { } func getNodeSearchDomain(px *Proxmox) error { - apiUrl := "/nodes/" + px.NodeName + "/dns" - jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil) + apiURL := "/nodes/" + px.NodeName + "/dns" + jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { return err } - var nodeDns NodeDns - err = json.Unmarshal(jsonData, &nodeDns) + var nodeDNS NodeDNS + err = json.Unmarshal(jsonData, &nodeDNS) if err != nil { return err } - if nodeDns.Data.Searchdomain == "" { + if nodeDNS.Data.Searchdomain == "" { return errors.New("search domain is not set") } - px.nodeSearchDomain = nodeDns.Data.Searchdomain + px.nodeSearchDomain = nodeDNS.Data.Searchdomain return nil } -func performRequest(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) { - request, err := http.NewRequest(method, px.BaseURL+apiUrl, strings.NewReader(data.Encode())) +func performRequest(px *Proxmox, apiURL string, method string, data url.Values) ([]byte, error) { + request, err := http.NewRequest(method, px.BaseURL+apiURL, strings.NewReader(data.Encode())) if err != nil { return nil, err } @@ -123,15 +123,15 @@ func performRequest(px *Proxmox, apiUrl string, method string, data url.Values) } func gatherLxcData(px *Proxmox, acc telegraf.Accumulator) { - gatherVmData(px, acc, LXC) + gatherVMData(px, acc, LXC) } func gatherQemuData(px *Proxmox, acc telegraf.Accumulator) { - gatherVmData(px, acc, QEMU) + gatherVMData(px, acc, QEMU) } -func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { - vmStats, err := getVmStats(px, rt) +func gatherVMData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { + vmStats, err := getVMStats(px, rt) if err != nil { px.Log.Error("Error getting VM stats: %v", err) return @@ -139,7 +139,7 @@ func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { // For each VM add metrics to Accumulator for _, vmStat := range vmStats.Data { - vmConfig, err := getVmConfig(px, vmStat.ID, rt) + vmConfig, err := getVMConfig(px, vmStat.ID, rt) if err != nil { px.Log.Errorf("Error getting VM config: %v", err) return @@ -167,76 +167,76 @@ func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { } } -func getCurrentVMStatus(px *Proxmox, rt ResourceType, id string) (VmStat, error) { - apiUrl := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + id + "/status/current" +func getCurrentVMStatus(px *Proxmox, rt ResourceType, id string) (VMStat, error) { + apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + id + "/status/current" - jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil) + jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { - return VmStat{}, err + return VMStat{}, err } - var currentVmStatus VmCurrentStats - err = json.Unmarshal(jsonData, ¤tVmStatus) + var currentVMStatus VMCurrentStats + err = json.Unmarshal(jsonData, ¤tVMStatus) if err != nil { - return VmStat{}, err + return VMStat{}, err } - return currentVmStatus.Data, nil + return currentVMStatus.Data, nil } -func getVmStats(px *Proxmox, rt ResourceType) (VmStats, error) { - apiUrl := "/nodes/" + px.NodeName + "/" + string(rt) - jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil) +func getVMStats(px *Proxmox, rt ResourceType) (VMStats, error) { + apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { - return VmStats{}, err + return VMStats{}, err } - var vmStats VmStats + var vmStats VMStats err = json.Unmarshal(jsonData, &vmStats) if err != nil { - return VmStats{}, err + return VMStats{}, err } return vmStats, nil } -func getVmConfig(px *Proxmox, vmId string, rt ResourceType) (VmConfig, error) { - apiUrl := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + vmId + "/config" - jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil) +func getVMConfig(px *Proxmox, vmID string, rt ResourceType) (VMConfig, error) { + apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + vmID + "/config" + jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { - return VmConfig{}, err + return VMConfig{}, err } - var vmConfig VmConfig + var vmConfig VMConfig err = json.Unmarshal(jsonData, &vmConfig) if err != nil { - return VmConfig{}, err + return VMConfig{}, err } return vmConfig, nil } -func getFields(vmStat VmStat) (map[string]interface{}, error) { - mem_total, mem_used, mem_free, mem_used_percentage := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem) - swap_total, swap_used, swap_free, swap_used_percentage := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap) - disk_total, disk_used, disk_free, disk_used_percentage := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk) +func getFields(vmStat VMStat) (map[string]interface{}, error) { + memTotal, memUsed, memFree, memUsedPercentage := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem) + swapTotal, swapUsed, swapFree, swapUsedPercentage := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap) + diskTotal, diskUsed, diskFree, diskUsedPercentage := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk) return map[string]interface{}{ "status": vmStat.Status, "uptime": jsonNumberToInt64(vmStat.Uptime), - "cpuload": jsonNumberToFloat64(vmStat.CpuLoad), - "mem_used": mem_used, - "mem_total": mem_total, - "mem_free": mem_free, - "mem_used_percentage": mem_used_percentage, - "swap_used": swap_used, - "swap_total": swap_total, - "swap_free": swap_free, - "swap_used_percentage": swap_used_percentage, - "disk_used": disk_used, - "disk_total": disk_total, - "disk_free": disk_free, - "disk_used_percentage": disk_used_percentage, + "cpuload": jsonNumberToFloat64(vmStat.CPULoad), + "mem_used": memUsed, + "mem_total": memTotal, + "mem_free": memFree, + "mem_used_percentage": memUsedPercentage, + "swap_used": swapUsed, + "swap_total": swapTotal, + "swap_free": swapFree, + "swap_used_percentage": swapUsedPercentage, + "disk_used": diskUsed, + "disk_total": diskTotal, + "disk_free": diskFree, + "disk_used_percentage": diskUsedPercentage, }, nil } @@ -270,7 +270,7 @@ func jsonNumberToFloat64(value json.Number) float64 { return float64Value } -func getTags(px *Proxmox, name string, vmConfig VmConfig, rt ResourceType) map[string]string { +func getTags(px *Proxmox, name string, vmConfig VMConfig, rt ResourceType) map[string]string { domain := vmConfig.Data.Searchdomain if len(domain) == 0 { domain = px.nodeSearchDomain diff --git a/plugins/inputs/proxmox/proxmox_test.go b/plugins/inputs/proxmox/proxmox_test.go index 226705329761c..35ae559ed96c7 100644 --- a/plugins/inputs/proxmox/proxmox_test.go +++ b/plugins/inputs/proxmox/proxmox_test.go @@ -18,22 +18,22 @@ var lxcConfigTestData = `{"data":{"hostname":"container1","searchdomain":"test.e var lxcCurrentStatusTestData = `{"data":{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}}` var qemuCurrentStatusTestData = `{"data":{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}}` -func performTestRequest(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) { +func performTestRequest(px *Proxmox, apiURL string, method string, data url.Values) ([]byte, error) { var bytedata = []byte("") - if strings.HasSuffix(apiUrl, "dns") { + if strings.HasSuffix(apiURL, "dns") { bytedata = []byte(nodeSearchDomainTestData) - } else if strings.HasSuffix(apiUrl, "qemu") { + } else if strings.HasSuffix(apiURL, "qemu") { bytedata = []byte(qemuTestData) - } else if strings.HasSuffix(apiUrl, "113/config") { + } else if strings.HasSuffix(apiURL, "113/config") { bytedata = []byte(qemuConfigTestData) - } else if strings.HasSuffix(apiUrl, "lxc") { + } else if strings.HasSuffix(apiURL, "lxc") { bytedata = []byte(lxcTestData) - } else if strings.HasSuffix(apiUrl, "111/config") { + } else if strings.HasSuffix(apiURL, "111/config") { bytedata = []byte(lxcConfigTestData) - } else if strings.HasSuffix(apiUrl, "111/status/current") { + } else if strings.HasSuffix(apiURL, "111/status/current") { bytedata = []byte(lxcCurrentStatusTestData) - } else if strings.HasSuffix(apiUrl, "113/status/current") { + } else if strings.HasSuffix(apiURL, "113/status/current") { bytedata = []byte(qemuCurrentStatusTestData) } diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go index b137603ea79a9..ef207693e4b5f 100644 --- a/plugins/inputs/proxmox/structs.go +++ b/plugins/inputs/proxmox/structs.go @@ -32,15 +32,15 @@ var ( LXC ResourceType = "lxc" ) -type VmStats struct { - Data []VmStat `json:"data"` +type VMStats struct { + Data []VMStat `json:"data"` } -type VmCurrentStats struct { - Data VmStat `json:"data"` +type VMCurrentStats struct { + Data VMStat `json:"data"` } -type VmStat struct { +type VMStat struct { ID string `json:"vmid"` Name string `json:"name"` Status string `json:"status"` @@ -51,10 +51,10 @@ type VmStat struct { UsedSwap json.Number `json:"swap"` TotalSwap json.Number `json:"maxswap"` Uptime json.Number `json:"uptime"` - CpuLoad json.Number `json:"cpu"` + CPULoad json.Number `json:"cpu"` } -type VmConfig struct { +type VMConfig struct { Data struct { Searchdomain string `json:"searchdomain"` Hostname string `json:"hostname"` @@ -62,7 +62,7 @@ type VmConfig struct { } `json:"data"` } -type NodeDns struct { +type NodeDNS struct { Data struct { Searchdomain string `json:"search"` } `json:"data"` diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 4d8050c33fbca..29e2864399c08 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -157,8 +157,8 @@ type Node struct { Uptime int64 `json:"uptime"` MnesiaDiskTxCount int64 `json:"mnesia_disk_tx_count"` MnesiaDiskTxCountDetails Details `json:"mnesia_disk_tx_count_details"` - MnesiaRamTxCount int64 `json:"mnesia_ram_tx_count"` - MnesiaRamTxCountDetails Details `json:"mnesia_ram_tx_count_details"` + MnesiaRAMTxCount int64 `json:"mnesia_ram_tx_count"` + MnesiaRAMTxCountDetails Details `json:"mnesia_ram_tx_count_details"` GcNum int64 `json:"gc_num"` GcNumDetails Details `json:"gc_num_details"` GcBytesReclaimed int64 `json:"gc_bytes_reclaimed"` @@ -491,8 +491,8 @@ func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) { "uptime": node.Uptime, "mnesia_disk_tx_count": node.MnesiaDiskTxCount, "mnesia_disk_tx_count_rate": node.MnesiaDiskTxCountDetails.Rate, - "mnesia_ram_tx_count": node.MnesiaRamTxCount, - "mnesia_ram_tx_count_rate": node.MnesiaRamTxCountDetails.Rate, + "mnesia_ram_tx_count": node.MnesiaRAMTxCount, + "mnesia_ram_tx_count_rate": node.MnesiaRAMTxCountDetails.Rate, "gc_num": node.GcNum, "gc_num_rate": node.GcNumDetails.Rate, "gc_bytes_reclaimed": node.GcBytesReclaimed, diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index bcbf773689f33..fbf9f929fd880 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -16,8 +16,8 @@ import ( ) type Raindrops struct { - Urls []string - http_client *http.Client + Urls []string + httpClient *http.Client } var sampleConfig = ` @@ -46,7 +46,7 @@ func (r *Raindrops) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(r.gatherUrl(addr, acc)) + acc.AddError(r.gatherURL(addr, acc)) }(addr) } @@ -55,8 +55,8 @@ func (r *Raindrops) Gather(acc telegraf.Accumulator) error { return nil } -func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { - resp, err := r.http_client.Get(addr.String()) +func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { + resp, err := r.httpClient.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) } @@ -101,10 +101,10 @@ func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { acc.AddFields("raindrops", fields, tags) iterate := true - var queued_line_str string - var active_line_str string - var active_err error - var queued_err error + var queuedLineStr string + var activeLineStr string + var activeErr error + var queuedErr error for iterate { // Listen @@ -114,35 +114,35 @@ func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { "active": 0, "queued": 0, } - active_line_str, active_err = buf.ReadString('\n') - if active_err != nil { + activeLineStr, activeErr = buf.ReadString('\n') + if activeErr != nil { iterate = false break } - if strings.Compare(active_line_str, "\n") == 0 { + if strings.Compare(activeLineStr, "\n") == 0 { break } - queued_line_str, queued_err = buf.ReadString('\n') - if queued_err != nil { + queuedLineStr, queuedErr = buf.ReadString('\n') + if queuedErr != nil { iterate = false } - active_line := strings.Split(active_line_str, " ") - listen_name := active_line[0] + activeLine := strings.Split(activeLineStr, " ") + listenName := activeLine[0] - active, err := strconv.ParseUint(strings.TrimSpace(active_line[2]), 10, 64) + active, err := strconv.ParseUint(strings.TrimSpace(activeLine[2]), 10, 64) if err != nil { active = 0 } lis["active"] = active - queued_line := strings.Split(queued_line_str, " ") - queued, err := strconv.ParseUint(strings.TrimSpace(queued_line[2]), 10, 64) + queuedLine := strings.Split(queuedLineStr, " ") + queued, err := strconv.ParseUint(strings.TrimSpace(queuedLine[2]), 10, 64) if err != nil { queued = 0 } lis["queued"] = queued - if strings.Contains(listen_name, ":") { - listener := strings.Split(listen_name, ":") + if strings.Contains(listenName, ":") { + listener := strings.Split(listenName, ":") tags = map[string]string{ "ip": listener[0], "port": listener[1], @@ -150,7 +150,7 @@ func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { } else { tags = map[string]string{ - "socket": listen_name, + "socket": listenName, } } acc.AddFields("raindrops_listen", lis, tags) @@ -177,7 +177,7 @@ func (r *Raindrops) getTags(addr *url.URL) map[string]string { func init() { inputs.Add("raindrops", func() telegraf.Input { - return &Raindrops{http_client: &http.Client{ + return &Raindrops{httpClient: &http.Client{ Transport: &http.Transport{ ResponseHeaderTimeout: time.Duration(3 * time.Second), }, diff --git a/plugins/inputs/raindrops/raindrops_test.go b/plugins/inputs/raindrops/raindrops_test.go index b0b601cec49cc..2fed0a35a9af8 100644 --- a/plugins/inputs/raindrops/raindrops_test.go +++ b/plugins/inputs/raindrops/raindrops_test.go @@ -61,7 +61,7 @@ func TestRaindropsGeneratesMetrics(t *testing.T) { n := &Raindrops{ Urls: []string{fmt.Sprintf("%s/_raindrops", ts.URL)}, - http_client: &http.Client{Transport: &http.Transport{ + httpClient: &http.Client{Transport: &http.Transport{ ResponseHeaderTimeout: time.Duration(3 * time.Second), }}, } diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go index cf9f09359872e..efd9f9f3367ae 100644 --- a/plugins/inputs/redfish/redfish.go +++ b/plugins/inputs/redfish/redfish.go @@ -43,7 +43,7 @@ type Redfish struct { Address string `toml:"address"` Username string `toml:"username"` Password string `toml:"password"` - ComputerSystemId string `toml:"computer_system_id"` + ComputerSystemID string `toml:"computer_system_id"` Timeout config.Duration `toml:"timeout"` client http.Client @@ -150,7 +150,7 @@ func (r *Redfish) Init() error { return fmt.Errorf("did not provide username and password") } - if r.ComputerSystemId == "" { + if r.ComputerSystemID == "" { return fmt.Errorf("did not provide the computer system ID of the resource") } @@ -258,7 +258,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { address = r.baseURL.Host } - system, err := r.getComputerSystem(r.ComputerSystemId) + system, err := r.getComputerSystem(r.ComputerSystemID) if err != nil { return err } diff --git a/plugins/inputs/redfish/redfish_test.go b/plugins/inputs/redfish/redfish_test.go index 81a04aa1854ff..c31f650136cb5 100644 --- a/plugins/inputs/redfish/redfish_test.go +++ b/plugins/inputs/redfish/redfish_test.go @@ -44,7 +44,7 @@ func TestDellApis(t *testing.T) { address, _, err := net.SplitHostPort(u.Host) require.NoError(t, err) - expected_metrics := []telegraf.Metric{ + expectedMetrics := []telegraf.Metric{ testutil.MustMetric( "redfish_thermal_temperatures", map[string]string{ @@ -489,7 +489,7 @@ func TestDellApis(t *testing.T) { Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "System.Embedded.1", + ComputerSystemID: "System.Embedded.1", } plugin.Init() var acc testutil.Accumulator @@ -497,7 +497,7 @@ func TestDellApis(t *testing.T) { err = plugin.Gather(&acc) require.NoError(t, err) require.True(t, acc.HasMeasurement("redfish_thermal_temperatures")) - testutil.RequireMetricsEqual(t, expected_metrics, acc.GetTelegrafMetrics(), + testutil.RequireMetricsEqual(t, expectedMetrics, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } @@ -531,7 +531,7 @@ func TestHPApis(t *testing.T) { address, _, err := net.SplitHostPort(u.Host) require.NoError(t, err) - expected_metrics_hp := []telegraf.Metric{ + expectedMetricsHp := []telegraf.Metric{ testutil.MustMetric( "redfish_thermal_temperatures", map[string]string{ @@ -647,19 +647,19 @@ func TestHPApis(t *testing.T) { ), } - hp_plugin := &Redfish{ + hpPlugin := &Redfish{ Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "1", + ComputerSystemID: "1", } - hp_plugin.Init() - var hp_acc testutil.Accumulator + hpPlugin.Init() + var hpAcc testutil.Accumulator - err = hp_plugin.Gather(&hp_acc) + err = hpPlugin.Gather(&hpAcc) require.NoError(t, err) - require.True(t, hp_acc.HasMeasurement("redfish_thermal_temperatures")) - testutil.RequireMetricsEqual(t, expected_metrics_hp, hp_acc.GetTelegrafMetrics(), + require.True(t, hpAcc.HasMeasurement("redfish_thermal_temperatures")) + testutil.RequireMetricsEqual(t, expectedMetricsHp, hpAcc.GetTelegrafMetrics(), testutil.IgnoreTime()) } @@ -693,7 +693,7 @@ func TestInvalidUsernameorPassword(t *testing.T) { Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "System.Embedded.1", + ComputerSystemID: "System.Embedded.1", } var acc testutil.Accumulator @@ -723,7 +723,7 @@ func TestNoUsernameorPasswordConfiguration(t *testing.T) { r := &Redfish{ Address: ts.URL, - ComputerSystemId: "System.Embedded.1", + ComputerSystemID: "System.Embedded.1", } err := r.Init() @@ -796,7 +796,7 @@ func TestInvalidDellJSON(t *testing.T) { Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "System.Embedded.1", + ComputerSystemID: "System.Embedded.1", } plugin.Init() @@ -867,7 +867,7 @@ func TestInvalidHPJSON(t *testing.T) { Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "System.Embedded.2", + ComputerSystemID: "System.Embedded.2", } plugin.Init() diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go index 9bf595761bb24..e59350b5c69ca 100644 --- a/plugins/inputs/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -40,7 +40,7 @@ func (r *RethinkDB) Description() string { return "Read metrics from one or many RethinkDB servers" } -var localhost = &Server{Url: &url.URL{Host: "127.0.0.1:28015"}} +var localhost = &Server{URL: &url.URL{Host: "127.0.0.1:28015"}} // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). @@ -64,7 +64,7 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(serv string) { defer wg.Done() - acc.AddError(r.gatherServer(&Server{Url: u}, acc)) + acc.AddError(r.gatherServer(&Server{URL: u}, acc)) }(serv) } @@ -76,20 +76,20 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { func (r *RethinkDB) gatherServer(server *Server, acc telegraf.Accumulator) error { var err error connectOpts := gorethink.ConnectOpts{ - Address: server.Url.Host, + Address: server.URL.Host, DiscoverHosts: false, } - if server.Url.User != nil { - pwd, set := server.Url.User.Password() + if server.URL.User != nil { + pwd, set := server.URL.User.Password() if set && pwd != "" { connectOpts.AuthKey = pwd connectOpts.HandshakeVersion = gorethink.HandshakeV0_4 } } - if server.Url.Scheme == "rethinkdb2" && server.Url.User != nil { - pwd, set := server.Url.User.Password() + if server.URL.Scheme == "rethinkdb2" && server.URL.User != nil { + pwd, set := server.URL.User.Password() if set && pwd != "" { - connectOpts.Username = server.Url.User.Username() + connectOpts.Username = server.URL.User.Username() connectOpts.Password = pwd connectOpts.HandshakeVersion = gorethink.HandshakeV1_0 } diff --git a/plugins/inputs/rethinkdb/rethinkdb_data.go b/plugins/inputs/rethinkdb/rethinkdb_data.go index ca4ac75523455..159f6af9d992b 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_data.go +++ b/plugins/inputs/rethinkdb/rethinkdb_data.go @@ -8,7 +8,7 @@ import ( ) type serverStatus struct { - Id string `gorethink:"id"` + ID string `gorethink:"id"` Network struct { Addresses []Address `gorethink:"canonical_addresses"` Hostname string `gorethink:"hostname"` @@ -41,7 +41,7 @@ type Engine struct { } type tableStatus struct { - Id string `gorethink:"id"` + ID string `gorethink:"id"` DB string `gorethink:"db"` Name string `gorethink:"name"` } diff --git a/plugins/inputs/rethinkdb/rethinkdb_data_test.go b/plugins/inputs/rethinkdb/rethinkdb_data_test.go index ce1d963b973fc..a0c5e4ba8ae57 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_data_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_data_test.go @@ -59,14 +59,14 @@ func TestAddEngineStatsPartial(t *testing.T) { "written_docs_per_sec", } - missing_keys := []string{ + missingKeys := []string{ "total_queries", "total_reads", "total_writes", } engine.AddEngineStats(keys, &acc, tags) - for _, metric := range missing_keys { + for _, metric := range missingKeys { assert.False(t, acc.HasInt64Field("rethinkdb", metric)) } } diff --git a/plugins/inputs/rethinkdb/rethinkdb_server.go b/plugins/inputs/rethinkdb/rethinkdb_server.go index 521f2b7e53d53..ca12a224356d1 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server.go @@ -15,7 +15,7 @@ import ( ) type Server struct { - Url *url.URL + URL *url.URL session *gorethink.Session serverStatus serverStatus } @@ -78,9 +78,9 @@ func (s *Server) getServerStatus() error { if err != nil { return errors.New("could not parse server_status results") } - host, port, err := net.SplitHostPort(s.Url.Host) + host, port, err := net.SplitHostPort(s.URL.Host) if err != nil { - return fmt.Errorf("unable to determine provided hostname from %s", s.Url.Host) + return fmt.Errorf("unable to determine provided hostname from %s", s.URL.Host) } driverPort, _ := strconv.Atoi(port) for _, ss := range serverStatuses { @@ -92,12 +92,12 @@ func (s *Server) getServerStatus() error { } } - return fmt.Errorf("unable to determine host id from server_status with %s", s.Url.Host) + return fmt.Errorf("unable to determine host id from server_status with %s", s.URL.Host) } func (s *Server) getDefaultTags() map[string]string { tags := make(map[string]string) - tags["rethinkdb_host"] = s.Url.Host + tags["rethinkdb_host"] = s.URL.Host tags["rethinkdb_hostname"] = s.serverStatus.Network.Hostname return tags } @@ -139,7 +139,7 @@ var MemberTracking = []string{ } func (s *Server) addMemberStats(acc telegraf.Accumulator) error { - cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"server", s.serverStatus.Id}).Run(s.session) + cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"server", s.serverStatus.ID}).Run(s.session) if err != nil { return fmt.Errorf("member stats query error, %s", err.Error()) } @@ -176,7 +176,7 @@ func (s *Server) addTableStats(acc telegraf.Accumulator) error { } for _, table := range tables { cursor, err := gorethink.DB("rethinkdb").Table("stats"). - Get([]string{"table_server", table.Id, s.serverStatus.Id}). + Get([]string{"table_server", table.ID, s.serverStatus.ID}). Run(s.session) if err != nil { return fmt.Errorf("table stats query error, %s", err.Error()) diff --git a/plugins/inputs/rethinkdb/rethinkdb_test.go b/plugins/inputs/rethinkdb/rethinkdb_test.go index fa2cc92f2b06c..9a09864cad91a 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_test.go @@ -28,18 +28,18 @@ func init() { func testSetup(m *testing.M) { var err error - server = &Server{Url: &url.URL{Host: connect_url}} + server = &Server{URL: &url.URL{Host: connect_url}} if authKey { server.session, _ = gorethink.Connect(gorethink.ConnectOpts{ - Address: server.Url.Host, + Address: server.URL.Host, AuthKey: authKey, HandshakeVersion: gorethink.HandshakeV0_4, DiscoverHosts: false, }) } else { server.session, _ = gorethink.Connect(gorethink.ConnectOpts{ - Address: server.Url.Host, + Address: server.URL.Host, Username: username, Password: password, HandshakeVersion: gorethink.HandshakeV1_0, diff --git a/plugins/inputs/riak/riak.go b/plugins/inputs/riak/riak.go index 19f6222890360..c0f3990fa8b48 100644 --- a/plugins/inputs/riak/riak.go +++ b/plugins/inputs/riak/riak.go @@ -31,9 +31,9 @@ func NewRiak() *Riak { // Type riakStats represents the data that is received from Riak type riakStats struct { - CpuAvg1 int64 `json:"cpu_avg1"` - CpuAvg15 int64 `json:"cpu_avg15"` - CpuAvg5 int64 `json:"cpu_avg5"` + CPUAvg1 int64 `json:"cpu_avg1"` + CPUAvg15 int64 `json:"cpu_avg15"` + CPUAvg5 int64 `json:"cpu_avg5"` MemoryCode int64 `json:"memory_code"` MemoryEts int64 `json:"memory_ets"` MemoryProcesses int64 `json:"memory_processes"` @@ -144,9 +144,9 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error { // Build a map of field values fields := map[string]interface{}{ - "cpu_avg1": stats.CpuAvg1, - "cpu_avg15": stats.CpuAvg15, - "cpu_avg5": stats.CpuAvg5, + "cpu_avg1": stats.CPUAvg1, + "cpu_avg15": stats.CPUAvg15, + "cpu_avg5": stats.CPUAvg5, "memory_code": stats.MemoryCode, "memory_ets": stats.MemoryEts, "memory_processes": stats.MemoryProcesses, diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index 7e37fc32b8e3e..cff524b6b5390 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -230,7 +230,7 @@ func (s *Snmp) Description() string { return `DEPRECATED! PLEASE USE inputs.snmp INSTEAD.` } -func fillnode(parentNode Node, oid_name string, ids []string) { +func fillnode(parentNode Node, oidName string, ids []string) { // ids = ["1", "3", "6", ...] id, ids := ids[0], ids[1:] node, ok := parentNode.subnodes[id] @@ -241,12 +241,12 @@ func fillnode(parentNode Node, oid_name string, ids []string) { subnodes: make(map[string]Node), } if len(ids) == 0 { - node.name = oid_name + node.name = oidName } parentNode.subnodes[id] = node } if len(ids) > 0 { - fillnode(node, oid_name, ids) + fillnode(node, oidName, ids) } } @@ -305,10 +305,10 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { for _, line := range strings.Split(string(data), "\n") { oids := strings.Fields(line) if len(oids) == 2 && oids[1] != "" { - oid_name := oids[0] + oidName := oids[0] oid := oids[1] - fillnode(s.initNode, oid_name, strings.Split(oid, ".")) - s.nameToOid[oid_name] = oid + fillnode(s.initNode, oidName, strings.Split(oid, ".")) + s.nameToOid[oidName] = oid } } } @@ -348,10 +348,10 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { host.getOids = append(host.getOids, oid) } - for _, oid_name := range host.Collect { + for _, oidName := range host.Collect { // Get GET oids for _, oid := range s.Get { - if oid.Name == oid_name { + if oid.Name == oidName { if val, ok := s.nameToOid[oid.Oid]; ok { // TODO should we add the 0 instance ? if oid.Instance != "" { @@ -367,7 +367,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } // Get GETBULK oids for _, oid := range s.Bulk { - if oid.Name == oid_name { + if oid.Name == oidName { if val, ok := s.nameToOid[oid.Oid]; ok { oid.rawOid = "." + val } else { @@ -473,15 +473,15 @@ func (h *Host) SNMPMap( // We need to query this table // To get mapping between instance id // and instance name - oid_asked := table.mappingTable - oid_next := oid_asked - need_more_requests := true + oidAsked := table.mappingTable + oidNext := oidAsked + needMoreRequests := true // Set max repetition maxRepetition := uint8(32) // Launch requests - for need_more_requests { + for needMoreRequests { // Launch request - result, err3 := snmpClient.GetBulk([]string{oid_next}, 0, maxRepetition) + result, err3 := snmpClient.GetBulk([]string{oidNext}, 0, maxRepetition) if err3 != nil { return err3 } @@ -489,7 +489,7 @@ func (h *Host) SNMPMap( lastOid := "" for _, variable := range result.Variables { lastOid = variable.Name - if strings.HasPrefix(variable.Name, oid_asked) { + if strings.HasPrefix(variable.Name, oidAsked) { switch variable.Type { // handle instance names case gosnmp.OctetString: @@ -519,7 +519,7 @@ func (h *Host) SNMPMap( // remove oid table from the complete oid // in order to get the current instance id - key := strings.Replace(variable.Name, oid_asked, "", 1) + key := strings.Replace(variable.Name, oidAsked, "", 1) if len(table.subTables) == 0 { // We have a mapping table @@ -581,11 +581,11 @@ func (h *Host) SNMPMap( } } // Determine if we need more requests - if strings.HasPrefix(lastOid, oid_asked) { - need_more_requests = true - oid_next = lastOid + if strings.HasPrefix(lastOid, oidAsked) { + needMoreRequests = true + oidNext = lastOid } else { - need_more_requests = false + needMoreRequests = false } } } @@ -617,15 +617,15 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error { // gosnmp.MAX_OIDS == 60 // TODO use gosnmp.MAX_OIDS instead of hard coded value - max_oids := 60 + maxOids := 60 // limit 60 (MAX_OIDS) oids by requests - for i := 0; i < len(oidsList); i = i + max_oids { + for i := 0; i < len(oidsList); i = i + maxOids { // Launch request - max_index := i + max_oids - if i+max_oids > len(oidsList) { - max_index = len(oidsList) + maxIndex := i + maxOids + if i+maxOids > len(oidsList) { + maxIndex = len(oidsList) } - result, err3 := snmpClient.Get(oidsNameList[i:max_index]) // Get() accepts up to g.MAX_OIDS + result, err3 := snmpClient.Get(oidsNameList[i:maxIndex]) // Get() accepts up to g.MAX_OIDS if err3 != nil { return err3 } @@ -658,31 +658,31 @@ func (h *Host) SNMPBulk(acc telegraf.Accumulator, initNode Node) error { // TODO Trying to make requests with more than one OID // to reduce the number of requests for _, oid := range oidsNameList { - oid_asked := oid - need_more_requests := true + oidAsked := oid + needMoreRequests := true // Set max repetition maxRepetition := oidsList[oid].MaxRepetition if maxRepetition <= 0 { maxRepetition = 32 } // Launch requests - for need_more_requests { + for needMoreRequests { // Launch request result, err3 := snmpClient.GetBulk([]string{oid}, 0, maxRepetition) if err3 != nil { return err3 } // Handle response - last_oid, err := h.HandleResponse(oidsList, result, acc, initNode) + lastOid, err := h.HandleResponse(oidsList, result, acc, initNode) if err != nil { return err } // Determine if we need more requests - if strings.HasPrefix(last_oid, oid_asked) { - need_more_requests = true - oid = last_oid + if strings.HasPrefix(lastOid, oidAsked) { + needMoreRequests = true + oid = lastOid } else { - need_more_requests = false + needMoreRequests = false } } } @@ -700,16 +700,16 @@ func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) { version = gosnmp.Version2c } // Prepare host and port - host, port_str, err := net.SplitHostPort(h.Address) + host, portStr, err := net.SplitHostPort(h.Address) if err != nil { - port_str = string("161") + portStr = string("161") } // convert port_str to port in uint16 - port_64, err := strconv.ParseUint(port_str, 10, 16) + port64, err := strconv.ParseUint(portStr, 10, 16) if err != nil { return nil, err } - port := uint16(port_64) + port := uint16(port64) // Get SNMP client snmpClient := &gosnmp.GoSNMP{ Target: host, @@ -739,7 +739,7 @@ func (h *Host) HandleResponse( lastOid = variable.Name nextresult: // Get only oid wanted - for oid_key, oid := range oids { + for oidKey, oid := range oids { // Skip oids already processed for _, processedOid := range h.processedOids { if variable.Name == processedOid { @@ -750,7 +750,7 @@ func (h *Host) HandleResponse( // OR // the result is SNMP table which "." comes right after oid_key. // ex: oid_key: .1.3.6.1.2.1.2.2.1.16, variable.Name: .1.3.6.1.2.1.2.2.1.16.1 - if variable.Name == oid_key || strings.HasPrefix(variable.Name, oid_key+".") { + if variable.Name == oidKey || strings.HasPrefix(variable.Name, oidKey+".") { switch variable.Type { // handle Metrics case gosnmp.Boolean, gosnmp.Integer, gosnmp.Counter32, gosnmp.Gauge32, @@ -761,19 +761,19 @@ func (h *Host) HandleResponse( tags["unit"] = oid.Unit } // Get name and instance - var oid_name string + var oidName string var instance string // Get oidname and instance from translate file - oid_name, instance = findnodename(initNode, + oidName, instance = findnodename(initNode, strings.Split(string(variable.Name[1:]), ".")) // Set instance tag // From mapping table - mapping, inMappingNoSubTable := h.OidInstanceMapping[oid_key] + mapping, inMappingNoSubTable := h.OidInstanceMapping[oidKey] if inMappingNoSubTable { // filter if the instance in not in // OidInstanceMapping mapping map - if instance_name, exists := mapping[instance]; exists { - tags["instance"] = instance_name + if instanceName, exists := mapping[instance]; exists { + tags["instance"] = instanceName } else { continue } @@ -788,24 +788,24 @@ func (h *Host) HandleResponse( } // Set name - var field_name string - if oid_name != "" { + var fieldName string + if oidName != "" { // Set fieldname as oid name from translate file - field_name = oid_name + fieldName = oidName } else { // Set fieldname as oid name from inputs.snmp.get section // Because the result oid is equal to inputs.snmp.get section - field_name = oid.Name + fieldName = oid.Name } tags["snmp_host"], _, _ = net.SplitHostPort(h.Address) fields := make(map[string]interface{}) - fields[string(field_name)] = variable.Value + fields[string(fieldName)] = variable.Value h.processedOids = append(h.processedOids, variable.Name) - acc.AddFields(field_name, fields, tags) + acc.AddFields(fieldName, fields, tags) case gosnmp.NoSuchObject, gosnmp.NoSuchInstance: // Oid not found - log.Printf("E! [inputs.snmp_legacy] oid %q not found", oid_key) + log.Printf("E! [inputs.snmp_legacy] oid %q not found", oidKey) default: // delete other data } diff --git a/plugins/inputs/sqlserver/sqlqueriesV2.go b/plugins/inputs/sqlserver/sqlqueriesV2.go index 66b1bdf5976b5..a6c68f5c0d98e 100644 --- a/plugins/inputs/sqlserver/sqlqueriesV2.go +++ b/plugins/inputs/sqlserver/sqlqueriesV2.go @@ -1348,7 +1348,7 @@ IF @EngineEdition IN (2,3,4) AND @MajorMinorVersion >= 1050 END ` -const sqlServerCpuV2 string = ` +const sqlServerCPUV2 string = ` /*The ring buffer has a new value every minute*/ IF SERVERPROPERTY('EngineEdition') IN (2,3,4) /*Standard,Enterpris,Express*/ BEGIN diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 3baa5ed6aafbb..67571d23d6f26 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -154,7 +154,7 @@ func initQueries(s *SQLServer) error { queries["SQLServerSchedulers"] = Query{ScriptName: "SQLServerSchedulers", Script: sqlServerSchedulers, ResultByRow: false} queries["SQLServerRequests"] = Query{ScriptName: "SQLServerRequests", Script: sqlServerRequests, ResultByRow: false} queries["SQLServerVolumeSpace"] = Query{ScriptName: "SQLServerVolumeSpace", Script: sqlServerVolumeSpace, ResultByRow: false} - queries["SQLServerCpu"] = Query{ScriptName: "SQLServerCpu", Script: sqlServerRingBufferCpu, ResultByRow: false} + queries["SQLServerCpu"] = Query{ScriptName: "SQLServerCpu", Script: sqlServerRingBufferCPU, ResultByRow: false} queries["SQLServerAvailabilityReplicaStates"] = Query{ScriptName: "SQLServerAvailabilityReplicaStates", Script: sqlServerAvailabilityReplicaStates, ResultByRow: false} queries["SQLServerDatabaseReplicaStates"] = Query{ScriptName: "SQLServerDatabaseReplicaStates", Script: sqlServerDatabaseReplicaStates, ResultByRow: false} } else { @@ -174,7 +174,7 @@ func initQueries(s *SQLServer) error { queries["Schedulers"] = Query{ScriptName: "Schedulers", Script: sqlServerSchedulersV2, ResultByRow: false} queries["SqlRequests"] = Query{ScriptName: "SqlRequests", Script: sqlServerRequestsV2, ResultByRow: false} queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: sqlServerVolumeSpaceV2, ResultByRow: false} - queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCpuV2, ResultByRow: false} + queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCPUV2, ResultByRow: false} } else { log.Println("W! DEPRECATED: query_version=1 has been deprecated in favor of database_type.") queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCounters, ResultByRow: true} diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index f90966c4e2985..41fd848a1b36a 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -1131,7 +1131,7 @@ IF @MajorMinorVersion >= 1050 BEGIN END ` -const sqlServerRingBufferCpu string = ` +const sqlServerRingBufferCPU string = ` IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. Check the database_type parameter in the telegraf configuration.'; RAISERROR (@ErrorMessage,11,1) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 168d4b9cf4309..bb9e9664683f8 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -21,9 +21,9 @@ import ( ) const ( - // UdpMaxPacketSize is the UDP packet limit, see + // UDPMaxPacketSize is the UDP packet limit, see // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure - UdpMaxPacketSize int = 64 * 1024 + UDPMaxPacketSize int = 64 * 1024 defaultFieldName = "value" @@ -498,7 +498,7 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { s.UDPlistener.SetReadBuffer(s.ReadBufferSize) } - buf := make([]byte, UdpMaxPacketSize) + buf := make([]byte, UDPMaxPacketSize) for { select { case <-s.done: diff --git a/plugins/inputs/systemd_units/systemd_units_linux.go b/plugins/inputs/systemd_units/systemd_units_linux.go index 8df21efa6de13..e7a4be077d7f4 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux.go +++ b/plugins/inputs/systemd_units/systemd_units_linux.go @@ -27,7 +27,7 @@ const measurement = "systemd_units" // Below are mappings of systemd state tables as defined in // https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c // Duplicate strings are removed from this list. -var load_map = map[string]int{ +var loadMap = map[string]int{ "loaded": 0, "stub": 1, "not-found": 2, @@ -37,7 +37,7 @@ var load_map = map[string]int{ "masked": 6, } -var active_map = map[string]int{ +var activeMap = map[string]int{ "active": 0, "reloading": 1, "inactive": 2, @@ -46,7 +46,7 @@ var active_map = map[string]int{ "deactivating": 5, } -var sub_map = map[string]int{ +var subMap = map[string]int{ // service_state_table, offset 0x0000 "running": 0x0000, "dead": 0x0001, @@ -162,27 +162,27 @@ func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { } var ( - load_code int - active_code int - sub_code int - ok bool + loadCode int + activeCode int + subCode int + ok bool ) - if load_code, ok = load_map[load]; !ok { + if loadCode, ok = loadMap[load]; !ok { acc.AddError(fmt.Errorf("Error parsing field 'load', value not in map: %s", load)) continue } - if active_code, ok = active_map[active]; !ok { + if activeCode, ok = activeMap[active]; !ok { acc.AddError(fmt.Errorf("Error parsing field 'active', value not in map: %s", active)) continue } - if sub_code, ok = sub_map[sub]; !ok { + if subCode, ok = subMap[sub]; !ok { acc.AddError(fmt.Errorf("Error parsing field 'sub', value not in map: %s", sub)) continue } fields := map[string]interface{}{ - "load_code": load_code, - "active_code": active_code, - "sub_code": sub_code, + "load_code": loadCode, + "active_code": activeCode, + "sub_code": subCode, } acc.AddFields(measurement, fields, tags) diff --git a/plugins/inputs/systemd_units/systemd_units_linux_test.go b/plugins/inputs/systemd_units/systemd_units_linux_test.go index f45922bb91af0..01af08105998d 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux_test.go +++ b/plugins/inputs/systemd_units/systemd_units_linux_test.go @@ -73,13 +73,13 @@ func TestSystemdUnits(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - systemd_units := &SystemdUnits{ + systemdUnits := &SystemdUnits{ systemctl: func(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) { return bytes.NewBufferString(tt.line), nil }, } acc := new(testutil.Accumulator) - err := acc.GatherError(systemd_units.Gather) + err := acc.GatherError(systemdUnits.Gather) if !reflect.DeepEqual(tt.err, err) { t.Errorf("%s: expected error '%#v' got '%#v'", tt.name, tt.err, err) } diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go index 41b8e463766ba..9016061cb565e 100644 --- a/plugins/inputs/tcp_listener/tcp_listener.go +++ b/plugins/inputs/tcp_listener/tcp_listener.go @@ -14,7 +14,7 @@ import ( "github.com/influxdata/telegraf/selfstat" ) -type TcpListener struct { +type TCPListener struct { ServiceAddress string AllowedPendingMessages int MaxTCPConnections int `toml:"max_tcp_connections"` @@ -65,26 +65,26 @@ const sampleConfig = ` # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener ` -func (t *TcpListener) SampleConfig() string { +func (t *TCPListener) SampleConfig() string { return sampleConfig } -func (t *TcpListener) Description() string { +func (t *TCPListener) Description() string { return "Generic TCP listener" } // All the work is done in the Start() function, so this is just a dummy // function. -func (t *TcpListener) Gather(_ telegraf.Accumulator) error { +func (t *TCPListener) Gather(_ telegraf.Accumulator) error { return nil } -func (t *TcpListener) SetParser(parser parsers.Parser) { +func (t *TCPListener) SetParser(parser parsers.Parser) { t.parser = parser } // Start starts the tcp listener service. -func (t *TcpListener) Start(acc telegraf.Accumulator) error { +func (t *TCPListener) Start(acc telegraf.Accumulator) error { t.Lock() defer t.Unlock() @@ -129,7 +129,7 @@ func (t *TcpListener) Start(acc telegraf.Accumulator) error { } // Stop cleans up all resources -func (t *TcpListener) Stop() { +func (t *TCPListener) Stop() { t.Lock() defer t.Unlock() close(t.done) @@ -155,7 +155,7 @@ func (t *TcpListener) Stop() { } // tcpListen listens for incoming TCP connections. -func (t *TcpListener) tcpListen() error { +func (t *TCPListener) tcpListen() error { defer t.wg.Done() for { @@ -186,7 +186,7 @@ func (t *TcpListener) tcpListen() error { } // refuser refuses a TCP connection -func (t *TcpListener) refuser(conn *net.TCPConn) { +func (t *TCPListener) refuser(conn *net.TCPConn) { // Tell the connection why we are closing. fmt.Fprintf(conn, "Telegraf maximum concurrent TCP connections (%d)"+ " reached, closing.\nYou may want to increase max_tcp_connections in"+ @@ -197,7 +197,7 @@ func (t *TcpListener) refuser(conn *net.TCPConn) { } // handler handles a single TCP Connection -func (t *TcpListener) handler(conn *net.TCPConn, id string) { +func (t *TCPListener) handler(conn *net.TCPConn, id string) { t.CurrentConnections.Incr(1) t.TotalConnections.Incr(1) // connection cleanup function @@ -243,7 +243,7 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) { } // tcpParser parses the incoming tcp byte packets -func (t *TcpListener) tcpParser() error { +func (t *TCPListener) tcpParser() error { defer t.wg.Done() var packet []byte @@ -276,14 +276,14 @@ func (t *TcpListener) tcpParser() error { } // forget a TCP connection -func (t *TcpListener) forget(id string) { +func (t *TCPListener) forget(id string) { t.cleanup.Lock() defer t.cleanup.Unlock() delete(t.conns, id) } // remember a TCP connection -func (t *TcpListener) remember(id string, conn *net.TCPConn) { +func (t *TCPListener) remember(id string, conn *net.TCPConn) { t.cleanup.Lock() defer t.cleanup.Unlock() t.conns[id] = conn @@ -291,7 +291,7 @@ func (t *TcpListener) remember(id string, conn *net.TCPConn) { func init() { inputs.Add("tcp_listener", func() telegraf.Input { - return &TcpListener{ + return &TCPListener{ ServiceAddress: ":8094", AllowedPendingMessages: 10000, MaxTCPConnections: 250, diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index 61fa890cd9b82..5c476703e54cd 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -30,9 +30,9 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257 ` ) -func newTestTcpListener() (*TcpListener, chan []byte) { +func newTestTCPListener() (*TCPListener, chan []byte) { in := make(chan []byte, 1500) - listener := &TcpListener{ + listener := &TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8194", AllowedPendingMessages: 10000, @@ -45,7 +45,7 @@ func newTestTcpListener() (*TcpListener, chan []byte) { // benchmark how long it takes to accept & process 100,000 metrics: func BenchmarkTCP(b *testing.B) { - listener := TcpListener{ + listener := TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8198", AllowedPendingMessages: 100000, @@ -77,7 +77,7 @@ func BenchmarkTCP(b *testing.B) { } func TestHighTrafficTCP(t *testing.T) { - listener := TcpListener{ + listener := TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8199", AllowedPendingMessages: 100000, @@ -105,7 +105,7 @@ func TestHighTrafficTCP(t *testing.T) { } func TestConnectTCP(t *testing.T) { - listener := TcpListener{ + listener := TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8194", AllowedPendingMessages: 10000, @@ -143,7 +143,7 @@ func TestConnectTCP(t *testing.T) { // Test that MaxTCPConnections is respected func TestConcurrentConns(t *testing.T) { - listener := TcpListener{ + listener := TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8195", AllowedPendingMessages: 10000, @@ -179,7 +179,7 @@ func TestConcurrentConns(t *testing.T) { // Test that MaxTCPConnections is respected when max==1 func TestConcurrentConns1(t *testing.T) { - listener := TcpListener{ + listener := TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8196", AllowedPendingMessages: 10000, @@ -213,7 +213,7 @@ func TestConcurrentConns1(t *testing.T) { // Test that MaxTCPConnections is respected func TestCloseConcurrentConns(t *testing.T) { - listener := TcpListener{ + listener := TCPListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:8195", AllowedPendingMessages: 10000, @@ -235,7 +235,7 @@ func TestCloseConcurrentConns(t *testing.T) { func TestRunParser(t *testing.T) { var testmsg = []byte(testMsg) - listener, in := newTestTcpListener() + listener, in := newTestTCPListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) @@ -257,7 +257,7 @@ func TestRunParser(t *testing.T) { func TestRunParserInvalidMsg(t *testing.T) { var testmsg = []byte("cpu_load_short") - listener, in := newTestTcpListener() + listener, in := newTestTCPListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) @@ -283,7 +283,7 @@ func TestRunParserInvalidMsg(t *testing.T) { func TestRunParserGraphiteMsg(t *testing.T) { var testmsg = []byte("cpu.load.graphite 12 1454780029") - listener, in := newTestTcpListener() + listener, in := newTestTCPListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) @@ -303,7 +303,7 @@ func TestRunParserGraphiteMsg(t *testing.T) { func TestRunParserJSONMsg(t *testing.T) { var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n") - listener, in := newTestTcpListener() + listener, in := newTestTCPListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) diff --git a/plugins/inputs/tengine/tengine.go b/plugins/inputs/tengine/tengine.go index c45ae81d10b60..774abff991edf 100644 --- a/plugins/inputs/tengine/tengine.go +++ b/plugins/inputs/tengine/tengine.go @@ -56,7 +56,7 @@ func (n *Tengine) Gather(acc telegraf.Accumulator) error { // Create an HTTP client that is re-used for each // collection interval if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -73,7 +73,7 @@ func (n *Tengine) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc)) + acc.AddError(n.gatherURL(addr, acc)) }(addr) } @@ -81,7 +81,7 @@ func (n *Tengine) Gather(acc telegraf.Accumulator) error { return nil } -func (n *Tengine) createHttpClient() (*http.Client, error) { +func (n *Tengine) createHTTPClient() (*http.Client, error) { tlsCfg, err := n.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -102,40 +102,40 @@ func (n *Tengine) createHttpClient() (*http.Client, error) { } type TengineStatus struct { - host string - bytes_in uint64 - bytes_out uint64 - conn_total uint64 - req_total uint64 - http_2xx uint64 - http_3xx uint64 - http_4xx uint64 - http_5xx uint64 - http_other_status uint64 - rt uint64 - ups_req uint64 - ups_rt uint64 - ups_tries uint64 - http_200 uint64 - http_206 uint64 - http_302 uint64 - http_304 uint64 - http_403 uint64 - http_404 uint64 - http_416 uint64 - http_499 uint64 - http_500 uint64 - http_502 uint64 - http_503 uint64 - http_504 uint64 - http_508 uint64 - http_other_detail_status uint64 - http_ups_4xx uint64 - http_ups_5xx uint64 + host string + bytesIn uint64 + bytesOut uint64 + connTotal uint64 + reqTotal uint64 + http2xx uint64 + http3xx uint64 + http4xx uint64 + http5xx uint64 + httpOtherStatus uint64 + rt uint64 + upsReq uint64 + upsRt uint64 + upsTries uint64 + http200 uint64 + http206 uint64 + http302 uint64 + http304 uint64 + http403 uint64 + http404 uint64 + http416 uint64 + http499 uint64 + http500 uint64 + http502 uint64 + http503 uint64 + http504 uint64 + http508 uint64 + httpOtherDetailStatus uint64 + httpUps4xx uint64 + httpUps5xx uint64 } -func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { - var tenginestatus TengineStatus +func (n *Tengine) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { + var tengineStatus TengineStatus resp, err := n.client.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) @@ -152,161 +152,161 @@ func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { if err != nil || io.EOF == err { break } - line_split := strings.Split(strings.TrimSpace(line), ",") - if len(line_split) != 30 { + lineSplit := strings.Split(strings.TrimSpace(line), ",") + if len(lineSplit) != 30 { continue } - tenginestatus.host = line_split[0] + tengineStatus.host = lineSplit[0] if err != nil { return err } - tenginestatus.bytes_in, err = strconv.ParseUint(line_split[1], 10, 64) + tengineStatus.bytesIn, err = strconv.ParseUint(lineSplit[1], 10, 64) if err != nil { return err } - tenginestatus.bytes_out, err = strconv.ParseUint(line_split[2], 10, 64) + tengineStatus.bytesOut, err = strconv.ParseUint(lineSplit[2], 10, 64) if err != nil { return err } - tenginestatus.conn_total, err = strconv.ParseUint(line_split[3], 10, 64) + tengineStatus.connTotal, err = strconv.ParseUint(lineSplit[3], 10, 64) if err != nil { return err } - tenginestatus.req_total, err = strconv.ParseUint(line_split[4], 10, 64) + tengineStatus.reqTotal, err = strconv.ParseUint(lineSplit[4], 10, 64) if err != nil { return err } - tenginestatus.http_2xx, err = strconv.ParseUint(line_split[5], 10, 64) + tengineStatus.http2xx, err = strconv.ParseUint(lineSplit[5], 10, 64) if err != nil { return err } - tenginestatus.http_3xx, err = strconv.ParseUint(line_split[6], 10, 64) + tengineStatus.http3xx, err = strconv.ParseUint(lineSplit[6], 10, 64) if err != nil { return err } - tenginestatus.http_4xx, err = strconv.ParseUint(line_split[7], 10, 64) + tengineStatus.http4xx, err = strconv.ParseUint(lineSplit[7], 10, 64) if err != nil { return err } - tenginestatus.http_5xx, err = strconv.ParseUint(line_split[8], 10, 64) + tengineStatus.http5xx, err = strconv.ParseUint(lineSplit[8], 10, 64) if err != nil { return err } - tenginestatus.http_other_status, err = strconv.ParseUint(line_split[9], 10, 64) + tengineStatus.httpOtherStatus, err = strconv.ParseUint(lineSplit[9], 10, 64) if err != nil { return err } - tenginestatus.rt, err = strconv.ParseUint(line_split[10], 10, 64) + tengineStatus.rt, err = strconv.ParseUint(lineSplit[10], 10, 64) if err != nil { return err } - tenginestatus.ups_req, err = strconv.ParseUint(line_split[11], 10, 64) + tengineStatus.upsReq, err = strconv.ParseUint(lineSplit[11], 10, 64) if err != nil { return err } - tenginestatus.ups_rt, err = strconv.ParseUint(line_split[12], 10, 64) + tengineStatus.upsRt, err = strconv.ParseUint(lineSplit[12], 10, 64) if err != nil { return err } - tenginestatus.ups_tries, err = strconv.ParseUint(line_split[13], 10, 64) + tengineStatus.upsTries, err = strconv.ParseUint(lineSplit[13], 10, 64) if err != nil { return err } - tenginestatus.http_200, err = strconv.ParseUint(line_split[14], 10, 64) + tengineStatus.http200, err = strconv.ParseUint(lineSplit[14], 10, 64) if err != nil { return err } - tenginestatus.http_206, err = strconv.ParseUint(line_split[15], 10, 64) + tengineStatus.http206, err = strconv.ParseUint(lineSplit[15], 10, 64) if err != nil { return err } - tenginestatus.http_302, err = strconv.ParseUint(line_split[16], 10, 64) + tengineStatus.http302, err = strconv.ParseUint(lineSplit[16], 10, 64) if err != nil { return err } - tenginestatus.http_304, err = strconv.ParseUint(line_split[17], 10, 64) + tengineStatus.http304, err = strconv.ParseUint(lineSplit[17], 10, 64) if err != nil { return err } - tenginestatus.http_403, err = strconv.ParseUint(line_split[18], 10, 64) + tengineStatus.http403, err = strconv.ParseUint(lineSplit[18], 10, 64) if err != nil { return err } - tenginestatus.http_404, err = strconv.ParseUint(line_split[19], 10, 64) + tengineStatus.http404, err = strconv.ParseUint(lineSplit[19], 10, 64) if err != nil { return err } - tenginestatus.http_416, err = strconv.ParseUint(line_split[20], 10, 64) + tengineStatus.http416, err = strconv.ParseUint(lineSplit[20], 10, 64) if err != nil { return err } - tenginestatus.http_499, err = strconv.ParseUint(line_split[21], 10, 64) + tengineStatus.http499, err = strconv.ParseUint(lineSplit[21], 10, 64) if err != nil { return err } - tenginestatus.http_500, err = strconv.ParseUint(line_split[22], 10, 64) + tengineStatus.http500, err = strconv.ParseUint(lineSplit[22], 10, 64) if err != nil { return err } - tenginestatus.http_502, err = strconv.ParseUint(line_split[23], 10, 64) + tengineStatus.http502, err = strconv.ParseUint(lineSplit[23], 10, 64) if err != nil { return err } - tenginestatus.http_503, err = strconv.ParseUint(line_split[24], 10, 64) + tengineStatus.http503, err = strconv.ParseUint(lineSplit[24], 10, 64) if err != nil { return err } - tenginestatus.http_504, err = strconv.ParseUint(line_split[25], 10, 64) + tengineStatus.http504, err = strconv.ParseUint(lineSplit[25], 10, 64) if err != nil { return err } - tenginestatus.http_508, err = strconv.ParseUint(line_split[26], 10, 64) + tengineStatus.http508, err = strconv.ParseUint(lineSplit[26], 10, 64) if err != nil { return err } - tenginestatus.http_other_detail_status, err = strconv.ParseUint(line_split[27], 10, 64) + tengineStatus.httpOtherDetailStatus, err = strconv.ParseUint(lineSplit[27], 10, 64) if err != nil { return err } - tenginestatus.http_ups_4xx, err = strconv.ParseUint(line_split[28], 10, 64) + tengineStatus.httpUps4xx, err = strconv.ParseUint(lineSplit[28], 10, 64) if err != nil { return err } - tenginestatus.http_ups_5xx, err = strconv.ParseUint(line_split[29], 10, 64) + tengineStatus.httpUps5xx, err = strconv.ParseUint(lineSplit[29], 10, 64) if err != nil { return err } - tags := getTags(addr, tenginestatus.host) + tags := getTags(addr, tengineStatus.host) fields := map[string]interface{}{ - "bytes_in": tenginestatus.bytes_in, - "bytes_out": tenginestatus.bytes_out, - "conn_total": tenginestatus.conn_total, - "req_total": tenginestatus.req_total, - "http_2xx": tenginestatus.http_2xx, - "http_3xx": tenginestatus.http_3xx, - "http_4xx": tenginestatus.http_4xx, - "http_5xx": tenginestatus.http_5xx, - "http_other_status": tenginestatus.http_other_status, - "rt": tenginestatus.rt, - "ups_req": tenginestatus.ups_req, - "ups_rt": tenginestatus.ups_rt, - "ups_tries": tenginestatus.ups_tries, - "http_200": tenginestatus.http_200, - "http_206": tenginestatus.http_206, - "http_302": tenginestatus.http_302, - "http_304": tenginestatus.http_304, - "http_403": tenginestatus.http_403, - "http_404": tenginestatus.http_404, - "http_416": tenginestatus.http_416, - "http_499": tenginestatus.http_499, - "http_500": tenginestatus.http_500, - "http_502": tenginestatus.http_502, - "http_503": tenginestatus.http_503, - "http_504": tenginestatus.http_504, - "http_508": tenginestatus.http_508, - "http_other_detail_status": tenginestatus.http_other_detail_status, - "http_ups_4xx": tenginestatus.http_ups_4xx, - "http_ups_5xx": tenginestatus.http_ups_5xx, + "bytes_in": tengineStatus.bytesIn, + "bytes_out": tengineStatus.bytesOut, + "conn_total": tengineStatus.connTotal, + "req_total": tengineStatus.reqTotal, + "http_2xx": tengineStatus.http2xx, + "http_3xx": tengineStatus.http3xx, + "http_4xx": tengineStatus.http4xx, + "http_5xx": tengineStatus.http5xx, + "http_other_status": tengineStatus.httpOtherStatus, + "rt": tengineStatus.rt, + "ups_req": tengineStatus.upsReq, + "ups_rt": tengineStatus.upsRt, + "ups_tries": tengineStatus.upsTries, + "http_200": tengineStatus.http200, + "http_206": tengineStatus.http206, + "http_302": tengineStatus.http302, + "http_304": tengineStatus.http304, + "http_403": tengineStatus.http403, + "http_404": tengineStatus.http404, + "http_416": tengineStatus.http416, + "http_499": tengineStatus.http499, + "http_500": tengineStatus.http500, + "http_502": tengineStatus.http502, + "http_503": tengineStatus.http503, + "http_504": tengineStatus.http504, + "http_508": tengineStatus.http508, + "http_other_detail_status": tengineStatus.httpOtherDetailStatus, + "http_ups_4xx": tengineStatus.httpUps4xx, + "http_ups_5xx": tengineStatus.httpUps5xx, } acc.AddFields("tengine", fields, tags) } @@ -315,7 +315,7 @@ func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { } // Get tag(s) for the tengine plugin -func getTags(addr *url.URL, server_name string) map[string]string { +func getTags(addr *url.URL, serverName string) map[string]string { h := addr.Host host, port, err := net.SplitHostPort(h) if err != nil { @@ -328,7 +328,7 @@ func getTags(addr *url.URL, server_name string) map[string]string { port = "" } } - return map[string]string{"server": host, "port": port, "server_name": server_name} + return map[string]string{"server": host, "port": port, "server_name": serverName} } func init() { diff --git a/plugins/inputs/tengine/tengine_test.go b/plugins/inputs/tengine/tengine_test.go index 317820bb22acb..70526826cd0ae 100644 --- a/plugins/inputs/tengine/tengine_test.go +++ b/plugins/inputs/tengine/tengine_test.go @@ -38,13 +38,13 @@ func TestTengineGeneratesMetrics(t *testing.T) { Urls: []string{fmt.Sprintf("%s/us", ts.URL)}, } - var acc_tengine testutil.Accumulator + var accTengine testutil.Accumulator - err_tengine := acc_tengine.GatherError(n.Gather) + errTengine := accTengine.GatherError(n.Gather) - require.NoError(t, err_tengine) + require.NoError(t, errTengine) - fields_tengine := map[string]interface{}{ + fieldsTengine := map[string]interface{}{ "bytes_in": uint64(784), "bytes_out": uint64(1511), "conn_total": uint64(2), @@ -93,5 +93,5 @@ func TestTengineGeneratesMetrics(t *testing.T) { } } tags := map[string]string{"server": host, "port": port, "server_name": "127.0.0.1"} - acc_tengine.AssertContainsTaggedFields(t, "tengine", fields_tengine, tags) + accTengine.AssertContainsTaggedFields(t, "tengine", fieldsTengine, tags) } diff --git a/plugins/inputs/tomcat/tomcat.go b/plugins/inputs/tomcat/tomcat.go index d32b0168a3d05..560594ce5a7b9 100644 --- a/plugins/inputs/tomcat/tomcat.go +++ b/plugins/inputs/tomcat/tomcat.go @@ -99,7 +99,7 @@ func (s *Tomcat) SampleConfig() string { func (s *Tomcat) Gather(acc telegraf.Accumulator) error { if s.client == nil { - client, err := s.createHttpClient() + client, err := s.createHTTPClient() if err != nil { return err } @@ -187,7 +187,7 @@ func (s *Tomcat) Gather(acc telegraf.Accumulator) error { return nil } -func (s *Tomcat) createHttpClient() (*http.Client, error) { +func (s *Tomcat) createHTTPClient() (*http.Client, error) { tlsConfig, err := s.ClientConfig.TLSConfig() if err != nil { return nil, err diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index 7fa59fdb121bc..4833b0fdfd132 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -13,8 +13,8 @@ import ( "github.com/influxdata/telegraf/selfstat" ) -// UdpListener main struct for the collector -type UdpListener struct { +// UDPListener main struct for the collector +type UDPListener struct { ServiceAddress string // UDPBufferSize should only be set if you want/need the telegraf UDP socket to @@ -57,9 +57,9 @@ type UdpListener struct { Log telegraf.Logger } -// UDP_MAX_PACKET_SIZE is packet limit, see +// UDPMaxPacketSize is packet limit, see // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure -const UDP_MAX_PACKET_SIZE int = 64 * 1024 +const UDPMaxPacketSize int = 64 * 1024 var dropwarn = "udp_listener message queue full. " + "We have dropped %d messages so far. " + @@ -74,25 +74,25 @@ const sampleConfig = ` # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener ` -func (u *UdpListener) SampleConfig() string { +func (u *UDPListener) SampleConfig() string { return sampleConfig } -func (u *UdpListener) Description() string { +func (u *UDPListener) Description() string { return "Generic UDP listener" } // All the work is done in the Start() function, so this is just a dummy // function. -func (u *UdpListener) Gather(_ telegraf.Accumulator) error { +func (u *UDPListener) Gather(_ telegraf.Accumulator) error { return nil } -func (u *UdpListener) SetParser(parser parsers.Parser) { +func (u *UDPListener) SetParser(parser parsers.Parser) { u.parser = parser } -func (u *UdpListener) Start(acc telegraf.Accumulator) error { +func (u *UDPListener) Start(acc telegraf.Accumulator) error { u.Lock() defer u.Unlock() @@ -119,7 +119,7 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error { return nil } -func (u *UdpListener) Stop() { +func (u *UDPListener) Stop() { u.Lock() defer u.Unlock() close(u.done) @@ -129,7 +129,7 @@ func (u *UdpListener) Stop() { u.Log.Infof("Stopped service on %q", u.ServiceAddress) } -func (u *UdpListener) udpListen() error { +func (u *UDPListener) udpListen() error { var err error address, _ := net.ResolveUDPAddr("udp", u.ServiceAddress) @@ -153,10 +153,10 @@ func (u *UdpListener) udpListen() error { return nil } -func (u *UdpListener) udpListenLoop() { +func (u *UDPListener) udpListenLoop() { defer u.wg.Done() - buf := make([]byte, UDP_MAX_PACKET_SIZE) + buf := make([]byte, UDPMaxPacketSize) for { select { case <-u.done: @@ -189,7 +189,7 @@ func (u *UdpListener) udpListenLoop() { } } -func (u *UdpListener) udpParser() error { +func (u *UDPListener) udpParser() error { defer u.wg.Done() var packet []byte @@ -219,7 +219,7 @@ func (u *UdpListener) udpParser() error { func init() { inputs.Add("udp_listener", func() telegraf.Input { - return &UdpListener{ + return &UDPListener{ ServiceAddress: ":8092", AllowedPendingMessages: 10000, } diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index b241235e4d61d..f3e034363e471 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -28,9 +28,9 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257 ` ) -func newTestUdpListener() (*UdpListener, chan []byte) { +func newTestUDPListener() (*UDPListener, chan []byte) { in := make(chan []byte, 1500) - listener := &UdpListener{ + listener := &UDPListener{ Log: testutil.Logger{}, ServiceAddress: ":8125", AllowedPendingMessages: 10000, @@ -41,7 +41,7 @@ func newTestUdpListener() (*UdpListener, chan []byte) { } // func TestHighTrafficUDP(t *testing.T) { -// listener := UdpListener{ +// listener := UDPListener{ // ServiceAddress: ":8126", // AllowedPendingMessages: 100000, // } @@ -78,7 +78,7 @@ func newTestUdpListener() (*UdpListener, chan []byte) { // } func TestConnectUDP(t *testing.T) { - listener := UdpListener{ + listener := UDPListener{ Log: testutil.Logger{}, ServiceAddress: ":8127", AllowedPendingMessages: 10000, @@ -117,7 +117,7 @@ func TestRunParser(t *testing.T) { log.SetOutput(ioutil.Discard) var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257\n") - listener, in := newTestUdpListener() + listener, in := newTestUDPListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) @@ -140,7 +140,7 @@ func TestRunParserInvalidMsg(t *testing.T) { log.SetOutput(ioutil.Discard) var testmsg = []byte("cpu_load_short") - listener, in := newTestUdpListener() + listener, in := newTestUDPListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) @@ -166,7 +166,7 @@ func TestRunParserGraphiteMsg(t *testing.T) { log.SetOutput(ioutil.Discard) var testmsg = []byte("cpu.load.graphite 12 1454780029") - listener, in := newTestUdpListener() + listener, in := newTestUDPListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) @@ -187,7 +187,7 @@ func TestRunParserJSONMsg(t *testing.T) { log.SetOutput(ioutil.Discard) var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n") - listener, in := newTestUdpListener() + listener, in := newTestUDPListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 9c1837713977b..7da90949e7445 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -310,7 +310,7 @@ func (e *Endpoint) init(ctx context.Context) error { return nil } -func (e *Endpoint) getMetricNameForId(id int32) string { +func (e *Endpoint) getMetricNameForID(id int32) string { e.metricNameMux.RLock() defer e.metricNameMux.RUnlock() return e.metricNameLookup[id] @@ -470,8 +470,8 @@ func (e *Endpoint) discover(ctx context.Context) error { dss := newObjects["datastore"] l2d := make(map[string]string) for _, ds := range dss { - lunId := ds.altID - m := isolateLUN.FindStringSubmatch(lunId) + lunID := ds.altID + m := isolateLUN.FindStringSubmatch(lunID) if m != nil { l2d[m[1]] = ds.name } @@ -567,7 +567,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, } else { m.Instance = "" } - if res.filters.Match(e.getMetricNameForId(m.CounterId)) { + if res.filters.Match(e.getMetricNameForID(m.CounterId)) { mMap[strconv.Itoa(int(m.CounterId))+"|"+m.Instance] = m } } @@ -712,7 +712,7 @@ func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap ips := make(map[string][]string) for _, ip := range net.IpConfig.IpAddress { addr := ip.IpAddress - for _, ipType := range e.Parent.IpAddresses { + for _, ipType := range e.Parent.IPAddresses { if !(ipType == "ipv4" && isIPv4.MatchString(addr) || ipType == "ipv6" && isIPv6.MatchString(addr)) { continue @@ -779,18 +779,18 @@ func getDatastores(ctx context.Context, e *Endpoint, filter *ResourceFilter) (ob } m := make(objectMap) for _, r := range resources { - lunId := "" + lunID := "" if r.Info != nil { info := r.Info.GetDatastoreInfo() if info != nil { - lunId = info.Url + lunID = info.Url } } m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ name: r.Name, ref: r.ExtensibleManagedObject.Reference(), parentRef: r.Parent, - altID: lunId, + altID: lunID, customValues: e.loadCustomAttributes(&r.ManagedEntity), } } @@ -898,7 +898,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim for metricIdx, metric := range res.metrics { // Determine time of last successful collection - metricName := e.getMetricNameForId(metric.CounterId) + metricName := e.getMetricNameForID(metric.CounterId) if metricName == "" { e.log.Infof("Unable to find metric name for id %d. Skipping!", metric.CounterId) continue diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 9bafcd92113c3..600780a57bee2 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -47,7 +47,7 @@ type VSphere struct { CustomAttributeInclude []string CustomAttributeExclude []string UseIntSamples bool - IpAddresses []string + IPAddresses []string MaxQueryObjects int MaxQueryMetrics int @@ -358,7 +358,7 @@ func init() { CustomAttributeInclude: []string{}, CustomAttributeExclude: []string{"*"}, UseIntSamples: true, - IpAddresses: []string{}, + IPAddresses: []string{}, MaxQueryObjects: 256, MaxQueryMetrics: 256, diff --git a/plugins/inputs/webhooks/filestack/filestack_webhooks_events.go b/plugins/inputs/webhooks/filestack/filestack_webhooks_events.go index 93f976f6074be..74d697b2cb0a6 100644 --- a/plugins/inputs/webhooks/filestack/filestack_webhooks_events.go +++ b/plugins/inputs/webhooks/filestack/filestack_webhooks_events.go @@ -5,7 +5,7 @@ import "strconv" type FilestackEvent struct { Action string `json:"action"` TimeStamp int64 `json:"timestamp"` - Id int `json:"id"` + ID int `json:"id"` } func (fe *FilestackEvent) Tags() map[string]string { @@ -16,6 +16,6 @@ func (fe *FilestackEvent) Tags() map[string]string { func (fe *FilestackEvent) Fields() map[string]interface{} { return map[string]interface{}{ - "id": strconv.Itoa(fe.Id), + "id": strconv.Itoa(fe.ID), } } diff --git a/plugins/inputs/webhooks/mandrill/mandrill_webhooks_events.go b/plugins/inputs/webhooks/mandrill/mandrill_webhooks_events.go index b36b13e541eef..242130545a5ae 100644 --- a/plugins/inputs/webhooks/mandrill/mandrill_webhooks_events.go +++ b/plugins/inputs/webhooks/mandrill/mandrill_webhooks_events.go @@ -8,7 +8,7 @@ type Event interface { type MandrillEvent struct { EventName string `json:"event"` TimeStamp int64 `json:"ts"` - Id string `json:"_id"` + ID string `json:"_id"` } func (me *MandrillEvent) Tags() map[string]string { @@ -19,6 +19,6 @@ func (me *MandrillEvent) Tags() map[string]string { func (me *MandrillEvent) Fields() map[string]interface{} { return map[string]interface{}{ - "id": me.Id, + "id": me.ID, } } diff --git a/plugins/inputs/webhooks/rollbar/rollbar_webhooks_events.go b/plugins/inputs/webhooks/rollbar/rollbar_webhooks_events.go index b9a3a0713cc16..ad5c54a037ecd 100644 --- a/plugins/inputs/webhooks/rollbar/rollbar_webhooks_events.go +++ b/plugins/inputs/webhooks/rollbar/rollbar_webhooks_events.go @@ -17,9 +17,9 @@ type NewItemDataItemLastOccurence struct { } type NewItemDataItem struct { - Id int `json:"id"` + ID int `json:"id"` Environment string `json:"environment"` - ProjectId int `json:"project_id"` + ProjectID int `json:"project_id"` LastOccurence NewItemDataItemLastOccurence `json:"last_occurrence"` } @@ -36,7 +36,7 @@ func (ni *NewItem) Tags() map[string]string { return map[string]string{ "event": ni.EventName, "environment": ni.Data.Item.Environment, - "project_id": strconv.Itoa(ni.Data.Item.ProjectId), + "project_id": strconv.Itoa(ni.Data.Item.ProjectID), "language": ni.Data.Item.LastOccurence.Language, "level": ni.Data.Item.LastOccurence.Level, } @@ -44,7 +44,7 @@ func (ni *NewItem) Tags() map[string]string { func (ni *NewItem) Fields() map[string]interface{} { return map[string]interface{}{ - "id": ni.Data.Item.Id, + "id": ni.Data.Item.ID, } } @@ -54,9 +54,9 @@ type OccurrenceDataOccurrence struct { } type OccurrenceDataItem struct { - Id int `json:"id"` + ID int `json:"id"` Environment string `json:"environment"` - ProjectId int `json:"project_id"` + ProjectID int `json:"project_id"` } type OccurrenceData struct { @@ -73,7 +73,7 @@ func (o *Occurrence) Tags() map[string]string { return map[string]string{ "event": o.EventName, "environment": o.Data.Item.Environment, - "project_id": strconv.Itoa(o.Data.Item.ProjectId), + "project_id": strconv.Itoa(o.Data.Item.ProjectID), "language": o.Data.Occurrence.Language, "level": o.Data.Occurrence.Level, } @@ -81,14 +81,14 @@ func (o *Occurrence) Tags() map[string]string { func (o *Occurrence) Fields() map[string]interface{} { return map[string]interface{}{ - "id": o.Data.Item.Id, + "id": o.Data.Item.ID, } } type DeployDataDeploy struct { - Id int `json:"id"` + ID int `json:"id"` Environment string `json:"environment"` - ProjectId int `json:"project_id"` + ProjectID int `json:"project_id"` } type DeployData struct { @@ -104,12 +104,12 @@ func (ni *Deploy) Tags() map[string]string { return map[string]string{ "event": ni.EventName, "environment": ni.Data.Deploy.Environment, - "project_id": strconv.Itoa(ni.Data.Deploy.ProjectId), + "project_id": strconv.Itoa(ni.Data.Deploy.ProjectID), } } func (ni *Deploy) Fields() map[string]interface{} { return map[string]interface{}{ - "id": ni.Data.Deploy.Id, + "id": ni.Data.Deploy.ID, } } diff --git a/plugins/inputs/zfs/zfs_linux_test.go b/plugins/inputs/zfs/zfs_linux_test.go index 133d1cafa53c9..8690fee4c3bf9 100644 --- a/plugins/inputs/zfs/zfs_linux_test.go +++ b/plugins/inputs/zfs/zfs_linux_test.go @@ -115,7 +115,7 @@ streams_resets 4 20989756 streams_noresets 4 503182328 bogus_streams 4 0 ` -const pool_ioContents = `11 3 0x00 1 80 2225326830828 32953476980628 +const poolIoContents = `11 3 0x00 1 80 2225326830828 32953476980628 nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt 1884160 6450688 22 978 272187126 2850519036 2263669418655 424226814 2850519036 2263669871823 0 0 ` @@ -142,7 +142,7 @@ erpt-set-failed 4 202 fmri-set-failed 4 303 payload-set-failed 4 404 ` -const dmu_txContents = `5 1 0x01 11 528 34103260832 437683925071438 +const dmuTxContents = `5 1 0x01 11 528 34103260832 437683925071438 name type data dmu_tx_assigned 4 39321636 dmu_tx_delay 4 111 @@ -252,7 +252,7 @@ func TestZfsPoolMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/HOME", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(pool_ioContents), 0644) + err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(poolIoContents), 0644) require.NoError(t, err) err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) @@ -306,7 +306,7 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = ioutil.WriteFile(testKstatPath+"/fm", []byte(fmContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/dmu_tx", []byte(dmu_txContents), 0644) + err = ioutil.WriteFile(testKstatPath+"/dmu_tx", []byte(dmuTxContents), 0644) require.NoError(t, err) err = ioutil.WriteFile(testKstatPath+"/abdstats", []byte(abdstatsContents), 0644) diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index 0cf54f3027180..6b6d21fc0d4f0 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -107,7 +107,7 @@ func (z *Zookeeper) Gather(acc telegraf.Accumulator) error { } func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error { - var zookeeper_state string + var zookeeperState string _, _, err := net.SplitHostPort(address) if err != nil { address = address + ":2181" @@ -145,7 +145,7 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr measurement := strings.TrimPrefix(parts[1], "zk_") if measurement == "server_state" { - zookeeper_state = parts[2] + zookeeperState = parts[2] } else { sValue := string(parts[2]) @@ -166,7 +166,7 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr tags := map[string]string{ "server": srv, "port": service[1], - "state": zookeeper_state, + "state": zookeeperState, } acc.AddFields("zookeeper", fields, tags) diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go index 52104eaf45e2b..864bd60c853f1 100644 --- a/plugins/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -86,7 +86,7 @@ func (a *Amon) Write(metrics []telegraf.Metric) error { if err != nil { return fmt.Errorf("unable to marshal TimeSeries, %s", err.Error()) } - req, err := http.NewRequest("POST", a.authenticatedUrl(), bytes.NewBuffer(tsBytes)) + req, err := http.NewRequest("POST", a.authenticatedURL(), bytes.NewBuffer(tsBytes)) if err != nil { return fmt.Errorf("unable to create http.Request, %s", err.Error()) } @@ -113,8 +113,7 @@ func (a *Amon) Description() string { return "Configuration for Amon Server to send metrics to." } -func (a *Amon) authenticatedUrl() string { - +func (a *Amon) authenticatedURL() string { return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey) } diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index cd57805e172cf..2bf1d2899fb9b 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -30,7 +30,7 @@ type AzureMonitor struct { StringsAsDimensions bool `toml:"strings_as_dimensions"` Region string `toml:"region"` ResourceID string `toml:"resource_id"` - EndpointUrl string `toml:"endpoint_url"` + EndpointURL string `toml:"endpoint_url"` Log telegraf.Logger `toml:"-"` url string @@ -158,7 +158,7 @@ func (a *AzureMonitor) Connect() error { var err error var region string var resourceID string - var endpointUrl string + var endpointURL string if a.Region == "" || a.ResourceID == "" { // Pull region and resource identifier @@ -173,8 +173,8 @@ func (a *AzureMonitor) Connect() error { if a.ResourceID != "" { resourceID = a.ResourceID } - if a.EndpointUrl != "" { - endpointUrl = a.EndpointUrl + if a.EndpointURL != "" { + endpointURL = a.EndpointURL } if resourceID == "" { @@ -183,10 +183,10 @@ func (a *AzureMonitor) Connect() error { return fmt.Errorf("no region configured or available via VM instance metadata") } - if endpointUrl == "" { + if endpointURL == "" { a.url = fmt.Sprintf(urlTemplate, region, resourceID) } else { - a.url = fmt.Sprintf(urlOverrideTemplate, endpointUrl, resourceID) + a.url = fmt.Sprintf(urlOverrideTemplate, endpointURL, resourceID) } a.Log.Debugf("Writing to Azure Monitor URL: %s", a.url) diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 52978539260bd..6b5d4437b63b1 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -47,7 +47,7 @@ type Metric struct { type Point [2]float64 -const datadogApi = "https://app.datadoghq.com/api/v1/series" +const datadogAPI = "https://app.datadoghq.com/api/v1/series" func (d *Datadog) Connect() error { if d.Apikey == "" { @@ -104,22 +104,22 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { return nil } - redactedApiKey := "****************" + redactedAPIKey := "****************" ts.Series = make([]*Metric, metricCounter) copy(ts.Series, tempSeries[0:]) tsBytes, err := json.Marshal(ts) if err != nil { return fmt.Errorf("unable to marshal TimeSeries, %s", err.Error()) } - req, err := http.NewRequest("POST", d.authenticatedUrl(), bytes.NewBuffer(tsBytes)) + req, err := http.NewRequest("POST", d.authenticatedURL(), bytes.NewBuffer(tsBytes)) if err != nil { - return fmt.Errorf("unable to create http.Request, %s", strings.Replace(err.Error(), d.Apikey, redactedApiKey, -1)) + return fmt.Errorf("unable to create http.Request, %s", strings.Replace(err.Error(), d.Apikey, redactedAPIKey, -1)) } req.Header.Add("Content-Type", "application/json") resp, err := d.client.Do(req) if err != nil { - return fmt.Errorf("error POSTing metrics, %s", strings.Replace(err.Error(), d.Apikey, redactedApiKey, -1)) + return fmt.Errorf("error POSTing metrics, %s", strings.Replace(err.Error(), d.Apikey, redactedAPIKey, -1)) } defer resp.Body.Close() @@ -138,7 +138,7 @@ func (d *Datadog) Description() string { return "Configuration for DataDog API to send metrics to." } -func (d *Datadog) authenticatedUrl() string { +func (d *Datadog) authenticatedURL() string { q := url.Values{ "api_key": []string{d.Apikey}, } @@ -208,7 +208,7 @@ func (d *Datadog) Close() error { func init() { outputs.Add("datadog", func() telegraf.Output { return &Datadog{ - URL: datadogApi, + URL: datadogAPI, } }) } diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go index ff88e093dc616..c893833b44398 100644 --- a/plugins/outputs/datadog/datadog_test.go +++ b/plugins/outputs/datadog/datadog_test.go @@ -17,8 +17,8 @@ import ( ) var ( - fakeUrl = "http://test.datadog.com" - fakeApiKey = "123456" + fakeURL = "http://test.datadog.com" + fakeAPIKey = "123456" ) func NewDatadog(url string) *Datadog { @@ -28,8 +28,8 @@ func NewDatadog(url string) *Datadog { } func fakeDatadog() *Datadog { - d := NewDatadog(fakeUrl) - d.Apikey = fakeApiKey + d := NewDatadog(fakeURL) + d.Apikey = fakeAPIKey return d } @@ -74,8 +74,8 @@ func TestBadStatusCode(t *testing.T) { func TestAuthenticatedUrl(t *testing.T) { d := fakeDatadog() - authUrl := d.authenticatedUrl() - assert.EqualValues(t, fmt.Sprintf("%s?api_key=%s", fakeUrl, fakeApiKey), authUrl) + authURL := d.authenticatedURL() + assert.EqualValues(t, fmt.Sprintf("%s?api_key=%s", fakeURL, fakeAPIKey), authURL) } func TestBuildTags(t *testing.T) { diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index afb97efb16a65..adabcb73a554d 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -19,7 +19,7 @@ import ( ) const ( - oneAgentMetricsUrl = "http://127.0.0.1:14499/metrics/ingest" + oneAgentMetricsURL = "http://127.0.0.1:14499/metrics/ingest" ) var ( @@ -273,9 +273,9 @@ func (d *Dynatrace) Init() error { d.State = make(map[string]string) if len(d.URL) == 0 { d.Log.Infof("Dynatrace URL is empty, defaulting to OneAgent metrics interface") - d.URL = oneAgentMetricsUrl + d.URL = oneAgentMetricsURL } - if d.URL != oneAgentMetricsUrl && len(d.APIToken) == 0 { + if d.URL != oneAgentMetricsURL && len(d.APIToken) == 0 { d.Log.Errorf("Dynatrace api_token is a required field for Dynatrace output") return fmt.Errorf("api_token is a required field for Dynatrace output") } diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index cf6549c72ff11..45f4f24d7d73e 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -87,9 +87,9 @@ func TestMissingURL(t *testing.T) { d.Log = testutil.Logger{} err := d.Init() - require.Equal(t, oneAgentMetricsUrl, d.URL) + require.Equal(t, oneAgentMetricsURL, d.URL) err = d.Connect() - require.Equal(t, oneAgentMetricsUrl, d.URL) + require.Equal(t, oneAgentMetricsURL, d.URL) require.NoError(t, err) } @@ -98,9 +98,9 @@ func TestMissingAPITokenMissingURL(t *testing.T) { d.Log = testutil.Logger{} err := d.Init() - require.Equal(t, oneAgentMetricsUrl, d.URL) + require.Equal(t, oneAgentMetricsURL, d.URL) err = d.Connect() - require.Equal(t, oneAgentMetricsUrl, d.URL) + require.Equal(t, oneAgentMetricsURL, d.URL) require.NoError(t, err) } diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go index 352d0357933b5..97ef94bbb4c34 100644 --- a/plugins/outputs/elasticsearch/elasticsearch.go +++ b/plugins/outputs/elasticsearch/elasticsearch.go @@ -33,7 +33,7 @@ type Elasticsearch struct { ManageTemplate bool TemplateName string OverwriteTemplate bool - ForceDocumentId bool + ForceDocumentID bool `toml:"force_document_id"` MajorReleaseNumber int tls.ClientConfig @@ -284,7 +284,7 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { br := elastic.NewBulkIndexRequest().Index(indexName).Doc(m) - if a.ForceDocumentId { + if a.ForceDocumentID { id := GetPointID(metric) br.Id(id) } diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index a8c68499ab323..ab6f05f866f4e 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -21,14 +21,14 @@ var ( ) type Instrumental struct { - Host string - ApiToken string - Prefix string - DataFormat string - Template string - Templates []string - Timeout internal.Duration - Debug bool + Host string `toml:"host"` + APIToken string `toml:"api_token"` + Prefix string `toml:"prefix"` + DataFormat string `toml:"data_format"` + Template string `toml:"template"` + Templates []string `toml:"templates"` + Timeout internal.Duration `toml:"timeout"` + Debug bool `toml:"debug"` Log telegraf.Logger `toml:"-"` @@ -140,10 +140,10 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { time := splitStat[2] // replace invalid components of metric name with underscore - clean_metric := MetricNameReplacer.ReplaceAllString(name, "_") + cleanMetric := MetricNameReplacer.ReplaceAllString(name, "_") if !ValueIncludesBadChar.MatchString(value) { - points = append(points, fmt.Sprintf("%s %s %s %s", metricType, clean_metric, value, time)) + points = append(points, fmt.Sprintf("%s %s %s %s", metricType, cleanMetric, value, time)) } } } @@ -176,7 +176,7 @@ func (i *Instrumental) SampleConfig() string { } func (i *Instrumental) authenticate(conn net.Conn) error { - _, err := fmt.Fprintf(conn, HandshakeFormat, i.ApiToken) + _, err := fmt.Fprintf(conn, HandshakeFormat, i.APIToken) if err != nil { return err } @@ -199,7 +199,7 @@ func init() { outputs.Add("instrumental", func() telegraf.Output { return &Instrumental{ Host: DefaultHost, - Template: graphite.DEFAULT_TEMPLATE, + Template: graphite.DefaultTemplate, } }) } diff --git a/plugins/outputs/instrumental/instrumental_test.go b/plugins/outputs/instrumental/instrumental_test.go index 0d3ce904008e6..6752069bb226b 100644 --- a/plugins/outputs/instrumental/instrumental_test.go +++ b/plugins/outputs/instrumental/instrumental_test.go @@ -20,7 +20,7 @@ func TestWrite(t *testing.T) { i := Instrumental{ Host: "127.0.0.1", - ApiToken: "abc123token", + APIToken: "abc123token", Prefix: "my.prefix", } diff --git a/plugins/outputs/logzio/logzio.go b/plugins/outputs/logzio/logzio.go index a174ba60bd1fc..b0ca8c7b3bc80 100644 --- a/plugins/outputs/logzio/logzio.go +++ b/plugins/outputs/logzio/logzio.go @@ -130,7 +130,7 @@ func (l *Logzio) Write(metrics []telegraf.Metric) error { } func (l *Logzio) send(metrics []byte) error { - req, err := http.NewRequest("POST", l.authUrl(), bytes.NewBuffer(metrics)) + req, err := http.NewRequest("POST", l.authURL(), bytes.NewBuffer(metrics)) if err != nil { return fmt.Errorf("unable to create http.Request, %s", err.Error()) } @@ -150,7 +150,7 @@ func (l *Logzio) send(metrics []byte) error { return nil } -func (l *Logzio) authUrl() string { +func (l *Logzio) authURL() string { return fmt.Sprintf("%s/?token=%s", l.URL, l.Token) } diff --git a/plugins/outputs/newrelic/newrelic.go b/plugins/outputs/newrelic/newrelic.go index 883e8911f60d5..ccd8b50d70b11 100644 --- a/plugins/outputs/newrelic/newrelic.go +++ b/plugins/outputs/newrelic/newrelic.go @@ -20,7 +20,7 @@ type NewRelic struct { InsightsKey string `toml:"insights_key"` MetricPrefix string `toml:"metric_prefix"` Timeout internal.Duration `toml:"timeout"` - HttpProxy string `toml:"http_proxy"` + HTTPProxy string `toml:"http_proxy"` harvestor *telemetry.Harvester dc *cumulative.DeltaCalculator @@ -167,12 +167,12 @@ func init() { } func (nr *NewRelic) initClient() error { - if nr.HttpProxy == "" { + if nr.HTTPProxy == "" { nr.client = http.Client{} return nil } - proxyURL, err := url.Parse(nr.HttpProxy) + proxyURL, err := url.Parse(nr.HTTPProxy) if err != nil { return err } diff --git a/plugins/outputs/newrelic/newrelic_test.go b/plugins/outputs/newrelic/newrelic_test.go index d6613e55fa535..1eedc63f44116 100644 --- a/plugins/outputs/newrelic/newrelic_test.go +++ b/plugins/outputs/newrelic/newrelic_test.go @@ -172,7 +172,7 @@ func TestNewRelic_Connect(t *testing.T) { name: "Test: HTTP Proxy", newrelic: &NewRelic{ InsightsKey: "12121212", - HttpProxy: "https://my.proxy", + HTTPProxy: "https://my.proxy", }, wantErr: false, }, diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index d41cc94d5a2aa..3d7fdf5cc5cc2 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -22,7 +22,7 @@ var ( `%`, "-", "#", "-", "$", "-") - defaultHttpPath = "/api/put" + defaultHTTPPath = "/api/put" defaultSeparator = "_" ) @@ -32,8 +32,8 @@ type OpenTSDB struct { Host string `toml:"host"` Port int `toml:"port"` - HttpBatchSize int `toml:"http_batch_size"` // deprecated httpBatchSize form in 1.8 - HttpPath string `toml:"http_path"` + HTTPBatchSize int `toml:"http_batch_size"` // deprecated httpBatchSize form in 1.8 + HTTPPath string `toml:"http_path"` Debug bool `toml:"debug"` @@ -116,20 +116,20 @@ func (o *OpenTSDB) Write(metrics []telegraf.Metric) error { if u.Scheme == "" || u.Scheme == "tcp" { return o.WriteTelnet(metrics, u) } else if u.Scheme == "http" || u.Scheme == "https" { - return o.WriteHttp(metrics, u) + return o.WriteHTTP(metrics, u) } else { return fmt.Errorf("unknown scheme in host parameter") } } -func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error { +func (o *OpenTSDB) WriteHTTP(metrics []telegraf.Metric, u *url.URL) error { http := openTSDBHttp{ Host: u.Host, Port: o.Port, Scheme: u.Scheme, User: u.User, - BatchSize: o.HttpBatchSize, - Path: o.HttpPath, + BatchSize: o.HTTPBatchSize, + Path: o.HTTPPath, Debug: o.Debug, } @@ -151,7 +151,7 @@ func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error { continue } - metric := &HttpMetric{ + metric := &HTTPMetric{ Metric: sanitize(fmt.Sprintf("%s%s%s%s", o.Prefix, m.Name(), o.Separator, fieldName)), Tags: tags, @@ -276,7 +276,7 @@ func sanitize(value string) string { func init() { outputs.Add("opentsdb", func() telegraf.Output { return &OpenTSDB{ - HttpPath: defaultHttpPath, + HTTPPath: defaultHTTPPath, Separator: defaultSeparator, } }) diff --git a/plugins/outputs/opentsdb/opentsdb_http.go b/plugins/outputs/opentsdb/opentsdb_http.go index 4f971abb639aa..b164765850578 100644 --- a/plugins/outputs/opentsdb/opentsdb_http.go +++ b/plugins/outputs/opentsdb/opentsdb_http.go @@ -13,7 +13,7 @@ import ( "net/url" ) -type HttpMetric struct { +type HTTPMetric struct { Metric string `json:"metric"` Timestamp int64 `json:"timestamp"` Value interface{} `json:"value"` @@ -68,7 +68,7 @@ func (r *requestBody) reset(debug bool) { r.empty = true } -func (r *requestBody) addMetric(metric *HttpMetric) error { +func (r *requestBody) addMetric(metric *HTTPMetric) error { if !r.empty { io.WriteString(r.w, ",") } @@ -92,7 +92,7 @@ func (r *requestBody) close() error { return nil } -func (o *openTSDBHttp) sendDataPoint(metric *HttpMetric) error { +func (o *openTSDBHttp) sendDataPoint(metric *HTTPMetric) error { if o.metricCounter == 0 { o.body.reset(o.Debug) } diff --git a/plugins/outputs/opentsdb/opentsdb_test.go b/plugins/outputs/opentsdb/opentsdb_test.go index 16d764ebe7eb8..89748d055d9d5 100644 --- a/plugins/outputs/opentsdb/opentsdb_test.go +++ b/plugins/outputs/opentsdb/opentsdb_test.go @@ -155,8 +155,8 @@ func BenchmarkHttpSend(b *testing.B) { Host: ts.URL, Port: port, Prefix: "", - HttpBatchSize: BatchSize, - HttpPath: "/api/put", + HTTPBatchSize: BatchSize, + HTTPPath: "/api/put", } b.ResetTimer() diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go index 3f3f1b54b18af..b6882dceaffec 100644 --- a/plugins/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -63,12 +63,12 @@ var sampleConfig = ` ` func (r *Riemann) Connect() error { - parsed_url, err := url.Parse(r.URL) + parsedURL, err := url.Parse(r.URL) if err != nil { return err } - client, err := raidman.DialWithTimeout(parsed_url.Scheme, parsed_url.Host, r.Timeout.Duration) + client, err := raidman.DialWithTimeout(parsedURL.Scheme, parsedURL.Host, r.Timeout.Duration) if err != nil { r.client = nil return err diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go index 98ae51b8df79a..3c20583e15e20 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -29,7 +29,7 @@ func TestSocketWriter_tcp(t *testing.T) { lconn, err := listener.Accept() require.NoError(t, err) - testSocketWriter_stream(t, sw, lconn) + testSocketWriterStream(t, sw, lconn) } func TestSocketWriter_udp(t *testing.T) { @@ -42,7 +42,7 @@ func TestSocketWriter_udp(t *testing.T) { err = sw.Connect() require.NoError(t, err) - testSocketWriter_packet(t, sw, listener) + testSocketWriterPacket(t, sw, listener) } func TestSocketWriter_unix(t *testing.T) { @@ -63,7 +63,7 @@ func TestSocketWriter_unix(t *testing.T) { lconn, err := listener.Accept() require.NoError(t, err) - testSocketWriter_stream(t, sw, lconn) + testSocketWriterStream(t, sw, lconn) } func TestSocketWriter_unixgram(t *testing.T) { @@ -85,10 +85,10 @@ func TestSocketWriter_unixgram(t *testing.T) { err = sw.Connect() require.NoError(t, err) - testSocketWriter_packet(t, sw, listener) + testSocketWriterPacket(t, sw, listener) } -func testSocketWriter_stream(t *testing.T, sw *SocketWriter, lconn net.Conn) { +func testSocketWriterStream(t *testing.T, sw *SocketWriter, lconn net.Conn) { metrics := []telegraf.Metric{} metrics = append(metrics, testutil.TestMetric(1, "test")) mbs1out, _ := sw.Serialize(metrics[0]) @@ -110,7 +110,7 @@ func testSocketWriter_stream(t *testing.T, sw *SocketWriter, lconn net.Conn) { assert.Equal(t, string(mbs2out), mstr2in) } -func testSocketWriter_packet(t *testing.T, sw *SocketWriter, lconn net.PacketConn) { +func testSocketWriterPacket(t *testing.T, sw *SocketWriter, lconn net.PacketConn) { metrics := []telegraf.Metric{} metrics = append(metrics, testutil.TestMetric(1, "test")) mbs1out, _ := sw.Serialize(metrics[0]) @@ -212,5 +212,5 @@ func TestSocketWriter_udp_gzip(t *testing.T) { err = sw.Connect() require.NoError(t, err) - testSocketWriter_packet(t, sw, listener) + testSocketWriterPacket(t, sw, listener) } diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index f3f1fc94bbdb7..885bee8c0b74a 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -13,24 +13,24 @@ import ( const maxTagLength = 254 type Wavefront struct { - Url string - Token string - Host string - Port int - Prefix string - SimpleFields bool - MetricSeparator string - ConvertPaths bool - ConvertBool bool - UseRegex bool - UseStrict bool - TruncateTags bool - ImmediateFlush bool - SourceOverride []string - StringToNumber map[string][]map[string]float64 + URL string `toml:"url"` + Token string `toml:"token"` + Host string `toml:"host"` + Port int `toml:"port"` + Prefix string `toml:"prefix"` + SimpleFields bool `toml:"simple_fields"` + MetricSeparator string `toml:"metric_separator"` + ConvertPaths bool `toml:"convert_paths"` + ConvertBool bool `toml:"convert_bool"` + UseRegex bool `toml:"use_regex"` + UseStrict bool `toml:"use_strict"` + TruncateTags bool `toml:"truncate_tags"` + ImmediateFlush bool `toml:"immediate_flush"` + SourceOverride []string `toml:"source_override"` + StringToNumber map[string][]map[string]float64 `toml:"string_to_number"` sender wavefront.Sender - Log telegraf.Logger + Log telegraf.Logger `toml:"-"` } // catch many of the invalid chars that could appear in a metric or tag name @@ -134,15 +134,15 @@ func (w *Wavefront) Connect() error { if w.ImmediateFlush { flushSeconds = 86400 // Set a very long flush interval if we're flushing directly } - if w.Url != "" { - w.Log.Debug("connecting over http/https using Url: %s", w.Url) + if w.URL != "" { + w.Log.Debug("connecting over http/https using Url: %s", w.URL) sender, err := wavefront.NewDirectSender(&wavefront.DirectConfiguration{ - Server: w.Url, + Server: w.URL, Token: w.Token, FlushIntervalSeconds: flushSeconds, }) if err != nil { - return fmt.Errorf("could not create Wavefront Sender for Url: %s", w.Url) + return fmt.Errorf("could not create Wavefront Sender for Url: %s", w.URL) } w.sender = sender } else { diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go index 36fd4ab0bef9f..c51a7f3b246bf 100644 --- a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go @@ -18,7 +18,7 @@ import ( // service type YandexCloudMonitoring struct { Timeout internal.Duration `toml:"timeout"` - EndpointUrl string `toml:"endpoint_url"` + EndpointURL string `toml:"endpoint_url"` Service string `toml:"service"` Log telegraf.Logger @@ -58,9 +58,9 @@ type MetadataIamToken struct { const ( defaultRequestTimeout = time.Second * 20 - defaultEndpointUrl = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" - defaultMetadataTokenUrl = "http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token" - defaultMetadataFolderUrl = "http://169.254.169.254/computeMetadata/v1/instance/attributes/folder-id" + defaultEndpointURL = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" + defaultMetadataTokenURL = "http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token" + defaultMetadataFolderURL = "http://169.254.169.254/computeMetadata/v1/instance/attributes/folder-id" ) var sampleConfig = ` @@ -89,17 +89,17 @@ func (a *YandexCloudMonitoring) Connect() error { if a.Timeout.Duration <= 0 { a.Timeout.Duration = defaultRequestTimeout } - if a.EndpointUrl == "" { - a.EndpointUrl = defaultEndpointUrl + if a.EndpointURL == "" { + a.EndpointURL = defaultEndpointURL } if a.Service == "" { a.Service = "custom" } if a.MetadataTokenURL == "" { - a.MetadataTokenURL = defaultMetadataTokenUrl + a.MetadataTokenURL = defaultMetadataTokenURL } if a.MetadataFolderURL == "" { - a.MetadataFolderURL = defaultMetadataFolderUrl + a.MetadataFolderURL = defaultMetadataFolderURL } a.client = &http.Client{ @@ -115,7 +115,7 @@ func (a *YandexCloudMonitoring) Connect() error { return err } - a.Log.Infof("Writing to Yandex.Cloud Monitoring URL: %s", a.EndpointUrl) + a.Log.Infof("Writing to Yandex.Cloud Monitoring URL: %s", a.EndpointURL) tags := map[string]string{} a.MetricOutsideWindow = selfstat.Register("yandex_cloud_monitoring", "metric_outside_window", tags) @@ -161,8 +161,8 @@ func (a *YandexCloudMonitoring) Write(metrics []telegraf.Metric) error { return a.send(body) } -func getResponseFromMetadata(c *http.Client, metadataUrl string) ([]byte, error) { - req, err := http.NewRequest("GET", metadataUrl, nil) +func getResponseFromMetadata(c *http.Client, metadataURL string) ([]byte, error) { + req, err := http.NewRequest("GET", metadataURL, nil) if err != nil { return nil, fmt.Errorf("error creating request: %v", err) } @@ -179,7 +179,7 @@ func getResponseFromMetadata(c *http.Client, metadataUrl string) ([]byte, error) } if resp.StatusCode >= 300 || resp.StatusCode < 200 { return nil, fmt.Errorf("unable to fetch instance metadata: [%s] %d", - metadataUrl, resp.StatusCode) + metadataURL, resp.StatusCode) } return body, nil } @@ -214,7 +214,7 @@ func (a *YandexCloudMonitoring) getIAMTokenFromMetadata() (string, int, error) { } func (a *YandexCloudMonitoring) send(body []byte) error { - req, err := http.NewRequest("POST", a.EndpointUrl, bytes.NewBuffer(body)) + req, err := http.NewRequest("POST", a.EndpointURL, bytes.NewBuffer(body)) if err != nil { return err } diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go index edd2960bf0cff..db62358777c9a 100644 --- a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go @@ -22,7 +22,7 @@ func TestWrite(t *testing.T) { return message, nil } - testMetadataHttpServer := httptest.NewServer( + testMetadataHTTPServer := httptest.NewServer( http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.HasSuffix(r.URL.Path, "/token") { token := MetadataIamToken{ @@ -39,9 +39,9 @@ func TestWrite(t *testing.T) { w.WriteHeader(http.StatusOK) }), ) - defer testMetadataHttpServer.Close() - metadataTokenUrl := "http://" + testMetadataHttpServer.Listener.Addr().String() + "/token" - metadataFolderUrl := "http://" + testMetadataHttpServer.Listener.Addr().String() + "/folder" + defer testMetadataHTTPServer.Close() + metadataTokenURL := "http://" + testMetadataHTTPServer.Listener.Addr().String() + "/token" + metadataFolderURL := "http://" + testMetadataHTTPServer.Listener.Addr().String() + "/folder" ts := httptest.NewServer(http.NotFoundHandler()) defer ts.Close() @@ -82,9 +82,9 @@ func TestWrite(t *testing.T) { tt.handler(t, w, r) }) tt.plugin.Log = testutil.Logger{} - tt.plugin.EndpointUrl = url - tt.plugin.MetadataTokenURL = metadataTokenUrl - tt.plugin.MetadataFolderURL = metadataFolderUrl + tt.plugin.EndpointURL = url + tt.plugin.MetadataTokenURL = metadataTokenURL + tt.plugin.MetadataFolderURL = metadataFolderURL err := tt.plugin.Connect() require.NoError(t, err) diff --git a/plugins/parsers/grok/influx_patterns.go b/plugins/parsers/grok/influx_patterns.go index 282c28111b14c..428d129fc2394 100644 --- a/plugins/parsers/grok/influx_patterns.go +++ b/plugins/parsers/grok/influx_patterns.go @@ -1,6 +1,6 @@ package grok -const DEFAULT_PATTERNS = ` +const DefaultPatterns = ` # Example log file pattern, example log looks like this: # [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs # Breakdown of the DURATION pattern below: diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index 810190b9d2f12..cdf787d8f229b 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -38,18 +38,18 @@ var timeLayouts = map[string]string{ } const ( - MEASUREMENT = "measurement" - INT = "int" - TAG = "tag" - FLOAT = "float" - STRING = "string" - DURATION = "duration" - DROP = "drop" - EPOCH = "EPOCH" - EPOCH_MILLI = "EPOCH_MILLI" - EPOCH_NANO = "EPOCH_NANO" - SYSLOG_TIMESTAMP = "SYSLOG_TIMESTAMP" - GENERIC_TIMESTAMP = "GENERIC_TIMESTAMP" + Measurement = "measurement" + Int = "int" + Tag = "tag" + Float = "float" + String = "string" + Duration = "duration" + Drop = "drop" + Epoch = "EPOCH" + EpochMilli = "EPOCH_MILLI" + EpochNano = "EPOCH_NANO" + SyslogTimestamp = "SYSLOG_TIMESTAMP" + GenericTimestamp = "GENERIC_TIMESTAMP" ) var ( @@ -161,7 +161,7 @@ func (p *Parser) Compile() error { // Combine user-supplied CustomPatterns with DEFAULT_PATTERNS and parse // them together as the same type of pattern. - p.CustomPatterns = DEFAULT_PATTERNS + p.CustomPatterns + p.CustomPatterns = DefaultPatterns + p.CustomPatterns if len(p.CustomPatterns) != 0 { scanner := bufio.NewScanner(strings.NewReader(p.CustomPatterns)) p.addCustomPatterns(scanner) @@ -243,38 +243,38 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } // if we didn't find a type OR timestamp modifier, assume string if t == "" { - t = STRING + t = String } switch t { - case MEASUREMENT: + case Measurement: p.Measurement = v - case INT: + case Int: iv, err := strconv.ParseInt(v, 0, 64) if err != nil { log.Printf("E! Error parsing %s to int: %s", v, err) } else { fields[k] = iv } - case FLOAT: + case Float: fv, err := strconv.ParseFloat(v, 64) if err != nil { log.Printf("E! Error parsing %s to float: %s", v, err) } else { fields[k] = fv } - case DURATION: + case Duration: d, err := time.ParseDuration(v) if err != nil { log.Printf("E! Error parsing %s to duration: %s", v, err) } else { fields[k] = int64(d) } - case TAG: + case Tag: tags[k] = v - case STRING: + case String: fields[k] = v - case EPOCH: + case Epoch: parts := strings.SplitN(v, ".", 2) if len(parts) == 0 { log.Printf("E! Error parsing %s to timestamp: %s", v, err) @@ -299,21 +299,21 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { ts = ts.Add(time.Duration(nanosec) * time.Nanosecond) } timestamp = ts - case EPOCH_MILLI: + case EpochMilli: ms, err := strconv.ParseInt(v, 10, 64) if err != nil { log.Printf("E! Error parsing %s to int: %s", v, err) } else { timestamp = time.Unix(0, ms*int64(time.Millisecond)) } - case EPOCH_NANO: + case EpochNano: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { log.Printf("E! Error parsing %s to int: %s", v, err) } else { timestamp = time.Unix(0, iv) } - case SYSLOG_TIMESTAMP: + case SyslogTimestamp: ts, err := time.ParseInLocation(time.Stamp, v, p.loc) if err == nil { if ts.Year() == 0 { @@ -323,7 +323,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } else { log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err) } - case GENERIC_TIMESTAMP: + case GenericTimestamp: var foundTs bool // first try timestamp layouts that we've already found for _, layout := range p.foundTsLayouts { @@ -353,7 +353,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { log.Printf("E! Error parsing timestamp [%s], could not find any "+ "suitable time layouts.", v) } - case DROP: + case Drop: // goodbye! default: v = strings.Replace(v, ",", ".", -1) diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go index 5ed37645cdd74..7c9487369d5b8 100644 --- a/plugins/parsers/wavefront/element.go +++ b/plugins/parsers/wavefront/element.go @@ -55,12 +55,12 @@ func (ep *ValueParser) parse(p *PointParser, pt *Point) error { } p.writeBuf.Reset() - if tok == MINUS_SIGN { + if tok == MinusSign { p.writeBuf.WriteString(lit) tok, lit = p.scan() } - for tok != EOF && (tok == LETTER || tok == NUMBER || tok == DOT || tok == MINUS_SIGN) { + for tok != EOF && (tok == Letter || tok == Number || tok == Dot || tok == MinusSign) { p.writeBuf.WriteString(lit) tok, lit = p.scan() } @@ -84,7 +84,7 @@ func (ep *TimestampParser) parse(p *PointParser, pt *Point) error { return fmt.Errorf("found %q, expected number", lit) } - if tok != NUMBER { + if tok != Number { if ep.optional { p.unscanTokens(2) return setTimestamp(pt, 0, 1) @@ -93,7 +93,7 @@ func (ep *TimestampParser) parse(p *PointParser, pt *Point) error { } p.writeBuf.Reset() - for tok != EOF && tok == NUMBER { + for tok != EOF && tok == Number { p.writeBuf.WriteString(lit) tok, lit = p.scan() } @@ -154,7 +154,7 @@ func (ep *TagParser) parse(p *PointParser, pt *Point) error { } next, lit := p.scan() - if next != EQUALS { + if next != Equals { return fmt.Errorf("found %q, expected equals", lit) } @@ -170,8 +170,8 @@ func (ep *TagParser) parse(p *PointParser, pt *Point) error { } func (ep *WhiteSpaceParser) parse(p *PointParser, pt *Point) error { - tok := WS - for tok != EOF && tok == WS { + tok := Ws + for tok != EOF && tok == Ws { tok, _ = p.scan() } @@ -202,9 +202,9 @@ func parseQuotedLiteral(p *PointParser) (string, error) { escaped := false tok, lit := p.scan() - for tok != EOF && (tok != QUOTES || (tok == QUOTES && escaped)) { + for tok != EOF && (tok != Quotes || (tok == Quotes && escaped)) { // let everything through - escaped = tok == BACKSLASH + escaped = tok == Backslash p.writeBuf.WriteString(lit) tok, lit = p.scan() } @@ -220,19 +220,19 @@ func parseLiteral(p *PointParser) (string, error) { return "", fmt.Errorf("found %q, expected literal", lit) } - if tok == QUOTES { + if tok == Quotes { return parseQuotedLiteral(p) } p.writeBuf.Reset() - for tok != EOF && tok > literal_beg && tok < literal_end { + for tok != EOF && tok > literalBeg && tok < literalEnd { p.writeBuf.WriteString(lit) tok, lit = p.scan() - if tok == DELTA { + if tok == Delta { return "", errors.New("found delta inside metric name") } } - if tok == QUOTES { + if tok == Quotes { return "", errors.New("found quote inside unquoted literal") } p.unscan() diff --git a/plugins/parsers/wavefront/scanner.go b/plugins/parsers/wavefront/scanner.go index a528f72ee52cd..abdbd6d1b4b8f 100644 --- a/plugins/parsers/wavefront/scanner.go +++ b/plugins/parsers/wavefront/scanner.go @@ -35,13 +35,13 @@ func (s *PointScanner) Scan() (Token, string) { // Read the next rune ch := s.read() if isWhitespace(ch) { - return WS, string(ch) + return Ws, string(ch) } else if isLetter(ch) { - return LETTER, string(ch) + return Letter, string(ch) } else if isNumber(ch) { - return NUMBER, string(ch) + return Number, string(ch) } else if isDelta(ch) { - return DELTA, string(ch) + return Delta, string(ch) } // Otherwise read the individual character. @@ -49,23 +49,23 @@ func (s *PointScanner) Scan() (Token, string) { case eof: return EOF, "" case '\n': - return NEWLINE, string(ch) + return Newline, string(ch) case '.': - return DOT, string(ch) + return Dot, string(ch) case '-': - return MINUS_SIGN, string(ch) + return MinusSign, string(ch) case '_': - return UNDERSCORE, string(ch) + return Underscore, string(ch) case '/': - return SLASH, string(ch) + return Slash, string(ch) case '\\': - return BACKSLASH, string(ch) + return Backslash, string(ch) case ',': - return COMMA, string(ch) + return Comma, string(ch) case '"': - return QUOTES, string(ch) + return Quotes, string(ch) case '=': - return EQUALS, string(ch) + return Equals, string(ch) } - return ILLEGAL, string(ch) + return Illegal, string(ch) } diff --git a/plugins/parsers/wavefront/token.go b/plugins/parsers/wavefront/token.go index 5b77d0cdbb69b..68619e21bb6c4 100644 --- a/plugins/parsers/wavefront/token.go +++ b/plugins/parsers/wavefront/token.go @@ -4,27 +4,27 @@ type Token int const ( // Special tokens - ILLEGAL Token = iota + Illegal Token = iota EOF - WS + Ws // Literals - literal_beg - LETTER // metric name, source/point tags - NUMBER - MINUS_SIGN - UNDERSCORE - DOT - SLASH - BACKSLASH - COMMA - DELTA - literal_end + literalBeg + Letter // metric name, source/point tags + Number + MinusSign + Underscore + Dot + Slash + Backslash + Comma + Delta + literalEnd // Misc characters - QUOTES - EQUALS - NEWLINE + Quotes + Equals + Newline ) func isWhitespace(ch rune) bool { diff --git a/plugins/processors/enum/enum_test.go b/plugins/processors/enum/enum_test.go index 21b89d241a2a2..fdf9131ef3d8d 100644 --- a/plugins/processors/enum/enum_test.go +++ b/plugins/processors/enum/enum_test.go @@ -116,13 +116,13 @@ func TestMappings(t *testing.T) { } for _, mapping := range mappings { - field_name := mapping["field_name"][0].(string) + fieldName := mapping["field_name"][0].(string) for index := range mapping["target_value"] { - mapper := EnumMapper{Mappings: []Mapping{{Field: field_name, ValueMappings: map[string]interface{}{mapping["target_value"][index].(string): mapping["mapped_value"][index]}}}} + mapper := EnumMapper{Mappings: []Mapping{{Field: fieldName, ValueMappings: map[string]interface{}{mapping["target_value"][index].(string): mapping["mapped_value"][index]}}}} err := mapper.Init() assert.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) - assertFieldValue(t, mapping["expected_value"][index], field_name, fields) + assertFieldValue(t, mapping["expected_value"][index], fieldName, fields) } } } diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index c7f6e2a74d825..d6c696b75bcf1 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -143,13 +143,13 @@ func (d *IfName) addTag(metric telegraf.Metric) error { return nil } - num_s, ok := metric.GetTag(d.SourceTag) + numS, ok := metric.GetTag(d.SourceTag) if !ok { d.Log.Warn("Source tag missing.") return nil } - num, err := strconv.ParseUint(num_s, 10, 64) + num, err := strconv.ParseUint(numS, 10, 64) if err != nil { return fmt.Errorf("couldn't parse source tag as uint") } @@ -378,21 +378,21 @@ func buildMap(gs snmp.GosnmpWrapper, tab *si.Table, column string) (nameMap, err t := make(nameMap) for _, v := range rtab.Rows { - i_str, ok := v.Tags["index"] + iStr, ok := v.Tags["index"] if !ok { //should always have an index tag because the table should //always have IndexAsTag true return nil, fmt.Errorf("no index tag") } - i, err := strconv.ParseUint(i_str, 10, 64) + i, err := strconv.ParseUint(iStr, 10, 64) if err != nil { return nil, fmt.Errorf("index tag isn't a uint") } - name_if, ok := v.Fields[column] + nameIf, ok := v.Fields[column] if !ok { return nil, fmt.Errorf("field %s is missing", column) } - name, ok := name_if.(string) + name, ok := nameIf.(string) if !ok { return nil, fmt.Errorf("field %s isn't a string", column) } diff --git a/plugins/processors/starlark/builtins.go b/plugins/processors/starlark/builtins.go index c2a30a0dc9379..66a7c9f9e0308 100644 --- a/plugins/processors/starlark/builtins.go +++ b/plugins/processors/starlark/builtins.go @@ -80,7 +80,7 @@ func nameErr(b *starlark.Builtin, msg interface{}) error { // --- dictionary methods --- // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·clear -func dict_clear(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictClear(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } @@ -92,7 +92,7 @@ func dict_clear(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tupl } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·pop -func dict_pop(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictPop(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var k, d starlark.Value if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &k, &d); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) @@ -112,7 +112,7 @@ func dict_pop(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·popitem -func dict_popitem(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictPopitem(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } @@ -124,7 +124,7 @@ func dict_popitem(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tu } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·get -func dict_get(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictGet(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var key, dflt starlark.Value if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) @@ -140,7 +140,7 @@ func dict_get(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·setdefault -func dict_setdefault(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictSetdefault(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var key, dflt starlark.Value = nil, starlark.None if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) @@ -161,7 +161,7 @@ func dict_setdefault(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update -func dict_update(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictUpdate(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { // Unpack the arguments if len(args) > 1 { return nil, fmt.Errorf("update: got %d arguments, want at most 1", len(args)) @@ -234,7 +234,7 @@ func dict_update(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tup } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·items -func dict_items(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictItems(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } @@ -247,7 +247,7 @@ func dict_items(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tupl } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·keys -func dict_keys(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictKeys(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } @@ -261,7 +261,7 @@ func dict_keys(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update -func dict_values(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictValues(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } diff --git a/plugins/processors/starlark/field_dict.go b/plugins/processors/starlark/field_dict.go index 1e48ac7c02cc2..af32da185ba11 100644 --- a/plugins/processors/starlark/field_dict.go +++ b/plugins/processors/starlark/field_dict.go @@ -59,15 +59,15 @@ func (d FieldDict) Attr(name string) (starlark.Value, error) { } var FieldDictMethods = map[string]builtinMethod{ - "clear": dict_clear, - "get": dict_get, - "items": dict_items, - "keys": dict_keys, - "pop": dict_pop, - "popitem": dict_popitem, - "setdefault": dict_setdefault, - "update": dict_update, - "values": dict_values, + "clear": dictClear, + "get": dictGet, + "items": dictItems, + "keys": dictKeys, + "pop": dictPop, + "popitem": dictPopitem, + "setdefault": dictSetdefault, + "update": dictUpdate, + "values": dictValues, } // Get implements the starlark.Mapping interface. diff --git a/plugins/processors/starlark/tag_dict.go b/plugins/processors/starlark/tag_dict.go index 3d95264382db5..b17a6e2f0b6a3 100644 --- a/plugins/processors/starlark/tag_dict.go +++ b/plugins/processors/starlark/tag_dict.go @@ -58,15 +58,15 @@ func (d TagDict) Attr(name string) (starlark.Value, error) { } var TagDictMethods = map[string]builtinMethod{ - "clear": dict_clear, - "get": dict_get, - "items": dict_items, - "keys": dict_keys, - "pop": dict_pop, - "popitem": dict_popitem, - "setdefault": dict_setdefault, - "update": dict_update, - "values": dict_values, + "clear": dictClear, + "get": dictGet, + "items": dictItems, + "keys": dictKeys, + "pop": dictPop, + "popitem": dictPopitem, + "setdefault": dictSetdefault, + "update": dictUpdate, + "values": dictValues, } // Get implements the starlark.Mapping interface. diff --git a/plugins/processors/tag_limit/tag_limit.go b/plugins/processors/tag_limit/tag_limit.go index 41353a8f863c4..1b48739a189f1 100644 --- a/plugins/processors/tag_limit/tag_limit.go +++ b/plugins/processors/tag_limit/tag_limit.go @@ -39,8 +39,8 @@ func (d *TagLimit) initOnce() error { } d.keepTags = make(map[string]string) // convert list of tags-to-keep to a map so we can do constant-time lookups - for _, tag_key := range d.Keep { - d.keepTags[tag_key] = "" + for _, tagKey := range d.Keep { + d.keepTags[tagKey] = "" } d.init = true return nil diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index e3eee9da3d07e..f71e97fa419c4 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -13,7 +13,7 @@ import ( "github.com/influxdata/telegraf/filter" ) -const DEFAULT_TEMPLATE = "host.tags.measurement.field" +const DefaultTemplate = "host.tags.measurement.field" var ( allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-:._=\p{L}]`) @@ -141,7 +141,7 @@ func formatValue(value interface{}) string { // SerializeBucketName will take the given measurement name and tags and // produce a graphite bucket. It will use the GraphiteSerializer.Template -// to generate this, or DEFAULT_TEMPLATE. +// to generate this, or DefaultTemplate. // // NOTE: SerializeBucketName replaces the "field" portion of the template with // FIELDNAME. It is up to the user to replace this. This is so that @@ -154,7 +154,7 @@ func SerializeBucketName( prefix string, ) string { if template == "" { - template = DEFAULT_TEMPLATE + template = DefaultTemplate } tagsCopy := make(map[string]string) for k, v := range tags { @@ -296,16 +296,16 @@ func buildTags(tags map[string]string) string { } sort.Strings(keys) - var tag_str string + var tagStr string for i, k := range keys { - tag_value := strings.Replace(tags[k], ".", "_", -1) + tagValue := strings.Replace(tags[k], ".", "_", -1) if i == 0 { - tag_str += tag_value + tagStr += tagValue } else { - tag_str += "." + tag_value + tagStr += "." + tagValue } } - return tag_str + return tagStr } func sanitize(value string) string { diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go index b6fcad696dc2e..2b93b16df4e4d 100644 --- a/plugins/serializers/graphite/graphite_test.go +++ b/plugins/serializers/graphite/graphite_test.go @@ -809,11 +809,11 @@ func TestTemplate6(t *testing.T) { func TestClean(t *testing.T) { now := time.Unix(1234567890, 0) tests := []struct { - name string - metric_name string - tags map[string]string - fields map[string]interface{} - expected string + name string + metricName string + tags map[string]string + fields map[string]interface{} + expected string }{ { "Base metric", @@ -890,7 +890,7 @@ func TestClean(t *testing.T) { s := GraphiteSerializer{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New(tt.metric_name, tt.tags, tt.fields, now) + m, err := metric.New(tt.metricName, tt.tags, tt.fields, now) assert.NoError(t, err) actual, _ := s.Serialize(m) require.Equal(t, tt.expected, string(actual)) @@ -901,11 +901,11 @@ func TestClean(t *testing.T) { func TestCleanWithTagsSupport(t *testing.T) { now := time.Unix(1234567890, 0) tests := []struct { - name string - metric_name string - tags map[string]string - fields map[string]interface{} - expected string + name string + metricName string + tags map[string]string + fields map[string]interface{} + expected string }{ { "Base metric", @@ -985,7 +985,7 @@ func TestCleanWithTagsSupport(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New(tt.metric_name, tt.tags, tt.fields, now) + m, err := metric.New(tt.metricName, tt.tags, tt.fields, now) assert.NoError(t, err) actual, _ := s.Serialize(m) require.Equal(t, tt.expected, string(actual)) @@ -996,11 +996,11 @@ func TestCleanWithTagsSupport(t *testing.T) { func TestSerializeBatch(t *testing.T) { now := time.Unix(1234567890, 0) tests := []struct { - name string - metric_name string - tags map[string]string - fields map[string]interface{} - expected string + name string + metricName string + tags map[string]string + fields map[string]interface{} + expected string }{ { "Base metric", @@ -1014,7 +1014,7 @@ func TestSerializeBatch(t *testing.T) { s := GraphiteSerializer{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New(tt.metric_name, tt.tags, tt.fields, now) + m, err := metric.New(tt.metricName, tt.tags, tt.fields, now) assert.NoError(t, err) actual, _ := s.SerializeBatch([]telegraf.Metric{m, m}) require.Equal(t, tt.expected, string(actual)) @@ -1025,11 +1025,11 @@ func TestSerializeBatch(t *testing.T) { func TestSerializeBatchWithTagsSupport(t *testing.T) { now := time.Unix(1234567890, 0) tests := []struct { - name string - metric_name string - tags map[string]string - fields map[string]interface{} - expected string + name string + metricName string + tags map[string]string + fields map[string]interface{} + expected string }{ { "Base metric", @@ -1046,7 +1046,7 @@ func TestSerializeBatchWithTagsSupport(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New(tt.metric_name, tt.tags, tt.fields, now) + m, err := metric.New(tt.metricName, tt.tags, tt.fields, now) assert.NoError(t, err) actual, _ := s.SerializeBatch([]telegraf.Metric{m, m}) require.Equal(t, tt.expected, string(actual)) diff --git a/plugins/serializers/nowmetric/nowmetric.go b/plugins/serializers/nowmetric/nowmetric.go index c9d0b946370f8..b1960bb7a9f57 100644 --- a/plugins/serializers/nowmetric/nowmetric.go +++ b/plugins/serializers/nowmetric/nowmetric.go @@ -123,9 +123,9 @@ func (s *serializer) createObject(metric telegraf.Metric) ([]byte, error) { allmetrics = append(allmetrics, oimetric) } - metricsJson, err := json.Marshal(allmetrics) + metricsJSON, err := json.Marshal(allmetrics) - return metricsJson, err + return metricsJSON, err } func verifyValue(v interface{}) bool { diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index 32bc034e0b0b0..61fb03c96562d 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -116,7 +116,7 @@ func NewSerializer(config *Config) (Serializer, error) { case "graphite": serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.GraphiteSeparator, config.Templates) case "json": - serializer, err = NewJsonSerializer(config.TimestampUnits) + serializer, err = NewJSONSerializer(config.TimestampUnits) case "splunkmetric": serializer, err = NewSplunkmetricSerializer(config.HecRouting, config.SplunkmetricMultiMetric) case "nowmetric": @@ -179,7 +179,7 @@ func NewWavefrontSerializer(prefix string, useStrict bool, sourceOverride []stri return wavefront.NewSerializer(prefix, useStrict, sourceOverride) } -func NewJsonSerializer(timestampUnits time.Duration) (Serializer, error) { +func NewJSONSerializer(timestampUnits time.Duration) (Serializer, error) { return json.NewSerializer(timestampUnits) } @@ -187,8 +187,8 @@ func NewCarbon2Serializer(carbon2format string) (Serializer, error) { return carbon2.NewSerializer(carbon2format) } -func NewSplunkmetricSerializer(splunkmetric_hec_routing bool, splunkmetric_multimetric bool) (Serializer, error) { - return splunkmetric.NewSerializer(splunkmetric_hec_routing, splunkmetric_multimetric) +func NewSplunkmetricSerializer(splunkmetricHecRouting bool, splunkmetricMultimetric bool) (Serializer, error) { + return splunkmetric.NewSerializer(splunkmetricHecRouting, splunkmetricMultimetric) } func NewNowSerializer() (Serializer, error) { @@ -217,7 +217,7 @@ func NewInfluxSerializer() (Serializer, error) { return influx.NewSerializer(), nil } -func NewGraphiteSerializer(prefix, template string, tag_support bool, separator string, templates []string) (Serializer, error) { +func NewGraphiteSerializer(prefix, template string, tagSupport bool, separator string, templates []string) (Serializer, error) { graphiteTemplates, defaultTemplate, err := graphite.InitGraphiteTemplates(templates) if err != nil { @@ -235,7 +235,7 @@ func NewGraphiteSerializer(prefix, template string, tag_support bool, separator return &graphite.GraphiteSerializer{ Prefix: prefix, Template: template, - TagSupport: tag_support, + TagSupport: tagSupport, Separator: separator, Templates: graphiteTemplates, }, nil diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go index b96db5cf81155..f23de80df7f3b 100644 --- a/plugins/serializers/splunkmetric/splunkmetric.go +++ b/plugins/serializers/splunkmetric/splunkmetric.go @@ -30,11 +30,11 @@ type HECTimeSeries struct { } // NewSerializer Setup our new serializer -func NewSerializer(splunkmetric_hec_routing bool, splunkmetric_multimetric bool) (*serializer, error) { +func NewSerializer(splunkmetricHecRouting bool, splunkmetricMultimetric bool) (*serializer, error) { /* Define output params */ s := &serializer{ - HecRouting: splunkmetric_hec_routing, - SplunkmetricMultiMetric: splunkmetric_multimetric, + HecRouting: splunkmetricHecRouting, + SplunkmetricMultiMetric: splunkmetricMultimetric, } return s, nil } From 6bc731be92c5b4d455e4a71f6a34ae13cc9aa257 Mon Sep 17 00:00:00 2001 From: Patrick Koenig Date: Tue, 2 Mar 2021 11:19:08 -0800 Subject: [PATCH 245/761] Use consistent container name in docker input plugin (#8703) --- plugins/inputs/docker/docker.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index dafedacafb3f1..f795d5b029be4 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -434,8 +434,7 @@ func (d *Docker) gatherContainer( var cname string for _, name := range container.Names { trimmedName := strings.TrimPrefix(name, "/") - match := d.containerFilter.Match(trimmedName) - if match { + if !strings.Contains(trimmedName, "/") { cname = trimmedName break } @@ -445,6 +444,10 @@ func (d *Docker) gatherContainer( return nil } + if !d.containerFilter.Match(cname) { + return nil + } + imageName, imageVersion := docker.ParseImage(container.Image) tags := map[string]string{ @@ -480,11 +483,6 @@ func (d *Docker) gatherContainer( } daemonOSType := r.OSType - // use common (printed at `docker ps`) name for container - if v.Name != "" { - tags["container_name"] = strings.TrimPrefix(v.Name, "/") - } - // Add labels to tags for k, label := range container.Labels { if d.labelFilter.Match(k) { From 9075ae51752c706b82a13710ccd628ad85132837 Mon Sep 17 00:00:00 2001 From: "Jason Kim (Jun Chul Kim)" Date: Wed, 3 Mar 2021 04:48:58 +0900 Subject: [PATCH 246/761] Add MessagePack output data format (#8828) --- README.md | 5 +- docs/DATA_FORMATS_OUTPUT.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 2 + go.mod | 8 +- go.sum | 22 +- plugins/serializers/msgpack/README.md | 45 ++ plugins/serializers/msgpack/metric.go | 104 +++++ plugins/serializers/msgpack/metric_gen.go | 417 ++++++++++++++++++ .../serializers/msgpack/metric_gen_test.go | 236 ++++++++++ plugins/serializers/msgpack/metric_test.go | 143 ++++++ plugins/serializers/msgpack/msgpack.go | 44 ++ plugins/serializers/msgpack/msgpack_test.go | 132 ++++++ plugins/serializers/registry.go | 9 +- 13 files changed, 1155 insertions(+), 13 deletions(-) create mode 100644 plugins/serializers/msgpack/README.md create mode 100644 plugins/serializers/msgpack/metric.go create mode 100644 plugins/serializers/msgpack/metric_gen.go create mode 100644 plugins/serializers/msgpack/metric_gen_test.go create mode 100644 plugins/serializers/msgpack/metric_test.go create mode 100644 plugins/serializers/msgpack/msgpack.go create mode 100644 plugins/serializers/msgpack/msgpack_test.go diff --git a/README.md b/README.md index 9c2c65cd9f244..ae1e63f369adf 100644 --- a/README.md +++ b/README.md @@ -364,11 +364,12 @@ For documentation on the latest development code see the [documentation index][d ## Serializers - [InfluxDB Line Protocol](/plugins/serializers/influx) -- [JSON](/plugins/serializers/json) +- [Carbon2](/plugins/serializers/carbon2) - [Graphite](/plugins/serializers/graphite) +- [JSON](/plugins/serializers/json) +- [MessagePack](/plugins/serializers/msgpack) - [ServiceNow](/plugins/serializers/nowmetric) - [SplunkMetric](/plugins/serializers/splunkmetric) -- [Carbon2](/plugins/serializers/carbon2) - [Wavefront](/plugins/serializers/wavefront) ## Processor Plugins diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index 0d0bdfff4bb27..720c922de6755 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -8,6 +8,7 @@ plugins. 1. [Carbon2](/plugins/serializers/carbon2) 1. [Graphite](/plugins/serializers/graphite) 1. [JSON](/plugins/serializers/json) +1. [MessagePack](/plugins/serializers/msgpack) 1. [Prometheus](/plugins/serializers/prometheus) 1. [Prometheus Remote Write](/plugins/serializers/prometheusremotewrite) 1. [ServiceNow Metrics](/plugins/serializers/nowmetric) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index ad499955067b4..97125ffd1a325 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -132,6 +132,7 @@ following works: - github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) - github.com/opentracing/opentracing-go [Apache License 2.0](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) - github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) +- github.com/philhofer/fwd [MIT License](https://github.com/philhofer/fwd/blob/master/LICENSE.md) - github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE) - github.com/pkg/errors [BSD 2-Clause "Simplified" License](https://github.com/pkg/errors/blob/master/LICENSE) - github.com/pmezard/go-difflib [BSD 3-Clause Clear License](https://github.com/pmezard/go-difflib/blob/master/LICENSE) @@ -157,6 +158,7 @@ following works: - github.com/tidwall/gjson [MIT License](https://github.com/tidwall/gjson/blob/master/LICENSE) - github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE) - github.com/tidwall/pretty [MIT License](https://github.com/tidwall/pretty/blob/master/LICENSE) +- github.com/tinylib/msgp [MIT License](https://github.com/tinylib/msgp/blob/master/LICENSE) - github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE) - github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE) - github.com/vjeantet/grok [Apache License 2.0](https://github.com/vjeantet/grok/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 06fd402c51079..aaa19949dd710 100644 --- a/go.mod +++ b/go.mod @@ -126,6 +126,7 @@ require ( github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect github.com/tidwall/gjson v1.6.0 + github.com/tinylib/msgp v1.1.5 github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect github.com/vjeantet/grok v1.0.1 @@ -138,12 +139,11 @@ require ( go.starlark.net v0.0.0-20200901195727-6e684ef5eeee golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/net v0.0.0-20200904194848-62affa334b73 + golang.org/x/net v0.0.0-20201021035429-f5854403a974 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a - golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 + golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f golang.org/x/text v0.3.3 - golang.org/x/tools v0.0.0-20200317043434-63da46f3035e // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.20.0 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 diff --git a/go.sum b/go.sum index c9bbf781149c7..db92645af3929 100644 --- a/go.sum +++ b/go.sum @@ -561,6 +561,8 @@ github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2J github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= @@ -671,6 +673,9 @@ github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0= +github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0= @@ -691,7 +696,7 @@ github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw= @@ -763,6 +768,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -792,11 +799,12 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -811,8 +819,8 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -852,6 +860,8 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 h1:DvY3Zkh7KabQE/kfzMvYvKirSiguP9Q/veMtkYyf0o8= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -902,8 +912,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200317043434-63da46f3035e h1:8ogAbHWoJTPepnVbNRqXLOpzMkl0rtRsM7crbflc4XM= -golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9 h1:sEvmEcJVKBNUvgCUClbUQeHOAa9U0I2Ce1BooMvVCY4= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/plugins/serializers/msgpack/README.md b/plugins/serializers/msgpack/README.md new file mode 100644 index 0000000000000..5607cc64c05bc --- /dev/null +++ b/plugins/serializers/msgpack/README.md @@ -0,0 +1,45 @@ +# MessagePack: + +MessagePack is an efficient binary serialization format. It lets you exchange data among multiple languages like JSON. + +https://msgpack.org + +### Format Definitions: + +Output of this format is MessagePack binary representation of metrics that have identical structure of the below JSON. + +``` +{ + "name":"cpu", + "time": , // https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type + "tags":{ + "tag_1":"host01", + ... + }, + "fields":{ + "field_1":30, + "field_2":true, + "field_3":"field_value" + "field_4":30.1 + ... + } +} +``` + +MessagePack has it's own timestamp representation. You can find additional informations from [MessagePack specification](https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type). + +### MessagePack Configuration: + +There are no additional configuration options for MessagePack format. + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "msgpack" +``` \ No newline at end of file diff --git a/plugins/serializers/msgpack/metric.go b/plugins/serializers/msgpack/metric.go new file mode 100644 index 0000000000000..6b8a00878b6a8 --- /dev/null +++ b/plugins/serializers/msgpack/metric.go @@ -0,0 +1,104 @@ +package msgpack + +import ( + "encoding/binary" + "time" + + "github.com/tinylib/msgp/msgp" +) + +//go:generate msgp + +// Metric is structure to define MessagePack message format +// will be used by msgp code generator +type Metric struct { + Name string `msg:"name"` + Time MessagePackTime `msg:"time,extension"` + Tags map[string]string `msg:"tags"` + Fields map[string]interface{} `msg:"fields"` +} + +// MessagePackTime implements the official timestamp extension type +// https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type +// +// tinylib/msgp has been using their own custom extension type and the official extension +// is not available. (https://github.com/tinylib/msgp/issues/214) +type MessagePackTime struct { + time time.Time +} + +func init() { + msgp.RegisterExtension(-1, func() msgp.Extension { return new(MessagePackTime) }) +} + +// ExtensionType implements the Extension interface +func (*MessagePackTime) ExtensionType() int8 { + return -1 +} + +// Len implements the Extension interface +// The timestamp extension uses variable length encoding depending the input +// +// 32bits: [1970-01-01 00:00:00 UTC, 2106-02-07 06:28:16 UTC) range. If the nanoseconds part is 0 +// 64bits: [1970-01-01 00:00:00.000000000 UTC, 2514-05-30 01:53:04.000000000 UTC) range. +// 96bits: [-584554047284-02-23 16:59:44 UTC, 584554051223-11-09 07:00:16.000000000 UTC) range. +func (t *MessagePackTime) Len() int { + sec := t.time.Unix() + nsec := t.time.Nanosecond() + + if sec < 0 || sec >= (1<<34) { // 96 bits encoding + return 12 + } + if sec >= (1<<32) || nsec != 0 { + return 8 + } + return 4 +} + +// MarshalBinaryTo implements the Extension interface +func (t *MessagePackTime) MarshalBinaryTo(buf []byte) error { + len := t.Len() + + if len == 4 { + sec := t.time.Unix() + binary.BigEndian.PutUint32(buf, uint32(sec)) + } else if len == 8 { + sec := t.time.Unix() + nsec := t.time.Nanosecond() + + data := uint64(nsec)<<34 | (uint64(sec) & 0x03_ffff_ffff) + binary.BigEndian.PutUint64(buf, data) + } else if len == 12 { + sec := t.time.Unix() + nsec := t.time.Nanosecond() + + binary.BigEndian.PutUint32(buf, uint32(nsec)) + binary.BigEndian.PutUint64(buf[4:], uint64(sec)) + } + + return nil +} + +// UnmarshalBinary implements the Extension interface +func (t *MessagePackTime) UnmarshalBinary(buf []byte) error { + len := len(buf) + + if len == 4 { + sec := binary.BigEndian.Uint32(buf) + t.time = time.Unix(int64(sec), 0) + } else if len == 8 { + data := binary.BigEndian.Uint64(buf) + + nsec := (data & 0xfffffffc_00000000) >> 34 + sec := (data & 0x00000003_ffffffff) + + t.time = time.Unix(int64(sec), int64(nsec)) + } else if len == 12 { + nsec := binary.BigEndian.Uint32(buf) + sec := binary.BigEndian.Uint64(buf[4:]) + + t.time = time.Unix(int64(sec), int64(nsec)) + } + + return nil +} diff --git a/plugins/serializers/msgpack/metric_gen.go b/plugins/serializers/msgpack/metric_gen.go new file mode 100644 index 0000000000000..f02b0aba28503 --- /dev/null +++ b/plugins/serializers/msgpack/metric_gen.go @@ -0,0 +1,417 @@ +package msgpack + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *MessagePackTime) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z MessagePackTime) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 0 + err = en.Append(0x80) + if err != nil { + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z MessagePackTime) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 0 + o = append(o, 0x80) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *MessagePackTime) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z MessagePackTime) Msgsize() (s int) { + s = 1 + return +} + +// DecodeMsg implements msgp.Decodable +func (z *Metric) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "name": + z.Name, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "time": + err = dc.ReadExtension(&z.Time) + if err != nil { + err = msgp.WrapError(err, "Time") + return + } + case "tags": + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if z.Tags == nil { + z.Tags = make(map[string]string, zb0002) + } else if len(z.Tags) > 0 { + for key := range z.Tags { + delete(z.Tags, key) + } + } + for zb0002 > 0 { + zb0002-- + var za0001 string + var za0002 string + za0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + za0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Tags", za0001) + return + } + z.Tags[za0001] = za0002 + } + case "fields": + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Fields") + return + } + if z.Fields == nil { + z.Fields = make(map[string]interface{}, zb0003) + } else if len(z.Fields) > 0 { + for key := range z.Fields { + delete(z.Fields, key) + } + } + for zb0003 > 0 { + zb0003-- + var za0003 string + var za0004 interface{} + za0003, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Fields") + return + } + za0004, err = dc.ReadIntf() + if err != nil { + err = msgp.WrapError(err, "Fields", za0003) + return + } + z.Fields[za0003] = za0004 + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Metric) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 4 + // write "name" + err = en.Append(0x84, 0xa4, 0x6e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Name) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + // write "time" + err = en.Append(0xa4, 0x74, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteExtension(&z.Time) + if err != nil { + err = msgp.WrapError(err, "Time") + return + } + // write "tags" + err = en.Append(0xa4, 0x74, 0x61, 0x67, 0x73) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.Tags))) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + for za0001, za0002 := range z.Tags { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "Tags", za0001) + return + } + } + // write "fields" + err = en.Append(0xa6, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.Fields))) + if err != nil { + err = msgp.WrapError(err, "Fields") + return + } + for za0003, za0004 := range z.Fields { + err = en.WriteString(za0003) + if err != nil { + err = msgp.WrapError(err, "Fields") + return + } + err = en.WriteIntf(za0004) + if err != nil { + err = msgp.WrapError(err, "Fields", za0003) + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Metric) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 4 + // string "name" + o = append(o, 0x84, 0xa4, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Name) + // string "time" + o = append(o, 0xa4, 0x74, 0x69, 0x6d, 0x65) + o, err = msgp.AppendExtension(o, &z.Time) + if err != nil { + err = msgp.WrapError(err, "Time") + return + } + // string "tags" + o = append(o, 0xa4, 0x74, 0x61, 0x67, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Tags))) + for za0001, za0002 := range z.Tags { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + // string "fields" + o = append(o, 0xa6, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Fields))) + for za0003, za0004 := range z.Fields { + o = msgp.AppendString(o, za0003) + o, err = msgp.AppendIntf(o, za0004) + if err != nil { + err = msgp.WrapError(err, "Fields", za0003) + return + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Metric) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "name": + z.Name, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "time": + bts, err = msgp.ReadExtensionBytes(bts, &z.Time) + if err != nil { + err = msgp.WrapError(err, "Time") + return + } + case "tags": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if z.Tags == nil { + z.Tags = make(map[string]string, zb0002) + } else if len(z.Tags) > 0 { + for key := range z.Tags { + delete(z.Tags, key) + } + } + for zb0002 > 0 { + var za0001 string + var za0002 string + zb0002-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0001) + return + } + z.Tags[za0001] = za0002 + } + case "fields": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Fields") + return + } + if z.Fields == nil { + z.Fields = make(map[string]interface{}, zb0003) + } else if len(z.Fields) > 0 { + for key := range z.Fields { + delete(z.Fields, key) + } + } + for zb0003 > 0 { + var za0003 string + var za0004 interface{} + zb0003-- + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Fields") + return + } + za0004, bts, err = msgp.ReadIntfBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Fields", za0003) + return + } + z.Fields[za0003] = za0004 + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Metric) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 5 + msgp.ExtensionPrefixSize + z.Time.Len() + 5 + msgp.MapHeaderSize + if z.Tags != nil { + for za0001, za0002 := range z.Tags { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += 7 + msgp.MapHeaderSize + if z.Fields != nil { + for za0003, za0004 := range z.Fields { + _ = za0004 + s += msgp.StringPrefixSize + len(za0003) + msgp.GuessSize(za0004) + } + } + return +} diff --git a/plugins/serializers/msgpack/metric_gen_test.go b/plugins/serializers/msgpack/metric_gen_test.go new file mode 100644 index 0000000000000..e24d0a9b179c3 --- /dev/null +++ b/plugins/serializers/msgpack/metric_gen_test.go @@ -0,0 +1,236 @@ +package msgpack + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalMessagePackTime(t *testing.T) { + v := MessagePackTime{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgMessagePackTime(b *testing.B) { + v := MessagePackTime{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgMessagePackTime(b *testing.B) { + v := MessagePackTime{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalMessagePackTime(b *testing.B) { + v := MessagePackTime{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeMessagePackTime(t *testing.T) { + v := MessagePackTime{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeMessagePackTime Msgsize() is inaccurate") + } + + vn := MessagePackTime{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeMessagePackTime(b *testing.B) { + v := MessagePackTime{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeMessagePackTime(b *testing.B) { + v := MessagePackTime{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalMetric(t *testing.T) { + v := Metric{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgMetric(b *testing.B) { + v := Metric{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgMetric(b *testing.B) { + v := Metric{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalMetric(b *testing.B) { + v := Metric{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeMetric(t *testing.T) { + v := Metric{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeMetric Msgsize() is inaccurate") + } + + vn := Metric{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeMetric(b *testing.B) { + v := Metric{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeMetric(b *testing.B) { + v := Metric{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/plugins/serializers/msgpack/metric_test.go b/plugins/serializers/msgpack/metric_test.go new file mode 100644 index 0000000000000..e0ea25ebc88a7 --- /dev/null +++ b/plugins/serializers/msgpack/metric_test.go @@ -0,0 +1,143 @@ +package msgpack + +import ( + "encoding/hex" + "math" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestMsgPackTime32(t *testing.T) { + // Maximum of 4 bytes encodable time + var sec int64 = 0xFFFFFFFF + var nsec int64 = 0 + t1 := MessagePackTime{time: time.Unix(sec, nsec)} + + assert.Equal(t, t1.Len(), 4) + + buf := make([]byte, t1.Len()) + assert.NoError(t, t1.MarshalBinaryTo(buf)) + + t2 := new(MessagePackTime) + t2.UnmarshalBinary(buf) + + assert.Equal(t, t1.time, t2.time) +} + +func TestMsgPackTime64(t *testing.T) { + // Maximum of 8 bytes encodable time + var sec int64 = 0x3FFFFFFFF + var nsec int64 = 999999999 + t1 := MessagePackTime{time: time.Unix(sec, nsec)} + + assert.Equal(t, t1.Len(), 8) + + buf := make([]byte, t1.Len()) + assert.NoError(t, t1.MarshalBinaryTo(buf)) + + t2 := new(MessagePackTime) + t2.UnmarshalBinary(buf) + + assert.Equal(t, t1.time, t2.time) +} + +func TestMsgPackTime96(t *testing.T) { + // Testing 12 bytes timestamp + var sec int64 = 0x400000001 + var nsec int64 = 111111111 + t1 := MessagePackTime{time: time.Unix(sec, nsec)} + + assert.Equal(t, t1.Len(), 12) + + buf := make([]byte, t1.Len()) + assert.NoError(t, t1.MarshalBinaryTo(buf)) + + t2 := new(MessagePackTime) + t2.UnmarshalBinary(buf) + + assert.True(t, t1.time.Equal(t2.time)) + + // Testing the default value: 0001-01-01T00:00:00Z + t1 = MessagePackTime{} + + assert.Equal(t, t1.Len(), 12) + assert.NoError(t, t1.MarshalBinaryTo(buf)) + + t2 = new(MessagePackTime) + t2.UnmarshalBinary(buf) + + assert.True(t, t1.time.Equal(t2.time)) +} + +func TestMsgPackTimeEdgeCases(t *testing.T) { + times := make([]time.Time, 0) + expected := make([][]byte, 0) + + // Unix epoch. Begin of 4bytes dates + // Nanoseconds: 0x00000000, Seconds: 0x0000000000000000 + ts, _ := time.Parse(time.RFC3339, "1970-01-01T00:00:00Z") + bs, _ := hex.DecodeString("d6ff00000000") + times = append(times, ts) + expected = append(expected, bs) + + // End of 4bytes dates + // Nanoseconds: 0x00000000, Seconds: 0x00000000ffffffff + ts, _ = time.Parse(time.RFC3339, "2106-02-07T06:28:15Z") + bs, _ = hex.DecodeString("d6ffffffffff") + times = append(times, ts) + expected = append(expected, bs) + + // Begin of 8bytes dates + // Nanoseconds: 0x00000000, Seconds: 0x0000000100000000 + ts, _ = time.Parse(time.RFC3339, "2106-02-07T06:28:16Z") + bs, _ = hex.DecodeString("d7ff0000000100000000") + times = append(times, ts) + expected = append(expected, bs) + + // Just after Unix epoch. Non zero nanoseconds + // Nanoseconds: 0x00000001, Seconds: 0x0000000000000000 + ts, _ = time.Parse(time.RFC3339Nano, "1970-01-01T00:00:00.000000001Z") + bs, _ = hex.DecodeString("d7ff0000000400000000") + times = append(times, ts) + expected = append(expected, bs) + + // End of 8bytes dates + // Nanoseconds: 0x00000000, Seconds: 0x00000003ffffffff + ts, _ = time.Parse(time.RFC3339Nano, "2514-05-30T01:53:03.000000000Z") + bs, _ = hex.DecodeString("d7ff00000003ffffffff") + times = append(times, ts) + expected = append(expected, bs) + + // Begin of 12bytes date + // Nanoseconds: 0x00000000, Seconds: 0x0000000400000000 + ts, _ = time.Parse(time.RFC3339Nano, "2514-05-30T01:53:04.000000000Z") + bs, _ = hex.DecodeString("c70cff000000000000000400000000") + times = append(times, ts) + expected = append(expected, bs) + + // Zero value, 0001-01-01T00:00:00Z + // Nanoseconds: 0x00000000, Seconds: 0xfffffff1886e0900 + ts = time.Time{} + bs, _ = hex.DecodeString("c70cff00000000fffffff1886e0900") + times = append(times, ts) + expected = append(expected, bs) + + // Max value + // Nanoseconds: 0x3b9ac9ff, Seconds: 0x7fffffffffffffff + ts = time.Unix(math.MaxInt64, 999_999_999).UTC() + bs, _ = hex.DecodeString("c70cff3b9ac9ff7fffffffffffffff") + times = append(times, ts) + expected = append(expected, bs) + + buf := make([]byte, 0) + for i, ts := range times { + t1 := MessagePackTime{time: ts} + m := Metric{Time: t1} + + buf = buf[:0] + buf, _ = m.MarshalMsg(buf) + assert.Equal(t, expected[i], buf[12:len(buf)-14]) + } +} diff --git a/plugins/serializers/msgpack/msgpack.go b/plugins/serializers/msgpack/msgpack.go new file mode 100644 index 0000000000000..d850bb8b004ca --- /dev/null +++ b/plugins/serializers/msgpack/msgpack.go @@ -0,0 +1,44 @@ +package msgpack + +import ( + "github.com/influxdata/telegraf" +) + +// Serializer encodes metrics in MessagePack format +type Serializer struct{} + +// NewSerializer creates a msgpack.Serializer +func NewSerializer() *Serializer { + return &Serializer{} +} + +func marshalMetric(buf []byte, metric telegraf.Metric) ([]byte, error) { + return (&Metric{ + Name: metric.Name(), + Time: MessagePackTime{time: metric.Time()}, + Tags: metric.Tags(), + Fields: metric.Fields(), + }).MarshalMsg(buf) +} + +// Serialize implements serializers.Serializer.Serialize +// github.com/influxdata/telegraf/plugins/serializers/Serializer +func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) { + return marshalMetric(nil, metric) +} + +// SerializeBatch implements serializers.Serializer.SerializeBatch +// github.com/influxdata/telegraf/plugins/serializers/Serializer +func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + buf := make([]byte, 0) + for _, m := range metrics { + var err error + buf, err = marshalMetric(buf, m) + + if err != nil { + return nil, err + } + + } + return buf, nil +} diff --git a/plugins/serializers/msgpack/msgpack_test.go b/plugins/serializers/msgpack/msgpack_test.go new file mode 100644 index 0000000000000..a44ffae4515e3 --- /dev/null +++ b/plugins/serializers/msgpack/msgpack_test.go @@ -0,0 +1,132 @@ +package msgpack + +import ( + "testing" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func toTelegrafMetric(m Metric) telegraf.Metric { + tm, _ := metric.New(m.Name, m.Tags, m.Fields, m.Time.time) + + return tm +} + +func TestSerializeMetricInt(t *testing.T) { + m := testutil.TestMetric(int64(90)) + + s := Serializer{} + var buf []byte + buf, err := s.Serialize(m) + assert.NoError(t, err) + + m2 := &Metric{} + left, err := m2.UnmarshalMsg(buf) + assert.NoError(t, err) + + assert.Equal(t, len(left), 0) + + testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2)) +} + +func TestSerializeMetricString(t *testing.T) { + m := testutil.TestMetric("foobar") + + s := Serializer{} + var buf []byte + buf, err := s.Serialize(m) + assert.NoError(t, err) + + m2 := &Metric{} + left, err := m2.UnmarshalMsg(buf) + assert.NoError(t, err) + + assert.Equal(t, len(left), 0) + + testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2)) +} + +func TestSerializeMultiFields(t *testing.T) { + m := testutil.TestMetric(int(90)) + m.AddField("value2", 8559615) + + s := Serializer{} + var buf []byte + buf, err := s.Serialize(m) + assert.NoError(t, err) + + m2 := &Metric{} + left, err := m2.UnmarshalMsg(buf) + assert.NoError(t, err) + + assert.Equal(t, len(left), 0) + + testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2)) +} + +func TestSerializeMetricWithEscapes(t *testing.T) { + m := testutil.TestMetric(int(90)) + m.AddField("U,age=Idle", int64(90)) + m.AddTag("cpu tag", "cpu0") + + s := Serializer{} + var buf []byte + buf, err := s.Serialize(m) + assert.NoError(t, err) + + m2 := &Metric{} + left, err := m2.UnmarshalMsg(buf) + assert.NoError(t, err) + + assert.Equal(t, len(left), 0) + + testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2)) +} + +func TestSerializeMultipleMetric(t *testing.T) { + m := testutil.TestMetric(int(90)) + + s := Serializer{} + + encoded, err := s.Serialize(m) + assert.NoError(t, err) + + // Multiple metrics in continous bytes stream + var buf []byte + buf = append(buf, encoded...) + buf = append(buf, encoded...) + buf = append(buf, encoded...) + buf = append(buf, encoded...) + + left := buf + for len(left) > 0 { + decodeM := &Metric{} + left, err = decodeM.UnmarshalMsg(left) + + assert.NoError(t, err) + testutil.RequireMetricEqual(t, m, toTelegrafMetric(*decodeM)) + } +} + +func TestSerializeBatch(t *testing.T) { + m := testutil.TestMetric(int(90)) + + metrics := []telegraf.Metric{m, m, m, m} + + s := Serializer{} + + buf, err := s.SerializeBatch(metrics) + assert.NoError(t, err) + + left := buf + for len(left) > 0 { + decodeM := &Metric{} + left, err = decodeM.UnmarshalMsg(left) + + assert.NoError(t, err) + testutil.RequireMetricEqual(t, m, toTelegrafMetric(*decodeM)) + } +} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index 61fb03c96562d..f6c62fc12cbda 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -2,7 +2,6 @@ package serializers import ( "fmt" - "github.com/influxdata/telegraf/plugins/serializers/prometheusremotewrite" "time" "github.com/influxdata/telegraf" @@ -10,8 +9,10 @@ import ( "github.com/influxdata/telegraf/plugins/serializers/graphite" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/plugins/serializers/json" + "github.com/influxdata/telegraf/plugins/serializers/msgpack" "github.com/influxdata/telegraf/plugins/serializers/nowmetric" "github.com/influxdata/telegraf/plugins/serializers/prometheus" + "github.com/influxdata/telegraf/plugins/serializers/prometheusremotewrite" "github.com/influxdata/telegraf/plugins/serializers/splunkmetric" "github.com/influxdata/telegraf/plugins/serializers/wavefront" ) @@ -129,6 +130,8 @@ func NewSerializer(config *Config) (Serializer, error) { serializer, err = NewPrometheusSerializer(config) case "prometheusremotewrite": serializer, err = NewPrometheusRemoteWriteSerializer(config) + case "msgpack": + serializer, err = NewMsgpackSerializer() default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } @@ -240,3 +243,7 @@ func NewGraphiteSerializer(prefix, template string, tagSupport bool, separator s Templates: graphiteTemplates, }, nil } + +func NewMsgpackSerializer() (Serializer, error) { + return msgpack.NewSerializer(), nil +} From cf9ae34d0337805eab159d94555ea0965845004f Mon Sep 17 00:00:00 2001 From: Avinash Nigam <56562150+avinash-nigam@users.noreply.github.com> Date: Tue, 2 Mar 2021 11:55:28 -0800 Subject: [PATCH 247/761] SqlServer - fix for default server list (#8655) --- plugins/inputs/sqlserver/sqlserver.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 67571d23d6f26..c789ace9b3994 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -221,10 +221,6 @@ func (s *SQLServer) Gather(acc telegraf.Accumulator) error { } } - if len(s.Servers) == 0 { - s.Servers = append(s.Servers, defaultServer) - } - var wg sync.WaitGroup for _, serv := range s.Servers { @@ -327,8 +323,16 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e return nil } +func (s *SQLServer) Init() error { + if len(s.Servers) == 0 { + log.Println("W! Warning: Server list is empty.") + } + + return nil +} + func init() { inputs.Add("sqlserver", func() telegraf.Input { - return &SQLServer{} + return &SQLServer{Servers: []string{defaultServer}} }) } From c17cc8cabbd43959741b83c96a5b7d325be5d8a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Labesse=20K=C3=A9vin?= Date: Tue, 2 Mar 2021 21:48:48 +0100 Subject: [PATCH 248/761] plugin: output loki (#8450) --- plugins/outputs/all/all.go | 1 + plugins/outputs/loki/README.md | 34 +++ plugins/outputs/loki/loki.go | 209 ++++++++++++++++ plugins/outputs/loki/loki_test.go | 356 ++++++++++++++++++++++++++++ plugins/outputs/loki/stream.go | 70 ++++++ plugins/outputs/loki/stream_test.go | 157 ++++++++++++ 6 files changed, 827 insertions(+) create mode 100644 plugins/outputs/loki/README.md create mode 100644 plugins/outputs/loki/loki.go create mode 100644 plugins/outputs/loki/loki_test.go create mode 100644 plugins/outputs/loki/stream.go create mode 100644 plugins/outputs/loki/stream_test.go diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index b8d64db8f4a04..dbec69f95b1f5 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -27,6 +27,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/kinesis" _ "github.com/influxdata/telegraf/plugins/outputs/librato" _ "github.com/influxdata/telegraf/plugins/outputs/logzio" + _ "github.com/influxdata/telegraf/plugins/outputs/loki" _ "github.com/influxdata/telegraf/plugins/outputs/mqtt" _ "github.com/influxdata/telegraf/plugins/outputs/nats" _ "github.com/influxdata/telegraf/plugins/outputs/newrelic" diff --git a/plugins/outputs/loki/README.md b/plugins/outputs/loki/README.md new file mode 100644 index 0000000000000..9c48f95bae805 --- /dev/null +++ b/plugins/outputs/loki/README.md @@ -0,0 +1,34 @@ +# Loki Output Plugin + +This plugin sends logs to Loki, using tags as labels, +log line will content all fields in `key="value"` format which is easily parsable with `logfmt` parser in Loki. + +### Configuration: + +```toml +# A plugin that can transmit logs to Loki +[[outputs.loki]] + ## The domain of Loki + domain = "https://loki.domain.tld" + + ## Endpoint to write api + # endpoint = "/loki/api/v1/push" + + ## Connection timeout, defaults to "5s" if not set. + # timeout = "5s" + + ## Basic auth credential + # username = "loki" + # password = "pass" + + ## Additional HTTP headers + # http_headers = {"X-Scope-OrgID" = "1"} + + ## If the request must be gzip encoded + # gzip_request = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" +``` diff --git a/plugins/outputs/loki/loki.go b/plugins/outputs/loki/loki.go new file mode 100644 index 0000000000000..c097d21fd8bf0 --- /dev/null +++ b/plugins/outputs/loki/loki.go @@ -0,0 +1,209 @@ +package loki + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/outputs" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" +) + +const ( + defaultEndpoint = "/loki/api/v1/push" + defaultClientTimeout = 5 * time.Second +) + +var sampleConfig = ` + ## The domain of Loki + domain = "https://loki.domain.tld" + + ## Endpoint to write api + # endpoint = "/loki/api/v1/push" + + ## Connection timeout, defaults to "5s" if not set. + # timeout = "5s" + + ## Basic auth credential + # username = "loki" + # password = "pass" + + ## Additional HTTP headers + # http_headers = {"X-Scope-OrgID" = "1"} + + ## If the request must be gzip encoded + # gzip_request = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" +` + +type Loki struct { + Domain string `toml:"domain"` + Endpoint string `toml:"endpoint"` + Timeout internal.Duration `toml:"timeout"` + Username string `toml:"username"` + Password string `toml:"password"` + Headers map[string]string `toml:"headers"` + ClientID string `toml:"client_id"` + ClientSecret string `toml:"client_secret"` + TokenURL string `toml:"token_url"` + Scopes []string `toml:"scopes"` + GZipRequest bool `toml:"gzip_request"` + + url string + client *http.Client + tls.ClientConfig +} + +func (l *Loki) SampleConfig() string { + return sampleConfig +} + +func (l *Loki) Description() string { + return "Send logs to Loki" +} + +func (l *Loki) createClient(ctx context.Context) (*http.Client, error) { + tlsCfg, err := l.ClientConfig.TLSConfig() + if err != nil { + return nil, fmt.Errorf("tls config fail: %w", err) + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: http.ProxyFromEnvironment, + }, + Timeout: l.Timeout.Duration, + } + + if l.ClientID != "" && l.ClientSecret != "" && l.TokenURL != "" { + oauthConfig := clientcredentials.Config{ + ClientID: l.ClientID, + ClientSecret: l.ClientSecret, + TokenURL: l.TokenURL, + Scopes: l.Scopes, + } + ctx = context.WithValue(ctx, oauth2.HTTPClient, client) + client = oauthConfig.Client(ctx) + } + + return client, nil +} + +func (l *Loki) Connect() (err error) { + if l.Domain == "" { + return fmt.Errorf("domain is required") + } + + if l.Endpoint == "" { + l.Endpoint = defaultEndpoint + } + + l.url = fmt.Sprintf("%s%s", l.Domain, l.Endpoint) + + if l.Timeout.Duration == 0 { + l.Timeout.Duration = defaultClientTimeout + } + + ctx := context.Background() + l.client, err = l.createClient(ctx) + if err != nil { + return fmt.Errorf("http client fail: %w", err) + } + + return +} + +func (l *Loki) Close() error { + l.client.CloseIdleConnections() + + return nil +} + +func (l *Loki) Write(metrics []telegraf.Metric) error { + s := Streams{} + + for _, m := range metrics { + tags := m.TagList() + var line string + + for _, f := range m.FieldList() { + line += fmt.Sprintf("%s=\"%v\" ", f.Key, f.Value) + } + + s.insertLog(tags, Log{fmt.Sprintf("%d", m.Time().UnixNano()), line}) + } + + return l.write(s) +} + +func (l *Loki) write(s Streams) error { + bs, err := json.Marshal(s) + if err != nil { + return fmt.Errorf("json.Marshal: %w", err) + } + + var reqBodyBuffer io.Reader = bytes.NewBuffer(bs) + + if l.GZipRequest { + rc, err := internal.CompressWithGzip(reqBodyBuffer) + if err != nil { + return err + } + defer rc.Close() + reqBodyBuffer = rc + } + + req, err := http.NewRequest(http.MethodPost, l.url, reqBodyBuffer) + if err != nil { + return err + } + + if l.Username != "" { + req.SetBasicAuth(l.Username, l.Password) + } + + for k, v := range l.Headers { + if strings.ToLower(k) == "host" { + req.Host = v + } + req.Header.Set(k, v) + } + + req.Header.Set("User-Agent", internal.ProductToken()) + req.Header.Set("Content-Type", "application/json") + if l.GZipRequest { + req.Header.Set("Content-Encoding", "gzip") + } + + resp, err := l.client.Do(req) + if err != nil { + return err + } + _ = resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("when writing to [%s] received status code: %d", l.url, resp.StatusCode) + } + + return nil +} + +func init() { + outputs.Add("loki", func() telegraf.Output { + return &Loki{} + }) +} diff --git a/plugins/outputs/loki/loki_test.go b/plugins/outputs/loki/loki_test.go new file mode 100644 index 0000000000000..1b8b61e34e48e --- /dev/null +++ b/plugins/outputs/loki/loki_test.go @@ -0,0 +1,356 @@ +package loki + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "github.com/influxdata/telegraf/testutil" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/stretchr/testify/require" +) + +func getMetric() telegraf.Metric { + return testutil.MustMetric( + "log", + map[string]string{ + "key1": "value1", + }, + map[string]interface{}{ + "line": "my log", + "field": 3.14, + }, + time.Unix(123, 0), + ) +} + +func TestStatusCode(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *Loki + statusCode int + errFunc func(t *testing.T, err error) + }{ + { + name: "success", + plugin: &Loki{ + Domain: u.String(), + }, + statusCode: http.StatusNoContent, + errFunc: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "1xx status is an error", + plugin: &Loki{ + Domain: u.String(), + }, + statusCode: 103, + errFunc: func(t *testing.T, err error) { + require.Error(t, err) + }, + }, + { + name: "3xx status is an error", + plugin: &Loki{ + Domain: u.String(), + }, + statusCode: http.StatusMultipleChoices, + errFunc: func(t *testing.T, err error) { + require.Error(t, err) + }, + }, + { + name: "4xx status is an error", + plugin: &Loki{ + Domain: u.String(), + }, + statusCode: http.StatusMultipleChoices, + errFunc: func(t *testing.T, err error) { + require.Error(t, err) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(tt.statusCode) + }) + + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + tt.errFunc(t, err) + }) + } +} + +func TestContentType(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *Loki + expected string + }{ + { + name: "default is application/json", + plugin: &Loki{ + Domain: u.String(), + }, + expected: "application/json", + }, + { + name: "overwrite content_type", + plugin: &Loki{ + Domain: u.String(), + Headers: map[string]string{"Content-Type": "plain/text"}, + }, + // plugin force content-type + expected: "application/json", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, tt.expected, r.Header.Get("Content-Type")) + w.WriteHeader(http.StatusOK) + }) + + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) + } +} + +func TestContentEncodingGzip(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *Loki + expected string + }{ + { + name: "default is no content encoding", + plugin: &Loki{ + Domain: u.String(), + }, + expected: "", + }, + { + name: "overwrite content_encoding", + plugin: &Loki{ + Domain: u.String(), + GZipRequest: true, + }, + expected: "gzip", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, tt.expected, r.Header.Get("Content-Encoding")) + + body := r.Body + var err error + if r.Header.Get("Content-Encoding") == "gzip" { + body, err = gzip.NewReader(r.Body) + require.NoError(t, err) + } + + payload, err := ioutil.ReadAll(body) + require.NoError(t, err) + + var s Request + err = json.Unmarshal(payload, &s) + require.NoError(t, err) + require.Len(t, s.Streams, 1) + require.Len(t, s.Streams[0].Logs, 1) + require.Len(t, s.Streams[0].Logs[0], 2) + require.Equal(t, map[string]string{"key1": "value1"}, s.Streams[0].Labels) + require.Equal(t, "123000000000", s.Streams[0].Logs[0][0]) + require.Contains(t, s.Streams[0].Logs[0][1], "line=\"my log\"") + require.Contains(t, s.Streams[0].Logs[0][1], "field=\"3.14\"") + + w.WriteHeader(http.StatusNoContent) + }) + + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) + } +} + +func TestBasicAuth(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *Loki + }{ + { + name: "default", + plugin: &Loki{ + Domain: u.String(), + }, + }, + { + name: "username and password", + plugin: &Loki{ + Domain: u.String(), + Username: "username", + Password: "pa$$word", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + username, password, _ := r.BasicAuth() + require.Equal(t, tt.plugin.Username, username) + require.Equal(t, tt.plugin.Password, password) + w.WriteHeader(http.StatusOK) + }) + + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) + } +} + +type TestHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) + +func TestOAuthClientCredentialsGrant(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + var token = "2YotnFZFEjr1zCsicMWpAA" + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *Loki + tokenHandler TestHandlerFunc + handler TestHandlerFunc + }{ + { + name: "no credentials", + plugin: &Loki{ + Domain: u.String(), + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Len(t, r.Header["Authorization"], 0) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "success", + plugin: &Loki{ + Domain: u.String(), + ClientID: "howdy", + ClientSecret: "secret", + TokenURL: u.String() + "/token", + Scopes: []string{"urn:opc:idm:__myscopes__"}, + }, + tokenHandler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + values := url.Values{} + values.Add("access_token", token) + values.Add("token_type", "bearer") + values.Add("expires_in", "3600") + w.Write([]byte(values.Encode())) + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, []string{"Bearer " + token}, r.Header["Authorization"]) + w.WriteHeader(http.StatusOK) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case defaultEndpoint: + tt.handler(t, w, r) + case "/token": + tt.tokenHandler(t, w, r) + } + }) + + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) + } +} + +func TestDefaultUserAgent(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + t.Run("default-user-agent", func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, internal.ProductToken(), r.Header.Get("User-Agent")) + w.WriteHeader(http.StatusOK) + }) + + client := &Loki{ + Domain: u.String(), + } + + err = client.Connect() + require.NoError(t, err) + + err = client.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) +} diff --git a/plugins/outputs/loki/stream.go b/plugins/outputs/loki/stream.go new file mode 100644 index 0000000000000..4f9f9c07269c6 --- /dev/null +++ b/plugins/outputs/loki/stream.go @@ -0,0 +1,70 @@ +package loki + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/influxdata/telegraf" +) + +type ( + Log []string + + Streams map[string]*Stream + + Stream struct { + Labels map[string]string `json:"stream"` + Logs []Log `json:"values"` + } + + Request struct { + Streams []Stream `json:"streams"` + } +) + +func (s Streams) insertLog(ts []*telegraf.Tag, l Log) { + key := uniqKeyFromTagList(ts) + + if _, ok := s[key]; !ok { + s[key] = newStream(ts) + } + + s[key].Logs = append(s[key].Logs, l) +} + +func (s Streams) MarshalJSON() ([]byte, error) { + r := Request{ + Streams: make([]Stream, 0, len(s)), + } + + for _, stream := range s { + r.Streams = append(r.Streams, *stream) + } + + return json.Marshal(r) +} + +func uniqKeyFromTagList(ts []*telegraf.Tag) (k string) { + for _, t := range ts { + k += fmt.Sprintf("%s-%s-", + strings.ReplaceAll(t.Key, "-", "--"), + strings.ReplaceAll(t.Value, "-", "--"), + ) + } + + return k +} + +func newStream(ts []*telegraf.Tag) *Stream { + s := &Stream{ + Logs: make([]Log, 0), + Labels: map[string]string{}, + } + + for _, t := range ts { + s.Labels[t.Key] = t.Value + } + + return s +} diff --git a/plugins/outputs/loki/stream_test.go b/plugins/outputs/loki/stream_test.go new file mode 100644 index 0000000000000..7a47de5ccd746 --- /dev/null +++ b/plugins/outputs/loki/stream_test.go @@ -0,0 +1,157 @@ +package loki + +import ( + "testing" + + "github.com/influxdata/telegraf" + "github.com/stretchr/testify/require" +) + +type tuple struct { + key, value string +} + +func generateLabelsAndTag(tt ...tuple) (map[string]string, []*telegraf.Tag) { + labels := map[string]string{} + var tags []*telegraf.Tag + + for _, t := range tt { + labels[t.key] = t.value + tags = append(tags, &telegraf.Tag{Key: t.key, Value: t.value}) + } + + return labels, tags +} + +func TestGenerateLabelsAndTag(t *testing.T) { + labels, tags := generateLabelsAndTag( + tuple{key: "key1", value: "value1"}, + tuple{key: "key2", value: "value2"}, + tuple{key: "key3", value: "value3"}, + ) + + expectedTags := []*telegraf.Tag{ + {Key: "key1", Value: "value1"}, + {Key: "key2", Value: "value2"}, + {Key: "key3", Value: "value3"}, + } + + require.Len(t, labels, 3) + require.Len(t, tags, 3) + require.Equal(t, map[string]string{"key1": "value1", "key2": "value2", "key3": "value3"}, labels) + require.Equal(t, map[string]string{"key1": "value1", "key2": "value2", "key3": "value3"}, labels) + require.Equal(t, expectedTags, tags) +} + +func TestStream_insertLog(t *testing.T) { + s := Streams{} + log1 := Log{"123", "this log isn't useful"} + log2 := Log{"124", "this log isn't useful neither"} + log3 := Log{"122", "again"} + + key1 := "key1-value1-key2-value2-key3-value3-" + labels1, tags1 := generateLabelsAndTag( + tuple{key: "key1", value: "value1"}, + tuple{key: "key2", value: "value2"}, + tuple{key: "key3", value: "value3"}, + ) + + key2 := "key2-value2-" + labels2, tags2 := generateLabelsAndTag( + tuple{key: "key2", value: "value2"}, + ) + + s.insertLog(tags1, log1) + + require.Len(t, s, 1) + require.Contains(t, s, key1) + require.Len(t, s[key1].Logs, 1) + require.Equal(t, labels1, s[key1].Labels) + require.Equal(t, "123", s[key1].Logs[0][0]) + require.Equal(t, "this log isn't useful", s[key1].Logs[0][1]) + + s.insertLog(tags1, log2) + + require.Len(t, s, 1) + require.Len(t, s[key1].Logs, 2) + require.Equal(t, "124", s[key1].Logs[1][0]) + require.Equal(t, "this log isn't useful neither", s[key1].Logs[1][1]) + + s.insertLog(tags2, log3) + + require.Len(t, s, 2) + require.Contains(t, s, key2) + require.Len(t, s[key2].Logs, 1) + require.Equal(t, labels2, s[key2].Labels) + require.Equal(t, "122", s[key2].Logs[0][0]) + require.Equal(t, "again", s[key2].Logs[0][1]) +} + +func TestUniqKeyFromTagList(t *testing.T) { + tests := []struct { + in []*telegraf.Tag + out string + }{ + { + in: []*telegraf.Tag{ + {Key: "key1", Value: "value1"}, + {Key: "key2", Value: "value2"}, + {Key: "key3", Value: "value3"}, + }, + out: "key1-value1-key2-value2-key3-value3-", + }, + { + in: []*telegraf.Tag{ + {Key: "key1", Value: "value1"}, + {Key: "key3", Value: "value3"}, + {Key: "key4", Value: "value4"}, + }, + out: "key1-value1-key3-value3-key4-value4-", + }, + { + in: []*telegraf.Tag{ + {Key: "target", Value: "local"}, + {Key: "host", Value: "host"}, + {Key: "service", Value: "dns"}, + }, + out: "target-local-host-host-service-dns-", + }, + { + in: []*telegraf.Tag{ + {Key: "target", Value: "localhost"}, + {Key: "hostservice", Value: "dns"}, + }, + out: "target-localhost-hostservice-dns-", + }, + { + in: []*telegraf.Tag{ + {Key: "target-local", Value: "host-"}, + }, + out: "target--local-host---", + }, + } + + for _, test := range tests { + require.Equal(t, test.out, uniqKeyFromTagList(test.in)) + } +} + +func Test_newStream(t *testing.T) { + labels, tags := generateLabelsAndTag( + tuple{key: "key1", value: "value1"}, + tuple{key: "key2", value: "value2"}, + tuple{key: "key3", value: "value3"}, + ) + + s := newStream(tags) + + require.Empty(t, s.Logs) + require.Equal(t, s.Labels, labels) +} + +func Test_newStream_noTag(t *testing.T) { + s := newStream(nil) + + require.Empty(t, s.Logs) + require.Empty(t, s.Labels) +} From 15d45ec0bf74515ae2b804906ebc7a55eddd0bd1 Mon Sep 17 00:00:00 2001 From: Thomas Schuetz <38893055+thschue@users.noreply.github.com> Date: Tue, 2 Mar 2021 21:55:38 +0100 Subject: [PATCH 249/761] Dynatrace Plugin: Make conversion to counters possible / Changed large bulk handling (#8397) --- plugins/outputs/dynatrace/README.md | 2 + plugins/outputs/dynatrace/dynatrace.go | 52 ++++++++++++++++++-------- 2 files changed, 39 insertions(+), 15 deletions(-) diff --git a/plugins/outputs/dynatrace/README.md b/plugins/outputs/dynatrace/README.md index ea4b42777752d..e0c1e17635183 100644 --- a/plugins/outputs/dynatrace/README.md +++ b/plugins/outputs/dynatrace/README.md @@ -35,6 +35,8 @@ You will also need to configure an API token for secure access. Find out how to prefix = "telegraf." ## Flag for skipping the tls certificate check, just for testing purposes, should be false by default insecure_skip_verify = false + ## If you want to convert values represented as gauges to counters, add the metric names here + additional_counters = [ ] ``` diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index adabcb73a554d..8931986dd2fba 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -19,7 +19,8 @@ import ( ) const ( - oneAgentMetricsURL = "http://127.0.0.1:14499/metrics/ingest" + oneAgentMetricsURL = "http://127.0.0.1:14499/metrics/ingest" + dtIngestApiLineLimit = 1000 ) var ( @@ -30,13 +31,14 @@ var ( // Dynatrace Configuration for the Dynatrace output plugin type Dynatrace struct { - URL string `toml:"url"` - APIToken string `toml:"api_token"` - Prefix string `toml:"prefix"` - Log telegraf.Logger `toml:"-"` - Timeout internal.Duration `toml:"timeout"` - State map[string]string - SendCounter int + URL string `toml:"url"` + APIToken string `toml:"api_token"` + Prefix string `toml:"prefix"` + Log telegraf.Logger `toml:"-"` + Timeout internal.Duration `toml:"timeout"` + AddCounterMetrics []string `toml:"additional_counters"` + State map[string]string + SendCounter int tls.ClientConfig @@ -73,6 +75,9 @@ const sampleConfig = ` ## Connection timeout, defaults to "5s" if not set. timeout = "5s" + + ## If you want to convert values represented as gauges to counters, add the metric names here + additional_counters = [ ] ` // Connect Connects the Dynatrace output plugin to the Telegraf stream @@ -130,6 +135,7 @@ func (d *Dynatrace) escape(v string) string { func (d *Dynatrace) Write(metrics []telegraf.Metric) error { var buf bytes.Buffer + metricCounter := 1 var tagb bytes.Buffer if len(metrics) == 0 { return nil @@ -151,8 +157,9 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { if err != nil { continue } - fmt.Fprintf(&tagb, ",%s=%s", strings.ToLower(tagKey), d.escape(metric.Tags()[k])) - + if len(metric.Tags()[k]) > 0 { + fmt.Fprintf(&tagb, ",%s=%s", strings.ToLower(tagKey), d.escape(metric.Tags()[k])) + } } } if len(metric.Fields()) > 0 { @@ -194,7 +201,15 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { continue } // write metric id,tags and value - switch metric.Type() { + + metricType := metric.Type() + for _, i := range d.AddCounterMetrics { + if metric.Name()+"."+metricKey == i { + metricType = telegraf.Counter + } + } + + switch metricType { case telegraf.Counter: var delta float64 @@ -209,7 +224,7 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { if err != nil { d.Log.Debugf("Could not parse current value: %s", value) } - if floatCurrentValue > floatLastValue { + if floatCurrentValue >= floatLastValue { delta = floatCurrentValue - floatLastValue fmt.Fprintf(&buf, "%s%s count,delta=%f\n", metricID, tagb.String(), delta) } @@ -219,6 +234,15 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { default: fmt.Fprintf(&buf, "%s%s %v\n", metricID, tagb.String(), value) } + + if metricCounter%dtIngestApiLineLimit == 0 { + err = d.send(buf.Bytes()) + if err != nil { + return err + } + buf.Reset() + } + metricCounter++ } } } @@ -249,13 +273,12 @@ func (d *Dynatrace) send(msg []byte) error { resp, err := d.client.Do(req) if err != nil { d.Log.Errorf("Dynatrace error: %s", err.Error()) - fmt.Println(req) return fmt.Errorf("error while sending HTTP request:, %s", err.Error()) } defer resp.Body.Close() // print metric line results as info log - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusAccepted { + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusBadRequest { bodyBytes, err := ioutil.ReadAll(resp.Body) if err != nil { d.Log.Errorf("Dynatrace error reading response") @@ -265,7 +288,6 @@ func (d *Dynatrace) send(msg []byte) error { } else { return fmt.Errorf("request failed with response code:, %d", resp.StatusCode) } - return nil } From 30a0fd04cd5c870d27b8ebf163bc8452fd12305d Mon Sep 17 00:00:00 2001 From: Sam Dillard Date: Tue, 2 Mar 2021 13:55:27 -0800 Subject: [PATCH 250/761] Add Starlark script for estimating Line Protocol cardinality (#8852) --- .../starlark/testdata/schema_sizing.star | 84 +++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 plugins/processors/starlark/testdata/schema_sizing.star diff --git a/plugins/processors/starlark/testdata/schema_sizing.star b/plugins/processors/starlark/testdata/schema_sizing.star new file mode 100644 index 0000000000000..6bc53377f7bd4 --- /dev/null +++ b/plugins/processors/starlark/testdata/schema_sizing.star @@ -0,0 +1,84 @@ +# Produces a new Line of statistics about the Fields +# Drops the original metric +# +# Example Input: +# logstash,environment_id=EN456,property_id=PR789,request_type=ingress,stack_id=engd asn=1313i,cache_response_code=202i,colo_code="LAX",colo_id=12i,compute_time=28736i,edge_end_timestamp=1611085500320i,edge_start_timestamp=1611085496208i,id="1b5c67ed-dfd0-4d30-99bd-84f0a9c5297b_76af1809-29d1-4b35-a0cf-39797458275c",parent_ray_id="00",processing_details="ok",rate_limit_id=0i,ray_id="76af1809-29d1-4b35-a0cf-39797458275c",request_bytes=7777i,request_host="engd-08364a825824e04f0a494115.reactorstream.dev",request_id="1b5c67ed-dfd0-4d30-99bd-84f0a9c5297b",request_result="succeeded",request_uri="/ENafcb2798a9be4bb7bfddbf35c374db15",response_code=200i,subrequest=false,subrequest_count=1i,user_agent="curl/7.64.1" 1611085496208 +# +# Example Output: +# sizing,measurement=logstash,environment_id=EN456,property_id=PR789,request_type=ingress,stack_id=engd tag_count=4,tag_key_avg_length=11.25,tag_value_avg_length=5.25,int_avg_length=4.9,int_count=10,bool_avg_length=5,bool_count=1,str_avg_length=25.4,str_count=10 1611085496208 + +def apply(metric): + new_metric = Metric("sizing") + num_tags = len(metric.tags.items()) + new_metric.fields["tag_count"] = float(num_tags) + new_metric.fields["tag_key_avg_length"] = sum(map(len, metric.tags.keys())) / num_tags + new_metric.fields["tag_value_avg_length"] = sum(map(len, metric.tags.values())) / num_tags + + new_metric.tags["measurement"] = metric.name + + new_metric.tags.update(metric.tags) + + ints, floats, bools, strs = [], [], [], [] + for field in metric.fields.items(): + value = field[1] + if type(value) == "int": + ints.append(field) + elif type(value) == "float": + floats.append(field) + elif type(value) == "bool": + bools.append(field) + elif type(value) == "string": + strs.append(field) + + if len(ints) > 0: + int_vals = [i[1] for i in ints] + produce_pairs(new_metric, int_vals, "int") + if len(floats) > 0: + float_vals = [i[1] for i in floats] + produce_pairs(new_metric, float_vals, "float") + if len(bools) > 0: + bool_vals = [i[1] for i in bools] + produce_pairs(new_metric, bool_vals, "bool") + if len(strs) > 0: + str_vals = [i[1] for i in strs] + produce_pairs(new_metric, str_vals, "str") + + return new_metric + +def produce_pairs(metric, li, field_type): + lens = elem_lengths(li) + counts = count_lengths(lens) + + metric.fields["{}_avg_length".format(field_type)] = float(mean(lens)) + metric.fields["{}_count".format(field_type)] = float(len(li)) + +def elem_lengths(li): + if type(li[0]) in ("int", "float", "bool"): + return [len(str(elem)) for elem in li] + else: + return [len(elem) for elem in li] + +def count_lengths(li): + # Returns dict of counts of each occurrence of length in a list of lengths + lens = [] + counts = [] + for elem in li: + if elem not in lens: + lens.append(elem) + counts.append(1) + else: + index = lens.index(elem) + counts[index] += 1 + return dict(zip(lens, counts)) + +def map(f, li): + return [f(x) for x in li] + +def sum(li): + sum = 0 + for i in li: + sum += i + return sum + +def mean(li): + return sum(li)/len(li) From 600816826d0ff7981cd08d859aafdd932f2455ab Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Tue, 2 Mar 2021 17:30:59 -0500 Subject: [PATCH 251/761] Adding a new directory monitor input plugin. (#8751) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 1 + go.sum | 2 + plugins/inputs/all/all.go | 1 + plugins/inputs/directory_monitor/README.md | 48 ++ .../directory_monitor/directory_monitor.go | 410 ++++++++++++++++++ .../directory_monitor_test.go | 135 ++++++ 7 files changed, 598 insertions(+) create mode 100644 plugins/inputs/directory_monitor/README.md create mode 100644 plugins/inputs/directory_monitor/directory_monitor.go create mode 100644 plugins/inputs/directory_monitor/directory_monitor_test.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 97125ffd1a325..657c632767075 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -185,6 +185,7 @@ following works: - google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE) - google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) - gopkg.in/asn1-ber.v1 [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE) +- gopkg.in/djherbis/times.v1 [MIT License](https://github.com/djherbis/times/blob/master/LICENSE) - gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) - gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE) - gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE) diff --git a/go.mod b/go.mod index aaa19949dd710..818e78c0c8496 100644 --- a/go.mod +++ b/go.mod @@ -148,6 +148,7 @@ require ( google.golang.org/api v0.20.0 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 google.golang.org/grpc v1.33.1 + gopkg.in/djherbis/times.v1 v1.2.0 gopkg.in/fatih/pool.v2 v2.0.0 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/ldap.v3 v3.1.0 diff --git a/go.sum b/go.sum index db92645af3929..322437835292d 100644 --- a/go.sum +++ b/go.sum @@ -998,6 +998,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogR gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/djherbis/times.v1 v1.2.0 h1:UCvDKl1L/fmBygl2Y7hubXCnY7t4Yj46ZrBFNUipFbM= +gopkg.in/djherbis/times.v1 v1.2.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index e20d43479344e..dd3474d25da28 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -30,6 +30,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/couchdb" _ "github.com/influxdata/telegraf/plugins/inputs/cpu" _ "github.com/influxdata/telegraf/plugins/inputs/dcos" + _ "github.com/influxdata/telegraf/plugins/inputs/directory_monitor" _ "github.com/influxdata/telegraf/plugins/inputs/disk" _ "github.com/influxdata/telegraf/plugins/inputs/diskio" _ "github.com/influxdata/telegraf/plugins/inputs/disque" diff --git a/plugins/inputs/directory_monitor/README.md b/plugins/inputs/directory_monitor/README.md new file mode 100644 index 0000000000000..66d9eb51fce79 --- /dev/null +++ b/plugins/inputs/directory_monitor/README.md @@ -0,0 +1,48 @@ +# Directory Monitor Input Plugin + +This plugin monitors a single directory (without looking at sub-directories), and takes in each file placed in the directory. +The plugin will gather all files in the directory at a configurable interval (`monitor_interval`), and parse the ones that haven't been picked up yet. + +This plugin is intended to read files that are moved or copied to the monitored directory, and thus files should also not be used by another process or else they may fail to be gathered. Please be advised that this plugin pulls files directly after they've been in the directory for the length of the configurable `directory_duration_threshold`, and thus files should not be written 'live' to the monitored directory. If you absolutely must write files directly, they must be guaranteed to finish writing before the `directory_duration_threshold`. + +### Configuration: + +```toml +[[inputs.directory_monitor]] + ## The directory to monitor and read files from. + directory = "" + # + ## The directory to move finished files to. + finished_directory = "" + # + ## The directory to move files to upon file error. + ## If not provided, erroring files will stay in the monitored directory. + # error_directory = "" + # + ## The amount of time a file is allowed to sit in the directory before it is picked up. + ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow, + ## set this higher so that the plugin will wait until the file is fully copied to the directory. + # directory_duration_threshold = "50ms" + # + ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested. + # files_to_monitor = ["^.*\.csv"] + # + ## A list of files to ignore, if necessary. Supports regex. + # files_to_ignore = [".DS_Store"] + # + ## Maximum lines of the file to process that have not yet be written by the + ## output. For best throughput set to the size of the output's metric_buffer_limit. + ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics. + # max_buffered_metrics = 10000 + # + ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. + ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. + # file_queue_size = 100000 + # + ## The dataformat to be read from the files. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## NOTE: We currently only support parsing newline-delimited JSON. See the format here: https://github.com/ndjson/ndjson-spec + data_format = "influx" +``` diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go new file mode 100644 index 0000000000000..30820659338d8 --- /dev/null +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -0,0 +1,410 @@ +package directory_monitor + +import ( + "bufio" + "context" + "errors" + "fmt" + "regexp" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/csv" + "github.com/influxdata/telegraf/selfstat" + "golang.org/x/sync/semaphore" + "gopkg.in/djherbis/times.v1" + + "compress/gzip" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" +) + +const sampleConfig = ` + ## The directory to monitor and read files from. + directory = "" + # + ## The directory to move finished files to. + finished_directory = "" + # + ## The directory to move files to upon file error. + ## If not provided, erroring files will stay in the monitored directory. + # error_directory = "" + # + ## The amount of time a file is allowed to sit in the directory before it is picked up. + ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow, + ## set this higher so that the plugin will wait until the file is fully copied to the directory. + # directory_duration_threshold = "50ms" + # + ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested. + # files_to_monitor = ["^.*\.csv"] + # + ## A list of files to ignore, if necessary. Supports regex. + # files_to_ignore = [".DS_Store"] + # + ## Maximum lines of the file to process that have not yet be written by the + ## output. For best throughput set to the size of the output's metric_buffer_limit. + ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics. + # max_buffered_metrics = 10000 + # + ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. + ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. + # file_queue_size = 100000 + # + ## The dataformat to be read from the files. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + ## NOTE: We currently only support parsing newline-delimited JSON. See the format here: https://github.com/ndjson/ndjson-spec + data_format = "influx" +` + +var ( + defaultFilesToMonitor = []string{} + defaultFilesToIgnore = []string{} + defaultMaxBufferedMetrics = 10000 + defaultDirectoryDurationThreshold = config.Duration(0 * time.Millisecond) + defaultFileQueueSize = 100000 +) + +type DirectoryMonitor struct { + Directory string `toml:"directory"` + FinishedDirectory string `toml:"finished_directory"` + ErrorDirectory string `toml:"error_directory"` + + FilesToMonitor []string `toml:"files_to_monitor"` + FilesToIgnore []string `toml:"files_to_ignore"` + MaxBufferedMetrics int `toml:"max_buffered_metrics"` + DirectoryDurationThreshold config.Duration `toml:"directory_duration_threshold"` + Log telegraf.Logger `toml:"-"` + FileQueueSize int `toml:"file_queue_size"` + + filesInUse sync.Map + cancel context.CancelFunc + context context.Context + parserFunc parsers.ParserFunc + filesProcessed selfstat.Stat + filesDropped selfstat.Stat + waitGroup *sync.WaitGroup + acc telegraf.TrackingAccumulator + sem *semaphore.Weighted + fileRegexesToMatch []*regexp.Regexp + fileRegexesToIgnore []*regexp.Regexp + filesToProcess chan string +} + +func (monitor *DirectoryMonitor) SampleConfig() string { + return sampleConfig +} + +func (monitor *DirectoryMonitor) Description() string { + return "Ingests files in a directory and then moves them to a target directory." +} + +func (monitor *DirectoryMonitor) Gather(acc telegraf.Accumulator) error { + // Get all files sitting in the directory. + files, err := ioutil.ReadDir(monitor.Directory) + if err != nil { + return fmt.Errorf("unable to monitor the targeted directory: %w", err) + } + + for _, file := range files { + filePath := monitor.Directory + "/" + file.Name() + + // We've been cancelled via Stop(). + if monitor.context.Err() != nil { + return nil + } + + stat, err := times.Stat(filePath) + if err != nil { + continue + } + + timeThresholdExceeded := time.Since(stat.AccessTime()) >= time.Duration(monitor.DirectoryDurationThreshold) + + // If file is decaying, process it. + if timeThresholdExceeded { + monitor.processFile(file, acc) + } + } + + return nil +} + +func (monitor *DirectoryMonitor) Start(acc telegraf.Accumulator) error { + // Use tracking to determine when more metrics can be added without overflowing the outputs. + monitor.acc = acc.WithTracking(monitor.MaxBufferedMetrics) + go func() { + for range monitor.acc.Delivered() { + monitor.sem.Release(1) + } + }() + + // Monitor the files channel and read what they receive. + monitor.waitGroup.Add(1) + go func() { + monitor.Monitor(acc) + monitor.waitGroup.Done() + }() + + return nil +} + +func (monitor *DirectoryMonitor) Stop() { + // Before stopping, wrap up all file-reading routines. + monitor.cancel() + close(monitor.filesToProcess) + monitor.Log.Warnf("Exiting the Directory Monitor plugin. Waiting to quit until all current files are finished.") + monitor.waitGroup.Wait() +} + +func (monitor *DirectoryMonitor) Monitor(acc telegraf.Accumulator) { + for filePath := range monitor.filesToProcess { + if monitor.context.Err() != nil { + return + } + + // Prevent goroutines from taking the same file as another. + if _, exists := monitor.filesInUse.LoadOrStore(filePath, true); exists { + continue + } + + monitor.read(filePath) + + // We've finished reading the file and moved it away, delete it from files in use. + monitor.filesInUse.Delete(filePath) + } +} + +func (monitor *DirectoryMonitor) processFile(file os.FileInfo, acc telegraf.Accumulator) { + if file.IsDir() { + return + } + + filePath := monitor.Directory + "/" + file.Name() + + // File must be configured to be monitored, if any configuration... + if !monitor.isMonitoredFile(file.Name()) { + return + } + + // ...and should not be configured to be ignored. + if monitor.isIgnoredFile(file.Name()) { + return + } + + select { + case monitor.filesToProcess <- filePath: + default: + } +} + +func (monitor *DirectoryMonitor) read(filePath string) { + // Open, read, and parse the contents of the file. + err := monitor.ingestFile(filePath) + if _, isPathError := err.(*os.PathError); isPathError { + return + } + + // Handle a file read error. We don't halt execution but do document, log, and move the problematic file. + if err != nil { + monitor.Log.Errorf("Error while reading file: '" + filePath + "'. " + err.Error()) + monitor.filesDropped.Incr(1) + if monitor.ErrorDirectory != "" { + monitor.moveFile(filePath, monitor.ErrorDirectory) + } + return + } + + // File is finished, move it to the 'finished' directory. + monitor.moveFile(filePath, monitor.FinishedDirectory) + monitor.filesProcessed.Incr(1) +} + +func (monitor *DirectoryMonitor) ingestFile(filePath string) error { + file, err := os.Open(filePath) + if err != nil { + return err + } + defer file.Close() + + parser, err := monitor.parserFunc() + if err != nil { + return fmt.Errorf("E! Creating parser: %s", err.Error()) + } + + // Handle gzipped files. + var reader io.Reader + if filepath.Ext(filePath) == ".gz" { + reader, err = gzip.NewReader(file) + if err != nil { + return err + } + } else { + reader = file + } + + return monitor.parseFile(parser, reader) +} + +func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Reader) error { + // Read the file line-by-line and parse with the configured parse method. + firstLine := true + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + metrics, err := monitor.parseLine(parser, scanner.Bytes(), firstLine) + if err != nil { + return err + } + if firstLine { + firstLine = false + } + + monitor.sendMetrics(metrics) + } + + return nil +} + +func (monitor *DirectoryMonitor) parseLine(parser parsers.Parser, line []byte, firstLine bool) ([]telegraf.Metric, error) { + switch parser.(type) { + case *csv.Parser: + // The CSV parser parses headers in Parse and skips them in ParseLine. + if firstLine { + return parser.Parse(line) + } + + m, err := parser.ParseLine(string(line)) + if err != nil { + return nil, err + } + + if m != nil { + return []telegraf.Metric{m}, nil + } + + return []telegraf.Metric{}, nil + default: + return parser.Parse(line) + } +} + +func (monitor *DirectoryMonitor) sendMetrics(metrics []telegraf.Metric) { + // Report the metrics for the file. + for _, m := range metrics { + // Block until metric can be written. + monitor.sem.Acquire(monitor.context, 1) + monitor.acc.AddTrackingMetricGroup([]telegraf.Metric{m}) + } +} + +func (monitor *DirectoryMonitor) moveFile(filePath string, directory string) { + err := os.Rename(filePath, directory+"/"+filepath.Base(filePath)) + + if err != nil { + monitor.Log.Errorf("Error while moving file '" + filePath + "' to another directory. Error: " + err.Error()) + } +} + +func (monitor *DirectoryMonitor) isMonitoredFile(fileName string) bool { + if len(monitor.fileRegexesToMatch) == 0 { + return true + } + + // Only monitor matching files. + for _, regex := range monitor.fileRegexesToMatch { + if regex.MatchString(fileName) { + return true + } + } + + return false +} + +func (monitor *DirectoryMonitor) isIgnoredFile(fileName string) bool { + // Skip files that are set to be ignored. + for _, regex := range monitor.fileRegexesToIgnore { + if regex.MatchString(fileName) { + return true + } + } + + return false +} + +func (monitor *DirectoryMonitor) SetParserFunc(fn parsers.ParserFunc) { + monitor.parserFunc = fn +} + +func (monitor *DirectoryMonitor) Init() error { + if monitor.Directory == "" || monitor.FinishedDirectory == "" { + return errors.New("Missing one of the following required config options: directory, finished_directory.") + } + + if monitor.FileQueueSize <= 0 { + return errors.New("file queue size needs to be more than 0") + } + + // Finished directory can be created if not exists for convenience. + if _, err := os.Stat(monitor.FinishedDirectory); os.IsNotExist(err) { + err = os.Mkdir(monitor.FinishedDirectory, 0777) + if err != nil { + return err + } + } + + monitor.filesDropped = selfstat.Register("directory_monitor", "files_dropped", map[string]string{}) + monitor.filesProcessed = selfstat.Register("directory_monitor", "files_processed", map[string]string{}) + + // If an error directory should be used but has not been configured yet, create one ourselves. + if monitor.ErrorDirectory != "" { + if _, err := os.Stat(monitor.ErrorDirectory); os.IsNotExist(err) { + err := os.Mkdir(monitor.ErrorDirectory, 0777) + if err != nil { + return err + } + } + } + + monitor.waitGroup = &sync.WaitGroup{} + monitor.sem = semaphore.NewWeighted(int64(monitor.MaxBufferedMetrics)) + monitor.context, monitor.cancel = context.WithCancel(context.Background()) + monitor.filesToProcess = make(chan string, monitor.FileQueueSize) + + // Establish file matching / exclusion regexes. + for _, matcher := range monitor.FilesToMonitor { + regex, err := regexp.Compile(matcher) + if err != nil { + return err + } + monitor.fileRegexesToMatch = append(monitor.fileRegexesToMatch, regex) + } + + for _, matcher := range monitor.FilesToIgnore { + regex, err := regexp.Compile(matcher) + if err != nil { + return err + } + monitor.fileRegexesToIgnore = append(monitor.fileRegexesToIgnore, regex) + } + + return nil +} + +func init() { + inputs.Add("directory_monitor", func() telegraf.Input { + return &DirectoryMonitor{ + FilesToMonitor: defaultFilesToMonitor, + FilesToIgnore: defaultFilesToIgnore, + MaxBufferedMetrics: defaultMaxBufferedMetrics, + DirectoryDurationThreshold: defaultDirectoryDurationThreshold, + FileQueueSize: defaultFileQueueSize, + } + }) +} diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go new file mode 100644 index 0000000000000..265f371885f29 --- /dev/null +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -0,0 +1,135 @@ +package directory_monitor + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestCSVGZImport(t *testing.T) { + acc := testutil.Accumulator{} + testCsvFile := "test.csv" + testCsvGzFile := "test.csv.gz" + + // Establish process directory and finished directory. + finishedDirectory, err := ioutil.TempDir("", "finished") + require.NoError(t, err) + processDirectory, err := ioutil.TempDir("", "test") + require.NoError(t, err) + defer os.RemoveAll(processDirectory) + defer os.RemoveAll(finishedDirectory) + + // Init plugin. + r := DirectoryMonitor{ + Directory: processDirectory, + FinishedDirectory: finishedDirectory, + MaxBufferedMetrics: 1000, + FileQueueSize: 100000, + } + err = r.Init() + require.NoError(t, err) + + parserConfig := parsers.Config{ + DataFormat: "csv", + CSVHeaderRowCount: 1, + } + require.NoError(t, err) + r.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(&parserConfig) + }) + r.Log = testutil.Logger{} + + // Write csv file to process into the 'process' directory. + f, err := os.Create(filepath.Join(processDirectory, testCsvFile)) + require.NoError(t, err) + f.WriteString("thing,color\nsky,blue\ngrass,green\nclifford,red\n") + f.Close() + + // Write csv.gz file to process into the 'process' directory. + var b bytes.Buffer + w := gzip.NewWriter(&b) + w.Write([]byte("thing,color\nsky,blue\ngrass,green\nclifford,red\n")) + w.Close() + err = ioutil.WriteFile(filepath.Join(processDirectory, testCsvGzFile), b.Bytes(), 0666) + + // Start plugin before adding file. + err = r.Start(&acc) + require.NoError(t, err) + err = r.Gather(&acc) + require.NoError(t, err) + acc.Wait(6) + r.Stop() + + // Verify that we read both files once. + require.Equal(t, len(acc.Metrics), 6) + + // File should have gone back to the test directory, as we configured. + _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile)) + _, err = os.Stat(filepath.Join(finishedDirectory, testCsvGzFile)) + + require.NoError(t, err) +} + +// For JSON data. +type event struct { + Name string + Speed float64 + Length float64 +} + +func TestMultipleJSONFileImports(t *testing.T) { + acc := testutil.Accumulator{} + testJsonFile := "test.json" + + // Establish process directory and finished directory. + finishedDirectory, err := ioutil.TempDir("", "finished") + require.NoError(t, err) + processDirectory, err := ioutil.TempDir("", "test") + require.NoError(t, err) + defer os.RemoveAll(processDirectory) + defer os.RemoveAll(finishedDirectory) + + // Init plugin. + r := DirectoryMonitor{ + Directory: processDirectory, + FinishedDirectory: finishedDirectory, + MaxBufferedMetrics: 1000, + FileQueueSize: 1000, + } + err = r.Init() + require.NoError(t, err) + + parserConfig := parsers.Config{ + DataFormat: "json", + JSONNameKey: "Name", + } + + r.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(&parserConfig) + }) + + // Let's drop a 5-line LINE-DELIMITED json. + // Write csv file to process into the 'process' directory. + f, err := os.Create(filepath.Join(processDirectory, testJsonFile)) + require.NoError(t, err) + f.WriteString("{\"Name\": \"event1\",\"Speed\": 100.1,\"Length\": 20.1}\n{\"Name\": \"event2\",\"Speed\": 500,\"Length\": 1.4}\n{\"Name\": \"event3\",\"Speed\": 200,\"Length\": 10.23}\n{\"Name\": \"event4\",\"Speed\": 80,\"Length\": 250}\n{\"Name\": \"event5\",\"Speed\": 120.77,\"Length\": 25.97}") + f.Close() + + err = r.Start(&acc) + r.Log = testutil.Logger{} + require.NoError(t, err) + err = r.Gather(&acc) + require.NoError(t, err) + acc.Wait(5) + r.Stop() + + // Verify that we read each JSON line once to a single metric. + require.Equal(t, len(acc.Metrics), 5) +} From 851136f16ca97b86c1c55c22dca825f8778ee931 Mon Sep 17 00:00:00 2001 From: reimda Date: Tue, 2 Mar 2021 21:42:06 -0700 Subject: [PATCH 252/761] Add PPID field to procstat input plugin (#8887) --- plugins/inputs/procstat/process.go | 1 + plugins/inputs/procstat/procstat.go | 5 +++++ plugins/inputs/procstat/procstat_test.go | 4 ++++ 3 files changed, 10 insertions(+) diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index ab2fde601f5c8..8e3e934bbdc55 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -26,6 +26,7 @@ type Process interface { RlimitUsage(bool) ([]process.RlimitStat, error) Username() (string, error) CreateTime() (int64, error) + Ppid() (int32, error) } type PIDFinder interface { diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 772fe77ae4f13..a27ea2c938d69 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -311,6 +311,11 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time } } + ppid, err := proc.Ppid() + if err == nil { + fields[prefix+"ppid"] = ppid + } + acc.AddFields("procstat", fields, proc.Tags(), t) } diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index c86ac53b385a0..4f1c15f40150e 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -164,6 +164,10 @@ func (p *testProc) RlimitUsage(_ bool) ([]process.RlimitStat, error) { return []process.RlimitStat{}, nil } +func (p *testProc) Ppid() (int32, error) { + return 0, nil +} + var pid = PID(42) var exe = "foo" From d50a52ff2ff4671cb02861cef389102093a2bd79 Mon Sep 17 00:00:00 2001 From: Jeff Ashton Date: Wed, 3 Mar 2021 09:49:05 -0500 Subject: [PATCH 253/761] Switching kinesis output plugin to use telegraf.Logger (#8929) --- plugins/outputs/kinesis/kinesis.go | 22 +++++++++++----------- plugins/outputs/kinesis/kinesis_test.go | 12 ++++++++++++ 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 75f790f3318b7..fd233e5b80bd8 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -1,7 +1,6 @@ package kinesis import ( - "log" "time" "github.com/aws/aws-sdk-go/aws" @@ -30,9 +29,10 @@ type ( RandomPartitionKey bool `toml:"use_random_partitionkey"` Partition *Partition `toml:"partition"` Debug bool `toml:"debug"` - svc kinesisiface.KinesisAPI + Log telegraf.Logger `toml:"-"` serializer serializers.Serializer + svc kinesisiface.KinesisAPI } Partition struct { @@ -118,13 +118,13 @@ func (k *KinesisOutput) Description() string { func (k *KinesisOutput) Connect() error { if k.Partition == nil { - log.Print("E! kinesis : Deprecated partitionkey configuration in use, please consider using outputs.kinesis.partition") + k.Log.Error("Deprecated partitionkey configuration in use, please consider using outputs.kinesis.partition") } // We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using // environment variables, and then Shared Credentials. if k.Debug { - log.Printf("I! kinesis: Establishing a connection to Kinesis in %s", k.Region) + k.Log.Infof("Establishing a connection to Kinesis in %s", k.Region) } credentialConfig := &internalaws.CredentialConfig{ @@ -165,17 +165,17 @@ func (k *KinesisOutput) writeKinesis(r []*kinesis.PutRecordsRequestEntry) time.D resp, err := k.svc.PutRecords(payload) if err != nil { - log.Printf("E! kinesis: Unable to write to Kinesis : %s", err.Error()) + k.Log.Errorf("Unable to write to Kinesis : %s", err.Error()) return time.Since(start) } if k.Debug { - log.Printf("I! Wrote: '%+v'", resp) + k.Log.Infof("Wrote: '%+v'", resp) } failed := *resp.FailedRecordCount if failed > 0 { - log.Printf("E! kinesis: Unable to write %+v of %+v record(s) to Kinesis", failed, len(r)) + k.Log.Errorf("Unable to write %+v of %+v record(s) to Kinesis", failed, len(r)) } return time.Since(start) @@ -203,7 +203,7 @@ func (k *KinesisOutput) getPartitionKey(metric telegraf.Metric) string { // Default partition name if default is not set return "telegraf" default: - log.Printf("E! kinesis : You have configured a Partition method of '%s' which is not supported", k.Partition.Method) + k.Log.Errorf("You have configured a Partition method of '%s' which is not supported", k.Partition.Method) } } if k.RandomPartitionKey { @@ -230,7 +230,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { values, err := k.serializer.Serialize(metric) if err != nil { - log.Printf("D! [outputs.kinesis] Could not serialize metric: %v", err) + k.Log.Debugf("Could not serialize metric: %v", err) continue } @@ -246,7 +246,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { if sz == 500 { // Max Messages Per PutRecordRequest is 500 elapsed := k.writeKinesis(r) - log.Printf("D! Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) + k.Log.Debugf("Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) sz = 0 r = nil } @@ -254,7 +254,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { } if sz > 0 { elapsed := k.writeKinesis(r) - log.Printf("D! Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) + k.Log.Debugf("Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) } return nil diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 293ec86fb829e..49cfcedd5dfc0 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -20,6 +20,7 @@ func TestPartitionKey(t *testing.T) { testPoint := testutil.TestMetric(1) k := KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "static", Key: "-", @@ -28,6 +29,7 @@ func TestPartitionKey(t *testing.T) { assert.Equal("-", k.getPartitionKey(testPoint), "PartitionKey should be '-'") k = KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "tag", Key: "tag1", @@ -36,6 +38,7 @@ func TestPartitionKey(t *testing.T) { assert.Equal(testPoint.Tags()["tag1"], k.getPartitionKey(testPoint), "PartitionKey should be value of 'tag1'") k = KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "tag", Key: "doesnotexist", @@ -45,6 +48,7 @@ func TestPartitionKey(t *testing.T) { assert.Equal("somedefault", k.getPartitionKey(testPoint), "PartitionKey should use default") k = KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "tag", Key: "doesnotexist", @@ -53,6 +57,7 @@ func TestPartitionKey(t *testing.T) { assert.Equal("telegraf", k.getPartitionKey(testPoint), "PartitionKey should be telegraf") k = KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "not supported", }, @@ -60,6 +65,7 @@ func TestPartitionKey(t *testing.T) { assert.Equal("", k.getPartitionKey(testPoint), "PartitionKey should be value of ''") k = KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "measurement", }, @@ -67,6 +73,7 @@ func TestPartitionKey(t *testing.T) { assert.Equal(testPoint.Name(), k.getPartitionKey(testPoint), "PartitionKey should be value of measurement name") k = KinesisOutput{ + Log: testutil.Logger{}, Partition: &Partition{ Method: "random", }, @@ -77,11 +84,13 @@ func TestPartitionKey(t *testing.T) { assert.Equal(byte(4), u.Version(), "PartitionKey should be UUIDv4") k = KinesisOutput{ + Log: testutil.Logger{}, PartitionKey: "-", } assert.Equal("-", k.getPartitionKey(testPoint), "PartitionKey should be '-'") k = KinesisOutput{ + Log: testutil.Logger{}, RandomPartitionKey: true, } partitionKey = k.getPartitionKey(testPoint) @@ -120,6 +129,7 @@ func TestWriteKinesis_WhenSuccess(t *testing.T) { ) k := KinesisOutput{ + Log: testutil.Logger{}, StreamName: streamName, svc: svc, } @@ -165,6 +175,7 @@ func TestWriteKinesis_WhenRecordErrors(t *testing.T) { ) k := KinesisOutput{ + Log: testutil.Logger{}, StreamName: streamName, svc: svc, } @@ -200,6 +211,7 @@ func TestWriteKinesis_WhenServiceError(t *testing.T) { ) k := KinesisOutput{ + Log: testutil.Logger{}, StreamName: streamName, svc: svc, } From cc61251cc9a0f32922e17b967e0ba9d50be68100 Mon Sep 17 00:00:00 2001 From: Caleb Hailey Date: Wed, 3 Mar 2021 10:56:42 -0800 Subject: [PATCH 254/761] Sensu Go Output Plugin for Telegraf (#8398) --- README.md | 1 + go.mod | 3 +- go.sum | 89 ++--- plugins/outputs/all/all.go | 1 + plugins/outputs/sensu/README.md | 97 ++++++ plugins/outputs/sensu/sensu.go | 513 ++++++++++++++++++++++++++++ plugins/outputs/sensu/sensu_test.go | 210 ++++++++++++ 7 files changed, 850 insertions(+), 64 deletions(-) create mode 100644 plugins/outputs/sensu/README.md create mode 100644 plugins/outputs/sensu/sensu.go create mode 100644 plugins/outputs/sensu/sensu_test.go diff --git a/README.md b/README.md index ae1e63f369adf..3e2d332fb214f 100644 --- a/README.md +++ b/README.md @@ -443,6 +443,7 @@ For documentation on the latest development code see the [documentation index][d * [prometheus](./plugins/outputs/prometheus_client) * [riemann](./plugins/outputs/riemann) * [riemann_legacy](./plugins/outputs/riemann_legacy) +* [sensu-go](./plugins/outputs/sensu) * [signalfx](./plugins/outputs/signalfx) * [socket_writer](./plugins/outputs/socket_writer) * [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring) diff --git a/go.mod b/go.mod index 818e78c0c8496..fce100462629e 100644 --- a/go.mod +++ b/go.mod @@ -116,10 +116,11 @@ require ( github.com/riemann/riemann-go-client v0.5.0 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect + github.com/sensu/sensu-go/api/core/v2 v2.6.0 github.com/shirou/gopsutil v2.20.9+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/signalfx/golib/v3 v3.3.0 - github.com/sirupsen/logrus v1.4.2 + github.com/sirupsen/logrus v1.6.0 github.com/soniah/gosnmp v1.25.0 github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 github.com/stretchr/testify v1.7.0 diff --git a/go.sum b/go.sum index 322437835292d..6c48bcd5c6861 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -97,7 +96,6 @@ github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaR github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -135,7 +133,6 @@ github.com/aws/smithy-go v1.0.0 h1:hkhcRKG9rJ4Fn+RbfXY7Tz7b3ITLDyolBnLLBhwbg/c= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -167,6 +164,10 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/coreos/etcd v3.3.22+incompatible h1:AnRMUyVdVvh1k7lHe61YEd227+CLoNogQuAypztGSK4= +github.com/coreos/etcd v3.3.22+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 h1:F8nmbiuX+gCz9xvWMi6Ak8HQntB4ATFXP46gaxifbp4= @@ -201,7 +202,6 @@ github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ= -github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -209,6 +209,10 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/echlebek/crock v1.0.1 h1:KbzamClMIfVIkkjq/GTXf+N16KylYBpiaTitO3f1ujg= +github.com/echlebek/crock v1.0.1/go.mod h1:/kvwHRX3ZXHj/kHWJkjXDmzzRow54EJuHtQ/PapL/HI= +github.com/echlebek/timeproxy v1.0.0 h1:V41/v8tmmMDNMA2GrBPI45nlXb3F7+OY+nJz1BqKsCk= +github.com/echlebek/timeproxy v1.0.0/go.mod h1:0dg2Lnb8no/jFwoMQKMTU6iAivgoMptGqSTprhnrRtk= github.com/eclipse/paho.mqtt.golang v1.3.0 h1:MU79lqr3FKNKbSrGN7d7bNYqh8MwWW7Zcx0iG+VIw9I= github.com/eclipse/paho.mqtt.golang v1.3.0/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -274,7 +278,6 @@ github.com/gogo/googleapis v1.3.1 h1:CzMaKrvF6Qa7XtRii064vKBQiyvmY8H8vG1xa1/W1JA github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= @@ -282,10 +285,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -300,7 +301,6 @@ github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+ github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= @@ -309,23 +309,17 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26W17m+OIQgE6gQ24gC1M6pUaMBAbFrTIDtwG/E= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -344,11 +338,9 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -432,6 +424,7 @@ github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGu github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= @@ -456,16 +449,15 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -505,9 +497,11 @@ github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eI github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= @@ -561,14 +555,13 @@ github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2J github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -577,35 +570,29 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -613,6 +600,10 @@ github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6O github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/riemann/riemann-go-client v0.5.0 h1:yPP7tz1vSYJkSZvZFCsMiDsHHXX57x8/fEX3qyEXuAA= github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2eCbV1o7yqVwOBLbShQ= +github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff h1:+6NUiITWwE5q1KO6SAfUX918c+Tab0+tGAM/mtdlUyA= +github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= @@ -621,6 +612,8 @@ github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75 h1:cA+Ubq9qEVI github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sensu/sensu-go/api/core/v2 v2.6.0 h1:hEKPHFZZNDuWTlKr7Kgm2yog65ZdkBUqNesE5qaWEGo= +github.com/sensu/sensu-go/api/core/v2 v2.6.0/go.mod h1:97IK4ZQuvVjWvvoLkp+NgrD6ot30WDRz3LEbFUc/N34= github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v2.20.9+incompatible h1:msXs2frUV+O/JLva9EDLpuJ84PrFsdCTCQex8PUdtkQ= github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -636,10 +629,10 @@ github.com/signalfx/golib/v3 v3.3.0/go.mod h1:GzjWpV0skAXZn7+u9LnkOkiXAx9KKd5XZc github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8KfCSyuwy/VILnROdgCvbQLA5ch0nkbG7lKT0BXw= github.com/signalfx/sapm-proto v0.4.0 h1:5lQX++6FeIjUZEIcnSgBqhOpmSjMkRBW3y/4ZiKMo5E= github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbPU959ZNhM0Fk= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff h1:JcVn27VGCEwd33jyNj+3IqEbOmzAX9f9LILt3SoGPHU= @@ -656,10 +649,9 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -699,11 +691,9 @@ github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= -go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= @@ -720,18 +710,14 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2 h1:y102fOLFqhV41b+4GPiJoa0k/x+pJcEi2/HB1Y5T6fU= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -766,7 +752,6 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -792,21 +777,17 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -816,7 +797,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= @@ -841,7 +821,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -852,13 +831,10 @@ golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 h1:DvY3Zkh7KabQE/kfzMvYvKirSiguP9Q/veMtkYyf0o8= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -866,11 +842,9 @@ golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fq golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= @@ -895,7 +869,6 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190906203814-12febf440ab1 h1:w4Q0TX3lC1NfGcWkzt5wG4ee4E5fUAPqh5myV0efeHI= golang.org/x/tools v0.0.0-20190906203814-12febf440ab1/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -917,7 +890,6 @@ golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4f golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= @@ -932,7 +904,6 @@ gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -945,7 +916,6 @@ google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -953,10 +923,8 @@ google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpC google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -974,9 +942,7 @@ google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -992,9 +958,7 @@ gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzyc gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1026,6 +990,8 @@ gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3 gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE= gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= +gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI= +gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1034,15 +1000,13 @@ gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= @@ -1061,7 +1025,6 @@ k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= modernc.org/httpfs v1.0.0 h1:LtuKNg6JMiaBKVQHKd6Phhvk+2GFp+pUcmDQgRjrds0= diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index dbec69f95b1f5..e183242b91343 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -36,6 +36,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" _ "github.com/influxdata/telegraf/plugins/outputs/riemann" _ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy" + _ "github.com/influxdata/telegraf/plugins/outputs/sensu" _ "github.com/influxdata/telegraf/plugins/outputs/signalfx" _ "github.com/influxdata/telegraf/plugins/outputs/socket_writer" _ "github.com/influxdata/telegraf/plugins/outputs/stackdriver" diff --git a/plugins/outputs/sensu/README.md b/plugins/outputs/sensu/README.md new file mode 100644 index 0000000000000..82e6302767d08 --- /dev/null +++ b/plugins/outputs/sensu/README.md @@ -0,0 +1,97 @@ +# HTTP Output Plugin + +This plugin writes metrics events to [Sensu Go](https://sensu.io) via its +HTTP events API. + +### Configuration: + +```toml +[[outputs.sensu-go]] + ## BACKEND API URL is the Sensu Backend API root URL to send metrics to + ## (protocol, host, and port only). The output plugin will automatically + ## append the corresponding backend API path + ## /api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). + ## + ## Backend Events API reference: + ## https://docs.sensu.io/sensu-go/latest/api/events/ + ## + ## AGENT API URL is the Sensu Agent API root URL to send metrics to + ## (protocol, host, and port only). The output plugin will automatically + ## append the correspeonding agent API path (/events). + ## + ## Agent API Events API reference: + ## https://docs.sensu.io/sensu-go/latest/api/events/ + ## + ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output + ## plugin will use backend_api_url. If backend_api_url and agent_api_url are + ## not provided, the output plugin will default to use an agent_api_url of + ## http://127.0.0.1:3031 + ## + # backend_api_url = "http://127.0.0.1:8080" + # agent_api_url = "http://127.0.0.1:3031" + + ## API KEY is the Sensu Backend API token + ## Generate a new API token via: + ## + ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities + ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf + ## $ sensuctl user create telegraf --group telegraf --password REDACTED + ## $ sensuctl api-key grant telegraf + ## + ## For more information on Sensu RBAC profiles & API tokens, please visit: + ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ + ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ + ## + # api_key = "${SENSU_API_KEY}" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Timeout for HTTP message + # timeout = "5s" + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + + ## Sensu Event details + ## + ## Below are the event details to be sent to Sensu. The main portions of the + ## event are the check, entity, and metrics specifications. For more information + ## on Sensu events and its components, please visit: + ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events + ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks + ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities + ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics + ## + ## Check specification + ## The check name is the name to give the Sensu check associated with the event + ## created. This maps to check.metatadata.name in the event. + [outputs.sensu-go.check] + name = "telegraf" + + ## Entity specification + ## Configure the entity name and namespace, if necessary. This will be part of + ## the entity.metadata in the event. + ## + ## NOTE: if the output plugin is configured to send events to a + ## backend_api_url and entity_name is not set, the value returned by + ## os.Hostname() will be used; if the output plugin is configured to send + ## events to an agent_api_url, entity_name and entity_namespace are not used. + # [outputs.sensu-go.entity] + # name = "server-01" + # namespace = "default" + + ## Metrics specification + ## Configure the tags for the metrics that are sent as part of the Sensu event + # [outputs.sensu-go.tags] + # source = "telegraf" + + ## Configure the handler(s) for processing the provided metrics + # [outputs.sensu-go.metrics] + # handlers = ["influxdb","elasticsearch"] +``` diff --git a/plugins/outputs/sensu/sensu.go b/plugins/outputs/sensu/sensu.go new file mode 100644 index 0000000000000..3674765ef9b44 --- /dev/null +++ b/plugins/outputs/sensu/sensu.go @@ -0,0 +1,513 @@ +package sensu + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "net/url" + "os" + "path" + "strconv" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/outputs" +) + +const ( + defaultUrl = "http://127.0.0.1:3031" + defaultClientTimeout = 5 * time.Second + defaultContentType = "application/json; charset=utf-8" +) + +type OutputMetadata struct { + Name string `json:"name"` +} + +type OutputEntity struct { + Metadata *OutputMetadata `json:"metadata"` +} + +type OutputCheck struct { + Metadata *OutputMetadata `json:"metadata"` + Status int `json:"status"` + Output string `json:"output"` + Issued int64 `json:"issued"` + OutputMetricHandlers []string `json:"output_metric_handlers"` +} + +type OutputMetrics struct { + Handlers []string `json:"handlers"` + Metrics []*OutputMetric `json:"points"` +} + +type OutputMetric struct { + Name string `json:"name"` + Tags []*OutputTag `json:"tags"` + Value interface{} `json:"value"` + Timestamp int64 `json:"timestamp"` +} + +type OutputTag struct { + Name string `json:"name"` + Value string `json:"value"` +} + +type OutputEvent struct { + Entity *OutputEntity `json:"entity,omitempty"` + Check *OutputCheck `json:"check"` + Metrics *OutputMetrics `json:"metrics"` + Timestamp int64 `json:"timestamp"` +} + +type SensuEntity struct { + Name *string `toml:"name"` + Namespace *string `toml:"namespace"` +} + +type SensuCheck struct { + Name *string `toml:"name"` +} + +type SensuMetrics struct { + Handlers []string `toml:"handlers"` +} + +type Sensu struct { + ApiKey *string `toml:"api_key"` + AgentApiUrl *string `toml:"agent_api_url"` + BackendApiUrl *string `toml:"backend_api_url"` + Entity *SensuEntity `toml:"entity"` + Tags map[string]string `toml:"tags"` + Metrics *SensuMetrics `toml:"metrics"` + Check *SensuCheck `toml:"check"` + + Timeout config.Duration `toml:"timeout"` + ContentEncoding string `toml:"content_encoding"` + + EndpointUrl string + OutEntity *OutputEntity + + Log telegraf.Logger `toml:"-"` + + tls.ClientConfig + client *http.Client +} + +var sampleConfig = ` + ## BACKEND API URL is the Sensu Backend API root URL to send metrics to + ## (protocol, host, and port only). The output plugin will automatically + ## append the corresponding backend API path + ## /api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). + ## + ## Backend Events API reference: + ## https://docs.sensu.io/sensu-go/latest/api/events/ + ## + ## AGENT API URL is the Sensu Agent API root URL to send metrics to + ## (protocol, host, and port only). The output plugin will automatically + ## append the correspeonding agent API path (/events). + ## + ## Agent API Events API reference: + ## https://docs.sensu.io/sensu-go/latest/api/events/ + ## + ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output + ## plugin will use backend_api_url. If backend_api_url and agent_api_url are + ## not provided, the output plugin will default to use an agent_api_url of + ## http://127.0.0.1:3031 + ## + # backend_api_url = "http://127.0.0.1:8080" + # agent_api_url = "http://127.0.0.1:3031" + + ## API KEY is the Sensu Backend API token + ## Generate a new API token via: + ## + ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities + ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf + ## $ sensuctl user create telegraf --group telegraf --password REDACTED + ## $ sensuctl api-key grant telegraf + ## + ## For more information on Sensu RBAC profiles & API tokens, please visit: + ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ + ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ + ## + # api_key = "${SENSU_API_KEY}" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Timeout for HTTP message + # timeout = "5s" + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + + ## Sensu Event details + ## + ## Below are the event details to be sent to Sensu. The main portions of the + ## event are the check, entity, and metrics specifications. For more information + ## on Sensu events and its components, please visit: + ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events + ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks + ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities + ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics + ## + ## Check specification + ## The check name is the name to give the Sensu check associated with the event + ## created. This maps to check.metatadata.name in the event. + [outputs.sensu-go.check] + name = "telegraf" + + ## Entity specification + ## Configure the entity name and namespace, if necessary. This will be part of + ## the entity.metadata in the event. + ## + ## NOTE: if the output plugin is configured to send events to a + ## backend_api_url and entity_name is not set, the value returned by + ## os.Hostname() will be used; if the output plugin is configured to send + ## events to an agent_api_url, entity_name and entity_namespace are not used. + # [outputs.sensu-go.entity] + # name = "server-01" + # namespace = "default" + + ## Metrics specification + ## Configure the tags for the metrics that are sent as part of the Sensu event + # [outputs.sensu-go.tags] + # source = "telegraf" + + ## Configure the handler(s) for processing the provided metrics + # [outputs.sensu-go.metrics] + # handlers = ["influxdb","elasticsearch"] +` + +// Description provides a description of the plugin +func (s *Sensu) Description() string { + return "Send aggregate metrics to Sensu Monitor" +} + +// SampleConfig provides a sample configuration for the plugin +func (s *Sensu) SampleConfig() string { + return sampleConfig +} + +func (s *Sensu) createClient() (*http.Client, error) { + tlsCfg, err := s.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + }, + Timeout: time.Duration(s.Timeout), + } + + return client, nil +} + +func (s *Sensu) Connect() error { + err := s.setEndpointUrl() + if err != nil { + return err + } + + err = s.setEntity() + if err != nil { + return err + } + + client, err := s.createClient() + if err != nil { + return err + } + + s.client = client + + return nil +} + +func (s *Sensu) Close() error { + s.client.CloseIdleConnections() + return nil +} + +func (s *Sensu) Write(metrics []telegraf.Metric) error { + var points []*OutputMetric + for _, metric := range metrics { + // Add tags from config to each metric point + tagList := make([]*OutputTag, 0, len(s.Tags)+len(metric.TagList())) + for name, value := range s.Tags { + tag := &OutputTag{ + Name: name, + Value: value, + } + tagList = append(tagList, tag) + } + for _, tagSet := range metric.TagList() { + tag := &OutputTag{ + Name: tagSet.Key, + Value: tagSet.Value, + } + tagList = append(tagList, tag) + } + + // Get all valid numeric values, convert to float64 + for _, fieldSet := range metric.FieldList() { + key := fieldSet.Key + value := getFloat(fieldSet.Value) + // JSON does not support these special values + if math.IsInf(value, 1) { + s.Log.Debugf("metric %s returned positive infinity, setting value to %f", key, math.MaxFloat64) + value = math.MaxFloat64 + } + if math.IsInf(value, -1) { + s.Log.Debugf("metric %s returned negative infinity, setting value to %f", key, -math.MaxFloat64) + value = -math.MaxFloat64 + } + if math.IsNaN(value) { + s.Log.Debugf("metric %s returned as non a number, skipping", key) + continue + } + + point := &OutputMetric{ + Name: metric.Name() + "." + key, + Tags: tagList, + Timestamp: metric.Time().Unix(), + Value: value, + } + points = append(points, point) + } + } + + reqBody, err := s.encodeToJson(points) + if err != nil { + return err + } + + return s.write(reqBody) +} + +func (s *Sensu) write(reqBody []byte) error { + var reqBodyBuffer io.Reader = bytes.NewBuffer(reqBody) + method := http.MethodPost + + if s.ContentEncoding == "gzip" { + rc, err := internal.CompressWithGzip(reqBodyBuffer) + if err != nil { + return err + } + defer rc.Close() + reqBodyBuffer = rc + } + + req, err := http.NewRequest(method, s.EndpointUrl, reqBodyBuffer) + if err != nil { + return err + } + + req.Header.Set("User-Agent", internal.ProductToken()) + + req.Header.Set("Content-Type", defaultContentType) + if s.ContentEncoding == "gzip" { + req.Header.Set("Content-Encoding", "gzip") + } + + if s.ApiKey != nil { + req.Header.Set("Authorization", "Key "+*s.ApiKey) + } + + resp, err := s.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated { + bodyData, err := ioutil.ReadAll(resp.Body) + if err != nil { + s.Log.Debugf("Couldn't read response body: %v", err) + } + s.Log.Debugf("Failed to write, response: %v", string(bodyData)) + if resp.StatusCode < 400 || resp.StatusCode > 499 { + return fmt.Errorf("when writing to [%s] received status code: %d", s.EndpointUrl, resp.StatusCode) + } + } + + return nil +} + +// Resolves the event write endpoint +func (s *Sensu) setEndpointUrl() error { + var ( + endpointUrl string + path_suffix string + ) + + if s.BackendApiUrl != nil { + endpointUrl = *s.BackendApiUrl + namespace := "default" + if s.Entity != nil && s.Entity.Namespace != nil { + namespace = *s.Entity.Namespace + } + path_suffix = "/api/core/v2/namespaces/" + namespace + "/events" + } else if s.AgentApiUrl != nil { + endpointUrl = *s.AgentApiUrl + path_suffix = "/events" + } + + if len(endpointUrl) == 0 { + s.Log.Debugf("no backend or agent API URL provided, falling back to default agent API URL %s", defaultUrl) + endpointUrl = defaultUrl + path_suffix = "/events" + } + + u, err := url.Parse(endpointUrl) + if err != nil { + return err + } + + u.Path = path.Join(u.Path, path_suffix) + s.EndpointUrl = u.String() + + return nil +} + +func (s *Sensu) Init() error { + if len(s.ContentEncoding) != 0 { + validEncoding := []string{"identity", "gzip"} + if !choice.Contains(s.ContentEncoding, validEncoding) { + return fmt.Errorf("Unsupported content_encoding [%q] specified", s.ContentEncoding) + } + } + + if s.BackendApiUrl != nil && s.ApiKey == nil { + return fmt.Errorf("backend_api_url [%q] specified, but no API Key provided", *s.BackendApiUrl) + } + + return nil +} + +func init() { + outputs.Add("sensu-go", func() telegraf.Output { + // Default configuration values + + // make a string from the defaultUrl const + agentApiUrl := defaultUrl + + return &Sensu{ + AgentApiUrl: &agentApiUrl, + Timeout: config.Duration(defaultClientTimeout), + ContentEncoding: "identity", + } + }) +} + +func (s *Sensu) encodeToJson(metricPoints []*OutputMetric) ([]byte, error) { + timestamp := time.Now().Unix() + + check, err := s.getCheck(metricPoints) + if err != nil { + return []byte{}, err + } + + output, err := json.Marshal(&OutputEvent{ + Entity: s.OutEntity, + Check: check, + Metrics: &OutputMetrics{ + Handlers: s.getHandlers(), + Metrics: metricPoints, + }, + Timestamp: timestamp, + }) + + return output, err +} + +// Constructs the entity payload +// Throws when no entity name is provided and fails resolve to hostname +func (s *Sensu) setEntity() error { + if s.BackendApiUrl != nil { + var entityName string + if s.Entity != nil && s.Entity.Name != nil { + entityName = *s.Entity.Name + } else { + defaultHostname, err := os.Hostname() + if err != nil { + return fmt.Errorf("resolving hostname failed: %v", err) + } + entityName = defaultHostname + } + + s.OutEntity = &OutputEntity{ + Metadata: &OutputMetadata{ + Name: entityName, + }, + } + return nil + } + s.OutEntity = &OutputEntity{} + return nil +} + +// Constructs the check payload +// Throws if check name is not provided +func (s *Sensu) getCheck(metricPoints []*OutputMetric) (*OutputCheck, error) { + count := len(metricPoints) + + if s.Check == nil || s.Check.Name == nil { + return &OutputCheck{}, fmt.Errorf("missing check name") + } + + return &OutputCheck{ + Metadata: &OutputMetadata{ + Name: *s.Check.Name, + }, + Status: 0, // Always OK + Issued: time.Now().Unix(), + Output: "Telegraf agent processed " + strconv.Itoa(count) + " metrics", + OutputMetricHandlers: s.getHandlers(), + }, nil +} + +func (s *Sensu) getHandlers() []string { + if s.Metrics == nil || s.Metrics.Handlers == nil { + return []string{} + } + return s.Metrics.Handlers +} + +func getFloat(unk interface{}) float64 { + switch i := unk.(type) { + case float64: + return i + case float32: + return float64(i) + case int64: + return float64(i) + case int32: + return float64(i) + case int: + return float64(i) + case uint64: + return float64(i) + case uint32: + return float64(i) + case uint: + return float64(i) + default: + return math.NaN() + } +} diff --git a/plugins/outputs/sensu/sensu_test.go b/plugins/outputs/sensu/sensu_test.go new file mode 100644 index 0000000000000..4184a9976fa89 --- /dev/null +++ b/plugins/outputs/sensu/sensu_test.go @@ -0,0 +1,210 @@ +package sensu + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "math" + "net/http" + "net/http/httptest" + "testing" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/testutil" + corev2 "github.com/sensu/sensu-go/api/core/v2" + "github.com/stretchr/testify/require" +) + +func TestResolveEventEndpointUrl(t *testing.T) { + agentApiUrl := "http://127.0.0.1:3031" + backendApiUrl := "http://127.0.0.1:8080" + entityNamespace := "test-namespace" + emptyString := "" + tests := []struct { + name string + plugin *Sensu + expectedEndpointUrl string + }{ + { + name: "agent event endpoint", + plugin: &Sensu{ + AgentApiUrl: &agentApiUrl, + Log: testutil.Logger{}, + }, + expectedEndpointUrl: "http://127.0.0.1:3031/events", + }, + { + name: "backend event endpoint with default namespace", + plugin: &Sensu{ + AgentApiUrl: &agentApiUrl, + BackendApiUrl: &backendApiUrl, + Log: testutil.Logger{}, + }, + expectedEndpointUrl: "http://127.0.0.1:8080/api/core/v2/namespaces/default/events", + }, + { + name: "backend event endpoint with namespace declared", + plugin: &Sensu{ + AgentApiUrl: &agentApiUrl, + BackendApiUrl: &backendApiUrl, + Entity: &SensuEntity{ + Namespace: &entityNamespace, + }, + Log: testutil.Logger{}, + }, + expectedEndpointUrl: "http://127.0.0.1:8080/api/core/v2/namespaces/test-namespace/events", + }, + { + name: "agent event endpoint due to empty AgentApiUrl", + plugin: &Sensu{ + AgentApiUrl: &emptyString, + Log: testutil.Logger{}, + }, + expectedEndpointUrl: "http://127.0.0.1:3031/events", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.plugin.setEndpointUrl() + require.Equal(t, err, error(nil)) + require.Equal(t, tt.expectedEndpointUrl, tt.plugin.EndpointUrl) + }) + } +} + +func TestConnectAndWrite(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + testUrl := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) + testApiKey := "a0b1c2d3-e4f5-g6h7-i8j9-k0l1m2n3o4p5" + testCheck := "telegraf" + testEntity := "entity1" + testNamespace := "default" + testHandler := "influxdb" + testTagName := "myTagName" + testTagValue := "myTagValue" + expectedAuthHeader := fmt.Sprintf("Key %s", testApiKey) + expectedUrl := fmt.Sprintf("/api/core/v2/namespaces/%s/events", testNamespace) + expectedPointName := "cpu" + expectedPointValue := float64(42) + + plugin := &Sensu{ + AgentApiUrl: nil, + BackendApiUrl: &testUrl, + ApiKey: &testApiKey, + Check: &SensuCheck{ + Name: &testCheck, + }, + Entity: &SensuEntity{ + Name: &testEntity, + Namespace: &testNamespace, + }, + Metrics: &SensuMetrics{ + Handlers: []string{testHandler}, + }, + Tags: map[string]string{testTagName: testTagValue}, + Log: testutil.Logger{}, + } + + t.Run("connect", func(t *testing.T) { + err := plugin.Connect() + require.NoError(t, err) + }) + + t.Run("write", func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, expectedUrl, r.URL.String()) + require.Equal(t, expectedAuthHeader, (r.Header.Get("Authorization"))) + // let's make sure what we received is a valid Sensu event that contains all of the expected data + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + receivedEvent := &corev2.Event{} + err = json.Unmarshal(body, receivedEvent) + require.NoError(t, err) + require.Equal(t, testCheck, receivedEvent.Check.Name) + require.Equal(t, testEntity, receivedEvent.Entity.Name) + require.NotEmpty(t, receivedEvent.Metrics) + require.Equal(t, true, choice.Contains(testHandler, receivedEvent.Metrics.Handlers)) + require.NotEmpty(t, receivedEvent.Metrics.Points) + pointFound := false + tagFound := false + for _, p := range receivedEvent.Metrics.Points { + if p.Name == expectedPointName+".value" && p.Value == expectedPointValue { + pointFound = true + require.NotEmpty(t, p.Tags) + for _, t := range p.Tags { + if t.Name == testTagName && t.Value == testTagValue { + tagFound = true + } + } + } + } + require.Equal(t, true, pointFound) + require.Equal(t, true, tagFound) + w.WriteHeader(http.StatusCreated) + }) + err := plugin.Write([]telegraf.Metric{testutil.TestMetric(expectedPointValue, expectedPointName)}) + require.NoError(t, err) + }) +} + +func TestGetFloat(t *testing.T) { + tests := []struct { + name string + value interface{} + expectedReturn float64 + }{ + { + name: "getfloat with float64", + value: float64(42), + expectedReturn: 42, + }, + { + name: "getfloat with float32", + value: float32(42), + expectedReturn: 42, + }, + { + name: "getfloat with int64", + value: int64(42), + expectedReturn: 42, + }, + { + name: "getfloat with int32", + value: int32(42), + expectedReturn: 42, + }, + { + name: "getfloat with int", + value: int(42), + expectedReturn: 42, + }, + { + name: "getfloat with uint64", + value: uint64(42), + expectedReturn: 42, + }, + { + name: "getfloat with uint32", + value: uint32(42), + expectedReturn: 42, + }, + { + name: "getfloat with uint", + value: uint(42), + expectedReturn: 42, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.expectedReturn, getFloat(tt.value)) + }) + } + // Since math.NaN() == math.NaN() returns false + t.Run("getfloat NaN special case", func(t *testing.T) { + f := getFloat("42") + require.True(t, math.IsNaN(f)) + }) +} From 786dca2d5efbfd178025df9a216684ad0ebb2605 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20=C3=81lvarez?= <1671935+kir4h@users.noreply.github.com> Date: Wed, 3 Mar 2021 20:02:04 +0100 Subject: [PATCH 255/761] [plugins/input/docker] Make perdevice affect also cpu and add class granularity through perdevice_include/total_include (#7312) --- plugins/inputs/docker/README.md | 25 ++- plugins/inputs/docker/docker.go | 226 ++++++++++++++++-------- plugins/inputs/docker/docker_test.go | 249 ++++++++++++++++++++++++++- testutil/accumulator.go | 1 - testutil/testutil.go | 7 + 5 files changed, 428 insertions(+), 80 deletions(-) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 95394c94e9c44..1a8aca6ae924f 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -43,12 +43,29 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/) ## Timeout for docker list, info, and stats commands timeout = "5s" - ## Whether to report for each container per-device blkio (8:0, 8:1...) and - ## network (eth0, eth1, ...) stats or not + ## Whether to report for each container per-device blkio (8:0, 8:1...), + ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. + ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. + ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting + ## is honored. perdevice = true - - ## Whether to report for each container total blkio and network stats or not + + ## Specifies for which classes a per-device metric should be issued + ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) + ## Please note that this setting has no effect if 'perdevice' is set to 'true' + # perdevice_include = ["cpu"] + + ## Whether to report for each container total blkio and network stats or not. + ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. + ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting + ## is honored. total = false + + ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. + ## Possible values are 'cpu', 'blkio' and 'network' + ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. + ## Please note that this setting has no effect if 'total' is set to 'false' + # total_include = ["cpu", "blkio", "network"] ## docker labels to include and exclude as tags. Globs accepted. ## Note that an empty array for both will include all labels as tags diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index f795d5b029be4..d3f4c23976cee 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -19,6 +19,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/internal/docker" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -31,12 +32,14 @@ type Docker struct { GatherServices bool `toml:"gather_services"` - Timeout internal.Duration - PerDevice bool `toml:"perdevice"` - Total bool `toml:"total"` - TagEnvironment []string `toml:"tag_env"` - LabelInclude []string `toml:"docker_label_include"` - LabelExclude []string `toml:"docker_label_exclude"` + Timeout internal.Duration + PerDevice bool `toml:"perdevice"` + PerDeviceInclude []string `toml:"perdevice_include"` + Total bool `toml:"total"` + TotalInclude []string `toml:"total_include"` + TagEnvironment []string `toml:"tag_env"` + LabelInclude []string `toml:"docker_label_include"` + LabelExclude []string `toml:"docker_label_exclude"` ContainerInclude []string `toml:"container_name_include"` ContainerExclude []string `toml:"container_name_exclude"` @@ -72,12 +75,21 @@ const ( PB = 1000 * TB defaultEndpoint = "unix:///var/run/docker.sock" + + perDeviceIncludeDeprecationWarning = "'perdevice' setting is set to 'true' so 'blkio' and 'network' metrics will" + + "be collected. Please set it to 'false' and use 'perdevice_include' instead to control this behaviour as " + + "'perdevice' will be deprecated" + + totalIncludeDeprecationWarning = "'total' setting is set to 'false' so 'blkio' and 'network' metrics will not be " + + "collected. Please set it to 'true' and use 'total_include' instead to control this behaviour as 'total' " + + "will be deprecated" ) var ( - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) - containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} - now = time.Now + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) + containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} + containerMetricClasses = []string{"cpu", "network", "blkio"} + now = time.Now ) var sampleConfig = ` @@ -110,13 +122,30 @@ var sampleConfig = ` ## Timeout for docker list, info, and stats commands timeout = "5s" - ## Whether to report for each container per-device blkio (8:0, 8:1...) and - ## network (eth0, eth1, ...) stats or not + ## Whether to report for each container per-device blkio (8:0, 8:1...), + ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. + ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. + ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting + ## is honored. perdevice = true - ## Whether to report for each container total blkio and network stats or not + ## Specifies for which classes a per-device metric should be issued + ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) + ## Please note that this setting has no effect if 'perdevice' is set to 'true' + # perdevice_include = ["cpu"] + + ## Whether to report for each container total blkio and network stats or not. + ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. + ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting + ## is honored. total = false + ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. + ## Possible values are 'cpu', 'blkio' and 'network' + ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. + ## Please note that this setting has no effect if 'total' is set to 'false' + # total_include = ["cpu", "blkio", "network"] + ## Which environment variables should we use as a tag ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] @@ -141,6 +170,41 @@ func (d *Docker) Description() string { return "Read metrics about docker containers" } +func (d *Docker) Init() error { + err := choice.CheckSlice(d.PerDeviceInclude, containerMetricClasses) + if err != nil { + return fmt.Errorf("error validating 'perdevice_include' setting : %v", err) + } + + err = choice.CheckSlice(d.TotalInclude, containerMetricClasses) + if err != nil { + return fmt.Errorf("error validating 'total_include' setting : %v", err) + } + + // Temporary logic needed for backwards compatibility until 'perdevice' setting is removed. + if d.PerDevice { + d.Log.Warn(perDeviceIncludeDeprecationWarning) + if !choice.Contains("network", d.PerDeviceInclude) { + d.PerDeviceInclude = append(d.PerDeviceInclude, "network") + } + if !choice.Contains("blkio", d.PerDeviceInclude) { + d.PerDeviceInclude = append(d.PerDeviceInclude, "blkio") + } + } + + // Temporary logic needed for backwards compatibility until 'total' setting is removed. + if !d.Total { + d.Log.Warn(totalIncludeDeprecationWarning) + if choice.Contains("cpu", d.TotalInclude) { + d.TotalInclude = []string{"cpu"} + } else { + d.TotalInclude = []string{} + } + } + + return nil +} + // Gather metrics from the docker server. func (d *Docker) Gather(acc telegraf.Accumulator) error { if d.client == nil { @@ -516,7 +580,7 @@ func (d *Docker) gatherContainerInspect( for _, envvar := range info.Config.Env { for _, configvar := range d.TagEnvironment { dockEnv := strings.SplitN(envvar, "=", 2) - //check for presence of tag in whitelist + // check for presence of tag in whitelist if len(dockEnv) == 2 && len(strings.TrimSpace(dockEnv[1])) != 0 && configvar == dockEnv[0] { tags[dockEnv[0]] = dockEnv[1] } @@ -563,7 +627,7 @@ func (d *Docker) gatherContainerInspect( } } - parseContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType) + parseContainerStats(v, acc, tags, container.ID, d.PerDeviceInclude, d.TotalInclude, daemonOSType) return nil } @@ -573,8 +637,8 @@ func parseContainerStats( acc telegraf.Accumulator, tags map[string]string, id string, - perDevice bool, - total bool, + perDeviceInclude []string, + totalInclude []string, daemonOSType string, ) { tm := stat.Read @@ -643,48 +707,52 @@ func parseContainerStats( acc.AddFields("docker_container_mem", memfields, tags, tm) - cpufields := map[string]interface{}{ - "usage_total": stat.CPUStats.CPUUsage.TotalUsage, - "usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode, - "usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode, - "usage_system": stat.CPUStats.SystemUsage, - "throttling_periods": stat.CPUStats.ThrottlingData.Periods, - "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods, - "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime, - "container_id": id, - } - - if daemonOSType != "windows" { - previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage - previousSystem := stat.PreCPUStats.SystemUsage - cpuPercent := CalculateCPUPercentUnix(previousCPU, previousSystem, stat) - cpufields["usage_percent"] = cpuPercent - } else { - cpuPercent := calculateCPUPercentWindows(stat) - cpufields["usage_percent"] = cpuPercent - } + if choice.Contains("cpu", totalInclude) { + cpufields := map[string]interface{}{ + "usage_total": stat.CPUStats.CPUUsage.TotalUsage, + "usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode, + "usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode, + "usage_system": stat.CPUStats.SystemUsage, + "throttling_periods": stat.CPUStats.ThrottlingData.Periods, + "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods, + "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime, + "container_id": id, + } - cputags := copyTags(tags) - cputags["cpu"] = "cpu-total" - acc.AddFields("docker_container_cpu", cpufields, cputags, tm) + if daemonOSType != "windows" { + previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage + previousSystem := stat.PreCPUStats.SystemUsage + cpuPercent := CalculateCPUPercentUnix(previousCPU, previousSystem, stat) + cpufields["usage_percent"] = cpuPercent + } else { + cpuPercent := calculateCPUPercentWindows(stat) + cpufields["usage_percent"] = cpuPercent + } - // If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs - // (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400) - var percpuusage []uint64 - if stat.CPUStats.OnlineCPUs > 0 { - percpuusage = stat.CPUStats.CPUUsage.PercpuUsage[:stat.CPUStats.OnlineCPUs] - } else { - percpuusage = stat.CPUStats.CPUUsage.PercpuUsage + cputags := copyTags(tags) + cputags["cpu"] = "cpu-total" + acc.AddFields("docker_container_cpu", cpufields, cputags, tm) } - for i, percpu := range percpuusage { - percputags := copyTags(tags) - percputags["cpu"] = fmt.Sprintf("cpu%d", i) - fields := map[string]interface{}{ - "usage_total": percpu, - "container_id": id, + if choice.Contains("cpu", perDeviceInclude) { + // If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs + // (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400) + var percpuusage []uint64 + if stat.CPUStats.OnlineCPUs > 0 { + percpuusage = stat.CPUStats.CPUUsage.PercpuUsage[:stat.CPUStats.OnlineCPUs] + } else { + percpuusage = stat.CPUStats.CPUUsage.PercpuUsage + } + + for i, percpu := range percpuusage { + percputags := copyTags(tags) + percputags["cpu"] = fmt.Sprintf("cpu%d", i) + fields := map[string]interface{}{ + "usage_total": percpu, + "container_id": id, + } + acc.AddFields("docker_container_cpu", fields, percputags, tm) } - acc.AddFields("docker_container_cpu", fields, percputags, tm) } totalNetworkStatMap := make(map[string]interface{}) @@ -701,12 +769,12 @@ func parseContainerStats( "container_id": id, } // Create a new network tag dictionary for the "network" tag - if perDevice { + if choice.Contains("network", perDeviceInclude) { nettags := copyTags(tags) nettags["network"] = network acc.AddFields("docker_container_net", netfields, nettags, tm) } - if total { + if choice.Contains("network", totalInclude) { for field, value := range netfields { if field == "container_id" { continue @@ -733,27 +801,21 @@ func parseContainerStats( } // totalNetworkStatMap could be empty if container is running with --net=host. - if total && len(totalNetworkStatMap) != 0 { + if choice.Contains("network", totalInclude) && len(totalNetworkStatMap) != 0 { nettags := copyTags(tags) nettags["network"] = "total" totalNetworkStatMap["container_id"] = id acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, tm) } - gatherBlockIOMetrics(stat, acc, tags, tm, id, perDevice, total) + perDeviceBlkio := choice.Contains("blkio", perDeviceInclude) + totalBlkio := choice.Contains("blkio", totalInclude) + + gatherBlockIOMetrics(stat, acc, tags, tm, id, perDeviceBlkio, totalBlkio) } -func gatherBlockIOMetrics( - stat *types.StatsJSON, - acc telegraf.Accumulator, - tags map[string]string, - tm time.Time, - id string, - perDevice bool, - total bool, -) { - blkioStats := stat.BlkioStats - // Make a map of devices to their block io stats +// Make a map of devices to their block io stats +func getDeviceStatMap(blkioStats types.BlkioStats) map[string]map[string]interface{} { deviceStatMap := make(map[string]map[string]interface{}) for _, metric := range blkioStats.IoServiceBytesRecursive { @@ -811,6 +873,20 @@ func gatherBlockIOMetrics( device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) deviceStatMap[device]["sectors_recursive"] = metric.Value } + return deviceStatMap +} + +func gatherBlockIOMetrics( + stat *types.StatsJSON, + acc telegraf.Accumulator, + tags map[string]string, + tm time.Time, + id string, + perDevice bool, + total bool, +) { + blkioStats := stat.BlkioStats + deviceStatMap := getDeviceStatMap(blkioStats) totalStatMap := make(map[string]interface{}) for device, fields := range deviceStatMap { @@ -942,12 +1018,14 @@ func (d *Docker) getNewClient() (Client, error) { func init() { inputs.Add("docker", func() telegraf.Input { return &Docker{ - PerDevice: true, - Timeout: internal.Duration{Duration: time.Second * 5}, - Endpoint: defaultEndpoint, - newEnvClient: NewEnvClient, - newClient: NewClient, - filtersCreated: false, + PerDevice: true, + PerDeviceInclude: []string{"cpu"}, + TotalInclude: []string{"cpu", "blkio", "network"}, + Timeout: internal.Duration{Duration: time.Second * 5}, + Endpoint: defaultEndpoint, + newEnvClient: NewEnvClient, + newClient: NewClient, + filtersCreated: false, } }) } diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index d8700217c307d..3272abec066f9 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -4,6 +4,7 @@ import ( "context" "crypto/tls" "io/ioutil" + "reflect" "sort" "strings" "testing" @@ -12,6 +13,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -110,7 +112,7 @@ func TestDockerGatherContainerStats(t *testing.T) { "container_image": "redis/image", } - parseContainerStats(stats, &acc, tags, "123456789", true, true, "linux") + parseContainerStats(stats, &acc, tags, "123456789", containerMetricClasses, containerMetricClasses, "linux") // test docker_container_net measurement netfields := map[string]interface{}{ @@ -396,6 +398,8 @@ func TestContainerLabels(t *testing.T) { newClient: newClientFunc, LabelInclude: tt.include, LabelExclude: tt.exclude, + Total: true, + TotalInclude: []string{"cpu"}, } err := d.Gather(&acc) @@ -751,6 +755,9 @@ func TestDockerGatherInfo(t *testing.T) { newClient: newClient, TagEnvironment: []string{"ENVVAR1", "ENVVAR2", "ENVVAR3", "ENVVAR5", "ENVVAR6", "ENVVAR7", "ENVVAR8", "ENVVAR9"}, + PerDeviceInclude: []string{"cpu", "network", "blkio"}, + Total: true, + TotalInclude: []string{""}, } err := acc.GatherError(d.Gather) @@ -1117,3 +1124,243 @@ func TestHostnameFromID(t *testing.T) { } } + +func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { + type args struct { + stat *types.StatsJSON + acc telegraf.Accumulator + tags map[string]string + id string + perDeviceInclude []string + totalInclude []string + daemonOSType string + } + + var ( + testDate = time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC) + metricCpuTotal = testutil.MustMetric( + "docker_container_cpu", + map[string]string{ + "cpu": "cpu-total", + }, + map[string]interface{}{}, + testDate) + + metricCpu0 = testutil.MustMetric( + "docker_container_cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{}, + testDate) + metricCpu1 = testutil.MustMetric( + "docker_container_cpu", + map[string]string{ + "cpu": "cpu1", + }, + map[string]interface{}{}, + testDate) + + metricNetworkTotal = testutil.MustMetric( + "docker_container_net", + map[string]string{ + "network": "total", + }, + map[string]interface{}{}, + testDate) + + metricNetworkEth0 = testutil.MustMetric( + "docker_container_net", + map[string]string{ + "network": "eth0", + }, + map[string]interface{}{}, + testDate) + + metricNetworkEth1 = testutil.MustMetric( + "docker_container_net", + map[string]string{ + "network": "eth0", + }, + map[string]interface{}{}, + testDate) + metricBlkioTotal = testutil.MustMetric( + "docker_container_blkio", + map[string]string{ + "device": "total", + }, + map[string]interface{}{}, + testDate) + metricBlkio6_0 = testutil.MustMetric( + "docker_container_blkio", + map[string]string{ + "device": "6:0", + }, + map[string]interface{}{}, + testDate) + metricBlkio6_1 = testutil.MustMetric( + "docker_container_blkio", + map[string]string{ + "device": "6:1", + }, + map[string]interface{}{}, + testDate) + ) + stats := testStats() + tests := []struct { + name string + args args + expected []telegraf.Metric + }{ + { + name: "Per device and total metrics enabled", + args: args{ + stat: stats, + perDeviceInclude: containerMetricClasses, + totalInclude: containerMetricClasses, + }, + expected: []telegraf.Metric{ + metricCpuTotal, metricCpu0, metricCpu1, + metricNetworkTotal, metricNetworkEth0, metricNetworkEth1, + metricBlkioTotal, metricBlkio6_0, metricBlkio6_1, + }, + }, + { + name: "Per device metrics enabled", + args: args{ + stat: stats, + perDeviceInclude: containerMetricClasses, + totalInclude: []string{}, + }, + expected: []telegraf.Metric{ + metricCpu0, metricCpu1, + metricNetworkEth0, metricNetworkEth1, + metricBlkio6_0, metricBlkio6_1, + }, + }, + { + name: "Total metrics enabled", + args: args{ + stat: stats, + perDeviceInclude: []string{}, + totalInclude: containerMetricClasses, + }, + expected: []telegraf.Metric{metricCpuTotal, metricNetworkTotal, metricBlkioTotal}, + }, + { + name: "Per device and total metrics disabled", + args: args{ + stat: stats, + perDeviceInclude: []string{}, + totalInclude: []string{}, + }, + expected: []telegraf.Metric{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + parseContainerStats(tt.args.stat, &acc, tt.args.tags, tt.args.id, tt.args.perDeviceInclude, + tt.args.totalInclude, tt.args.daemonOSType) + + actual := FilterMetrics(acc.GetTelegrafMetrics(), func(m telegraf.Metric) bool { + return choice.Contains(m.Name(), + []string{"docker_container_cpu", "docker_container_net", "docker_container_blkio"}) + }) + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.OnlyTags(), testutil.SortMetrics()) + + }) + } +} + +func TestDocker_Init(t *testing.T) { + type fields struct { + PerDevice bool + PerDeviceInclude []string + Total bool + TotalInclude []string + } + tests := []struct { + name string + fields fields + wantErr bool + wantPerDeviceInclude []string + wantTotalInclude []string + }{ + { + "Unsupported perdevice_include setting", + fields{ + PerDevice: false, + PerDeviceInclude: []string{"nonExistentClass"}, + Total: false, + TotalInclude: []string{"cpu"}, + }, + true, + []string{}, + []string{}, + }, + { + "Unsupported total_include setting", + fields{ + PerDevice: false, + PerDeviceInclude: []string{"cpu"}, + Total: false, + TotalInclude: []string{"nonExistentClass"}, + }, + true, + []string{}, + []string{}, + }, + { + "PerDevice true adds network and blkio", + fields{ + PerDevice: true, + PerDeviceInclude: []string{"cpu"}, + Total: true, + TotalInclude: []string{"cpu"}, + }, + false, + []string{"cpu", "network", "blkio"}, + []string{"cpu"}, + }, + { + "Total false removes network and blkio", + fields{ + PerDevice: false, + PerDeviceInclude: []string{"cpu"}, + Total: false, + TotalInclude: []string{"cpu", "network", "blkio"}, + }, + false, + []string{"cpu"}, + []string{"cpu"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &Docker{ + Log: testutil.Logger{}, + PerDevice: tt.fields.PerDevice, + PerDeviceInclude: tt.fields.PerDeviceInclude, + Total: tt.fields.Total, + TotalInclude: tt.fields.TotalInclude, + } + err := d.Init() + if (err != nil) != tt.wantErr { + t.Errorf("Init() error = %v, wantErr %v", err, tt.wantErr) + } + + if err == nil { + if !reflect.DeepEqual(d.PerDeviceInclude, tt.wantPerDeviceInclude) { + t.Errorf("Perdevice include: got '%v', want '%v'", d.PerDeviceInclude, tt.wantPerDeviceInclude) + } + + if !reflect.DeepEqual(d.TotalInclude, tt.wantTotalInclude) { + t.Errorf("Total include: got '%v', want '%v'", d.TotalInclude, tt.wantTotalInclude) + } + } + + }) + } +} diff --git a/testutil/accumulator.go b/testutil/accumulator.go index c02f5092c9dd8..f1b6469b79ec8 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -397,7 +397,6 @@ func (a *Accumulator) AssertDoesNotContainsTaggedFields( } return } - func (a *Accumulator) AssertContainsFields( t *testing.T, measurement string, diff --git a/testutil/testutil.go b/testutil/testutil.go index abcc27cba6ebb..f2d95560d7fd6 100644 --- a/testutil/testutil.go +++ b/testutil/testutil.go @@ -6,6 +6,7 @@ import ( "os" "time" + "github.com/google/go-cmp/cmp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" ) @@ -63,3 +64,9 @@ func TestMetric(value interface{}, name ...string) telegraf.Metric { ) return pt } + +// OnlyTags returns an option for keeping only "Tags" for a given Metric +func OnlyTags() cmp.Option { + f := func(p cmp.Path) bool { return p.String() != "Tags" && p.String() != "" } + return cmp.FilterPath(f, cmp.Ignore()) +} From ee09a39de5ee1d885033beb93d5f576f51b01507 Mon Sep 17 00:00:00 2001 From: oofdog <46097282+oofdog@users.noreply.github.com> Date: Wed, 3 Mar 2021 14:05:14 -0500 Subject: [PATCH 256/761] Add CSGO SRCDS input plugin (#8525) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 1 + go.sum | 2 + plugins/inputs/all/all.go | 1 + plugins/inputs/csgo/README.md | 37 ++++++ plugins/inputs/csgo/csgo.go | 192 +++++++++++++++++++++++++++++++ plugins/inputs/csgo/csgo_test.go | 54 +++++++++ 7 files changed, 288 insertions(+) create mode 100644 plugins/inputs/csgo/README.md create mode 100644 plugins/inputs/csgo/csgo.go create mode 100644 plugins/inputs/csgo/csgo_test.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 657c632767075..ee4cbb665870a 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -99,6 +99,7 @@ following works: - github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE) - github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE) - github.com/jaegertracing/jaeger [Apache License 2.0](https://github.com/jaegertracing/jaeger/blob/master/LICENSE) +- github.com/james4k/rcon [MIT License](https://github.com/james4k/rcon/blob/master/LICENSE) - github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE) - github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) - github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE) diff --git a/go.mod b/go.mod index fce100462629e..d8e19b95a8cc1 100644 --- a/go.mod +++ b/go.mod @@ -85,6 +85,7 @@ require ( github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect github.com/jackc/pgx v3.6.0+incompatible + github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 diff --git a/go.sum b/go.sum index 6c48bcd5c6861..7085775b0f186 100644 --- a/go.sum +++ b/go.sum @@ -406,6 +406,8 @@ github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGU github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q= github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0= +github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg= github.com/jaegertracing/jaeger v1.15.1 h1:7QzNAXq+4ko9GtCjozDNAp2uonoABu+B2Rk94hjQcp4= github.com/jaegertracing/jaeger v1.15.1/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOOxihXx0+53llXI= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index dd3474d25da28..595be84cabb87 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -29,6 +29,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/couchbase" _ "github.com/influxdata/telegraf/plugins/inputs/couchdb" _ "github.com/influxdata/telegraf/plugins/inputs/cpu" + _ "github.com/influxdata/telegraf/plugins/inputs/csgo" _ "github.com/influxdata/telegraf/plugins/inputs/dcos" _ "github.com/influxdata/telegraf/plugins/inputs/directory_monitor" _ "github.com/influxdata/telegraf/plugins/inputs/disk" diff --git a/plugins/inputs/csgo/README.md b/plugins/inputs/csgo/README.md new file mode 100644 index 0000000000000..ad80030065153 --- /dev/null +++ b/plugins/inputs/csgo/README.md @@ -0,0 +1,37 @@ +# CSGO Input Plugin + +The `csgo` plugin gather metrics from CSGO servers. + +#### Configuration +```toml +[[inputs.csgo]] + ## Specify servers using the following format: + ## servers = [ + ## ["ip1:port1", "rcon_password1"], + ## ["ip2:port2", "rcon_password2"], + ## ] + # + ## If no servers are specified, no data will be collected + servers = [] +``` + +### Metrics + +The plugin retrieves the output of the `stats` command that is executed via rcon. + +If no servers are specified, no data will be collected + +- csgo + - tags: + - host + - fields: + - cpu (float) + - net_in (float) + - net_out (float) + - uptime_minutes (float) + - maps (float) + - fps (float) + - players (float) + - sv_ms (float) + - variance_ms (float) + - tick_ms (float) diff --git a/plugins/inputs/csgo/csgo.go b/plugins/inputs/csgo/csgo.go new file mode 100644 index 0000000000000..fe82962669ced --- /dev/null +++ b/plugins/inputs/csgo/csgo.go @@ -0,0 +1,192 @@ +package csgo + +import ( + "encoding/json" + "errors" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/james4k/rcon" +) + +type statsData struct { + CPU float64 `json:"cpu"` + NetIn float64 `json:"net_in"` + NetOut float64 `json:"net_out"` + UptimeMinutes float64 `json:"uptime_minutes"` + Maps float64 `json:"maps"` + FPS float64 `json:"fps"` + Players float64 `json:"players"` + Sim float64 `json:"sv_ms"` + Variance float64 `json:"variance_ms"` + Tick float64 `json:"tick_ms"` +} + +type CSGO struct { + Servers [][]string `toml:"servers"` +} + +func (_ *CSGO) Description() string { + return "Fetch metrics from a CSGO SRCDS" +} + +var sampleConfig = ` + ## Specify servers using the following format: + ## servers = [ + ## ["ip1:port1", "rcon_password1"], + ## ["ip2:port2", "rcon_password2"], + ## ] + # + ## If no servers are specified, no data will be collected + servers = [] +` + +func (_ *CSGO) SampleConfig() string { + return sampleConfig +} + +func (s *CSGO) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + // Loop through each server and collect metrics + for _, server := range s.Servers { + wg.Add(1) + go func(ss []string) { + defer wg.Done() + acc.AddError(s.gatherServer(ss, requestServer, acc)) + }(server) + } + + wg.Wait() + return nil +} + +func init() { + inputs.Add("csgo", func() telegraf.Input { + return &CSGO{} + }) +} + +func (s *CSGO) gatherServer( + server []string, + request func(string, string) (string, error), + acc telegraf.Accumulator) error { + + if len(server) != 2 { + return errors.New("incorrect server config") + } + + url, rconPw := server[0], server[1] + resp, err := request(url, rconPw) + if err != nil { + return err + } + + rows := strings.Split(resp, "\n") + if len(rows) < 2 { + return errors.New("bad response") + } + + fields := strings.Fields(rows[1]) + if len(fields) != 10 { + return errors.New("bad response") + } + + cpu, err := strconv.ParseFloat(fields[0], 32) + if err != nil { + return err + } + netIn, err := strconv.ParseFloat(fields[1], 64) + if err != nil { + return err + } + netOut, err := strconv.ParseFloat(fields[2], 64) + if err != nil { + return err + } + uptimeMinutes, err := strconv.ParseFloat(fields[3], 64) + if err != nil { + return err + } + maps, err := strconv.ParseFloat(fields[4], 64) + if err != nil { + return err + } + fps, err := strconv.ParseFloat(fields[5], 64) + if err != nil { + return err + } + players, err := strconv.ParseFloat(fields[6], 64) + if err != nil { + return err + } + svms, err := strconv.ParseFloat(fields[7], 64) + if err != nil { + return err + } + msVar, err := strconv.ParseFloat(fields[8], 64) + if err != nil { + return err + } + tick, err := strconv.ParseFloat(fields[9], 64) + if err != nil { + return err + } + + now := time.Now() + stats := statsData{ + CPU: cpu, + NetIn: netIn, + NetOut: netOut, + UptimeMinutes: uptimeMinutes, + Maps: maps, + FPS: fps, + Players: players, + Sim: svms, + Variance: msVar, + Tick: tick, + } + + tags := map[string]string{ + "host": url, + } + + var statsMap map[string]interface{} + marshalled, err := json.Marshal(stats) + if err != nil { + return err + } + err = json.Unmarshal(marshalled, &statsMap) + if err != nil { + return err + } + + acc.AddGauge("csgo", statsMap, tags, now) + return nil +} + +func requestServer(url string, rconPw string) (string, error) { + remoteConsole, err := rcon.Dial(url, rconPw) + if err != nil { + return "", err + } + defer remoteConsole.Close() + + reqId, err := remoteConsole.Write("stats") + if err != nil { + return "", err + } + + resp, respReqId, err := remoteConsole.Read() + if err != nil { + return "", err + } else if reqId != respReqId { + return "", errors.New("response/request mismatch") + } else { + return resp, nil + } +} diff --git a/plugins/inputs/csgo/csgo_test.go b/plugins/inputs/csgo/csgo_test.go new file mode 100644 index 0000000000000..311e4b2b69bf0 --- /dev/null +++ b/plugins/inputs/csgo/csgo_test.go @@ -0,0 +1,54 @@ +package csgo + +import ( + "github.com/influxdata/telegraf/testutil" + "testing" + + "github.com/stretchr/testify/assert" +) + +const testInput = `CPU NetIn NetOut Uptime Maps FPS Players Svms +-ms ~tick +10.0 1.2 3.4 100 1 120.20 15 5.23 0.01 0.02` + +var ( + expectedOutput = statsData{ + 10.0, 1.2, 3.4, 100.0, 1, 120.20, 15, 5.23, 0.01, 0.02, + } +) + +func TestCPUStats(t *testing.T) { + c := NewCSGOStats() + var acc testutil.Accumulator + err := c.gatherServer(c.Servers[0], requestMock, &acc) + if err != nil { + t.Error(err) + } + + if !acc.HasMeasurement("csgo") { + t.Errorf("acc.HasMeasurement: expected csgo") + } + + assert.Equal(t, "1.2.3.4:1234", acc.Metrics[0].Tags["host"]) + assert.Equal(t, expectedOutput.CPU, acc.Metrics[0].Fields["cpu"]) + assert.Equal(t, expectedOutput.NetIn, acc.Metrics[0].Fields["net_in"]) + assert.Equal(t, expectedOutput.NetOut, acc.Metrics[0].Fields["net_out"]) + assert.Equal(t, expectedOutput.UptimeMinutes, acc.Metrics[0].Fields["uptime_minutes"]) + assert.Equal(t, expectedOutput.Maps, acc.Metrics[0].Fields["maps"]) + assert.Equal(t, expectedOutput.FPS, acc.Metrics[0].Fields["fps"]) + assert.Equal(t, expectedOutput.Players, acc.Metrics[0].Fields["players"]) + assert.Equal(t, expectedOutput.Sim, acc.Metrics[0].Fields["sv_ms"]) + assert.Equal(t, expectedOutput.Variance, acc.Metrics[0].Fields["variance_ms"]) + assert.Equal(t, expectedOutput.Tick, acc.Metrics[0].Fields["tick_ms"]) +} + +func requestMock(_ string, _ string) (string, error) { + return testInput, nil +} + +func NewCSGOStats() *CSGO { + return &CSGO{ + Servers: [][]string{ + {"1.2.3.4:1234", "password"}, + }, + } +} From 927d34f66c7e5488b737b882277a1ed867d1508a Mon Sep 17 00:00:00 2001 From: Karsten Schnitter Date: Wed, 3 Mar 2021 20:33:21 +0100 Subject: [PATCH 257/761] Add Derivative Aggregator Plugin (#3762) Calculate derivatives based on time or fields. --- plugins/aggregators/all/all.go | 1 + plugins/aggregators/derivative/README.md | 166 +++++++ plugins/aggregators/derivative/derivative.go | 224 ++++++++++ .../aggregators/derivative/derivative_test.go | 404 ++++++++++++++++++ 4 files changed, 795 insertions(+) create mode 100644 plugins/aggregators/derivative/README.md create mode 100644 plugins/aggregators/derivative/derivative.go create mode 100644 plugins/aggregators/derivative/derivative_test.go diff --git a/plugins/aggregators/all/all.go b/plugins/aggregators/all/all.go index 4128d712bc994..20d5b5ea2e482 100644 --- a/plugins/aggregators/all/all.go +++ b/plugins/aggregators/all/all.go @@ -3,6 +3,7 @@ package all import ( //Blank imports for plugins to register themselves _ "github.com/influxdata/telegraf/plugins/aggregators/basicstats" + _ "github.com/influxdata/telegraf/plugins/aggregators/derivative" _ "github.com/influxdata/telegraf/plugins/aggregators/final" _ "github.com/influxdata/telegraf/plugins/aggregators/histogram" _ "github.com/influxdata/telegraf/plugins/aggregators/merge" diff --git a/plugins/aggregators/derivative/README.md b/plugins/aggregators/derivative/README.md new file mode 100644 index 0000000000000..3ca29c36d4f49 --- /dev/null +++ b/plugins/aggregators/derivative/README.md @@ -0,0 +1,166 @@ +# Derivative Aggregator Plugin +The Derivative Aggregator Plugin estimates the derivative for all fields of the +aggregated metrics. + +### Time Derivatives + +In its default configuration it determines the first and last measurement of +the period. From these measurements the time difference in seconds is +calculated. This time difference is than used to divide the difference of each +field using the following formula: +``` + field_last - field_first +derivative = -------------------------- + time_difference +``` +For each field the derivative is emitted with a naming pattern +`_rate`. + +### Custom Derivation Variable + +The plugin supports to use a field of the aggregated measurements as derivation +variable in the denominator. This variable is assumed to be a monotonically +increasing value. In this feature the following formula is used: +``` + field_last - field_first +derivative = -------------------------------- + variable_last - variable_first +``` +**Make sure the specified variable is not filtered and exists in the metrics passed to this aggregator!** + +When using a custom derivation variable, you should change the `suffix` of the derivative name. +See the next section on [customizing the derivative name](#customize-the-derivative-name) for details. + +### Customize the Derivative Name + +The derivatives generated by the aggregator are named `_rate`, i.e. they are composed of the field name and a suffix `_rate`. +You can configure the suffix to be used by changing the `suffix` parameter. + +### Roll-Over to next Period + +Calculating the derivative for a period requires at least two distinct measurements during that period. +Whether those are available depends on the configuration of the aggregator `period` and the agent `interval`. +By default the last measurement is used as first measurement in the next +aggregation period. This enables a continuous calculation of the derivative. If +within the next period an earlier timestamp is encountered this measurement will +replace the roll-over metric. A main benefit of this roll-over is the ability to +cope with multiple "quiet" periods, where no new measurement is pushed to the +aggregator. The roll-over will take place at most `max_roll_over` times. + +#### Example of Roll-Over + +Let us assume we have an input plugin, that generates a measurement with a single metric "test" every 2 seconds. +Let this metric increase the first 10 seconds from 0.0 to 10.0 and then decrease the next 10 seconds form 10.0 to 0.0: + +| timestamp | value | +|-----------|-------| +| 0 | 0.0 | +| 2 | 2.0 | +| 4 | 4.0 | +| 6 | 6.0 | +| 8 | 8.0 | +| 10 | 10.0 | +| 12 | 8.0 | +| 14 | 6.0 | +| 16 | 4.0 | +| 18 | 2.0 | +| 20 | 0.0 | + +To avoid thinking about border values, we consider periods to be inclusive at the start but exclusive in the end. +Using `period = "10s"` and `max_roll_over = 0` we would get the following aggregates: + +| timestamp | value | aggregate | explanantion | +|-----------|-------|-----------|--------------| +| 0 | 0.0 | +| 2 | 2.0 | +| 4 | 4.0 | +| 6 | 6.0 | +| 8 | 8.0 | +||| 1.0 | (8.0 - 0.0) / (8 - 0) | +| 10 | 10.0 | +| 12 | 8.0 | +| 14 | 6.0 | +| 16 | 4.0 | +| 18 | 2.0 | +||| -1.0 | (2.0 - 10.0) / (18 - 10) +| 20 | 0.0 | + +If we now decrease the period with `period = 2s`, no derivative could be calculated since there would only one measurement for each period. +The aggregator will emit the log messages `Same first and last event for "test", skipping.`. +This changes, if we use `max_roll_over = 1`, since now end measurements of a period are taking as start for the next period. + +| timestamp | value | aggregate | explanantion | +|-----------|-------|-----------|--------------| +| 0 | 0.0 | +| 2 | 2.0 | 1.0 | (2.0 - 0.0) / (2 - 0) | +| 4 | 4.0 | 1.0 | (4.0 - 2.0) / (4 - 2) | +| 6 | 6.0 | 1.0 | (6.0 - 4.0) / (6 - 4) | +| 8 | 8.0 | 1.0 | (8.0 - 6.0) / (8 - 6) | +| 10 | 10.0 | 1.0 | (10.0 - 8.0) / (10 - 8) | +| 12 | 8.0 | -1.0 | (8.0 - 10.0) / (12 - 10) | +| 14 | 6.0 | -1.0 | (6.0 - 8.0) / (14 - 12) | +| 16 | 4.0 | -1.0 | (4.0 - 6.0) / (16 - 14) | +| 18 | 2.0 | -1.0 | (2.0 - 4.0) / (18 - 16) | +| 20 | 0.0 | -1.0 | (0.0 - 2.0) / (20 - 18) | + +The default `max_roll_over = 10` allows for multiple periods without measurements either due to configuration or missing input. + +There may be a slight difference in the calculation when using `max_roll_over` compared to running without. +To illustrate this, let us compare the derivatives for `period = "7s"`. + +| timestamp | value | `max_roll_over = 0` | `max_roll_over = 1` | +|-----------|-------|-----------|--------------| +| 0 | 0.0 | +| 2 | 2.0 | +| 4 | 4.0 | +| 6 | 6.0 | +||| 1.0 | 1.0 | +| 8 | 8.0 | +| 10 | 10.0 | +| 12 | 8.0 | +||| 0.0 | 0.33... | +| 14 | 6.0 | +| 16 | 4.0 | +| 18 | 2.0 | +| 20 | 0.0 | +||| -1.0 | -1.0 | + +The difference stems from the change of the value between periods, e.g. from 6.0 to 8.0 between first and second period. +Thoses changes are omitted with `max_roll_over = 0` but are respected with `max_roll_over = 1`. +That there are no more differences in the calculated derivatives is due to the example data, which has constant derivatives in during the first and last period, even when including the gap between the periods. +Using `max_roll_over` with a value greater 0 may be important, if you need to detect changes between periods, e.g. when you have very few measurements in a period or quasi-constant metrics with only occasional changes. + +### Configuration + +```toml +[[aggregators.derivative]] + ## Specific Derivative Aggregator Arguments: + + ## Configure a custom derivation variable. Timestamp is used if none is given. + # variable = "" + + ## Suffix to add to the field name for the derivative name. + # suffix = "_rate" + + ## Roll-Over last measurement to first measurement of next period + # max_roll_over = 10 + + ## General Aggregator Arguments: + + ## calculate derivative every 30 seconds + period = "30s" +``` + +### Tags: +No tags are applied by this aggregator. +Existing tags are passed throug the aggregator untouched. + +### Example Output + +``` +net bytes_recv=15409i,packets_recv=164i,bytes_sent=16649i,packets_sent=120i 1508843640000000000 +net bytes_recv=73987i,packets_recv=364i,bytes_sent=87328i,packets_sent=452i 1508843660000000000 +net bytes_recv_by_packets_recv=292.89 1508843660000000000 +net packets_sent_rate=16.6,bytes_sent_rate=3533.95 1508843660000000000 +net bytes_sent_by_packet=292.89 1508843660000000000 +``` diff --git a/plugins/aggregators/derivative/derivative.go b/plugins/aggregators/derivative/derivative.go new file mode 100644 index 0000000000000..f9e9c33de96c9 --- /dev/null +++ b/plugins/aggregators/derivative/derivative.go @@ -0,0 +1,224 @@ +package derivative + +import ( + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" +) + +type Derivative struct { + Variable string `toml:"variable"` + Suffix string `toml:"suffix"` + MaxRollOver uint `toml:"max_roll_over"` + Log telegraf.Logger `toml:"-"` + cache map[uint64]*aggregate +} + +type aggregate struct { + first *event + last *event + name string + tags map[string]string + rollOver uint +} + +type event struct { + fields map[string]float64 + time time.Time +} + +const defaultSuffix = "_rate" + +func NewDerivative() *Derivative { + derivative := &Derivative{Suffix: defaultSuffix, MaxRollOver: 10} + derivative.cache = make(map[uint64]*aggregate) + derivative.Reset() + return derivative +} + +var sampleConfig = ` + ## The period in which to flush the aggregator. + period = "30s" + ## + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false + ## + ## This aggregator will estimate a derivative for each field, which is + ## contained in both the first and last metric of the aggregation interval. + ## Without further configuration the derivative will be calculated with + ## respect to the time difference between these two measurements in seconds. + ## The formula applied is for every field: + ## + ## value_last - value_first + ## derivative = -------------------------- + ## time_difference_in_seconds + ## + ## The resulting derivative will be named *fieldname_rate*. The suffix + ## "_rate" can be configured by the *suffix* parameter. When using a + ## derivation variable you can include its name for more clarity. + # suffix = "_rate" + ## + ## As an abstraction the derivative can be calculated not only by the time + ## difference but by the difference of a field, which is contained in the + ## measurement. This field is assumed to be monotonously increasing. This + ## feature is used by specifying a *variable*. + ## Make sure the specified variable is not filtered and exists in the metrics + ## passed to this aggregator! + # variable = "" + ## + ## When using a field as the derivation parameter the name of that field will + ## be used for the resulting derivative, e.g. *fieldname_by_parameter*. + ## + ## Note, that the calculation is based on the actual timestamp of the + ## measurements. When there is only one measurement during that period, the + ## measurement will be rolled over to the next period. The maximum number of + ## such roll-overs can be configured with a default of 10. + # max_roll_over = 10 + ## +` + +func (d *Derivative) SampleConfig() string { + return sampleConfig +} + +func (d *Derivative) Description() string { + return "Calculates a derivative for every field." +} + +func (d *Derivative) Add(in telegraf.Metric) { + id := in.HashID() + current, ok := d.cache[id] + if !ok { + // hit an uncached metric, create caches for first time: + d.cache[id] = newAggregate(in) + return + } + if current.first.time.After(in.Time()) { + current.first = newEvent(in) + current.rollOver = 0 + } else if current.first.time.Equal(in.Time()) { + upsertConvertedFields(in.Fields(), current.first.fields) + current.rollOver = 0 + } + if current.last.time.Before(in.Time()) { + current.last = newEvent(in) + current.rollOver = 0 + } else if current.last.time.Equal(in.Time()) { + upsertConvertedFields(in.Fields(), current.last.fields) + current.rollOver = 0 + } +} + +func newAggregate(in telegraf.Metric) *aggregate { + event := newEvent(in) + return &aggregate{ + name: in.Name(), + tags: in.Tags(), + first: event, + last: event, + rollOver: 0, + } +} + +func newEvent(in telegraf.Metric) *event { + return &event{ + fields: extractConvertedFields(in), + time: in.Time(), + } +} + +func extractConvertedFields(in telegraf.Metric) map[string]float64 { + fields := make(map[string]float64, len(in.Fields())) + upsertConvertedFields(in.Fields(), fields) + return fields +} + +func upsertConvertedFields(source map[string]interface{}, target map[string]float64) { + for k, v := range source { + if value, ok := convert(v); ok { + target[k] = value + } + } +} + +func convert(in interface{}) (float64, bool) { + switch v := in.(type) { + case float64: + return v, true + case int64: + return float64(v), true + case uint64: + return float64(v), true + } + return 0, false +} + +func (d *Derivative) Push(acc telegraf.Accumulator) { + for _, aggregate := range d.cache { + if aggregate.first == aggregate.last { + d.Log.Debugf("Same first and last event for %q, skipping.", aggregate.name) + continue + } + var denominator float64 + denominator = aggregate.last.time.Sub(aggregate.first.time).Seconds() + if len(d.Variable) > 0 { + var first float64 + var last float64 + var found bool + if first, found = aggregate.first.fields[d.Variable]; !found { + d.Log.Debugf("Did not find %q in first event for %q.", d.Variable, aggregate.name) + continue + } + if last, found = aggregate.last.fields[d.Variable]; !found { + d.Log.Debugf("Did not find %q in last event for %q.", d.Variable, aggregate.name) + continue + } + denominator = last - first + } + if denominator == 0 { + d.Log.Debugf("Got difference 0 in denominator for %q, skipping.", aggregate.name) + continue + } + derivatives := make(map[string]interface{}) + for key, start := range aggregate.first.fields { + if key == d.Variable { + // Skip derivation variable + continue + } + if end, ok := aggregate.last.fields[key]; ok { + d.Log.Debugf("Adding derivative %q to %q.", key+d.Suffix, aggregate.name) + derivatives[key+d.Suffix] = (end - start) / denominator + } + } + acc.AddFields(aggregate.name, derivatives, aggregate.tags) + } +} + +func (d *Derivative) Reset() { + for id, aggregate := range d.cache { + if aggregate.rollOver < d.MaxRollOver { + aggregate.first = aggregate.last + aggregate.rollOver = aggregate.rollOver + 1 + d.cache[id] = aggregate + d.Log.Debugf("Roll-Over %q for the %d time.", aggregate.name, aggregate.rollOver) + } else { + delete(d.cache, id) + d.Log.Debugf("Removed %q from cache.", aggregate.name) + } + } +} + +func (d *Derivative) Init() error { + d.Suffix = strings.TrimSpace(d.Suffix) + d.Variable = strings.TrimSpace(d.Variable) + return nil +} + +func init() { + aggregators.Add("derivative", func() telegraf.Aggregator { + return NewDerivative() + }) +} diff --git a/plugins/aggregators/derivative/derivative_test.go b/plugins/aggregators/derivative/derivative_test.go new file mode 100644 index 0000000000000..1549500f74003 --- /dev/null +++ b/plugins/aggregators/derivative/derivative_test.go @@ -0,0 +1,404 @@ +package derivative + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" +) + +var start, _ = metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{ + "increasing": int64(0), + "decreasing": int64(100), + "unchanged": int64(42), + "ignored": "strings are not supported", + "parameter": float64(0.0), + }, + time.Now(), +) + +var finish, _ = metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{ + "increasing": int64(1000), + "decreasing": int64(0), + "unchanged": int64(42), + "ignored": "strings are not supported", + "parameter": float64(10.0), + }, + time.Now().Add(time.Second), +) + +func TestTwoFullEventsWithParameter(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + derivative.Init() + + derivative.Add(start) + derivative.Add(finish) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "increasing_by_parameter": 100.0, + "decreasing_by_parameter": -10.0, + "unchanged_by_parameter": 0.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestTwoFullEventsWithParameterReverseSequence(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + derivative.Init() + + derivative.Add(finish) + derivative.Add(start) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "increasing_by_parameter": 100.0, + "decreasing_by_parameter": -10.0, + "unchanged_by_parameter": 0.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestTwoFullEventsWithoutParameter(t *testing.T) { + acc := testutil.Accumulator{} + derivative := NewDerivative() + derivative.Log = testutil.Logger{} + derivative.Init() + + startTime := time.Now() + duration, _ := time.ParseDuration("2s") + endTime := startTime.Add(duration) + + first, _ := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(10), + }, + startTime, + ) + last, _ := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(20), + }, + endTime, + ) + + derivative.Add(first) + derivative.Add(last) + derivative.Push(&acc) + + acc.AssertContainsFields(t, + "One Field", + map[string]interface{}{ + "value_rate": float64(5), + }, + ) + +} + +func TestTwoFullEventsInSeperatePushes(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: " parameter", + Suffix: "_wrt_parameter", + MaxRollOver: 10, + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + derivative.Init() + + derivative.Add(start) + derivative.Push(&acc) + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") + + acc.ClearMetrics() + + derivative.Add(finish) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "increasing_wrt_parameter": 100.0, + "decreasing_wrt_parameter": -10.0, + "unchanged_wrt_parameter": 0.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestTwoFullEventsInSeperatePushesWithSeveralRollOvers(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_wrt_parameter", + MaxRollOver: 10, + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + derivative.Init() + + derivative.Add(start) + derivative.Push(&acc) + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") + + derivative.Push(&acc) + derivative.Push(&acc) + derivative.Push(&acc) + + derivative.Add(finish) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "increasing_wrt_parameter": 100.0, + "decreasing_wrt_parameter": -10.0, + "unchanged_wrt_parameter": 0.0, + } + + acc.AssertContainsFields(t, "TestMetric", expectedFields) +} + +func TestTwoFullEventsInSeperatePushesWithOutRollOver(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + MaxRollOver: 0, + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + derivative.Init() + + derivative.Add(start) + // This test relies on RunningAggregator always callining Reset after Push + // to remove the first metric after max-rollover of 0 has been reached. + derivative.Push(&acc) + derivative.Reset() + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") + + acc.ClearMetrics() + derivative.Add(finish) + derivative.Push(&acc) + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") +} + +func TestIgnoresMissingVariable(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + derivative.Init() + + noParameter, _ := metric.New("TestMetric", + map[string]string{"state": "no_parameter"}, + map[string]interface{}{ + "increasing": int64(100), + "decreasing": int64(0), + "unchanged": int64(42), + }, + time.Now(), + ) + + derivative.Add(noParameter) + derivative.Push(&acc) + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") + + acc.ClearMetrics() + derivative.Add(noParameter) + derivative.Add(start) + derivative.Add(noParameter) + derivative.Add(finish) + derivative.Add(noParameter) + derivative.Push(&acc) + expectedFields := map[string]interface{}{ + "increasing_by_parameter": 100.0, + "decreasing_by_parameter": -10.0, + "unchanged_by_parameter": 0.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestMergesDifferenMetricsWithSameHash(t *testing.T) { + acc := testutil.Accumulator{} + derivative := NewDerivative() + derivative.Log = testutil.Logger{} + derivative.Init() + + startTime := time.Now() + duration, _ := time.ParseDuration("2s") + endTime := startTime.Add(duration) + part1, _ := metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{"field1": int64(10)}, + startTime, + ) + part2, _ := metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{"field2": int64(20)}, + startTime, + ) + final, _ := metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{ + "field1": int64(30), + "field2": int64(30), + }, + endTime, + ) + + derivative.Add(part1) + derivative.Push(&acc) + derivative.Add(part2) + derivative.Push(&acc) + derivative.Add(final) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "field1_rate": 10.0, + "field2_rate": 5.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestDropsAggregatesOnMaxRollOver(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + MaxRollOver: 1, + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + derivative.Init() + + derivative.Add(start) + derivative.Push(&acc) + derivative.Reset() + derivative.Push(&acc) + derivative.Reset() + derivative.Add(finish) + derivative.Push(&acc) + derivative.Reset() + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") +} + +func TestAddMetricsResetsRollOver(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + MaxRollOver: 1, + cache: make(map[uint64]*aggregate), + Log: testutil.Logger{}, + } + derivative.Init() + + derivative.Add(start) + derivative.Push(&acc) + derivative.Reset() + derivative.Add(start) + derivative.Reset() + derivative.Add(finish) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "increasing_by_parameter": 100.0, + "decreasing_by_parameter": -10.0, + "unchanged_by_parameter": 0.0, + } + + acc.AssertContainsFields(t, "TestMetric", expectedFields) +} + +func TestCalculatesCorrectDerivativeOnTwoConsecutivePeriods(t *testing.T) { + acc := testutil.Accumulator{} + period, _ := time.ParseDuration("10s") + derivative := NewDerivative() + derivative.Log = testutil.Logger{} + derivative.Init() + + startTime := time.Now() + first, _ := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(10), + }, + startTime, + ) + derivative.Add(first) + derivative.Push(&acc) + derivative.Reset() + + second, _ := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(20), + }, + startTime.Add(period), + ) + derivative.Add(second) + derivative.Push(&acc) + derivative.Reset() + + acc.AssertContainsFields(t, "One Field", map[string]interface{}{ + "value_rate": 1.0, + }) + + acc.ClearMetrics() + third, _ := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(40), + }, + startTime.Add(period).Add(period), + ) + derivative.Add(third) + derivative.Push(&acc) + derivative.Reset() + + acc.AssertContainsFields(t, "One Field", map[string]interface{}{ + "value_rate": 2.0, + }) +} From 5e3d7b8a1669073e114219917bea6db223ebbaec Mon Sep 17 00:00:00 2001 From: Marcin Lewandowski Date: Wed, 3 Mar 2021 20:37:06 +0100 Subject: [PATCH 258/761] Input plugin for RavenDB (#8834) --- README.md | 1 + etc/telegraf.conf | 32 ++ plugins/inputs/all/all.go | 1 + plugins/inputs/ravendb/README.md | 216 +++++++++ plugins/inputs/ravendb/ravendb.go | 425 ++++++++++++++++++ plugins/inputs/ravendb/ravendb_dto.go | 199 ++++++++ plugins/inputs/ravendb/ravendb_test.go | 393 ++++++++++++++++ .../ravendb/testdata/collections_full.json | 19 + .../ravendb/testdata/collections_min.json | 19 + .../ravendb/testdata/databases_full.json | 49 ++ .../ravendb/testdata/databases_min.json | 49 ++ .../inputs/ravendb/testdata/indexes_full.json | 25 ++ .../inputs/ravendb/testdata/indexes_min.json | 25 ++ .../inputs/ravendb/testdata/server_full.json | 73 +++ .../inputs/ravendb/testdata/server_min.json | 72 +++ 15 files changed, 1598 insertions(+) create mode 100644 plugins/inputs/ravendb/README.md create mode 100644 plugins/inputs/ravendb/ravendb.go create mode 100644 plugins/inputs/ravendb/ravendb_dto.go create mode 100644 plugins/inputs/ravendb/ravendb_test.go create mode 100644 plugins/inputs/ravendb/testdata/collections_full.json create mode 100644 plugins/inputs/ravendb/testdata/collections_min.json create mode 100644 plugins/inputs/ravendb/testdata/databases_full.json create mode 100644 plugins/inputs/ravendb/testdata/databases_min.json create mode 100644 plugins/inputs/ravendb/testdata/indexes_full.json create mode 100644 plugins/inputs/ravendb/testdata/indexes_min.json create mode 100644 plugins/inputs/ravendb/testdata/server_full.json create mode 100644 plugins/inputs/ravendb/testdata/server_min.json diff --git a/README.md b/README.md index 3e2d332fb214f..2e88d83bb7415 100644 --- a/README.md +++ b/README.md @@ -294,6 +294,7 @@ For documentation on the latest development code see the [documentation index][d * [rabbitmq](./plugins/inputs/rabbitmq) * [raindrops](./plugins/inputs/raindrops) * [ras](./plugins/inputs/ras) +* [ravendb](./plugins/inputs/ravendb) * [redfish](./plugins/inputs/redfish) * [redis](./plugins/inputs/redis) * [rethinkdb](./plugins/inputs/rethinkdb) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 425e6d758833d..c70b1d2f9f87c 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -5065,6 +5065,38 @@ # ## Default: /var/lib/rasdaemon/ras-mc_event.db # # db_path = "" +# [[inputs.ravendb]] +# ## Node URL and port that RavenDB is listening on. +# url = "https://localhost:8080" +# +# ## RavenDB X509 client certificate setup +# tls_cert = "/etc/telegraf/raven.crt" +# tls_key = "/etc/telegraf/raven.key" +# +# ## Optional request timeout +# ## +# ## Timeout, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request and +# ## time limit for requests made by this client. +# # timeout = "5s" +# +# ## List of statistics which are collected +# # At least one is required +# # Allowed values: server, databases, indexes, collections +# +# # stats_include = ["server", "databases", "indexes", "collections"] +# +# ## List of db where database stats are collected +# ## If empty, all db are concerned +# # db_stats_dbs = [] +# +# ## List of db where index status are collected +# ## If empty, all indexes from all db are concerned +# # index_stats_dbs = [] +# +# ## List of db where collection status are collected +# ## If empty, all collections from all db are concerned +# # collection_stats_dbs = [] # # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs # [[inputs.redfish]] diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 595be84cabb87..65d8d5254c6d3 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -147,6 +147,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq" _ "github.com/influxdata/telegraf/plugins/inputs/raindrops" _ "github.com/influxdata/telegraf/plugins/inputs/ras" + _ "github.com/influxdata/telegraf/plugins/inputs/ravendb" _ "github.com/influxdata/telegraf/plugins/inputs/redfish" _ "github.com/influxdata/telegraf/plugins/inputs/redis" _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" diff --git a/plugins/inputs/ravendb/README.md b/plugins/inputs/ravendb/README.md new file mode 100644 index 0000000000000..b40850ab5c82d --- /dev/null +++ b/plugins/inputs/ravendb/README.md @@ -0,0 +1,216 @@ +# RavenDB Input Plugin + +Reads metrics from RavenDB servers via monitoring endpoints APIs. + +Requires RavenDB Server 5.2+. + +### Configuration + +The following is an example config for RavenDB. **Note:** The client certificate used should have `Operator` permissions on the cluster. + +```toml +[[inputs.ravendb]] + ## Node URL and port that RavenDB is listening on + url = "https://localhost:8080" + + ## RavenDB X509 client certificate setup + tls_cert = "/etc/telegraf/raven.crt" + tls_key = "/etc/telegraf/raven.key" + + ## Optional request timeout + ## + ## Timeout, specifies the amount of time to wait + ## for a server's response headers after fully writing the request and + ## time limit for requests made by this client + # timeout = "5s" + + ## List of statistics which are collected + # At least one is required + # Allowed values: server, databases, indexes, collections + # + # stats_include = ["server", "databases", "indexes", "collections"] + + ## List of db where database stats are collected + ## If empty, all db are concerned + # db_stats_dbs = [] + + ## List of db where index status are collected + ## If empty, all indexes from all db are concerned + # index_stats_dbs = [] + + ## List of db where collection status are collected + ## If empty, all collections from all db are concerned + # collection_stats_dbs = [] +``` + +### Metrics + +- ravendb_server + - tags: + - url + - node_tag + - cluster_id + - public_server_url (optional) + - fields: + - backup_current_number_of_running_backups + - backup_max_number_of_concurrent_backups + - certificate_server_certificate_expiration_left_in_sec (optional) + - certificate_well_known_admin_certificates (optional, separated by ';') + - cluster_current_term + - cluster_index + - cluster_node_state + - 0 -> Passive + - 1 -> Candidate + - 2 -> Follower + - 3 -> LeaderElect + - 4 -> Leader + - config_public_tcp_server_urls (optional, separated by ';') + - config_server_urls + - config_tcp_server_urls (optional, separated by ';') + - cpu_assigned_processor_count + - cpu_machine_usage + - cpu_machine_io_wait (optional) + - cpu_process_usage + - cpu_processor_count + - cpu_thread_pool_available_worker_threads + - cpu_thread_pool_available_completion_port_threads + - databases_loaded_count + - databases_total_count + - disk_remaining_storage_space_percentage + - disk_system_store_used_data_file_size_in_mb + - disk_system_store_total_data_file_size_in_mb + - disk_total_free_space_in_mb + - license_expiration_left_in_sec (optional) + - license_max_cores + - license_type + - license_utilized_cpu_cores + - memory_allocated_in_mb + - memory_installed_in_mb + - memory_low_memory_severity + - 0 -> None + - 1 -> Low + - 2 -> Extremely Low + - memory_physical_in_mb + - memory_total_dirty_in_mb + - memory_total_swap_size_in_mb + - memory_total_swap_usage_in_mb + - memory_working_set_swap_usage_in_mb + - network_concurrent_requests_count + - network_last_authorized_non_cluster_admin_request_time_in_sec (optional) + - network_last_request_time_in_sec (optional) + - network_requests_per_sec + - network_tcp_active_connections + - network_total_requests + - server_full_version + - server_process_id + - server_version + - uptime_in_sec + +- ravendb_databases + - tags: + - url + - database_name + - database_id + - node_tag + - public_server_url (optional) + - fields: + - counts_alerts + - counts_attachments + - counts_documents + - counts_performance_hints + - counts_rehabs + - counts_replication_factor + - counts_revisions + - counts_unique_attachments + - statistics_doc_puts_per_sec + - statistics_map_index_indexes_per_sec + - statistics_map_reduce_index_mapped_per_sec + - statistics_map_reduce_index_reduced_per_sec + - statistics_request_average_duration_in_ms + - statistics_requests_count + - statistics_requests_per_sec + - indexes_auto_count + - indexes_count + - indexes_disabled_count + - indexes_errors_count + - indexes_errored_count + - indexes_idle_count + - indexes_stale_count + - indexes_static_count + - storage_documents_allocated_data_file_in_mb + - storage_documents_used_data_file_in_mb + - storage_indexes_allocated_data_file_in_mb + - storage_indexes_used_data_file_in_mb + - storage_total_allocated_storage_file_in_mb + - storage_total_free_space_in_mb + - time_since_last_backup_in_sec (optional) + - uptime_in_sec + +- ravendb_indexes + - tags: + - database_name + - index_name + - node_tag + - public_server_url (optional) + - url + - fields + - errors + - is_invalid + - lock_mode + - Unlock + - LockedIgnore + - LockedError + - mapped_per_sec + - priority + - Low + - Normal + - High + - reduced_per_sec + - state + - Normal + - Disabled + - Idle + - Error + - status + - Running + - Paused + - Disabled + - time_since_last_indexing_in_sec (optional) + - time_since_last_query_in_sec (optional) + - type + - None + - AutoMap + - AutoMapReduce + - Map + - MapReduce + - Faulty + - JavaScriptMap + - JavaScriptMapReduce + +- ravendb_collections + - tags: + - collection_name + - database_name + - node_tag + - public_server_url (optional) + - url + - fields + - documents_count + - documents_size_in_bytes + - revisions_size_in_bytes + - tombstones_size_in_bytes + - total_size_in_bytes + +### Example output + +``` +> ravendb_server,cluster_id=07aecc42-9194-4181-999c-1c42450692c9,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 backup_current_number_of_running_backups=0i,backup_max_number_of_concurrent_backups=4i,certificate_server_certificate_expiration_left_in_sec=-1,cluster_current_term=2i,cluster_index=10i,cluster_node_state=4i,config_server_urls="http://127.0.0.1:8080",cpu_assigned_processor_count=8i,cpu_machine_usage=19.09944089456869,cpu_process_usage=0.16977205323024872,cpu_processor_count=8i,cpu_thread_pool_available_completion_port_threads=1000i,cpu_thread_pool_available_worker_threads=32763i,databases_loaded_count=1i,databases_total_count=1i,disk_remaining_storage_space_percentage=18i,disk_system_store_total_data_file_size_in_mb=35184372088832i,disk_system_store_used_data_file_size_in_mb=31379031064576i,disk_total_free_space_in_mb=42931i,license_expiration_left_in_sec=24079222.8772186,license_max_cores=256i,license_type="Enterprise",license_utilized_cpu_cores=8i,memory_allocated_in_mb=205i,memory_installed_in_mb=16384i,memory_low_memory_severity=0i,memory_physical_in_mb=16250i,memory_total_dirty_in_mb=0i,memory_total_swap_size_in_mb=0i,memory_total_swap_usage_in_mb=0i,memory_working_set_swap_usage_in_mb=0i,network_concurrent_requests_count=1i,network_last_request_time_in_sec=0.0058717,network_requests_per_sec=0.09916543455308825,network_tcp_active_connections=128i,network_total_requests=10i,server_full_version="5.2.0-custom-52",server_process_id=31044i,server_version="5.2",uptime_in_sec=56i 1613027977000000000 +> ravendb_databases,database_id=ced0edba-8f80-48b8-8e81-c3d2c6748ec3,database_name=db1,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 counts_alerts=0i,counts_attachments=17i,counts_documents=1059i,counts_performance_hints=0i,counts_rehabs=0i,counts_replication_factor=1i,counts_revisions=5475i,counts_unique_attachments=17i,indexes_auto_count=0i,indexes_count=7i,indexes_disabled_count=0i,indexes_errored_count=0i,indexes_errors_count=0i,indexes_idle_count=0i,indexes_stale_count=0i,indexes_static_count=7i,statistics_doc_puts_per_sec=0,statistics_map_index_indexes_per_sec=0,statistics_map_reduce_index_mapped_per_sec=0,statistics_map_reduce_index_reduced_per_sec=0,statistics_request_average_duration_in_ms=0,statistics_requests_count=0i,statistics_requests_per_sec=0,storage_documents_allocated_data_file_in_mb=140737488355328i,storage_documents_used_data_file_in_mb=74741020884992i,storage_indexes_allocated_data_file_in_mb=175921860444160i,storage_indexes_used_data_file_in_mb=120722940755968i,storage_total_allocated_storage_file_in_mb=325455441821696i,storage_total_free_space_in_mb=42931i,uptime_in_sec=54 1613027977000000000 +> ravendb_indexes,database_name=db1,host=DESKTOP-2OISR6D,index_name=Orders/Totals,node_tag=A,url=http://localhost:8080 errors=0i,is_invalid=false,lock_mode="Unlock",mapped_per_sec=0,priority="Normal",reduced_per_sec=0,state="Normal",status="Running",time_since_last_indexing_in_sec=45.4256655,time_since_last_query_in_sec=45.4304202,type="Map" 1613027977000000000 +> ravendb_collections,collection_name=@hilo,database_name=db1,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 documents_count=8i,documents_size_in_bytes=122880i,revisions_size_in_bytes=0i,tombstones_size_in_bytes=122880i,total_size_in_bytes=245760i 1613027977000000000 +``` + +### Contributors + +- Marcin Lewandowski (https://github.com/ml054/) +- Casey Barton (https://github.com/bartoncasey) \ No newline at end of file diff --git a/plugins/inputs/ravendb/ravendb.go b/plugins/inputs/ravendb/ravendb.go new file mode 100644 index 0000000000000..42b50d0d3816f --- /dev/null +++ b/plugins/inputs/ravendb/ravendb.go @@ -0,0 +1,425 @@ +package ravendb + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// defaultURL will set a default value that corresponds to the default value +// used by RavenDB +const defaultURL = "http://localhost:8080" + +const defaultTimeout = 5 + +// RavenDB defines the configuration necessary for gathering metrics, +// see the sample config for further details +type RavenDB struct { + URL string `toml:"url"` + Name string `toml:"name"` + + Timeout internal.Duration `toml:"timeout"` + + StatsInclude []string `toml:"stats_include"` + DbStatsDbs []string `toml:"db_stats_dbs"` + IndexStatsDbs []string `toml:"index_stats_dbs"` + CollectionStatsDbs []string `toml:"collection_stats_dbs"` + + tls.ClientConfig + + Log telegraf.Logger `toml:"-"` + + client *http.Client + requestUrlServer string + requestUrlDatabases string + requestUrlIndexes string + requestUrlCollection string +} + +var sampleConfig = ` + ## Node URL and port that RavenDB is listening on + url = "https://localhost:8080" + + ## RavenDB X509 client certificate setup + # tls_cert = "/etc/telegraf/raven.crt" + # tls_key = "/etc/telegraf/raven.key" + + ## Optional request timeout + ## + ## Timeout, specifies the amount of time to wait + ## for a server's response headers after fully writing the request and + ## time limit for requests made by this client + # timeout = "5s" + + ## List of statistics which are collected + # At least one is required + # Allowed values: server, databases, indexes, collections + # + # stats_include = ["server", "databases", "indexes", "collections"] + + ## List of db where database stats are collected + ## If empty, all db are concerned + # db_stats_dbs = [] + + ## List of db where index status are collected + ## If empty, all indexes from all db are concerned + # index_stats_dbs = [] + + ## List of db where collection status are collected + ## If empty, all collections from all db are concerned + # collection_stats_dbs = [] +` + +func (r *RavenDB) SampleConfig() string { + return sampleConfig +} + +func (r *RavenDB) Description() string { + return "Reads metrics from RavenDB servers via the Monitoring Endpoints" +} + +func (r *RavenDB) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + for _, statToCollect := range r.StatsInclude { + wg.Add(1) + + switch statToCollect { + case "server": + go func() { + defer wg.Done() + r.gatherServer(acc) + }() + case "databases": + go func() { + defer wg.Done() + r.gatherDatabases(acc) + }() + case "indexes": + go func() { + defer wg.Done() + r.gatherIndexes(acc) + }() + case "collections": + go func() { + defer wg.Done() + r.gatherCollections(acc) + }() + } + } + + wg.Wait() + + return nil +} + +func (r *RavenDB) ensureClient() error { + if r.client != nil { + return nil + } + + tlsCfg, err := r.ClientConfig.TLSConfig() + if err != nil { + return err + } + tr := &http.Transport{ + ResponseHeaderTimeout: r.Timeout.Duration, + TLSClientConfig: tlsCfg, + } + r.client = &http.Client{ + Transport: tr, + Timeout: r.Timeout.Duration, + } + + return nil +} + +func (r *RavenDB) requestJSON(u string, target interface{}) error { + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return err + } + + resp, err := r.client.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + r.Log.Debugf("%s: %s", u, resp.Status) + if resp.StatusCode >= 400 { + return fmt.Errorf("invalid response code to request '%s': %d - %s", r.URL, resp.StatusCode, resp.Status) + } + + return json.NewDecoder(resp.Body).Decode(target) +} + +func (r *RavenDB) gatherServer(acc telegraf.Accumulator) { + serverResponse := &serverMetricsResponse{} + + err := r.requestJSON(r.requestUrlServer, &serverResponse) + if err != nil { + acc.AddError(err) + return + } + + tags := map[string]string{ + "cluster_id": serverResponse.Cluster.Id, + "node_tag": serverResponse.Cluster.NodeTag, + "url": r.URL, + } + + if serverResponse.Config.PublicServerUrl != nil { + tags["public_server_url"] = *serverResponse.Config.PublicServerUrl + } + + fields := map[string]interface{}{ + "backup_current_number_of_running_backups": serverResponse.Backup.CurrentNumberOfRunningBackups, + "backup_max_number_of_concurrent_backups": serverResponse.Backup.MaxNumberOfConcurrentBackups, + "certificate_server_certificate_expiration_left_in_sec": serverResponse.Certificate.ServerCertificateExpirationLeftInSec, + "cluster_current_term": serverResponse.Cluster.CurrentTerm, + "cluster_index": serverResponse.Cluster.Index, + "cluster_node_state": serverResponse.Cluster.NodeState, + "config_server_urls": strings.Join(serverResponse.Config.ServerUrls, ";"), + "cpu_assigned_processor_count": serverResponse.Cpu.AssignedProcessorCount, + "cpu_machine_io_wait": serverResponse.Cpu.MachineIoWait, + "cpu_machine_usage": serverResponse.Cpu.MachineUsage, + "cpu_process_usage": serverResponse.Cpu.ProcessUsage, + "cpu_processor_count": serverResponse.Cpu.ProcessorCount, + "cpu_thread_pool_available_worker_threads": serverResponse.Cpu.ThreadPoolAvailableWorkerThreads, + "cpu_thread_pool_available_completion_port_threads": serverResponse.Cpu.ThreadPoolAvailableCompletionPortThreads, + "databases_loaded_count": serverResponse.Databases.LoadedCount, + "databases_total_count": serverResponse.Databases.TotalCount, + "disk_remaining_storage_space_percentage": serverResponse.Disk.RemainingStorageSpacePercentage, + "disk_system_store_used_data_file_size_in_mb": serverResponse.Disk.SystemStoreUsedDataFileSizeInMb, + "disk_system_store_total_data_file_size_in_mb": serverResponse.Disk.SystemStoreTotalDataFileSizeInMb, + "disk_total_free_space_in_mb": serverResponse.Disk.TotalFreeSpaceInMb, + "license_expiration_left_in_sec": serverResponse.License.ExpirationLeftInSec, + "license_max_cores": serverResponse.License.MaxCores, + "license_type": serverResponse.License.Type, + "license_utilized_cpu_cores": serverResponse.License.UtilizedCpuCores, + "memory_allocated_in_mb": serverResponse.Memory.AllocatedMemoryInMb, + "memory_installed_in_mb": serverResponse.Memory.InstalledMemoryInMb, + "memory_low_memory_severity": serverResponse.Memory.LowMemorySeverity, + "memory_physical_in_mb": serverResponse.Memory.PhysicalMemoryInMb, + "memory_total_dirty_in_mb": serverResponse.Memory.TotalDirtyInMb, + "memory_total_swap_size_in_mb": serverResponse.Memory.TotalSwapSizeInMb, + "memory_total_swap_usage_in_mb": serverResponse.Memory.TotalSwapUsageInMb, + "memory_working_set_swap_usage_in_mb": serverResponse.Memory.WorkingSetSwapUsageInMb, + "network_concurrent_requests_count": serverResponse.Network.ConcurrentRequestsCount, + "network_last_authorized_non_cluster_admin_request_time_in_sec": serverResponse.Network.LastAuthorizedNonClusterAdminRequestTimeInSec, + "network_last_request_time_in_sec": serverResponse.Network.LastRequestTimeInSec, + "network_requests_per_sec": serverResponse.Network.RequestsPerSec, + "network_tcp_active_connections": serverResponse.Network.TcpActiveConnections, + "network_total_requests": serverResponse.Network.TotalRequests, + "server_full_version": serverResponse.ServerFullVersion, + "server_process_id": serverResponse.ServerProcessId, + "server_version": serverResponse.ServerVersion, + "uptime_in_sec": serverResponse.UpTimeInSec, + } + + if serverResponse.Config.TcpServerUrls != nil { + fields["config_tcp_server_urls"] = strings.Join(serverResponse.Config.TcpServerUrls, ";") + } + + if serverResponse.Config.PublicTcpServerUrls != nil { + fields["config_public_tcp_server_urls"] = strings.Join(serverResponse.Config.PublicTcpServerUrls, ";") + } + + if serverResponse.Certificate.WellKnownAdminCertificates != nil { + fields["certificate_well_known_admin_certificates"] = strings.Join(serverResponse.Certificate.WellKnownAdminCertificates, ";") + } + + acc.AddFields("ravendb_server", fields, tags) +} + +func (r *RavenDB) gatherDatabases(acc telegraf.Accumulator) { + databasesResponse := &databasesMetricResponse{} + + err := r.requestJSON(r.requestUrlDatabases, &databasesResponse) + if err != nil { + acc.AddError(err) + return + } + + for _, dbResponse := range databasesResponse.Results { + tags := map[string]string{ + "database_id": dbResponse.DatabaseId, + "database_name": dbResponse.DatabaseName, + "node_tag": databasesResponse.NodeTag, + "url": r.URL, + } + + if databasesResponse.PublicServerUrl != nil { + tags["public_server_url"] = *databasesResponse.PublicServerUrl + } + + fields := map[string]interface{}{ + "counts_alerts": dbResponse.Counts.Alerts, + "counts_attachments": dbResponse.Counts.Attachments, + "counts_documents": dbResponse.Counts.Documents, + "counts_performance_hints": dbResponse.Counts.PerformanceHints, + "counts_rehabs": dbResponse.Counts.Rehabs, + "counts_replication_factor": dbResponse.Counts.ReplicationFactor, + "counts_revisions": dbResponse.Counts.Revisions, + "counts_unique_attachments": dbResponse.Counts.UniqueAttachments, + "indexes_auto_count": dbResponse.Indexes.AutoCount, + "indexes_count": dbResponse.Indexes.Count, + "indexes_errored_count": dbResponse.Indexes.ErroredCount, + "indexes_errors_count": dbResponse.Indexes.ErrorsCount, + "indexes_disabled_count": dbResponse.Indexes.DisabledCount, + "indexes_idle_count": dbResponse.Indexes.IdleCount, + "indexes_stale_count": dbResponse.Indexes.StaleCount, + "indexes_static_count": dbResponse.Indexes.StaticCount, + "statistics_doc_puts_per_sec": dbResponse.Statistics.DocPutsPerSec, + "statistics_map_index_indexes_per_sec": dbResponse.Statistics.MapIndexIndexesPerSec, + "statistics_map_reduce_index_mapped_per_sec": dbResponse.Statistics.MapReduceIndexMappedPerSec, + "statistics_map_reduce_index_reduced_per_sec": dbResponse.Statistics.MapReduceIndexReducedPerSec, + "statistics_request_average_duration_in_ms": dbResponse.Statistics.RequestAverageDurationInMs, + "statistics_requests_count": dbResponse.Statistics.RequestsCount, + "statistics_requests_per_sec": dbResponse.Statistics.RequestsPerSec, + "storage_documents_allocated_data_file_in_mb": dbResponse.Storage.DocumentsAllocatedDataFileInMb, + "storage_documents_used_data_file_in_mb": dbResponse.Storage.DocumentsUsedDataFileInMb, + "storage_indexes_allocated_data_file_in_mb": dbResponse.Storage.IndexesAllocatedDataFileInMb, + "storage_indexes_used_data_file_in_mb": dbResponse.Storage.IndexesUsedDataFileInMb, + "storage_total_allocated_storage_file_in_mb": dbResponse.Storage.TotalAllocatedStorageFileInMb, + "storage_total_free_space_in_mb": dbResponse.Storage.TotalFreeSpaceInMb, + "time_since_last_backup_in_sec": dbResponse.TimeSinceLastBackupInSec, + "uptime_in_sec": dbResponse.UptimeInSec, + } + + acc.AddFields("ravendb_databases", fields, tags) + } +} + +func (r *RavenDB) gatherIndexes(acc telegraf.Accumulator) { + indexesResponse := &indexesMetricResponse{} + + err := r.requestJSON(r.requestUrlIndexes, &indexesResponse) + if err != nil { + acc.AddError(err) + return + } + + for _, perDbIndexResponse := range indexesResponse.Results { + for _, indexResponse := range perDbIndexResponse.Indexes { + tags := map[string]string{ + "database_name": perDbIndexResponse.DatabaseName, + "index_name": indexResponse.IndexName, + "node_tag": indexesResponse.NodeTag, + "url": r.URL, + } + + if indexesResponse.PublicServerUrl != nil { + tags["public_server_url"] = *indexesResponse.PublicServerUrl + } + + fields := map[string]interface{}{ + "errors": indexResponse.Errors, + "is_invalid": indexResponse.IsInvalid, + "lock_mode": indexResponse.LockMode, + "mapped_per_sec": indexResponse.MappedPerSec, + "priority": indexResponse.Priority, + "reduced_per_sec": indexResponse.ReducedPerSec, + "state": indexResponse.State, + "status": indexResponse.Status, + "time_since_last_indexing_in_sec": indexResponse.TimeSinceLastIndexingInSec, + "time_since_last_query_in_sec": indexResponse.TimeSinceLastQueryInSec, + "type": indexResponse.Type, + } + + acc.AddFields("ravendb_indexes", fields, tags) + } + } +} + +func (r *RavenDB) gatherCollections(acc telegraf.Accumulator) { + collectionsResponse := &collectionsMetricResponse{} + + err := r.requestJSON(r.requestUrlCollection, &collectionsResponse) + if err != nil { + acc.AddError(err) + return + } + + for _, perDbCollectionMetrics := range collectionsResponse.Results { + for _, collectionMetrics := range perDbCollectionMetrics.Collections { + tags := map[string]string{ + "collection_name": collectionMetrics.CollectionName, + "database_name": perDbCollectionMetrics.DatabaseName, + "node_tag": collectionsResponse.NodeTag, + "url": r.URL, + } + + if collectionsResponse.PublicServerUrl != nil { + tags["public_server_url"] = *collectionsResponse.PublicServerUrl + } + + fields := map[string]interface{}{ + "documents_count": collectionMetrics.DocumentsCount, + "documents_size_in_bytes": collectionMetrics.DocumentsSizeInBytes, + "revisions_size_in_bytes": collectionMetrics.RevisionsSizeInBytes, + "tombstones_size_in_bytes": collectionMetrics.TombstonesSizeInBytes, + "total_size_in_bytes": collectionMetrics.TotalSizeInBytes, + } + + acc.AddFields("ravendb_collections", fields, tags) + } + } +} + +func prepareDbNamesUrlPart(dbNames []string) string { + if len(dbNames) == 0 { + return "" + } + result := "?" + dbNames[0] + for _, db := range dbNames[1:] { + result += "&name=" + url.QueryEscape(db) + } + + return result +} + +func (r *RavenDB) Init() error { + if r.URL == "" { + r.URL = defaultURL + } + + r.requestUrlServer = r.URL + "/admin/monitoring/v1/server" + r.requestUrlDatabases = r.URL + "/admin/monitoring/v1/databases" + prepareDbNamesUrlPart(r.DbStatsDbs) + r.requestUrlIndexes = r.URL + "/admin/monitoring/v1/indexes" + prepareDbNamesUrlPart(r.IndexStatsDbs) + r.requestUrlCollection = r.URL + "/admin/monitoring/v1/collections" + prepareDbNamesUrlPart(r.IndexStatsDbs) + + err := choice.CheckSlice(r.StatsInclude, []string{"server", "databases", "indexes", "collections"}) + if err != nil { + return err + } + + err = r.ensureClient() + if nil != err { + r.Log.Errorf("Error with Client %s", err) + return err + } + + return nil +} + +func init() { + inputs.Add("ravendb", func() telegraf.Input { + return &RavenDB{ + Timeout: internal.Duration{Duration: defaultTimeout * time.Second}, + StatsInclude: []string{"server", "databases", "indexes", "collections"}, + } + }) +} diff --git a/plugins/inputs/ravendb/ravendb_dto.go b/plugins/inputs/ravendb/ravendb_dto.go new file mode 100644 index 0000000000000..af4012f8ceecd --- /dev/null +++ b/plugins/inputs/ravendb/ravendb_dto.go @@ -0,0 +1,199 @@ +package ravendb + +type serverMetricsResponse struct { + ServerVersion string `json:"ServerVersion"` + ServerFullVersion string `json:"ServerFullVersion"` + UpTimeInSec int32 `json:"UpTimeInSec"` + ServerProcessId int32 `json:"ServerProcessId"` + Backup backupMetrics `json:"Backup"` + Config configurationMetrics `json:"Config"` + Cpu cpuMetrics `json:"Cpu"` + Memory memoryMetrics `json:"Memory"` + Disk diskMetrics `json:"Disk"` + License licenseMetrics `json:"License"` + Network networkMetrics `json:"Network"` + Certificate certificateMetrics `json:"Certificate"` + Cluster clusterMetrics `json:"Cluster"` + Databases allDatabasesMetrics `json:"Databases"` +} + +type backupMetrics struct { + CurrentNumberOfRunningBackups int32 `json:"CurrentNumberOfRunningBackups"` + MaxNumberOfConcurrentBackups int32 `json:"MaxNumberOfConcurrentBackups"` +} + +type configurationMetrics struct { + ServerUrls []string `json:"ServerUrls"` + PublicServerUrl *string `json:"PublicServerUrl"` + TcpServerUrls []string `json:"TcpServerUrls"` + PublicTcpServerUrls []string `json:"PublicTcpServerUrls"` +} + +type cpuMetrics struct { + ProcessUsage float64 `json:"ProcessUsage"` + MachineUsage float64 `json:"MachineUsage"` + MachineIoWait *float64 `json:"MachineIoWait"` + ProcessorCount int32 `json:"ProcessorCount"` + AssignedProcessorCount int32 `json:"AssignedProcessorCount"` + ThreadPoolAvailableWorkerThreads int32 `json:"ThreadPoolAvailableWorkerThreads"` + ThreadPoolAvailableCompletionPortThreads int32 `json:"ThreadPoolAvailableCompletionPortThreads"` +} + +type memoryMetrics struct { + AllocatedMemoryInMb int64 `json:"AllocatedMemoryInMb"` + PhysicalMemoryInMb int64 `json:"PhysicalMemoryInMb"` + InstalledMemoryInMb int64 `json:"InstalledMemoryInMb"` + LowMemorySeverity string `json:"LowMemorySeverity"` + TotalSwapSizeInMb int64 `json:"TotalSwapSizeInMb"` + TotalSwapUsageInMb int64 `json:"TotalSwapUsageInMb"` + WorkingSetSwapUsageInMb int64 `json:"WorkingSetSwapUsageInMb"` + TotalDirtyInMb int64 `json:"TotalDirtyInMb"` +} + +type diskMetrics struct { + SystemStoreUsedDataFileSizeInMb int64 `json:"SystemStoreUsedDataFileSizeInMb"` + SystemStoreTotalDataFileSizeInMb int64 `json:"SystemStoreTotalDataFileSizeInMb"` + TotalFreeSpaceInMb int64 `json:"TotalFreeSpaceInMb"` + RemainingStorageSpacePercentage int64 `json:"RemainingStorageSpacePercentage"` +} + +type licenseMetrics struct { + Type string `json:"Type"` + ExpirationLeftInSec *float64 `json:"ExpirationLeftInSec"` + UtilizedCpuCores int32 `json:"UtilizedCpuCores"` + MaxCores int32 `json:"MaxCores"` +} + +type networkMetrics struct { + TcpActiveConnections int64 `json:"TcpActiveConnections"` + ConcurrentRequestsCount int64 `json:"ConcurrentRequestsCount"` + TotalRequests int64 `json:"TotalRequests"` + RequestsPerSec float64 `json:"RequestsPerSec"` + LastRequestTimeInSec *float64 `json:"LastRequestTimeInSec"` + LastAuthorizedNonClusterAdminRequestTimeInSec *float64 `json:"LastAuthorizedNonClusterAdminRequestTimeInSec"` +} + +type certificateMetrics struct { + ServerCertificateExpirationLeftInSec *float64 `json:"ServerCertificateExpirationLeftInSec"` + WellKnownAdminCertificates []string `json:"WellKnownAdminCertificates"` +} + +type clusterMetrics struct { + NodeTag string `json:"NodeTag"` + NodeState string `json:"NodeState"` + CurrentTerm int64 `json:"CurrentTerm"` + Index int64 `json:"Index"` + Id string `json:"Id"` +} + +type allDatabasesMetrics struct { + TotalCount int32 `json:"TotalCount"` + LoadedCount int32 `json:"LoadedCount"` +} + +type databasesMetricResponse struct { + Results []*databaseMetrics `json:"Results"` + PublicServerUrl *string `json:"PublicServerUrl"` + NodeTag string `json:"NodeTag"` +} + +type databaseMetrics struct { + DatabaseName string `json:"DatabaseName"` + DatabaseId string `json:"DatabaseId"` + UptimeInSec float64 `json:"UptimeInSec"` + TimeSinceLastBackupInSec *float64 `json:"TimeSinceLastBackupInSec"` + + Counts databaseCounts `json:"Counts"` + Statistics databaseStatistics `json:"Statistics"` + + Indexes databaseIndexesMetrics `json:"Indexes"` + Storage databaseStorageMetrics `json:"Storage"` +} + +type databaseCounts struct { + Documents int64 `json:"Documents"` + Revisions int64 `json:"Revisions"` + Attachments int64 `json:"Attachments"` + UniqueAttachments int64 `json:"UniqueAttachments"` + Alerts int64 `json:"Alerts"` + Rehabs int32 `json:"Rehabs"` + PerformanceHints int64 `json:"PerformanceHints"` + ReplicationFactor int32 `json:"ReplicationFactor"` +} + +type databaseStatistics struct { + DocPutsPerSec float64 `json:"DocPutsPerSec"` + MapIndexIndexesPerSec float64 `json:"MapIndexIndexesPerSec"` + MapReduceIndexMappedPerSec float64 `json:"MapReduceIndexMappedPerSec"` + MapReduceIndexReducedPerSec float64 `json:"MapReduceIndexReducedPerSec"` + RequestsPerSec float64 `json:"RequestsPerSec"` + RequestsCount int32 `json:"RequestsCount"` + RequestAverageDurationInMs float64 `json:"RequestAverageDurationInMs"` +} + +type databaseIndexesMetrics struct { + Count int64 `json:"Count"` + StaleCount int32 `json:"StaleCount"` + ErrorsCount int64 `json:"ErrorsCount"` + StaticCount int32 `json:"StaticCount"` + AutoCount int32 `json:"AutoCount"` + IdleCount int32 `json:"IdleCount"` + DisabledCount int32 `json:"DisabledCount"` + ErroredCount int32 `json:"ErroredCount"` +} + +type databaseStorageMetrics struct { + DocumentsAllocatedDataFileInMb int64 `json:"DocumentsAllocatedDataFileInMb"` + DocumentsUsedDataFileInMb int64 `json:"DocumentsUsedDataFileInMb"` + IndexesAllocatedDataFileInMb int64 `json:"IndexesAllocatedDataFileInMb"` + IndexesUsedDataFileInMb int64 `json:"IndexesUsedDataFileInMb"` + TotalAllocatedStorageFileInMb int64 `json:"TotalAllocatedStorageFileInMb"` + TotalFreeSpaceInMb int64 `json:"TotalFreeSpaceInMb"` +} + +type indexesMetricResponse struct { + Results []*perDatabaseIndexMetrics `json:"Results"` + PublicServerUrl *string `json:"PublicServerUrl"` + NodeTag string `json:"NodeTag"` +} + +type perDatabaseIndexMetrics struct { + DatabaseName string `json:"DatabaseName"` + Indexes []*indexMetrics `json:"Indexes"` +} + +type indexMetrics struct { + IndexName string `json:"IndexName"` + Priority string `json:"Priority"` + State string `json:"State"` + Errors int32 `json:"Errors"` + TimeSinceLastQueryInSec *float64 `json:"TimeSinceLastQueryInSec"` + TimeSinceLastIndexingInSec *float64 `json:"TimeSinceLastIndexingInSec"` + LockMode string `json:"LockMode"` + IsInvalid bool `json:"IsInvalid"` + Status string `json:"Status"` + MappedPerSec float64 `json:"MappedPerSec"` + ReducedPerSec float64 `json:"ReducedPerSec"` + Type string `json:"Type"` + EntriesCount int32 `json:"EntriesCount"` +} + +type collectionsMetricResponse struct { + Results []*perDatabaseCollectionMetrics `json:"Results"` + PublicServerUrl *string `json:"PublicServerUrl"` + NodeTag string `json:"NodeTag"` +} + +type perDatabaseCollectionMetrics struct { + DatabaseName string `json:"DatabaseName"` + Collections []*collectionMetrics `json:"Collections"` +} + +type collectionMetrics struct { + CollectionName string `json:"CollectionName"` + DocumentsCount int64 `json:"DocumentsCount"` + TotalSizeInBytes int64 `json:"TotalSizeInBytes"` + DocumentsSizeInBytes int64 `json:"DocumentsSizeInBytes"` + TombstonesSizeInBytes int64 `json:"TombstonesSizeInBytes"` + RevisionsSizeInBytes int64 `json:"RevisionsSizeInBytes"` +} diff --git a/plugins/inputs/ravendb/ravendb_test.go b/plugins/inputs/ravendb/ravendb_test.go new file mode 100644 index 0000000000000..754ece88fd01d --- /dev/null +++ b/plugins/inputs/ravendb/ravendb_test.go @@ -0,0 +1,393 @@ +package ravendb + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +// Test against fully filled data +func TestRavenDBGeneratesMetricsFull(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var jsonFilePath string + + switch r.URL.Path { + case "/admin/monitoring/v1/databases": + jsonFilePath = "testdata/databases_full.json" + case "/admin/monitoring/v1/server": + jsonFilePath = "testdata/server_full.json" + case "/admin/monitoring/v1/indexes": + jsonFilePath = "testdata/indexes_full.json" + case "/admin/monitoring/v1/collections": + jsonFilePath = "testdata/collections_full.json" + + default: + panic(fmt.Sprintf("Cannot handle request for uri %s", r.URL.Path)) + } + + data, err := ioutil.ReadFile(jsonFilePath) + + if err != nil { + panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) + } + + w.Write(data) + })) + defer ts.Close() + + r := &RavenDB{ + URL: ts.URL, + StatsInclude: []string{"server", "databases", "indexes", "collections"}, + Log: testutil.Logger{}, + } + + r.Init() + + acc := &testutil.Accumulator{} + + err := acc.GatherError(r.Gather) + require.NoError(t, err) + + serverFields := map[string]interface{}{ + "server_version": "5.1", + "server_full_version": "5.1.1-custom-51", + "uptime_in_sec": int64(30), + "server_process_id": 26360, + "config_server_urls": "http://127.0.0.1:8080;http://192.168.0.1:8080", + "config_tcp_server_urls": "tcp://127.0.0.1:3888;tcp://192.168.0.1:3888", + "config_public_tcp_server_urls": "tcp://2.3.4.5:3888;tcp://6.7.8.9:3888", + "backup_max_number_of_concurrent_backups": 4, + "backup_current_number_of_running_backups": 2, + "cpu_process_usage": 6.28, + "cpu_machine_usage": 41.05, + "cpu_machine_io_wait": 2.55, + "cpu_processor_count": 8, + "cpu_assigned_processor_count": 7, + "cpu_thread_pool_available_worker_threads": 32766, + "cpu_thread_pool_available_completion_port_threads": 1000, + "memory_allocated_in_mb": 235, + "memory_installed_in_mb": 16384, + "memory_physical_in_mb": 16250, + "memory_low_memory_severity": "None", + "memory_total_swap_size_in_mb": 1024, + "memory_total_swap_usage_in_mb": 456, + "memory_working_set_swap_usage_in_mb": 89, + "memory_total_dirty_in_mb": 1, + "disk_system_store_used_data_file_size_in_mb": 28, + "disk_system_store_total_data_file_size_in_mb": 32, + "disk_total_free_space_in_mb": 52078, + "disk_remaining_storage_space_percentage": 22, + "license_type": "Enterprise", + "license_expiration_left_in_sec": 25466947.5, + "license_utilized_cpu_cores": 8, + "license_max_cores": 256, + "network_tcp_active_connections": 84, + "network_concurrent_requests_count": 1, + "network_total_requests": 3, + "network_requests_per_sec": 0.03322, + "network_last_request_time_in_sec": 0.0264977, + "network_last_authorized_non_cluster_admin_request_time_in_sec": 0.04, + "certificate_server_certificate_expiration_left_in_sec": float64(104), + "certificate_well_known_admin_certificates": "a909502dd82ae41433e6f83886b00d4277a32a7b;4444444444444444444444444444444444444444", + "cluster_node_state": "Leader", + "cluster_current_term": 28, + "cluster_index": 104, + "databases_total_count": 25, + "databases_loaded_count": 2, + } + + serverTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "cluster_id": "6b535a18-558f-4e53-a479-a514efc16aab", + "public_server_url": "http://raven1:8080", + } + + defaultTime := time.Unix(0, 0) + + dbFields := map[string]interface{}{ + "uptime_in_sec": float64(1396), + "time_since_last_backup_in_sec": 104.3, + "counts_documents": 425189, + "counts_revisions": 429605, + "counts_attachments": 17, + "counts_unique_attachments": 16, + "counts_alerts": 2, + "counts_rehabs": 3, + "counts_performance_hints": 5, + "counts_replication_factor": 2, + "statistics_doc_puts_per_sec": 23.4, + "statistics_map_index_indexes_per_sec": 82.5, + "statistics_map_reduce_index_mapped_per_sec": 50.3, + "statistics_map_reduce_index_reduced_per_sec": 85.2, + "statistics_requests_per_sec": 22.5, + "statistics_requests_count": 809, + "statistics_request_average_duration_in_ms": 0.55, + "indexes_count": 7, + "indexes_stale_count": 1, + "indexes_errors_count": 2, + "indexes_static_count": 7, + "indexes_auto_count": 3, + "indexes_idle_count": 4, + "indexes_disabled_count": 5, + "indexes_errored_count": 6, + "storage_documents_allocated_data_file_in_mb": 1024, + "storage_documents_used_data_file_in_mb": 942, + "storage_indexes_allocated_data_file_in_mb": 464, + "storage_indexes_used_data_file_in_mb": 278, + "storage_total_allocated_storage_file_in_mb": 1496, + "storage_total_free_space_in_mb": 52074, + } + + dbTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db2", + "database_id": "06eefe8b-d720-4a8d-a809-2c5af9a4abb5", + "public_server_url": "http://myhost:8080", + } + + indexFields := map[string]interface{}{ + "priority": "Normal", + "state": "Normal", + "errors": 0, + "time_since_last_query_in_sec": 3.4712567, + "time_since_last_indexing_in_sec": 3.4642612, + "lock_mode": "Unlock", + "is_invalid": true, + "status": "Running", + "mapped_per_sec": 102.34, + "reduced_per_sec": 593.23, + "type": "MapReduce", + } + + indexTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "public_server_url": "http://localhost:8080", + "database_name": "db1", + "index_name": "Product/Rating", + } + + collectionFields := map[string]interface{}{ + "documents_count": 830, + "total_size_in_bytes": 2744320, + "documents_size_in_bytes": 868352, + "tombstones_size_in_bytes": 122880, + "revisions_size_in_bytes": 1753088, + } + + collectionTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db1", + "collection_name": "Orders", + "public_server_url": "http://localhost:8080", + } + + serverExpected := testutil.MustMetric("ravendb_server", serverTags, serverFields, defaultTime) + dbExpected := testutil.MustMetric("ravendb_databases", dbTags, dbFields, defaultTime) + indexExpected := testutil.MustMetric("ravendb_indexes", indexTags, indexFields, defaultTime) + collectionsExpected := testutil.MustMetric("ravendb_collections", collectionTags, collectionFields, defaultTime) + + for _, metric := range acc.GetTelegrafMetrics() { + switch metric.Name() { + case "ravendb_server": + testutil.RequireMetricEqual(t, serverExpected, metric, testutil.IgnoreTime()) + case "ravendb_databases": + testutil.RequireMetricEqual(t, dbExpected, metric, testutil.IgnoreTime()) + case "ravendb_indexes": + testutil.RequireMetricEqual(t, indexExpected, metric, testutil.IgnoreTime()) + case "ravendb_collections": + testutil.RequireMetricEqual(t, collectionsExpected, metric, testutil.IgnoreTime()) + } + } +} + +// Test against minimum filled data +func TestRavenDBGeneratesMetricsMin(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var jsonFilePath string + + switch r.URL.Path { + case "/admin/monitoring/v1/databases": + jsonFilePath = "testdata/databases_min.json" + case "/admin/monitoring/v1/server": + jsonFilePath = "testdata/server_min.json" + case "/admin/monitoring/v1/indexes": + jsonFilePath = "testdata/indexes_min.json" + case "/admin/monitoring/v1/collections": + jsonFilePath = "testdata/collections_min.json" + default: + panic(fmt.Sprintf("Cannot handle request for uri %s", r.URL.Path)) + } + + data, err := ioutil.ReadFile(jsonFilePath) + + if err != nil { + panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) + } + + w.Write(data) + })) + defer ts.Close() + + r := &RavenDB{ + URL: ts.URL, + StatsInclude: []string{"server", "databases", "indexes", "collections"}, + Log: testutil.Logger{}, + } + + r.Init() + + acc := &testutil.Accumulator{} + + err := acc.GatherError(r.Gather) + require.NoError(t, err) + + serverFields := map[string]interface{}{ + "server_version": "5.1", + "server_full_version": "5.1.1-custom-51", + "uptime_in_sec": 30, + "server_process_id": 26360, + "config_server_urls": "http://127.0.0.1:8080", + "backup_max_number_of_concurrent_backups": 4, + "backup_current_number_of_running_backups": 2, + "cpu_process_usage": 6.28, + "cpu_machine_usage": 41.07, + "cpu_processor_count": 8, + "cpu_assigned_processor_count": 7, + "cpu_thread_pool_available_worker_threads": 32766, + "cpu_thread_pool_available_completion_port_threads": 1000, + "memory_allocated_in_mb": 235, + "memory_installed_in_mb": 16384, + "memory_physical_in_mb": 16250, + "memory_low_memory_severity": "Low", + "memory_total_swap_size_in_mb": 1024, + "memory_total_swap_usage_in_mb": 456, + "memory_working_set_swap_usage_in_mb": 89, + "memory_total_dirty_in_mb": 1, + "disk_system_store_used_data_file_size_in_mb": 28, + "disk_system_store_total_data_file_size_in_mb": 32, + "disk_total_free_space_in_mb": 52078, + "disk_remaining_storage_space_percentage": 22, + "license_type": "Enterprise", + "license_utilized_cpu_cores": 8, + "license_max_cores": 256, + "network_tcp_active_connections": 84, + "network_concurrent_requests_count": 1, + "network_total_requests": 3, + "network_requests_per_sec": 0.03322, + "cluster_node_state": "Leader", + "cluster_current_term": 28, + "cluster_index": 104, + "databases_total_count": 25, + "databases_loaded_count": 2, + } + + serverTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "cluster_id": "6b535a18-558f-4e53-a479-a514efc16aab", + } + + dbFields := map[string]interface{}{ + "uptime_in_sec": float64(1396), + "counts_documents": 425189, + "counts_revisions": 429605, + "counts_attachments": 17, + "counts_unique_attachments": 16, + "counts_alerts": 2, + "counts_rehabs": 3, + "counts_performance_hints": 5, + "counts_replication_factor": 2, + "statistics_doc_puts_per_sec": 23.4, + "statistics_map_index_indexes_per_sec": 82.5, + "statistics_map_reduce_index_mapped_per_sec": 50.3, + "statistics_map_reduce_index_reduced_per_sec": 85.2, + "statistics_requests_per_sec": 22.5, + "statistics_requests_count": 809, + "statistics_request_average_duration_in_ms": 0.55, + "indexes_count": 7, + "indexes_stale_count": 1, + "indexes_errors_count": 2, + "indexes_static_count": 7, + "indexes_auto_count": 3, + "indexes_idle_count": 4, + "indexes_disabled_count": 5, + "indexes_errored_count": 6, + "storage_documents_allocated_data_file_in_mb": 1024, + "storage_documents_used_data_file_in_mb": 942, + "storage_indexes_allocated_data_file_in_mb": 464, + "storage_indexes_used_data_file_in_mb": 278, + "storage_total_allocated_storage_file_in_mb": 1496, + "storage_total_free_space_in_mb": 52074, + } + + dbTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db2", + "database_id": "06eefe8b-d720-4a8d-a809-2c5af9a4abb5", + } + + indexFields := map[string]interface{}{ + "priority": "Normal", + "state": "Normal", + "errors": 0, + "lock_mode": "Unlock", + "is_invalid": false, + "status": "Running", + "mapped_per_sec": 102.34, + "reduced_per_sec": 593.23, + "type": "MapReduce", + } + + indexTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db1", + "index_name": "Product/Rating", + } + + collectionFields := map[string]interface{}{ + "documents_count": 830, + "total_size_in_bytes": 2744320, + "documents_size_in_bytes": 868352, + "tombstones_size_in_bytes": 122880, + "revisions_size_in_bytes": 1753088, + } + + collectionTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db1", + "collection_name": "Orders", + } + + defaultTime := time.Unix(0, 0) + + serverExpected := testutil.MustMetric("ravendb_server", serverTags, serverFields, defaultTime) + dbExpected := testutil.MustMetric("ravendb_databases", dbTags, dbFields, defaultTime) + indexExpected := testutil.MustMetric("ravendb_indexes", indexTags, indexFields, defaultTime) + collectionsExpected := testutil.MustMetric("ravendb_collections", collectionTags, collectionFields, defaultTime) + + for _, metric := range acc.GetTelegrafMetrics() { + switch metric.Name() { + case "ravendb_server": + testutil.RequireMetricEqual(t, serverExpected, metric, testutil.IgnoreTime()) + case "ravendb_databases": + testutil.RequireMetricEqual(t, dbExpected, metric, testutil.IgnoreTime()) + case "ravendb_indexes": + testutil.RequireMetricEqual(t, indexExpected, metric, testutil.IgnoreTime()) + case "ravendb_collections": + testutil.RequireMetricEqual(t, collectionsExpected, metric, testutil.IgnoreTime()) + } + } +} diff --git a/plugins/inputs/ravendb/testdata/collections_full.json b/plugins/inputs/ravendb/testdata/collections_full.json new file mode 100644 index 0000000000000..db91e90868d9b --- /dev/null +++ b/plugins/inputs/ravendb/testdata/collections_full.json @@ -0,0 +1,19 @@ +{ + "PublicServerUrl": "http://localhost:8080", + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db1", + "Collections": [ + { + "CollectionName": "Orders", + "DocumentsCount": 830, + "TotalSizeInBytes": 2744320, + "DocumentsSizeInBytes": 868352, + "TombstonesSizeInBytes": 122880, + "RevisionsSizeInBytes": 1753088 + } + ] + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/collections_min.json b/plugins/inputs/ravendb/testdata/collections_min.json new file mode 100644 index 0000000000000..edd636d21e202 --- /dev/null +++ b/plugins/inputs/ravendb/testdata/collections_min.json @@ -0,0 +1,19 @@ +{ + "PublicServerUrl": null, + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db1", + "Collections": [ + { + "CollectionName": "Orders", + "DocumentsCount": 830, + "TotalSizeInBytes": 2744320, + "DocumentsSizeInBytes": 868352, + "TombstonesSizeInBytes": 122880, + "RevisionsSizeInBytes": 1753088 + } + ] + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/databases_full.json b/plugins/inputs/ravendb/testdata/databases_full.json new file mode 100644 index 0000000000000..1c74568812575 --- /dev/null +++ b/plugins/inputs/ravendb/testdata/databases_full.json @@ -0,0 +1,49 @@ +{ + "PublicServerUrl": "http://myhost:8080", + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db2", + "DatabaseId": "06eefe8b-d720-4a8d-a809-2c5af9a4abb5", + "UptimeInSec": 1396, + "TimeSinceLastBackupInSec": 104.3, + "Counts": { + "Documents": 425189, + "Revisions": 429605, + "Attachments": 17, + "UniqueAttachments": 16, + "Alerts": 2, + "Rehabs": 3, + "PerformanceHints": 5, + "ReplicationFactor": 2 + }, + "Statistics": { + "DocPutsPerSec": 23.4, + "MapIndexIndexesPerSec": 82.5, + "MapReduceIndexMappedPerSec": 50.3, + "MapReduceIndexReducedPerSec": 85.2, + "RequestsPerSec": 22.5, + "RequestsCount": 809, + "RequestAverageDurationInMs": 0.55 + }, + "Indexes": { + "Count": 7, + "StaleCount": 1, + "ErrorsCount": 2, + "StaticCount": 7, + "AutoCount": 3, + "IdleCount": 4, + "DisabledCount": 5, + "ErroredCount": 6 + }, + "Storage": { + "DocumentsAllocatedDataFileInMb": 1024, + "DocumentsUsedDataFileInMb": 942, + "IndexesAllocatedDataFileInMb": 464, + "IndexesUsedDataFileInMb": 278, + "TotalAllocatedStorageFileInMb": 1496, + "TotalFreeSpaceInMb": 52074 + } + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/databases_min.json b/plugins/inputs/ravendb/testdata/databases_min.json new file mode 100644 index 0000000000000..48a1ccbb6b7ad --- /dev/null +++ b/plugins/inputs/ravendb/testdata/databases_min.json @@ -0,0 +1,49 @@ +{ + "PublicServerUrl": null, + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db2", + "DatabaseId": "06eefe8b-d720-4a8d-a809-2c5af9a4abb5", + "UptimeInSec": 1396, + "TimeSinceLastBackupInSec": null, + "Counts": { + "Documents": 425189, + "Revisions": 429605, + "Attachments": 17, + "UniqueAttachments": 16, + "Alerts": 2, + "Rehabs": 3, + "PerformanceHints": 5, + "ReplicationFactor": 2 + }, + "Statistics": { + "DocPutsPerSec": 23.4, + "MapIndexIndexesPerSec": 82.5, + "MapReduceIndexMappedPerSec": 50.3, + "MapReduceIndexReducedPerSec": 85.2, + "RequestsPerSec": 22.5, + "RequestsCount": 809, + "RequestAverageDurationInMs": 0.55 + }, + "Indexes": { + "Count": 7, + "StaleCount": 1, + "ErrorsCount": 2, + "StaticCount": 7, + "AutoCount": 3, + "IdleCount": 4, + "DisabledCount": 5, + "ErroredCount": 6 + }, + "Storage": { + "DocumentsAllocatedDataFileInMb": 1024, + "DocumentsUsedDataFileInMb": 942, + "IndexesAllocatedDataFileInMb": 464, + "IndexesUsedDataFileInMb": 278, + "TotalAllocatedStorageFileInMb": 1496, + "TotalFreeSpaceInMb": 52074 + } + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/indexes_full.json b/plugins/inputs/ravendb/testdata/indexes_full.json new file mode 100644 index 0000000000000..d67ded7d18800 --- /dev/null +++ b/plugins/inputs/ravendb/testdata/indexes_full.json @@ -0,0 +1,25 @@ +{ + "PublicServerUrl": "http://localhost:8080", + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db1", + "Indexes": [ + { + "IndexName": "Product/Rating", + "Priority": "Normal", + "State": "Normal", + "Errors": 0, + "TimeSinceLastQueryInSec": 3.4712567, + "TimeSinceLastIndexingInSec": 3.4642612, + "LockMode": "Unlock", + "IsInvalid": true, + "Status": "Running", + "MappedPerSec": 102.34, + "ReducedPerSec": 593.23, + "Type": "MapReduce" + } + ] + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/indexes_min.json b/plugins/inputs/ravendb/testdata/indexes_min.json new file mode 100644 index 0000000000000..493bda8b7e799 --- /dev/null +++ b/plugins/inputs/ravendb/testdata/indexes_min.json @@ -0,0 +1,25 @@ +{ + "PublicServerUrl": null, + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db1", + "Indexes": [ + { + "IndexName": "Product/Rating", + "Priority": "Normal", + "State": "Normal", + "Errors": 0, + "TimeSinceLastQueryInSec": null, + "TimeSinceLastIndexingInSec": null, + "LockMode": "Unlock", + "IsInvalid": false, + "Status": "Running", + "MappedPerSec": 102.34, + "ReducedPerSec": 593.23, + "Type": "MapReduce" + } + ] + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/server_full.json b/plugins/inputs/ravendb/testdata/server_full.json new file mode 100644 index 0000000000000..edfbbbf7940dc --- /dev/null +++ b/plugins/inputs/ravendb/testdata/server_full.json @@ -0,0 +1,73 @@ +{ + "ServerVersion": "5.1", + "ServerFullVersion": "5.1.1-custom-51", + "UpTimeInSec": 30, + "ServerProcessId": 26360, + "Config": { + "ServerUrls": [ + "http://127.0.0.1:8080", + "http://192.168.0.1:8080" + ], + "PublicServerUrl": "http://raven1:8080", + "TcpServerUrls": ["tcp://127.0.0.1:3888", "tcp://192.168.0.1:3888"], + "PublicTcpServerUrls": ["tcp://2.3.4.5:3888", "tcp://6.7.8.9:3888"] + }, + "Backup": { + "CurrentNumberOfRunningBackups": 2, + "MaxNumberOfConcurrentBackups": 4 + }, + "Cpu": { + "ProcessUsage": 6.28, + "MachineUsage": 41.05, + "MachineIoWait": 2.55, + "ProcessorCount": 8, + "AssignedProcessorCount": 7, + "ThreadPoolAvailableWorkerThreads": 32766, + "ThreadPoolAvailableCompletionPortThreads": 1000 + }, + "Memory": { + "AllocatedMemoryInMb": 235, + "PhysicalMemoryInMb": 16250, + "InstalledMemoryInMb": 16384, + "LowMemorySeverity": "None", + "TotalSwapSizeInMb": 1024, + "TotalSwapUsageInMb": 456, + "WorkingSetSwapUsageInMb": 89, + "TotalDirtyInMb": 1 + }, + "Disk": { + "SystemStoreUsedDataFileSizeInMb": 28, + "SystemStoreTotalDataFileSizeInMb": 32, + "TotalFreeSpaceInMb": 52078, + "RemainingStorageSpacePercentage": 22 + }, + "License": { + "Type": "Enterprise", + "ExpirationLeftInSec": 25466947.5, + "UtilizedCpuCores": 8, + "MaxCores": 256 + }, + "Network": { + "TcpActiveConnections": 84, + "ConcurrentRequestsCount": 1, + "TotalRequests": 3, + "RequestsPerSec": 0.03322, + "LastRequestTimeInSec": 0.0264977, + "LastAuthorizedNonClusterAdminRequestTimeInSec": 0.04 + }, + "Certificate": { + "ServerCertificateExpirationLeftInSec": 104, + "WellKnownAdminCertificates": ["a909502dd82ae41433e6f83886b00d4277a32a7b", "4444444444444444444444444444444444444444"] + }, + "Cluster": { + "NodeTag": "A", + "NodeState": "Leader", + "CurrentTerm": 28, + "Index": 104, + "Id": "6b535a18-558f-4e53-a479-a514efc16aab" + }, + "Databases": { + "TotalCount": 25, + "LoadedCount": 2 + } +} diff --git a/plugins/inputs/ravendb/testdata/server_min.json b/plugins/inputs/ravendb/testdata/server_min.json new file mode 100644 index 0000000000000..e22bd03d4460d --- /dev/null +++ b/plugins/inputs/ravendb/testdata/server_min.json @@ -0,0 +1,72 @@ +{ + "ServerVersion": "5.1", + "ServerFullVersion": "5.1.1-custom-51", + "UpTimeInSec": 30, + "ServerProcessId": 26360, + "Config": { + "ServerUrls": [ + "http://127.0.0.1:8080" + ], + "PublicServerUrl": null, + "TcpServerUrls": null, + "PublicTcpServerUrls": null + }, + "Backup": { + "CurrentNumberOfRunningBackups": 2, + "MaxNumberOfConcurrentBackups": 4 + }, + "Cpu": { + "ProcessUsage": 6.28, + "MachineUsage": 41.07, + "MachineIoWait": null, + "ProcessorCount": 8, + "AssignedProcessorCount": 7, + "ThreadPoolAvailableWorkerThreads": 32766, + "ThreadPoolAvailableCompletionPortThreads": 1000 + }, + "Memory": { + "AllocatedMemoryInMb": 235, + "PhysicalMemoryInMb": 16250, + "InstalledMemoryInMb": 16384, + "LowMemorySeverity": "Low", + "TotalSwapSizeInMb": 1024, + "TotalSwapUsageInMb": 456, + "WorkingSetSwapUsageInMb": 89, + "TotalDirtyInMb": 1 + }, + "Disk": { + "SystemStoreUsedDataFileSizeInMb": 28, + "SystemStoreTotalDataFileSizeInMb": 32, + "TotalFreeSpaceInMb": 52078, + "RemainingStorageSpacePercentage": 22 + }, + "License": { + "Type": "Enterprise", + "ExpirationLeftInSec": null, + "UtilizedCpuCores": 8, + "MaxCores": 256 + }, + "Network": { + "TcpActiveConnections": 84, + "ConcurrentRequestsCount": 1, + "TotalRequests": 3, + "RequestsPerSec": 0.03322, + "LastRequestTimeInSec": null, + "LastAuthorizedNonClusterAdminRequestTimeInSec": null + }, + "Certificate": { + "ServerCertificateExpirationLeftInSec": null, + "WellKnownAdminCertificates": null + }, + "Cluster": { + "NodeTag": "A", + "NodeState": "Leader", + "CurrentTerm": 28, + "Index": 104, + "Id": "6b535a18-558f-4e53-a479-a514efc16aab" + }, + "Databases": { + "TotalCount": 25, + "LoadedCount": 2 + } +} From 17efd172b70a20a8925182e00c1fdee1512b7be0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 3 Mar 2021 20:56:31 +0100 Subject: [PATCH 259/761] Clearing LGTM alerts and warnings (#8923) --- .lgtm.yml | 2 ++ README.md | 2 +- plugins/inputs/github/github.go | 10 +++++----- plugins/inputs/jolokia2/client.go | 17 +++++++++-------- .../inputs/riemann_listener/riemann_listener.go | 3 +-- plugins/outputs/http/http.go | 3 +++ .../yandex_cloud_monitoring.go | 1 - plugins/parsers/prometheus/parser.go | 3 +-- 8 files changed, 22 insertions(+), 19 deletions(-) create mode 100644 .lgtm.yml diff --git a/.lgtm.yml b/.lgtm.yml new file mode 100644 index 0000000000000..5b0b2e3367a14 --- /dev/null +++ b/.lgtm.yml @@ -0,0 +1,2 @@ +queries: + - exclude: go/disabled-certificate-check diff --git a/README.md b/README.md index 2e88d83bb7415..b1cf1ecf4b7fa 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) +# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) [![Total alerts](https://img.shields.io/lgtm/alerts/g/influxdata/telegraf.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/influxdata/telegraf/alerts/) [![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack) Telegraf is an agent for collecting, processing, aggregating, and writing metrics. diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go index e9b48bc389709..c7e3888f9c4e6 100644 --- a/plugins/inputs/github/github.go +++ b/plugins/inputs/github/github.go @@ -146,22 +146,22 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { fields := getFields(repositoryInfo) for _, field := range g.AdditionalFields { - addFields := make(map[string]interface{}) switch field { case "pull-requests": // Pull request properties - addFields, err = g.getPullRequestFields(ctx, owner, repository) + addFields, err := g.getPullRequestFields(ctx, owner, repository) if err != nil { acc.AddError(err) continue } + + for k, v := range addFields { + fields[k] = v + } default: acc.AddError(fmt.Errorf("unknown additional field %q", field)) continue } - for k, v := range addFields { - fields[k] = v - } } acc.AddFields("github_repository", fields, tags, now) diff --git a/plugins/inputs/jolokia2/client.go b/plugins/inputs/jolokia2/client.go index 1cde65bcbe513..41ebd4f8af872 100644 --- a/plugins/inputs/jolokia2/client.go +++ b/plugins/inputs/jolokia2/client.go @@ -119,8 +119,8 @@ func NewClient(url string, config *ClientConfig) (*Client, error) { } func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { - jrequests := makeJolokiaRequests(requests, c.config.ProxyConfig) - requestBody, err := json.Marshal(jrequests) + jRequests := makeJolokiaRequests(requests, c.config.ProxyConfig) + requestBody, err := json.Marshal(jRequests) if err != nil { return nil, err } @@ -132,7 +132,8 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(requestBody)) if err != nil { - return nil, fmt.Errorf("unable to create new request '%s': %s", requestURL, err) + //err is not contained in returned error - it may contain sensitive data (password) which should not be logged + return nil, fmt.Errorf("unable to create new request for: '%s'", c.URL) } req.Header.Add("Content-type", "application/json") @@ -144,7 +145,7 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + return nil, fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", c.URL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } @@ -153,12 +154,12 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { return nil, err } - var jresponses []jolokiaResponse - if err = json.Unmarshal([]byte(responseBody), &jresponses); err != nil { - return nil, fmt.Errorf("Error decoding JSON response: %s: %s", err, responseBody) + var jResponses []jolokiaResponse + if err = json.Unmarshal(responseBody, &jResponses); err != nil { + return nil, fmt.Errorf("decoding JSON response: %s: %s", err, responseBody) } - return makeReadResponses(jresponses), nil + return makeReadResponses(jResponses), nil } func makeJolokiaRequests(rrequests []ReadRequest, proxyConfig *ProxyConfig) []jolokiaRequest { diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index 45d1ef4db27f2..50ef6a9a880e6 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -170,8 +170,7 @@ func (rsl *riemannListener) read(conn net.Conn) { for { if rsl.ReadTimeout != nil && rsl.ReadTimeout.Duration > 0 { - - err = conn.SetDeadline(time.Now().Add(rsl.ReadTimeout.Duration)) + conn.SetDeadline(time.Now().Add(rsl.ReadTimeout.Duration)) } messagePb := &riemangoProto.Msg{} diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 68e0a135a9a3b..53d51f3e7c0e1 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -218,6 +218,9 @@ func (h *HTTP) write(reqBody []byte) error { if resp.StatusCode < 200 || resp.StatusCode >= 300 { return fmt.Errorf("when writing to [%s] received status code: %d", h.URL, resp.StatusCode) } + if err != nil { + return fmt.Errorf("when writing to [%s] received error: %v", h.URL, err) + } return nil } diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go index c51a7f3b246bf..95b0bda0f44ea 100644 --- a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go @@ -156,7 +156,6 @@ func (a *YandexCloudMonitoring) Write(metrics []telegraf.Metric) error { if err != nil { return err } - body = append(body, jsonBytes...) body = append(jsonBytes, '\n') return a.send(body) } diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go index 14f0eef90ca40..bf5ebf7f12f19 100644 --- a/plugins/parsers/prometheus/parser.go +++ b/plugins/parsers/prometheus/parser.go @@ -76,8 +76,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { } else { // standard metric // reading fields - fields := make(map[string]interface{}) - fields = getNameAndValue(m, metricName) + fields := getNameAndValue(m, metricName) // converting to telegraf metric if len(fields) > 0 { t := getTimestamp(m, now) From 431d06acc0dc6b3a6bdc58ef06a37fef5698d1a7 Mon Sep 17 00:00:00 2001 From: reimda Date: Wed, 3 Mar 2021 13:26:09 -0700 Subject: [PATCH 260/761] Add XML parser using XPath queries (#8931) --- config/config.go | 34 +- docs/DATA_FORMATS_INPUT.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 2 + go.mod | 10 +- go.sum | 34 +- plugins/parsers/registry.go | 36 + plugins/parsers/xml/README.md | 345 +++++ plugins/parsers/xml/parser.go | 422 ++++++ plugins/parsers/xml/parser_test.go | 1175 +++++++++++++++++ plugins/parsers/xml/testcases/multisensor.xml | 31 + .../testcases/multisensor_explicit_basic.conf | 17 + .../testcases/multisensor_explicit_batch.conf | 28 + .../multisensor_selection_batch.conf | 23 + .../parsers/xml/testcases/openweathermap.conf | 28 + .../xml/testcases/openweathermap_5d.xml | 38 + testutil/file.go | 84 ++ 16 files changed, 2298 insertions(+), 10 deletions(-) create mode 100644 plugins/parsers/xml/README.md create mode 100644 plugins/parsers/xml/parser.go create mode 100644 plugins/parsers/xml/parser_test.go create mode 100644 plugins/parsers/xml/testcases/multisensor.xml create mode 100644 plugins/parsers/xml/testcases/multisensor_explicit_basic.conf create mode 100644 plugins/parsers/xml/testcases/multisensor_explicit_batch.conf create mode 100644 plugins/parsers/xml/testcases/multisensor_selection_batch.conf create mode 100644 plugins/parsers/xml/testcases/openweathermap.conf create mode 100644 plugins/parsers/xml/testcases/openweathermap_5d.xml create mode 100644 testutil/file.go diff --git a/config/config.go b/config/config.go index 560d8a5cf85a6..58483428adb36 100644 --- a/config/config.go +++ b/config/config.go @@ -1268,7 +1268,14 @@ func (c *Config) buildParser(name string, tbl *ast.Table) (parsers.Parser, error if err != nil { return nil, err } - return parsers.NewParser(config) + parser, err := parsers.NewParser(config) + if err != nil { + return nil, err + } + logger := models.NewLogger("parsers", config.DataFormat, name) + models.SetLoggerOnPlugin(parser, logger) + + return parser, nil } func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { @@ -1335,6 +1342,28 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys) + //for XML parser + if node, ok := tbl.Fields["xml"]; ok { + if subtbls, ok := node.([]*ast.Table); ok { + pc.XMLConfig = make([]parsers.XMLConfig, len(subtbls)) + for i, subtbl := range subtbls { + subcfg := pc.XMLConfig[i] + c.getFieldString(subtbl, "metric_name", &subcfg.MetricQuery) + c.getFieldString(subtbl, "metric_selection", &subcfg.Selection) + c.getFieldString(subtbl, "timestamp", &subcfg.Timestamp) + c.getFieldString(subtbl, "timestamp_format", &subcfg.TimestampFmt) + c.getFieldStringMap(subtbl, "tags", &subcfg.Tags) + c.getFieldStringMap(subtbl, "fields", &subcfg.Fields) + c.getFieldStringMap(subtbl, "fields_int", &subcfg.FieldsInt) + c.getFieldString(subtbl, "field_selection", &subcfg.FieldSelection) + c.getFieldBool(subtbl, "field_name_expansion", &subcfg.FieldNameExpand) + c.getFieldString(subtbl, "field_name", &subcfg.FieldNameQuery) + c.getFieldString(subtbl, "field_value", &subcfg.FieldValueQuery) + pc.XMLConfig[i] = subcfg + } + } + } + pc.MetricName = name if c.hasErrs() { @@ -1439,7 +1468,7 @@ func (c *Config) missingTomlField(typ reflect.Type, key string) error { "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", "tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates", - "wavefront_source_override", "wavefront_use_strict": + "wavefront_source_override", "wavefront_use_strict", "xml": // ignore fields that are common to all plugins. default: @@ -1545,6 +1574,7 @@ func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string, target *[ } } } + func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string, target *[]models.TagFilter) { if node, ok := tbl.Fields[fieldName]; ok { if subtbl, ok := node.(*ast.Table); ok { diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index dbcb283a10cc8..3e7dd107becf5 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -17,6 +17,7 @@ Protocol or in JSON format. - [Prometheus](/plugins/parsers/prometheus) - [Value](/plugins/parsers/value), ie: 45 or "booyah" - [Wavefront](/plugins/parsers/wavefront) +- [XML](/plugins/parsers/xml) Any input plugin containing the `data_format` option can use it to select the desired parser: diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index ee4cbb665870a..66e19e2b49683 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -21,6 +21,8 @@ following works: - github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE) - github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING) - github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) +- github.com/antchfx/xmlquery [MIT License](https://github.com/antchfx/xmlquery/blob/master/LICENSE) +- github.com/antchfx/xpath [MIT License](https://github.com/antchfx/xpath/blob/master/LICENSE) - github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) - github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE) - github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING) diff --git a/go.mod b/go.mod index d8e19b95a8cc1..fcc1fc6cc662f 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,8 @@ require ( github.com/aerospike/aerospike-client-go v1.27.0 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 + github.com/antchfx/xmlquery v1.3.3 + github.com/antchfx/xpath v1.1.11 github.com/apache/thrift v0.12.0 github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 @@ -118,7 +120,7 @@ require ( github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/sensu/sensu-go/api/core/v2 v2.6.0 - github.com/shirou/gopsutil v2.20.9+incompatible + github.com/shirou/gopsutil v3.20.11+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/signalfx/golib/v3 v3.3.0 github.com/sirupsen/logrus v1.6.0 @@ -141,11 +143,11 @@ require ( go.starlark.net v0.0.0-20200901195727-6e684ef5eeee golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/net v0.0.0-20201021035429-f5854403a974 + golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 - golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f - golang.org/x/text v0.3.3 + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 + golang.org/x/text v0.3.4 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.20.0 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 diff --git a/go.sum b/go.sum index 7085775b0f186..b51f150f6182e 100644 --- a/go.sum +++ b/go.sum @@ -101,6 +101,12 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1C github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= +github.com/antchfx/xmlquery v1.3.3 h1:HYmadPG0uz8CySdL68rB4DCLKXz2PurCjS3mnkVF4CQ= +github.com/antchfx/xmlquery v1.3.3/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= +github.com/antchfx/xpath v1.1.10 h1:cJ0pOvEdN/WvYXxvRrzQH9x5QWKpzHacYO8qzCcDYAg= +github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= +github.com/antchfx/xpath v1.1.11 h1:WOFtK8TVAjLm3lbgqeP0arlHpvCEeTANeWZ/csPpJkQ= +github.com/antchfx/xpath v1.1.11/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -406,10 +412,10 @@ github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGU github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q= github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0= -github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg= github.com/jaegertracing/jaeger v1.15.1 h1:7QzNAXq+4ko9GtCjozDNAp2uonoABu+B2Rk94hjQcp4= github.com/jaegertracing/jaeger v1.15.1/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOOxihXx0+53llXI= +github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0= +github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -617,8 +623,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/sensu/sensu-go/api/core/v2 v2.6.0 h1:hEKPHFZZNDuWTlKr7Kgm2yog65ZdkBUqNesE5qaWEGo= github.com/sensu/sensu-go/api/core/v2 v2.6.0/go.mod h1:97IK4ZQuvVjWvvoLkp+NgrD6ot30WDRz3LEbFUc/N34= github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v2.20.9+incompatible h1:msXs2frUV+O/JLva9EDLpuJ84PrFsdCTCQex8PUdtkQ= -github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.20.11+incompatible h1:LJr4ZQK4mPpIV5gOa4jCOKOGb4ty4DZO54I4FGqIpto= +github.com/shirou/gopsutil v3.20.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -785,10 +791,21 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc h1:zK/HqS5bZxDptfPJNq8v7vJfXtkU7r9TLIoSr1bXaP4= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 h1:lwlPPsmjDKK0J6eG6xDWd5XPehI0R024zxjDnw3esPA= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -834,12 +851,18 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -847,6 +870,9 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 54edf3300b612..44cde6c85b7ba 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -17,6 +17,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/prometheus" "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/plugins/parsers/wavefront" + "github.com/influxdata/telegraf/plugins/parsers/xml" ) type ParserFunc func() (Parser, error) @@ -150,6 +151,13 @@ type Config struct { // FormData configuration FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys"` + + // XML configuration + XMLConfig []XMLConfig `toml:"xml"` +} + +type XMLConfig struct { + xml.Config } // NewParser returns a Parser interface based on the given config. @@ -237,6 +245,8 @@ func NewParser(config *Config) (Parser, error) { ) case "prometheus": parser, err = NewPrometheusParser(config.DefaultTags) + case "xml": + parser, err = NewXMLParser(config.MetricName, config.DefaultTags, config.XMLConfig) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } @@ -350,3 +360,29 @@ func NewPrometheusParser(defaultTags map[string]string) (Parser, error) { DefaultTags: defaultTags, }, nil } + +func NewXMLParser(metricName string, defaultTags map[string]string, xmlConfigs []XMLConfig) (Parser, error) { + // Convert the config formats which is a one-to-one copy + configs := make([]xml.Config, len(xmlConfigs)) + for i, cfg := range xmlConfigs { + configs[i].MetricName = metricName + configs[i].MetricQuery = cfg.MetricQuery + configs[i].Selection = cfg.Selection + configs[i].Timestamp = cfg.Timestamp + configs[i].TimestampFmt = cfg.TimestampFmt + configs[i].Tags = cfg.Tags + configs[i].Fields = cfg.Fields + configs[i].FieldsInt = cfg.FieldsInt + + configs[i].FieldSelection = cfg.FieldSelection + configs[i].FieldNameQuery = cfg.FieldNameQuery + configs[i].FieldValueQuery = cfg.FieldValueQuery + + configs[i].FieldNameExpand = cfg.FieldNameExpand + } + + return &xml.Parser{ + Configs: configs, + DefaultTags: defaultTags, + }, nil +} diff --git a/plugins/parsers/xml/README.md b/plugins/parsers/xml/README.md new file mode 100644 index 0000000000000..93b150703c1bc --- /dev/null +++ b/plugins/parsers/xml/README.md @@ -0,0 +1,345 @@ +# XML + +The XML data format parser parses a [XML][xml] string into metric fields using [XPath][xpath] expressions. For supported +XPath functions check [the underlying XPath library][xpath lib]. + +**NOTE:** The type of fields are specified using [XPath functions][xpath lib]. The only exception are *integer* fields +that need to be specified in a `fields_int` section. + +### Configuration + +```toml +[[inputs.file]] + files = ["example.xml"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "xml" + + ## Multiple parsing sections are allowed + [[inputs.file.xml]] + ## Optional: XPath-query to select a subset of nodes from the XML document. + #metric_selection = "/Bus/child::Sensor" + + ## Optional: XPath-query to set the metric (measurement) name. + #metric_name = "string('example')" + + ## Optional: Query to extract metric timestamp. + ## If not specified the time of execution is used. + #timestamp = "/Gateway/Timestamp" + ## Optional: Format of the timestamp determined by the query above. + ## This can be any of "unix", "unix_ms", "unix_us", "unix_ns" or a valid Golang + ## time format. If not specified, a "unix" timestamp (in seconds) is expected. + #timestamp_format = "2006-01-02T15:04:05Z" + + ## Tag definitions using the given XPath queries. + [inputs.file.xml.tags] + name = "substring-after(Sensor/@name, ' ')" + device = "string('the ultimate sensor')" + + ## Integer field definitions using XPath queries. + [inputs.file.xml.fields_int] + consumers = "Variable/@consumers" + + ## Non-integer field definitions using XPath queries. + ## The field type is defined using XPath expressions such as number(), boolean() or string(). If no conversion is performed the field will be of type string. + [inputs.file.xml.fields] + temperature = "number(Variable/@temperature)" + power = "number(Variable/@power)" + frequency = "number(Variable/@frequency)" + ok = "Mode != 'ok'" +``` + +A configuration can contain muliple *xml* subsections for e.g. the file plugin to process the xml-string multiple times. +Consult the [XPath syntax][xpath] and the [underlying library's functions][xpath lib] for details and help regarding XPath queries. + +Alternatively to the configuration above, fields can also be specified in a batch way. So contrary to specify the fields +in a section, you can define a `name` and a `value` selector used to determine the name and value of the fields in the +metric. +```toml +[[inputs.file]] + files = ["example.xml"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "xml" + + ## Multiple parsing sections are allowed + [[inputs.file.xml]] + ## Optional: XPath-query to select a subset of nodes from the XML document. + metric_selection = "/Bus/child::Sensor" + + ## Optional: XPath-query to set the metric (measurement) name. + #metric_name = "string('example')" + + ## Optional: Query to extract metric timestamp. + ## If not specified the time of execution is used. + #timestamp = "/Gateway/Timestamp" + ## Optional: Format of the timestamp determined by the query above. + ## This can be any of "unix", "unix_ms", "unix_us", "unix_ns" or a valid Golang + ## time format. If not specified, a "unix" timestamp (in seconds) is expected. + #timestamp_format = "2006-01-02T15:04:05Z" + + ## Field specifications using a selector. + field_selection = "child::*" + ## Optional: Queries to specify field name and value. + ## These options are only to be used in combination with 'field_selection'! + ## By default the node name and node content is used if a field-selection + ## is specified. + #field_name = "name()" + #field_value = "." + + ## Optional: Expand field names relative to the selected node + ## This allows to flatten out nodes with non-unique names in the subtree + #field_name_expansion = false + + ## Tag definitions using the given XPath queries. + [inputs.file.xml.tags] + name = "substring-after(Sensor/@name, ' ')" + device = "string('the ultimate sensor')" + +``` +*Please note*: The resulting fields are _always_ of type string! + +It is also possible to specify a mixture of the two alternative ways of specifying fields. + +#### metric_selection (optional) + +You can specify a [XPath][xpath] query to select a subset of nodes from the XML document, each used to generate a new +metrics with the specified fields, tags etc. + +For relative queries in subsequent queries they are relative to the `metric_selection`. To specify absolute paths, please start the query with a slash (`/`). + +Specifying `metric_selection` is optional. If not specified all relative queries are relative to the root node of the XML document. + +#### metric_name (optional) + +By specifying `metric_name` you can override the metric/measurement name with the result of the given [XPath][xpath] query. If not specified, the default metric name is used. + +#### timestamp, timestamp_format (optional) + +By default the current time will be used for all created metrics. To set the time from values in the XML document you can specify a [XPath][xpath] query in `timestamp` and set the format in `timestamp_format`. + +The `timestamp_format` can be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or +an accepted [Go "reference time"][time const]. Consult the Go [time][time parse] package for details and additional examples on how to set the time format. +If `timestamp_format` is omitted `unix` format is assumed as result of the `timestamp` query. + +#### tags sub-section + +[XPath][xpath] queries in the `tag name = query` format to add tags to the metrics. The specified path can be absolute (starting with `/`) or relative. Relative paths use the currently selected node as reference. + +**NOTE:** Results of tag-queries will always be converted to strings. + +#### fields_int sub-section + +[XPath][xpath] queries in the `field name = query` format to add integer typed fields to the metrics. The specified path can be absolute (starting with `/`) or relative. Relative paths use the currently selected node as reference. + +**NOTE:** Results of field_int-queries will always be converted to **int64**. The conversion will fail in case the query result is not convertible! + +#### fields sub-section + +[XPath][xpath] queries in the `field name = query` format to add non-integer fields to the metrics. The specified path can be absolute (starting with `/`) or relative. Relative paths use the currently selected node as reference. + +The type of the field is specified in the [XPath][xpath] query using the type conversion functions of XPath such as `number()`, `boolean()` or `string()` +If no conversion is performed in the query the field will be of type string. + +**NOTE: Path conversion functions will always succeed even if you convert a text to float!** + + +#### field_selection, field_name, field_value (optional) + +You can specify a [XPath][xpath] query to select a set of nodes forming the fields of the metric. The specified path can be absolute (starting with `/`) or relative to the currently selected node. Each node selected by `field_selection` forms a new field within the metric. + +The *name* and the *value* of each field can be specified using the optional `field_name` and `field_value` queries. The queries are relative to the selected field if not starting with `/`. If not specified the field's *name* defaults to the node name and the field's *value* defaults to the content of the selected field node. +**NOTE**: `field_name` and `field_value` queries are only evaluated if a `field_selection` is specified. + +Specifying `field_selection` is optional. This is an alternative way to specify fields especially for documents where the node names are not known a priori or if there is a large number of fields to be specified. These options can also be combined with the field specifications above. + +**NOTE: Path conversion functions will always succeed even if you convert a text to float!** + +#### field_name_expansion (optional) + +When *true*, field names selected with `field_selection` are expanded to a *path* relative to the *selected node*. This +is necessary if we e.g. select all leaf nodes as fields and those leaf nodes do not have unique names. That is in case +you have duplicate names in the fields you select you should set this to `true`. + +### Examples + +This `example.xml` file is used in the configuration examples below: +```xml + + + Main Gateway + 2020-08-01T15:04:03Z + 12 + ok + + + + + + + + + busy + + + + + + + standby + + + + + + + error + + +``` + +#### Basic Parsing + +This example shows the basic usage of the xml parser. + +Config: +```toml +[[inputs.file]] + files = ["example.xml"] + data_format = "xml" + + [[inputs.file.xml]] + [inputs.file.xml.tags] + gateway = "substring-before(/Gateway/Name, ' ')" + + [inputs.file.xml.fields_int] + seqnr = "/Gateway/Sequence" + + [inputs.file.xml.fields] + ok = "/Gateway/Status = 'ok'" +``` + +Output: +``` +file,gateway=Main,host=Hugin seqnr=12i,ok=true 1598610830000000000 +``` + +In the *tags* definition the XPath function `substring-before()` is used to only extract the sub-string before the space. To get the integer value of `/Gateway/Sequence` we have to use the *fields_int* section as there is no XPath expression to convert node values to integers (only float). +The `ok` field is filled with a boolean by specifying a query comparing the query result of `/Gateway/Status` with the string *ok*. Use the type conversions available in the XPath syntax to specify field types. + +#### Time and metric names + +This is an example for using time and name of the metric from the XML document itself. + +Config: +```toml +[[inputs.file]] + files = ["example.xml"] + data_format = "xml" + + [[inputs.file.xml]] + metric_name = "name(/Gateway/Status)" + + timestamp = "/Gateway/Timestamp" + timestamp_format = "2006-01-02T15:04:05Z" + + [inputs.file.xml.tags] + gateway = "substring-before(/Gateway/Name, ' ')" + + [inputs.file.xml.fields] + ok = "/Gateway/Status = 'ok'" +``` + +Output: +``` +Status,gateway=Main,host=Hugin ok=true 1596294243000000000 +``` +Additionally to the basic parsing example, the metric name is defined as the name of the `/Gateway/Status` node and the timestamp is derived from the XML document instead of using the execution time. + +#### Multi-node selection + +For XML documents containing metrics for e.g. multiple devices (like `Sensor`s in the *example.xml*), multiple metrics can be generated using node selection. This example shows how to generate a metric for each *Sensor* in the example. + +Config: +```toml +[[inputs.file]] + files = ["example.xml"] + data_format = "xml" + + [[inputs.file.xml]] + metric_selection = "/Bus/child::Sensor" + + metric_name = "string('sensors')" + + timestamp = "/Gateway/Timestamp" + timestamp_format = "2006-01-02T15:04:05Z" + + [inputs.file.xml.tags] + name = "substring-after(@name, ' ')" + + [inputs.file.xml.fields_int] + consumers = "Variable/@consumers" + + [inputs.file.xml.fields] + temperature = "number(Variable/@temperature)" + power = "number(Variable/@power)" + frequency = "number(Variable/@frequency)" + ok = "Mode != 'error'" + +``` + +Output: +``` +sensors,host=Hugin,name=Facility\ A consumers=3i,frequency=49.78,ok=true,power=123.4,temperature=20 1596294243000000000 +sensors,host=Hugin,name=Facility\ B consumers=1i,frequency=49.78,ok=true,power=14.3,temperature=23.1 1596294243000000000 +sensors,host=Hugin,name=Facility\ C consumers=0i,frequency=49.78,ok=false,power=0.02,temperature=19.7 1596294243000000000 +``` + +Using the `metric_selection` option we select all `Sensor` nodes in the XML document. Please note that all field and tag definitions are relative to these selected nodes. An exception is the timestamp definition which is relative to the root node of the XML document. + +#### Batch field processing with multi-node selection + +For XML documents containing metrics with a large number of fields or where the fields are not known before (e.g. an unknown set of `Variable` nodes in the *example.xml*), field selectors can be used. This example shows how to generate a metric for each *Sensor* in the example with fields derived from the *Variable* nodes. + +Config: +```toml +[[inputs.file]] + files = ["example.xml"] + data_format = "xml" + + [[inputs.file.xml]] + metric_selection = "/Bus/child::Sensor" + metric_name = "string('sensors')" + + timestamp = "/Gateway/Timestamp" + timestamp_format = "2006-01-02T15:04:05Z" + + field_selection = "child::Variable" + field_name = "name(@*[1])" + field_value = "number(@*[1])" + + [inputs.file.xml.tags] + name = "substring-after(@name, ' ')" +``` + +Output: +``` +sensors,host=Hugin,name=Facility\ A consumers=3,frequency=49.78,power=123.4,temperature=20 1596294243000000000 +sensors,host=Hugin,name=Facility\ B consumers=1,frequency=49.78,power=14.3,temperature=23.1 1596294243000000000 +sensors,host=Hugin,name=Facility\ C consumers=0,frequency=49.78,power=0.02,temperature=19.7 1596294243000000000 +``` + +Using the `metric_selection` option we select all `Sensor` nodes in the XML document. For each *Sensor* we then use `field_selection` to select all child nodes of the sensor as *field-nodes* Please note that the field selection is relative to the selected nodes. +For each selected *field-node* we use `field_name` and `field_value` to determining the field's name and value, respectively. The `field_name` derives the name of the first attribute of the node, while `field_value` derives the value of the first attribute and converts the result to a number. + +[xpath lib]: https://github.com/antchfx/xpath +[xml]: https://www.w3.org/XML/ +[xpath]: https://www.w3.org/TR/xpath/ +[time const]: https://golang.org/pkg/time/#pkg-constants +[time parse]: https://golang.org/pkg/time/#Parse diff --git a/plugins/parsers/xml/parser.go b/plugins/parsers/xml/parser.go new file mode 100644 index 0000000000000..bbe99286bafdd --- /dev/null +++ b/plugins/parsers/xml/parser.go @@ -0,0 +1,422 @@ +package xml + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/antchfx/xmlquery" + "github.com/antchfx/xpath" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +type Parser struct { + Configs []Config + DefaultTags map[string]string + Log telegraf.Logger +} + +type Config struct { + MetricName string + MetricQuery string `toml:"metric_name"` + Selection string `toml:"metric_selection"` + Timestamp string `toml:"timestamp"` + TimestampFmt string `toml:"timestamp_format"` + Tags map[string]string `toml:"tags"` + Fields map[string]string `toml:"fields"` + FieldsInt map[string]string `toml:"fields_int"` + + FieldSelection string `toml:"field_selection"` + FieldNameQuery string `toml:"field_name"` + FieldValueQuery string `toml:"field_value"` + FieldNameExpand bool `toml:"field_name_expansion"` +} + +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { + t := time.Now() + + // Parse the XML + doc, err := xmlquery.Parse(strings.NewReader(string(buf))) + if err != nil { + return nil, err + } + + // Queries + metrics := make([]telegraf.Metric, 0) + for _, config := range p.Configs { + if len(config.Selection) == 0 { + config.Selection = "/" + } + selectedNodes, err := xmlquery.QueryAll(doc, config.Selection) + if err != nil { + return nil, err + } + if len(selectedNodes) < 1 || selectedNodes[0] == nil { + p.debugEmptyQuery("metric selection", doc, config.Selection) + return nil, fmt.Errorf("cannot parse with empty selection node") + } + + for _, selected := range selectedNodes { + m, err := p.parseQuery(t, doc, selected, config) + if err != nil { + return metrics, err + } + + metrics = append(metrics, m) + } + } + + return metrics, nil +} + +func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { + t := time.Now() + + switch len(p.Configs) { + case 0: + return nil, nil + case 1: + config := p.Configs[0] + + doc, err := xmlquery.Parse(strings.NewReader(line)) + if err != nil { + return nil, err + } + + selected := doc + if len(config.Selection) > 0 { + selectedNodes, err := xmlquery.QueryAll(doc, config.Selection) + if err != nil { + return nil, err + } + if len(selectedNodes) < 1 || selectedNodes[0] == nil { + p.debugEmptyQuery("metric selection", doc, config.Selection) + return nil, fmt.Errorf("cannot parse line with empty selection") + } else if len(selectedNodes) != 1 { + return nil, fmt.Errorf("cannot parse line with multiple selected nodes (%d)", len(selectedNodes)) + } + selected = selectedNodes[0] + } + + return p.parseQuery(t, doc, selected, config) + } + return nil, fmt.Errorf("cannot parse line with multiple (%d) configurations", len(p.Configs)) +} + +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, config Config) (telegraf.Metric, error) { + var timestamp time.Time + var metricname string + + // Determine the metric name. If a query was specified, use the result of this query and the default metric name + // otherwise. + metricname = config.MetricName + if len(config.MetricQuery) > 0 { + v, err := executeQuery(doc, selected, config.MetricQuery) + if err != nil { + return nil, fmt.Errorf("failed to query metric name: %v", err) + } + metricname = v.(string) + } + + // By default take the time the parser was invoked and override the value + // with the queried timestamp if an expresion was specified. + timestamp = starttime + if len(config.Timestamp) > 0 { + v, err := executeQuery(doc, selected, config.Timestamp) + if err != nil { + return nil, fmt.Errorf("failed to query timestamp: %v", err) + } + switch v.(type) { + case string: + // Parse the string with the given format or assume the string to contain + // a unix timestamp in seconds if no format is given. + if len(config.TimestampFmt) < 1 || strings.HasPrefix(config.TimestampFmt, "unix") { + var nanoseconds int64 + + t, err := strconv.ParseFloat(v.(string), 64) + if err != nil { + return nil, fmt.Errorf("failed to parse unix timestamp: %v", err) + } + + switch config.TimestampFmt { + case "unix_ns": + nanoseconds = int64(t) + case "unix_us": + nanoseconds = int64(t * 1e3) + case "unix_ms": + nanoseconds = int64(t * 1e6) + default: + nanoseconds = int64(t * 1e9) + } + timestamp = time.Unix(0, nanoseconds) + } else { + timestamp, err = time.Parse(config.TimestampFmt, v.(string)) + if err != nil { + return nil, fmt.Errorf("failed to query timestamp format: %v", err) + } + } + case float64: + // Assume the value to contain a timestamp in seconds and fractions thereof. + timestamp = time.Unix(0, int64(v.(float64)*1e9)) + default: + return nil, fmt.Errorf("unknown format '%T' for timestamp query '%v'", v, config.Timestamp) + } + } + + // Query tags and add default ones + tags := make(map[string]string) + for name, query := range config.Tags { + // Execute the query and cast the returned values into strings + v, err := executeQuery(doc, selected, query) + if err != nil { + return nil, fmt.Errorf("failed to query tag '%s': %v", name, err) + } + switch v.(type) { + case string: + tags[name] = v.(string) + case bool: + tags[name] = strconv.FormatBool(v.(bool)) + case float64: + tags[name] = strconv.FormatFloat(v.(float64), 'G', -1, 64) + default: + return nil, fmt.Errorf("unknown format '%T' for tag '%s'", v, name) + } + } + for name, v := range p.DefaultTags { + tags[name] = v + } + + // Query fields + fields := make(map[string]interface{}) + for name, query := range config.FieldsInt { + // Execute the query and cast the returned values into integers + v, err := executeQuery(doc, selected, query) + if err != nil { + return nil, fmt.Errorf("failed to query field (int) '%s': %v", name, err) + } + switch v.(type) { + case string: + fields[name], err = strconv.ParseInt(v.(string), 10, 54) + if err != nil { + return nil, fmt.Errorf("failed to parse field (int) '%s': %v", name, err) + } + case bool: + fields[name] = int64(0) + if v.(bool) { + fields[name] = int64(1) + } + case float64: + fields[name] = int64(v.(float64)) + default: + return nil, fmt.Errorf("unknown format '%T' for field (int) '%s'", v, name) + } + } + + for name, query := range config.Fields { + // Execute the query and store the result in fields + v, err := executeQuery(doc, selected, query) + if err != nil { + return nil, fmt.Errorf("failed to query field '%s': %v", name, err) + } + fields[name] = v + } + + // Handle the field batch definitions if any. + if len(config.FieldSelection) > 0 { + fieldnamequery := "name()" + fieldvaluequery := "." + if len(config.FieldNameQuery) > 0 { + fieldnamequery = config.FieldNameQuery + } + if len(config.FieldValueQuery) > 0 { + fieldvaluequery = config.FieldValueQuery + } + + // Query all fields + selectedFieldNodes, err := xmlquery.QueryAll(selected, config.FieldSelection) + if err != nil { + return nil, err + } + if len(selectedFieldNodes) > 0 && selectedFieldNodes[0] != nil { + for _, selectedfield := range selectedFieldNodes { + n, err := executeQuery(doc, selectedfield, fieldnamequery) + if err != nil { + return nil, fmt.Errorf("failed to query field name with query '%s': %v", fieldnamequery, err) + } + name, ok := n.(string) + if !ok { + return nil, fmt.Errorf("failed to query field name with query '%s': result is not a string (%v)", fieldnamequery, n) + } + v, err := executeQuery(doc, selectedfield, fieldvaluequery) + if err != nil { + return nil, fmt.Errorf("failed to query field value for '%s': %v", name, err) + } + path := name + if config.FieldNameExpand { + p := getNodePath(selectedfield, selected, "_") + if len(p) > 0 { + path = p + "_" + name + } + } + + // Check if field name already exists and if so, append an index number. + if _, ok := fields[path]; ok { + for i := 1; ; i++ { + p := path + "_" + strconv.Itoa(i) + if _, ok := fields[p]; !ok { + path = p + break + } + } + } + + fields[path] = v + } + } else { + p.debugEmptyQuery("field selection", selected, config.FieldSelection) + } + } + + return metric.New(metricname, tags, fields, timestamp) +} + +func getNodePath(node, relativeTo *xmlquery.Node, sep string) string { + names := make([]string, 0) + + // Climb up the tree and collect the node names + n := node.Parent + for n != nil && n != relativeTo { + names = append(names, n.Data) + n = n.Parent + } + + if len(names) < 1 { + return "" + } + + // Construct the nodes + path := "" + for _, name := range names { + path = name + sep + path + } + + return path[:len(path)-1] +} + +func executeQuery(doc, selected *xmlquery.Node, query string) (r interface{}, err error) { + // Check if the query is relative or absolute and set the root for the query + root := selected + if strings.HasPrefix(query, "/") { + root = doc + } + + // Compile the query + expr, err := xpath.Compile(query) + if err != nil { + return nil, fmt.Errorf("failed to compile query '%s': %v", query, err) + } + + // Evaluate the compiled expression and handle returned node-iterators + // separately. Those iterators will be returned for queries directly + // referencing a node (value or attribute). + n := expr.Evaluate(xmlquery.CreateXPathNavigator(root)) + if iter, ok := n.(*xpath.NodeIterator); ok { + // We got an iterator, so take the first match and get the referenced + // property. This will always be a string. + if iter.MoveNext() { + r = iter.Current().Value() + } + } else { + r = n + } + + return r, nil +} + +func splitLastPathElement(query string) []string { + // This is a rudimentary xpath-parser that splits the path + // into the last path element and the remaining path-part. + // The last path element is then further splitted into + // parts such as attributes or selectors. Each returned + // element is a full path! + + // Nothing left + if query == "" || query == "/" || query == "//" || query == "." { + return []string{} + } + + seperatorIdx := strings.LastIndex(query, "/") + if seperatorIdx < 0 { + query = "./" + query + seperatorIdx = 1 + } + + // For double slash we want to split at the first slash + if seperatorIdx > 0 && query[seperatorIdx-1] == byte('/') { + seperatorIdx-- + } + + base := query[:seperatorIdx] + if base == "" { + base = "/" + } + + elements := make([]string, 1) + elements[0] = base + + offset := seperatorIdx + if i := strings.Index(query[offset:], "::"); i >= 0 { + // Check for axis operator + offset += i + elements = append(elements, query[:offset]+"::*") + } + + if i := strings.Index(query[offset:], "["); i >= 0 { + // Check for predicates + offset += i + elements = append(elements, query[:offset]) + } else if i := strings.Index(query[offset:], "@"); i >= 0 { + // Check for attributes + offset += i + elements = append(elements, query[:offset]) + } + + return elements +} + +func (p *Parser) debugEmptyQuery(operation string, root *xmlquery.Node, initialquery string) { + if p.Log == nil { + return + } + + query := initialquery + + // We already know that the + p.Log.Debugf("got 0 nodes for query %q in %s", query, operation) + for { + parts := splitLastPathElement(query) + if len(parts) < 1 { + return + } + for i := len(parts) - 1; i >= 0; i-- { + q := parts[i] + nodes, err := xmlquery.QueryAll(root, q) + if err != nil { + p.Log.Debugf("executing query %q in %s failed: %v", q, operation, err) + return + } + p.Log.Debugf("got %d nodes for query %q in %s", len(nodes), q, operation) + if len(nodes) > 0 && nodes[0] != nil { + return + } + query = parts[0] + } + } +} diff --git a/plugins/parsers/xml/parser_test.go b/plugins/parsers/xml/parser_test.go new file mode 100644 index 0000000000000..91896172d3679 --- /dev/null +++ b/plugins/parsers/xml/parser_test.go @@ -0,0 +1,1175 @@ +package xml + +import ( + "io/ioutil" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/toml" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const invalidXML = ` + + This one has to fail due to missing end-tag +` + +const singleMetricValuesXML = ` + + + Device TestDevice1 + ok + 1577923199 + 1577923199128 + 1577923199128256 + 1577923199128256512 + 2020-01-01T23:59:59Z + 98247 + 98695.81 + true + this is a test + 42;23 + +` +const singleMetricAttributesXML = ` + + + + + + + + + + + + +` +const singleMetricMultiValuesXML = ` + + + + 1 + 2 + 3 + 4 + 5 + 6 + +` +const multipleNodesXML = ` + + + + 42.0 + 1 + ok + + + 42.1 + 0 + ok + + + 42.2 + 1 + ok + + + 42.3 + 0 + failed + + + 42.4 + 1 + failed + +` + +const metricNameQueryXML = ` + + + 1577923199 + + +` + +func TestParseInvalidXML(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expectedError string + }{ + { + name: "invalid XML (missing close tag)", + input: invalidXML, + configs: []Config{ + { + MetricQuery: "test", + Timestamp: "/Device_1/Timestamp_unix", + }, + }, + defaultTags: map[string]string{}, + expectedError: "XML syntax error on line 4: unexpected EOF", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + + _, err := parser.ParseLine(tt.input) + require.Error(t, err) + require.Equal(t, tt.expectedError, err.Error()) + }) + } +} + +func TestInvalidTypeQueriesFail(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expectedError string + }{ + { + name: "invalid field (int) type", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix", + FieldsInt: map[string]string{ + "a": "/Device_1/value_string", + }, + }, + }, + defaultTags: map[string]string{}, + expectedError: "failed to parse field (int) 'a': strconv.ParseInt: parsing \"this is a test\": invalid syntax", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + + _, err := parser.ParseLine(tt.input) + require.Error(t, err) + require.Equal(t, tt.expectedError, err.Error()) + }) + } +} + +func TestInvalidTypeQueries(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected telegraf.Metric + }{ + { + name: "invalid field type (number)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "a": "number(/Device_1/value_string)", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": float64(0), + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "invalid field type (boolean)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "a": "boolean(/Device_1/value_string)", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": true, + }, + time.Unix(1577923199, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual) + }) + } +} + +func TestParseTimestamps(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected telegraf.Metric + }{ + { + name: "parse timestamp (no fmt)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse timestamp (unix)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix", + TimestampFmt: "unix", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse timestamp (unix_ms)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix_ms", + TimestampFmt: "unix_ms", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, int64(1577923199128*1e6)), + ), + }, + { + name: "parse timestamp (unix_us)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix_us", + TimestampFmt: "unix_us", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, int64(1577923199128256*1e3)), + ), + }, + { + name: "parse timestamp (unix_us)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix_ns", + TimestampFmt: "unix_ns", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, int64(1577923199128256512)), + ), + }, + { + name: "parse timestamp (RFC3339)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_iso", + TimestampFmt: "2006-01-02T15:04:05Z", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual) + }) + } +} + +func TestParseSingleValues(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected telegraf.Metric + }{ + { + name: "parse scalar values as string fields", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "a": "/Device_1/value_int", + "b": "/Device_1/value_float", + "c": "/Device_1/value_bool", + "d": "/Device_1/value_string", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": "98247", + "b": "98695.81", + "c": "true", + "d": "this is a test", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse scalar values as typed fields (w/o int)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "a": "number(Device_1/value_int)", + "b": "number(/Device_1/value_float)", + "c": "boolean(/Device_1/value_bool)", + "d": "/Device_1/value_string", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": 98247.0, + "b": 98695.81, + "c": true, + "d": "this is a test", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse values as typed fields (w/ int)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "b": "number(/Device_1/value_float)", + "c": "boolean(/Device_1/value_bool)", + "d": "/Device_1/value_string", + }, + FieldsInt: map[string]string{ + "a": "/Device_1/value_int", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": 98247, + "b": 98695.81, + "c": true, + "d": "this is a test", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse substring values", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "x": "substring-before(/Device_1/value_position, ';')", + "y": "substring-after(/Device_1/value_position, ';')", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "x": "42", + "y": "23", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse substring values (typed)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "x": "number(substring-before(/Device_1/value_position, ';'))", + "y": "number(substring-after(/Device_1/value_position, ';'))", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "x": 42.0, + "y": 23.0, + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse substring values (typed int)", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix", + FieldsInt: map[string]string{ + "x": "substring-before(/Device_1/value_position, ';')", + "y": "substring-after(/Device_1/value_position, ';')", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "x": 42, + "y": 23, + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse tags", + input: singleMetricValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix", + Tags: map[string]string{ + "state": "/Device_1/State", + "name": "substring-after(/Device_1/Name, ' ')", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{ + "state": "ok", + "name": "TestDevice1", + }, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual) + }) + } +} + +func TestParseSingleAttributes(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected telegraf.Metric + }{ + { + name: "parse attr timestamp (unix)", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr timestamp (RFC3339)", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_iso/@value", + TimestampFmt: "2006-01-02T15:04:05Z", + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr as string fields", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + Fields: map[string]string{ + "a": "/Device_1/attr_int/@_", + "b": "/Device_1/attr_float/@_", + "c": "/Device_1/attr_bool/@_", + "d": "/Device_1/attr_string/@_", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": "12345", + "b": "12345.678", + "c": "true", + "d": "this is a test", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr as typed fields (w/o int)", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + Fields: map[string]string{ + "a": "number(/Device_1/attr_int/@_)", + "b": "number(/Device_1/attr_float/@_)", + "c": "boolean(/Device_1/attr_bool/@_)", + "d": "/Device_1/attr_string/@_", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": 12345.0, + "b": 12345.678, + "c": true, + "d": "this is a test", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr as typed fields (w/ int)", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + Fields: map[string]string{ + "b": "number(/Device_1/attr_float/@_)", + "c": "boolean(/Device_1/attr_bool/@_)", + "d": "/Device_1/attr_string/@_", + }, + FieldsInt: map[string]string{ + "a": "/Device_1/attr_int/@_", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": 12345, + "b": 12345.678, + "c": true, + "d": "this is a test", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr substring", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + Fields: map[string]string{ + "name": "substring-after(/Device_1/Name/@value, ' ')", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "name": "TestDevice1", + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr tags", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + Tags: map[string]string{ + "state": "/Device_1/State/@_", + "name": "substring-after(/Device_1/Name/@value, ' ')", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{ + "state": "ok", + "name": "TestDevice1", + }, + map[string]interface{}{}, + time.Unix(1577923199, 0), + ), + }, + { + name: "parse attr bool", + input: singleMetricAttributesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", + Fields: map[string]string{ + "a": "/Device_1/attr_bool_numeric/@_ = 1", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": true, + }, + time.Unix(1577923199, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual) + }) + } +} + +func TestParseMultiValues(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected telegraf.Metric + }{ + { + name: "select values (float)", + input: singleMetricMultiValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Timestamp/@value", + Fields: map[string]string{ + "a": "number(/Device/Value[1])", + "b": "number(/Device/Value[2])", + "c": "number(/Device/Value[3])", + "d": "number(/Device/Value[4])", + "e": "number(/Device/Value[5])", + "f": "number(/Device/Value[6])", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": 1.0, + "b": 2.0, + "c": 3.0, + "d": 4.0, + "e": 5.0, + "f": 6.0, + }, + time.Unix(1577923199, 0), + ), + }, + { + name: "select values (int)", + input: singleMetricMultiValuesXML, + configs: []Config{ + { + MetricName: "test", + Timestamp: "/Timestamp/@value", + FieldsInt: map[string]string{ + "a": "/Device/Value[1]", + "b": "/Device/Value[2]", + "c": "/Device/Value[3]", + "d": "/Device/Value[4]", + "e": "/Device/Value[5]", + "f": "/Device/Value[6]", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "test", + map[string]string{}, + map[string]interface{}{ + "a": 1, + "b": 2, + "c": 3, + "d": 4, + "e": 5, + "f": 6, + }, + time.Unix(1577923199, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual) + }) + } +} + +func TestParseMultiNodes(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected []telegraf.Metric + }{ + { + name: "select all devices", + input: multipleNodesXML, + configs: []Config{ + { + MetricName: "test", + Selection: "/Device", + Timestamp: "/Timestamp/@value", + Fields: map[string]string{ + "value": "number(Value)", + "active": "Active = 1", + }, + FieldsInt: map[string]string{ + "mode": "Value/@mode", + }, + Tags: map[string]string{ + "name": "@name", + "state": "State", + }, + }, + }, + defaultTags: map[string]string{}, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "name": "Device 1", + "state": "ok", + }, + map[string]interface{}{ + "value": 42.0, + "active": true, + "mode": 0, + }, + time.Unix(1577923199, 0), + ), + testutil.MustMetric( + "test", + map[string]string{ + "name": "Device 2", + "state": "ok", + }, + map[string]interface{}{ + "value": 42.1, + "active": false, + "mode": 1, + }, + time.Unix(1577923199, 0), + ), + testutil.MustMetric( + "test", + map[string]string{ + "name": "Device 3", + "state": "ok", + }, + map[string]interface{}{ + "value": 42.2, + "active": true, + "mode": 2, + }, + time.Unix(1577923199, 0), + ), + testutil.MustMetric( + "test", + map[string]string{ + "name": "Device 4", + "state": "failed", + }, + map[string]interface{}{ + "value": 42.3, + "active": false, + "mode": 3, + }, + time.Unix(1577923199, 0), + ), + testutil.MustMetric( + "test", + map[string]string{ + "name": "Device 5", + "state": "failed", + }, + map[string]interface{}{ + "value": 42.4, + "active": true, + "mode": 4, + }, + time.Unix(1577923199, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + + actual, err := parser.Parse([]byte(tt.input)) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} + +func TestParseMetricQuery(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + defaultTags map[string]string + expected telegraf.Metric + }{ + { + name: "parse metric name query", + input: metricNameQueryXML, + configs: []Config{ + { + MetricName: "test", + MetricQuery: "name(/Device_1/Metric/@*[1])", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "value": "/Device_1/Metric/@*[1]", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "state", + map[string]string{}, + map[string]interface{}{ + "value": "ok", + }, + time.Unix(1577923199, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + + actual, err := parser.ParseLine(tt.input) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, tt.expected, actual) + }) + } +} + +func TestEmptySelection(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + }{ + { + name: "empty path", + input: multipleNodesXML, + configs: []Config{ + { + Selection: "/Device/NonExisting", + Fields: map[string]string{"value": "number(Value)"}, + FieldsInt: map[string]string{"mode": "Value/@mode"}, + Tags: map[string]string{}, + }, + }, + }, + { + name: "empty pattern", + input: multipleNodesXML, + configs: []Config{ + { + Selection: "//NonExisting", + Fields: map[string]string{"value": "number(Value)"}, + FieldsInt: map[string]string{"mode": "Value/@mode"}, + Tags: map[string]string{}, + }, + }, + }, + { + name: "empty axis", + input: multipleNodesXML, + configs: []Config{ + { + Selection: "/Device/child::NonExisting", + Fields: map[string]string{"value": "number(Value)"}, + FieldsInt: map[string]string{"mode": "Value/@mode"}, + Tags: map[string]string{}, + }, + }, + }, + { + name: "empty predicate", + input: multipleNodesXML, + configs: []Config{ + { + Selection: "/Device[@NonExisting=true]", + Fields: map[string]string{"value": "number(Value)"}, + FieldsInt: map[string]string{"mode": "Value/@mode"}, + Tags: map[string]string{}, + }, + }, + }, + } + + logger := testutil.Logger{Name: "parsers.xml"} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: map[string]string{}, Log: logger} + + _, err := parser.Parse([]byte(tt.input)) + require.Error(t, err) + require.Equal(t, err.Error(), "cannot parse with empty selection node") + }) + } +} + +func TestTestCases(t *testing.T) { + var tests = []struct { + name string + filename string + }{ + { + name: "explicit basic", + filename: "testcases/multisensor_explicit_basic.conf", + }, + { + name: "explicit batch", + filename: "testcases/multisensor_explicit_batch.conf", + }, + { + name: "field selection batch", + filename: "testcases/multisensor_selection_batch.conf", + }, + { + name: "openweathermap forecast", + filename: "testcases/openweathermap.conf", + }, + } + + parser := influx.NewParser(influx.NewMetricHandler()) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + filename := filepath.FromSlash(tt.filename) + cfg, header, err := loadTestConfiguration(filename) + require.NoError(t, err) + cfg.MetricName = "xml" + + // Load the xml-content + input, err := testutil.ParseRawLinesFrom(header, "File:") + require.NoError(t, err) + assert.Len(t, input, 1) + + datafile := filepath.FromSlash(input[0]) + content, err := ioutil.ReadFile(datafile) + require.NoError(t, err) + + // Get the expectations + expectedOutputs, err := testutil.ParseMetricsFrom(header, "Expected Output:", parser) + require.NoError(t, err) + + expectedErrors, _ := testutil.ParseRawLinesFrom(header, "Expected Error:") + + // Setup the parser and run it. + parser := Parser{Configs: []Config{*cfg}} + outputs, err := parser.Parse(content) + if len(expectedErrors) == 0 { + require.NoError(t, err) + } + // If no timestamp is given we cannot test it. So use the one of the output + if cfg.Timestamp == "" { + testutil.RequireMetricsEqual(t, expectedOutputs, outputs, testutil.IgnoreTime()) + } else { + testutil.RequireMetricsEqual(t, expectedOutputs, outputs) + } + }) + } +} + +func loadTestConfiguration(filename string) (*Config, []string, error) { + buf, err := ioutil.ReadFile(filename) + if err != nil { + return nil, nil, err + } + + header := make([]string, 0) + for _, line := range strings.Split(string(buf), "\n") { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "#") { + header = append(header, line) + } + } + cfg := Config{} + err = toml.Unmarshal(buf, &cfg) + return &cfg, header, err +} diff --git a/plugins/parsers/xml/testcases/multisensor.xml b/plugins/parsers/xml/testcases/multisensor.xml new file mode 100644 index 0000000000000..88cb0eaadc23d --- /dev/null +++ b/plugins/parsers/xml/testcases/multisensor.xml @@ -0,0 +1,31 @@ + + + Main Gateway + 2020-08-01T15:04:03Z + 12 + ok + + + + + + + + + busy + + + + + + + standby + + + + + + + error + + diff --git a/plugins/parsers/xml/testcases/multisensor_explicit_basic.conf b/plugins/parsers/xml/testcases/multisensor_explicit_basic.conf new file mode 100644 index 0000000000000..faf2c85a243a5 --- /dev/null +++ b/plugins/parsers/xml/testcases/multisensor_explicit_basic.conf @@ -0,0 +1,17 @@ +# Simple example for using the xml-parser. +# +# File: +# testcases/multisensor.xml +# +# Expected Output: +# xml,gateway=Main seqnr=12i,ok=true +# + +[tags] + gateway = "substring-before(/Gateway/Name, ' ')" + +[fields_int] + seqnr = "/Gateway/Sequence" + +[fields] + ok = "/Gateway/Status = 'ok'" diff --git a/plugins/parsers/xml/testcases/multisensor_explicit_batch.conf b/plugins/parsers/xml/testcases/multisensor_explicit_batch.conf new file mode 100644 index 0000000000000..354462d1e5693 --- /dev/null +++ b/plugins/parsers/xml/testcases/multisensor_explicit_batch.conf @@ -0,0 +1,28 @@ +# Example for explicitly selecting fields from a bunch of selected metrics. +# +# File: +# testcases/multisensor.xml +# +# Expected Output: +# sensors,name=Facility\ A consumers=3i,frequency=49.78,power=123.4,temperature=20,ok=true 1596294243000000000 +# sensors,name=Facility\ B consumers=1i,frequency=49.78,power=14.3,temperature=23.1,ok=true 1596294243000000000 +# sensors,name=Facility\ C consumers=0i,frequency=49.78,power=0.02,temperature=19.7,ok=false 1596294243000000000 +# + +metric_selection = "/Bus/child::Sensor" +metric_name = "string('sensors')" + +timestamp = "/Gateway/Timestamp" +timestamp_format = "2006-01-02T15:04:05Z" + +[tags] + name = "substring-after(@name, ' ')" + +[fields_int] + consumers = "Variable/@consumers" + +[fields] + temperature = "number(Variable/@temperature)" + power = "number(Variable/@power)" + frequency = "number(Variable/@frequency)" + ok = "Mode != 'error'" diff --git a/plugins/parsers/xml/testcases/multisensor_selection_batch.conf b/plugins/parsers/xml/testcases/multisensor_selection_batch.conf new file mode 100644 index 0000000000000..d9ed1cd89d56e --- /dev/null +++ b/plugins/parsers/xml/testcases/multisensor_selection_batch.conf @@ -0,0 +1,23 @@ +# Example for batch selecting fields from a bunch of selected metrics. +# +# File: +# testcases/multisensor.xml +# +# Expected Output: +# sensors,name=Facility\ A consumers=3,frequency=49.78,power=123.4,temperature=20 1596294243000000000 +# sensors,name=Facility\ B consumers=1,frequency=49.78,power=14.3,temperature=23.1 1596294243000000000 +# sensors,name=Facility\ C consumers=0,frequency=49.78,power=0.02,temperature=19.7 1596294243000000000 +# + +metric_selection = "/Bus/child::Sensor" +metric_name = "string('sensors')" + +timestamp = "/Gateway/Timestamp" +timestamp_format = "2006-01-02T15:04:05Z" + +field_selection = "child::Variable" +field_name = "name(@*[1])" +field_value = "number(@*[1])" + +[tags] + name = "substring-after(@name, ' ')" diff --git a/plugins/parsers/xml/testcases/openweathermap.conf b/plugins/parsers/xml/testcases/openweathermap.conf new file mode 100644 index 0000000000000..99798582c6cf2 --- /dev/null +++ b/plugins/parsers/xml/testcases/openweathermap.conf @@ -0,0 +1,28 @@ +# Example for parsing openweathermap five-day-forecast data. +# +# File: +# testcases/openweathermap_5d.xml +# +# Expected Output: +# weather,city=London,country=GB clouds=64i,humidity=96i,precipitation=5,temperature=16.89,wind_direction=253.5,wind_speed=4.9 1435654800000000000 +# weather,city=London,country=GB clouds=44i,humidity=97i,precipitation=99,temperature=17.23,wind_direction=248.001,wind_speed=4.86 1435665600000000000 +# + +metric_name = "'weather'" +metric_selection = "//forecast/*" +timestamp = "@from" +timestamp_format = "2006-01-02T15:04:05" + +[tags] + city = "/weatherdata/location/name" + country = "/weatherdata/location/country" + +[fields_int] + humidity = "humidity/@value" + clouds = "clouds/@all" + +[fields] + precipitation = "number(precipitation/@value)" + wind_direction = "number(windDirection/@deg)" + wind_speed = "number(windSpeed/@mps)" + temperature = "number(temperature/@value)" diff --git a/plugins/parsers/xml/testcases/openweathermap_5d.xml b/plugins/parsers/xml/testcases/openweathermap_5d.xml new file mode 100644 index 0000000000000..2b7dc83a5b86b --- /dev/null +++ b/plugins/parsers/xml/testcases/openweathermap_5d.xml @@ -0,0 +1,38 @@ + + + + + London + + GB + 3600 + + + + 2015-06-30T00:00:00Z + + + + + + + diff --git a/testutil/file.go b/testutil/file.go new file mode 100644 index 0000000000000..b312adedecf6f --- /dev/null +++ b/testutil/file.go @@ -0,0 +1,84 @@ +package testutil + +import ( + "fmt" + "strings" + + "github.com/influxdata/telegraf" +) + +type LineParser interface { + ParseLine(line string) (telegraf.Metric, error) +} + +//ParseRawLinesFrom returns the raw lines between the given header and a trailing blank line +func ParseRawLinesFrom(lines []string, header string) ([]string, error) { + if len(lines) < 2 { + // We need a line for HEADER and EMPTY TRAILING LINE + return nil, fmt.Errorf("expected at least two lines to parse from") + } + start := -1 + for i := range lines { + if strings.TrimLeft(lines[i], "# ") == header { + start = i + 1 + break + } + } + if start < 0 { + return nil, fmt.Errorf("header %q does not exist", header) + } + + output := make([]string, 0) + for _, line := range lines[start:] { + if !strings.HasPrefix(strings.TrimLeft(line, "\t "), "#") { + return nil, fmt.Errorf("section does not end with trailing empty line") + } + + // Stop at empty line + content := strings.TrimLeft(line, "# \t") + if content == "" || content == "'''" { + break + } + + output = append(output, content) + } + return output, nil +} + +//ParseMetricsFrom parses metrics from the given lines in line-protocol following a header, with a trailing blank line +func ParseMetricsFrom(lines []string, header string, parser LineParser) ([]telegraf.Metric, error) { + if len(lines) < 2 { + // We need a line for HEADER and EMPTY TRAILING LINE + return nil, fmt.Errorf("expected at least two lines to parse from") + } + start := -1 + for i := range lines { + if strings.TrimLeft(lines[i], "# ") == header { + start = i + 1 + break + } + } + if start < 0 { + return nil, fmt.Errorf("header %q does not exist", header) + } + + metrics := make([]telegraf.Metric, 0) + for _, line := range lines[start:] { + if !strings.HasPrefix(strings.TrimLeft(line, "\t "), "#") { + return nil, fmt.Errorf("section does not end with trailing empty line") + } + + // Stop at empty line + content := strings.TrimLeft(line, "# \t") + if content == "" || content == "'''" { + break + } + + m, err := parser.ParseLine(content) + if err != nil { + return nil, fmt.Errorf("unable to parse metric in %q failed: %v", content, err) + } + metrics = append(metrics, m) + } + return metrics, nil +} From dd9e924832d6240cce60a98cd97597918a7aa808 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 3 Mar 2021 14:02:36 -0700 Subject: [PATCH 261/761] Update changelog (cherry picked from commit 6cdf98fb58afc1263c522d7d42ee524024026494) --- CHANGELOG.md | 63 ++++++ etc/telegraf.conf | 474 ++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 500 insertions(+), 37 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 713dc5b4bc642..2ed4494c54ce8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,66 @@ +## v1.18.0-rc0 [2021-03-03] + +#### Bugfixes + + - [#7312](https://github.com/influxdata/telegraf/pull/7312) `inputs.docker` [plugins/input/docker] Make perdevice affect also cpu and add class granularity through perdevice_include/total_include + - [#8397](https://github.com/influxdata/telegraf/pull/8397) `outputs.dynatrace` Dynatrace Plugin: Make conversion to counters possible / Changed large bulk handling + - [#8655](https://github.com/influxdata/telegraf/pull/8655) `inputs.sqlserver` SqlServer - fix for default server list + - [#8703](https://github.com/influxdata/telegraf/pull/8703) `inputs.docker` Use consistent container name in docker input plugin + - [#8902](https://github.com/influxdata/telegraf/pull/8902) `inputs.snmp` Fix max_repetitions signedness issues + - [#8817](https://github.com/influxdata/telegraf/pull/8817) `outputs.kinesis` outputs.kinesis - log record error count + - [#8833](https://github.com/influxdata/telegraf/pull/8833) `inputs.sqlserver` Bug Fix - SQL Server HADR queries for SQL Versions + - [#8628](https://github.com/influxdata/telegraf/pull/8628) `inputs.modbus` fix: reading multiple holding registers in modbus input plugin + - [#8885](https://github.com/influxdata/telegraf/pull/8885) `inputs.statsd` Fix statsd concurrency bug + - [#8393](https://github.com/influxdata/telegraf/pull/8393) `inputs.sqlserver` SQL Perfmon counters - synced queries from v2 to all db types + - [#8873](https://github.com/influxdata/telegraf/pull/8873) `processors.ifname` Fix mutex locking around ifname cache + - [#8720](https://github.com/influxdata/telegraf/pull/8720) `parsers.influx` fix: remove ambiguity on '\v' from line-protocol parser + - [#8678](https://github.com/influxdata/telegraf/pull/8678) `inputs.redis` Fix Redis output field type inconsistencies + +#### Features + + - [#8887](https://github.com/influxdata/telegraf/pull/8887) `inputs.procstat` Add PPID field to procstat input plugin + - [#8852](https://github.com/influxdata/telegraf/pull/8852) `processors.starlark` Add Starlark script for estimating Line Protocol cardinality + - [#8828](https://github.com/influxdata/telegraf/pull/8828) `serializers.msgpack` Add MessagePack output data format + - [#8915](https://github.com/influxdata/telegraf/pull/8915) `inputs.cloudwatch` add proxy + - [#8910](https://github.com/influxdata/telegraf/pull/8910) Display error message on badly formatted config string array (eg. namepass) + - [#8785](https://github.com/influxdata/telegraf/pull/8785) `inputs.diskio` Non systemd support with unittest + - [#8850](https://github.com/influxdata/telegraf/pull/8850) `inputs.snmp` Support more snmpv3 authentication protocols + - [#8813](https://github.com/influxdata/telegraf/pull/8813) `inputs.redfish` added member_id as tag(as it is a unique value) for redfish plugin and added address of the server when the status is other than 200 for better debugging + - [#8613](https://github.com/influxdata/telegraf/pull/8613) `inputs.phpfpm` Support exclamation mark to create non-matching list in tail plugin + - [#8179](https://github.com/influxdata/telegraf/pull/8179) `inputs.statsd` Add support for datadog distributions metric + - [#8803](https://github.com/influxdata/telegraf/pull/8803) Add default retry for load config via url + - [#8816](https://github.com/influxdata/telegraf/pull/8816) Code Signing for Windows + - [#8772](https://github.com/influxdata/telegraf/pull/8772) `processors.starlark` Allow to provide constants to a starlark script + - [#8749](https://github.com/influxdata/telegraf/pull/8749) `outputs.newrelic` Add HTTP proxy setting to New Relic output plugin + - [#8543](https://github.com/influxdata/telegraf/pull/8543) `inputs.elasticsearch` Add configurable number of 'most recent' date-stamped indices to gather in Elasticsearch input + +#### New Input Plugins + + - [#8834](https://github.com/influxdata/telegraf/pull/8834) Input plugin for RavenDB + - [#8525](https://github.com/influxdata/telegraf/pull/8525) Add CSGO SRCDS input plugin + - [#8751](https://github.com/influxdata/telegraf/pull/8751) Adding a new directory monitor input plugin. + - [#6653](https://github.com/influxdata/telegraf/pull/6653) Add Beat input plugin + +#### New Output Plugins + + - [#8398](https://github.com/influxdata/telegraf/pull/8398) Sensu Go Output Plugin for Telegraf + - [#8450](https://github.com/influxdata/telegraf/pull/8450) plugin: output loki + - [#6714](https://github.com/influxdata/telegraf/pull/6714) SignalFx Output + +#### New Aggregator Plugins + + - [#3762](https://github.com/influxdata/telegraf/pull/3762) Add Derivative Aggregator Plugin + - [#8594](https://github.com/influxdata/telegraf/pull/8594) Add quantile aggregator plugin + +#### New Processor Plugins + + - [#8707](https://github.com/influxdata/telegraf/pull/8707) AWS EC2 metadata processor Using StreamingProcessor + +#### New External Plugins + + - [#8897](https://github.com/influxdata/telegraf/pull/8897) add SMCIPMITool input to external plugin list + + ## v1.17.3 [2021-02-17] #### Bugfixes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index c70b1d2f9f87c..949b0cd5f9e39 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -496,6 +496,9 @@ # # ## Connection timeout, defaults to "5s" if not set. # timeout = "5s" +# +# ## If you want to convert values represented as gauges to counters, add the metric names here +# additional_counters = [ ] # # Configuration for Elasticsearch to send metrics to. @@ -1066,6 +1069,33 @@ # # url = "https://listener.logz.io:8071" +# # Send logs to Loki +# [[outputs.loki]] +# ## The domain of Loki +# domain = "https://loki.domain.tld" +# +# ## Endpoint to write api +# # endpoint = "/loki/api/v1/push" +# +# ## Connection timeout, defaults to "5s" if not set. +# # timeout = "5s" +# +# ## Basic auth credential +# # username = "loki" +# # password = "pass" +# +# ## Additional HTTP headers +# # http_headers = {"X-Scope-OrgID" = "1"} +# +# ## If the request must be gzip encoded +# # gzip_request = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + # # Configuration for MQTT server to send metrics to # [[outputs.mqtt]] # servers = ["localhost:1883"] # required. @@ -1158,6 +1188,10 @@ # # ## Timeout for writes to the New Relic API. # # timeout = "15s" +# +# ## HTTP Proxy override. If unset use values from the standard +# ## proxy environment variables to determine proxy, if any. +# # http_proxy = "http://corporate.proxy:3128" # # Send telegraf measurements to NSQD @@ -1293,6 +1327,116 @@ # separator = " " +# # Send aggregate metrics to Sensu Monitor +# [[outputs.sensu-go]] +# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the corresponding backend API path +# ## /api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). +# ## +# ## Backend Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## AGENT API URL is the Sensu Agent API root URL to send metrics to +# ## (protocol, host, and port only). The output plugin will automatically +# ## append the correspeonding agent API path (/events). +# ## +# ## Agent API Events API reference: +# ## https://docs.sensu.io/sensu-go/latest/api/events/ +# ## +# ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output +# ## plugin will use backend_api_url. If backend_api_url and agent_api_url are +# ## not provided, the output plugin will default to use an agent_api_url of +# ## http://127.0.0.1:3031 +# ## +# # backend_api_url = "http://127.0.0.1:8080" +# # agent_api_url = "http://127.0.0.1:3031" +# +# ## API KEY is the Sensu Backend API token +# ## Generate a new API token via: +# ## +# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities +# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf +# ## $ sensuctl user create telegraf --group telegraf --password REDACTED +# ## $ sensuctl api-key grant telegraf +# ## +# ## For more information on Sensu RBAC profiles & API tokens, please visit: +# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ +# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ +# ## +# # api_key = "${SENSU_API_KEY}" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Timeout for HTTP message +# # timeout = "5s" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Sensu Event details +# ## +# ## Below are the event details to be sent to Sensu. The main portions of the +# ## event are the check, entity, and metrics specifications. For more information +# ## on Sensu events and its components, please visit: +# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events +# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks +# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities +# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics +# ## +# ## Check specification +# ## The check name is the name to give the Sensu check associated with the event +# ## created. This maps to check.metatadata.name in the event. +# [outputs.sensu-go.check] +# name = "telegraf" +# +# ## Entity specification +# ## Configure the entity name and namespace, if necessary. This will be part of +# ## the entity.metadata in the event. +# ## +# ## NOTE: if the output plugin is configured to send events to a +# ## backend_api_url and entity_name is not set, the value returned by +# ## os.Hostname() will be used; if the output plugin is configured to send +# ## events to an agent_api_url, entity_name and entity_namespace are not used. +# # [outputs.sensu-go.entity] +# # name = "server-01" +# # namespace = "default" +# +# ## Metrics specification +# ## Configure the tags for the metrics that are sent as part of the Sensu event +# # [outputs.sensu-go.tags] +# # source = "telegraf" +# +# ## Configure the handler(s) for processing the provided metrics +# # [outputs.sensu-go.metrics] +# # handlers = ["influxdb","elasticsearch"] + + +# # Send metrics and events to SignalFx +# [[outputs.signalfx]] +# ## SignalFx Org Access Token +# access_token = "my-secret-token" +# +# ## The SignalFx realm that your organization resides in +# signalfx_realm = "us9" # Required if ingest_url is not set +# +# ## You can optionally provide a custom ingest url instead of the +# ## signalfx_realm option above if you are using a gateway or proxy +# ## instance. This option takes precident over signalfx_realm. +# ingest_url = "https://my-custom-ingest/" +# +# ## Event typed metrics are omitted by default, +# ## If you require an event typed metric you must specify the +# ## metric name in the following list. +# included_event_names = ["plugin.metric_name"] + + # # Generic socket writer capable of handling multiple socket types. # [[outputs.socket_writer]] # ## URL to connect to @@ -1698,6 +1842,55 @@ ############################################################################### +# # Attach AWS EC2 metadata to metrics +# [[processors.aws_ec2]] +# ## Instance identity document tags to attach to metrics. +# ## For more information see: +# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html +# ## +# ## Available tags: +# ## * accountId +# ## * architecture +# ## * availabilityZone +# ## * billingProducts +# ## * imageId +# ## * instanceId +# ## * instanceType +# ## * kernelId +# ## * pendingTime +# ## * privateIp +# ## * ramdiskId +# ## * region +# ## * version +# imds_tags = [] +# +# ## EC2 instance tags retrieved with DescribeTags action. +# ## In case tag is empty upon retrieval it's omitted when tagging metrics. +# ## Note that in order for this to work, role attached to EC2 instance or AWS +# ## credentials available from the environment must have a policy attached, that +# ## allows ec2:DescribeTags. +# ## +# ## For more information see: +# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html +# ec2_tags = [] +# +# ## Timeout for http requests made by against aws ec2 metadata endpoint. +# timeout = "10s" +# +# ## ordered controls whether or not the metrics need to stay in the same order +# ## this plugin received them in. If false, this plugin will change the order +# ## with requests hitting cached results moving through immediately and not +# ## waiting on slower lookups. This may cause issues for you if you are +# ## depending on the order of metrics staying the same. If so, set this to true. +# ## Keeping the metrics ordered may be slightly slower. +# ordered = false +# +# ## max_parallel_calls is the maximum number of AWS API calls to be in flight +# ## at the same time. +# ## It's probably best to keep this number fairly low. +# max_parallel_calls = 10 + + # # Clone metrics and apply modifications. # [[processors.clone]] # ## All modifications on inputs and aggregators can be overridden: @@ -2092,6 +2285,13 @@ # # ## File containing a Starlark script. # # script = "/usr/local/bin/myscript.star" +# +# ## The constants of the Starlark script. +# # [processors.starlark.constants] +# # max_size = 10 +# # threshold = 0.75 +# # default_name = "Julia" +# # debug_mode = true # # Perform string processing on tags, fields, and measurements @@ -2245,6 +2445,49 @@ # # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] +# # Calculates a derivative for every field. +# [[aggregators.derivative]] +# ## The period in which to flush the aggregator. +# period = "30s" +# ## +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# ## +# ## This aggregator will estimate a derivative for each field, which is +# ## contained in both the first and last metric of the aggregation interval. +# ## Without further configuration the derivative will be calculated with +# ## respect to the time difference between these two measurements in seconds. +# ## The formula applied is for every field: +# ## +# ## value_last - value_first +# ## derivative = -------------------------- +# ## time_difference_in_seconds +# ## +# ## The resulting derivative will be named *fieldname_rate*. The suffix +# ## "_rate" can be configured by the *suffix* parameter. When using a +# ## derivation variable you can include its name for more clarity. +# # suffix = "_rate" +# ## +# ## As an abstraction the derivative can be calculated not only by the time +# ## difference but by the difference of a field, which is contained in the +# ## measurement. This field is assumed to be monotonously increasing. This +# ## feature is used by specifying a *variable*. +# ## Make sure the specified variable is not filtered and exists in the metrics +# ## passed to this aggregator! +# # variable = "" +# ## +# ## When using a field as the derivation parameter the name of that field will +# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*. +# ## +# ## Note, that the calculation is based on the actual timestamp of the +# ## measurements. When there is only one measurement during that period, the +# ## measurement will be rolled over to the next period. The maximum number of +# ## such roll-overs can be configured with a default of 10. +# # max_roll_over = 10 +# ## + + # # Report the final metric of a series # [[aggregators.final]] # ## The period on which to flush & clear the aggregator. @@ -2308,6 +2551,34 @@ # drop_original = false +# # Keep the aggregate quantiles of each metric passing through. +# [[aggregators.quantile]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## Quantiles to output in the range [0,1] +# # quantiles = [0.25, 0.5, 0.75] +# +# ## Type of aggregation algorithm +# ## Supported are: +# ## "t-digest" -- approximation using centroids, can cope with large number of samples +# ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7) +# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8) +# ## NOTE: Do not use "exact" algorithms with large number of samples +# ## to not impair performance or memory consumption! +# # algorithm = "t-digest" +# +# ## Compression for approximation (t-digest). The value needs to be +# ## greater or equal to 1.0. Smaller values will result in more +# ## performance but less accuracy. +# # compression = 100.0 + + # # Count the occurrence of values in fields. # [[aggregators.valuecounter]] # ## General Aggregator Arguments: @@ -2562,6 +2833,41 @@ # tubes = ["notifications"] +# # Read metrics exposed by Beat +# [[inputs.beat]] +# ## An URL from which to read Beat-formatted JSON +# ## Default is "http://127.0.0.1:5066". +# url = "http://127.0.0.1:5066" +# +# ## Enable collection of the listed stats +# ## An empty list means collect all. Available options are currently +# ## "beat", "libbeat", "system" and "filebeat". +# # include = ["beat", "libbeat", "filebeat"] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "logstash.example.com" +# +# ## Timeout for HTTP requests +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Read BIND nameserver XML statistics # [[inputs.bind]] # ## An array of BIND XML statistics URI to gather stats. @@ -2713,6 +3019,9 @@ # ## ex: endpoint_url = "http://localhost:8000" # # endpoint_url = "" # +# ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) +# # http_proxy_url = "http://localhost:8888" +# # # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # # metrics are made available to the 1 minute period. Some are collected at # # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. @@ -2854,6 +3163,18 @@ # # basic_password = "p@ssw0rd" +# # Fetch metrics from a CSGO SRCDS +# [[inputs.csgo]] +# ## Specify servers using the following format: +# ## servers = [ +# ## ["ip1:port1", "rcon_password1"], +# ## ["ip2:port2", "rcon_password2"], +# ## ] +# # +# ## If no servers are specified, no data will be collected +# servers = [] + + # # Input plugin for DC/OS metrics # [[inputs.dcos]] # ## The DC/OS cluster URL. @@ -2966,13 +3287,30 @@ # ## Timeout for docker list, info, and stats commands # timeout = "5s" # -# ## Whether to report for each container per-device blkio (8:0, 8:1...) and -# ## network (eth0, eth1, ...) stats or not +# ## Whether to report for each container per-device blkio (8:0, 8:1...), +# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. +# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting +# ## is honored. # perdevice = true # -# ## Whether to report for each container total blkio and network stats or not +# ## Specifies for which classes a per-device metric should be issued +# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) +# ## Please note that this setting has no effect if 'perdevice' is set to 'true' +# # perdevice_include = ["cpu"] +# +# ## Whether to report for each container total blkio and network stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. +# ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting +# ## is honored. # total = false # +# ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. +# ## Possible values are 'cpu', 'blkio' and 'network' +# ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. +# ## Please note that this setting has no effect if 'total' is set to 'false' +# # total_include = ["cpu", "blkio", "network"] +# # ## Which environment variables should we use as a tag # ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] # @@ -3065,6 +3403,7 @@ # cluster_stats_only_from_master = true # # ## Indices to collect; can be one or more indices names or _all +# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date. # indices_include = ["_all"] # # ## One of "shards", "cluster", "indices" @@ -3085,6 +3424,11 @@ # # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false +# +# ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix. +# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and sort them +# ## by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most recent indices. +# # num_most_recent_indices = 0 # # Returns ethtool statistics for given interfaces @@ -3583,6 +3927,17 @@ # timeout = "5s" +# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and Core metrics like temperature, power and utilization. +# [[inputs.intel_powerstat]] +# ## All global metrics are always collected by Intel PowerStat plugin. +# ## User can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array. +# ## Empty array means no per-CPU specific metrics will be collected by the plugin - in this case only platform level +# ## telemetry will be exposed by Intel PowerStat plugin. +# ## Supported options: +# ## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles" +# # cpu_metrics = [] + + # # Collect statistics about itself # [[inputs.internal]] # ## If true, collect telegraf memory stats. @@ -5059,44 +5414,40 @@ # urls = ["http://localhost:8080/_raindrops"] -# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required). -# [[inputs.ras]] -# ## Optional path to RASDaemon sqlite3 database. -# ## Default: /var/lib/rasdaemon/ras-mc_event.db -# # db_path = "" - +# # Reads metrics from RavenDB servers via the Monitoring Endpoints # [[inputs.ravendb]] -# ## Node URL and port that RavenDB is listening on. -# url = "https://localhost:8080" +# ## Node URL and port that RavenDB is listening on +# url = "https://localhost:8080" # -# ## RavenDB X509 client certificate setup -# tls_cert = "/etc/telegraf/raven.crt" -# tls_key = "/etc/telegraf/raven.key" +# ## RavenDB X509 client certificate setup +# # tls_cert = "/etc/telegraf/raven.crt" +# # tls_key = "/etc/telegraf/raven.key" # -# ## Optional request timeout -# ## -# ## Timeout, specifies the amount of time to wait -# ## for a server's response headers after fully writing the request and -# ## time limit for requests made by this client. -# # timeout = "5s" -# -# ## List of statistics which are collected -# # At least one is required -# # Allowed values: server, databases, indexes, collections +# ## Optional request timeout +# ## +# ## Timeout, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request and +# ## time limit for requests made by this client +# # timeout = "5s" # -# # stats_include = ["server", "databases", "indexes", "collections"] +# ## List of statistics which are collected +# # At least one is required +# # Allowed values: server, databases, indexes, collections +# # +# # stats_include = ["server", "databases", "indexes", "collections"] # -# ## List of db where database stats are collected -# ## If empty, all db are concerned -# # db_stats_dbs = [] +# ## List of db where database stats are collected +# ## If empty, all db are concerned +# # db_stats_dbs = [] # -# ## List of db where index status are collected -# ## If empty, all indexes from all db are concerned -# # index_stats_dbs = [] +# ## List of db where index status are collected +# ## If empty, all indexes from all db are concerned +# # index_stats_dbs = [] # -# ## List of db where collection status are collected -# ## If empty, all collections from all db are concerned -# # collection_stats_dbs = [] +# ## List of db where collection status are collected +# ## If empty, all collections from all db are concerned +# # collection_stats_dbs = [] + # # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs # [[inputs.redfish]] @@ -5282,7 +5633,7 @@ # ## # ## Security Name. # # sec_name = "myuser" -# ## Authentication protocol; one of "MD5", "SHA", or "". +# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "". # # auth_protocol = "MD5" # ## Authentication password. # # auth_password = "pass" @@ -6183,6 +6534,46 @@ # data_format = "influx" +# # Ingests files in a directory and then moves them to a target directory. +# [[inputs.directory_monitor]] +# ## The directory to monitor and read files from. +# directory = "" +# # +# ## The directory to move finished files to. +# finished_directory = "" +# # +# ## The directory to move files to upon file error. +# ## If not provided, erroring files will stay in the monitored directory. +# # error_directory = "" +# # +# ## The amount of time a file is allowed to sit in the directory before it is picked up. +# ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow, +# ## set this higher so that the plugin will wait until the file is fully copied to the directory. +# # directory_duration_threshold = "50ms" +# # +# ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested. +# # files_to_monitor = ["^.*\.csv"] +# # +# ## A list of files to ignore, if necessary. Supports regex. +# # files_to_ignore = [".DS_Store"] +# # +# ## Maximum lines of the file to process that have not yet be written by the +# ## output. For best throughput set to the size of the output's metric_buffer_limit. +# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics. +# # max_buffered_metrics = 10000 +# # +# ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. +# ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. +# # file_queue_size = 100000 +# # +# ## The dataformat to be read from the files. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# ## NOTE: We currently only support parsing newline-delimited JSON. See the format here: https://github.com/ndjson/ndjson-spec +# data_format = "influx" + + # # Read logging output from the Docker engine # [[inputs.docker_log]] # ## Docker Endpoint @@ -7188,6 +7579,13 @@ # # insecure_skip_verify = false +# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required). +# [[inputs.ras]] +# ## Optional path to RASDaemon sqlite3 database. +# ## Default: /var/lib/rasdaemon/ras-mc_event.db +# # db_path = "" + + # # Riemann protobuff listener. # [[inputs.riemann_listener]] # ## URL to listen on. @@ -7365,7 +7763,8 @@ # ## Parses datadog extensions to the statsd format # datadog_extensions = false # -# ## Parses distributions metric from datadog's extension to the statsd format +# ## Parses distributions metric as specified in the datadog statsd format +# ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition # datadog_distributions = false # # ## Statsd data translation templates, more info can be read here: @@ -7457,7 +7856,8 @@ # ## "/var/log/**.log" -> recursively find all .log files in /var/log # ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log # ## "/var/log/apache.log" -> just tail the apache log file -# ## +# ## "/var/log/log[!1-2]* -> tail files without 1-2 +# ## "/var/log/log[^1-2]* -> identical behavior as above # ## See https://github.com/gobwas/glob for more examples # ## # files = ["/var/mymetrics.out"] From d85a3637d5b88e76673abacc7dfcbcef70d9a8a8 Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Thu, 4 Mar 2021 08:28:05 -0800 Subject: [PATCH 262/761] chore: update csgo readme title (#8933) --- plugins/inputs/csgo/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/csgo/README.md b/plugins/inputs/csgo/README.md index ad80030065153..dbf3f3fdf54e9 100644 --- a/plugins/inputs/csgo/README.md +++ b/plugins/inputs/csgo/README.md @@ -1,6 +1,6 @@ -# CSGO Input Plugin +# Counter-Strike: Global Offensive (CSGO) Input Plugin -The `csgo` plugin gather metrics from CSGO servers. +The `csgo` plugin gather metrics from Counter-Strike: Global Offensive servers. #### Configuration ```toml From cfc5300ee220a17cc2a768e8fd90c08dd08002fa Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Thu, 4 Mar 2021 14:37:57 -0500 Subject: [PATCH 263/761] Get Tail tests to pass on windows (#8927) * fix tests * Update tail_test.go --- plugins/inputs/tail/tail_test.go | 38 +++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 83f6c9e7823ca..f9acdbcdba6d4 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -6,6 +6,7 @@ import ( "log" "os" "path/filepath" + "runtime" "testing" "time" @@ -25,6 +26,27 @@ var ( testdataDir = getTestdataDir() ) +func NewTestTail() *Tail { + offsetsMutex.Lock() + offsetsCopy := make(map[string]int64, len(offsets)) + for k, v := range offsets { + offsetsCopy[k] = v + } + offsetsMutex.Unlock() + watchMethod := defaultWatchMethod + + if runtime.GOOS == "windows" { + watchMethod = "poll" + } + + return &Tail{ + FromBeginning: false, + MaxUndeliveredLines: 1000, + offsets: offsetsCopy, + WatchMethod: watchMethod, + } +} + func TestTailBadLine(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) @@ -42,7 +64,7 @@ func TestTailBadLine(t *testing.T) { buf := &bytes.Buffer{} log.SetOutput(buf) - tt := NewTail() + tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} @@ -70,7 +92,7 @@ func TestTailDosLineEndings(t *testing.T) { require.NoError(t, err) tmpfile.Close() - tt := NewTail() + tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} @@ -99,7 +121,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { //we make sure the timeout won't kick in duration, _ := time.ParseDuration("100s") - tt := NewTail() + tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{filepath.Join(testdataDir, "test_multiline.log")} @@ -162,7 +184,7 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { // set tight timeout for tests duration := 10 * time.Millisecond - tt := NewTail() + tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} @@ -215,7 +237,7 @@ func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *test //we make sure the timeout won't kick in duration := 100 * time.Second - tt := NewTail() + tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{filepath.Join(testdataDir, "test_multiline.log")} @@ -274,7 +296,7 @@ cpu,42 require.NoError(t, err) tmpfile.Close() - plugin := NewTail() + plugin := NewTestTail() plugin.Log = testutil.Logger{} plugin.FromBeginning = true plugin.Files = []string{tmpfile.Name()} @@ -331,7 +353,7 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) { require.NoError(t, err) tmpfile.Close() - plugin := NewTail() + plugin := NewTestTail() plugin.Log = testutil.Logger{} plugin.FromBeginning = true plugin.Files = []string{tmpfile.Name()} @@ -528,7 +550,7 @@ func TestTailEOF(t *testing.T) { err = tmpfile.Sync() require.NoError(t, err) - tt := NewTail() + tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} From 9b3bd86d6dbca22c214caee59793142aef336f94 Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Thu, 4 Mar 2021 14:01:43 -0800 Subject: [PATCH 264/761] fix: Beat readme title (#8938) --- plugins/inputs/beat/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/beat/README.md b/plugins/inputs/beat/README.md index 113187acda585..a3ef9b1b8c7cd 100644 --- a/plugins/inputs/beat/README.md +++ b/plugins/inputs/beat/README.md @@ -1,4 +1,4 @@ -# Beat Plugin +# Beat Input Plugin The Beat plugin will collect metrics from the given Beat instances. It is known to work with Filebeat and Kafkabeat. ### Configuration: From 9e453226619c617d4daa5554e0958f194e090fc0 Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Thu, 4 Mar 2021 14:02:12 -0800 Subject: [PATCH 265/761] chore: update docs link in influxdbv2listener (#8939) * chore: update docs link in influxdbv2listener * chore: one more update to use latest --- plugins/inputs/influxdb_v2_listener/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/influxdb_v2_listener/README.md b/plugins/inputs/influxdb_v2_listener/README.md index 4258e021d85fd..71fa6c19bee3a 100644 --- a/plugins/inputs/influxdb_v2_listener/README.md +++ b/plugins/inputs/influxdb_v2_listener/README.md @@ -53,4 +53,4 @@ Metrics are created from InfluxDB Line Protocol in the request body. curl -i -XPOST 'http://localhost:8186/api/v2/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` -[influxdb_http_api]: https://v2.docs.influxdata.com/v2.0/api/ +[influxdb_http_api]: https://docs.influxdata.com/influxdb/latest/api/ From 74d4836c25bda00873a0493dbd445ffe57463684 Mon Sep 17 00:00:00 2001 From: Pmoranga Date: Fri, 5 Mar 2021 11:56:28 -0300 Subject: [PATCH 266/761] Nfsclient input: (Reopen of PR 1491 and 1684) (#4615) --- README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/nfsclient/README.md | 181 +++++++ plugins/inputs/nfsclient/nfsclient.go | 497 +++++++++++++++++++ plugins/inputs/nfsclient/nfsclient_test.go | 177 +++++++ plugins/inputs/nfsclient/testdata/mountstats | 231 +++++++++ 6 files changed, 1088 insertions(+) create mode 100644 plugins/inputs/nfsclient/README.md create mode 100644 plugins/inputs/nfsclient/nfsclient.go create mode 100644 plugins/inputs/nfsclient/nfsclient_test.go create mode 100644 plugins/inputs/nfsclient/testdata/mountstats diff --git a/README.md b/README.md index b1cf1ecf4b7fa..59aa7672c5dc4 100644 --- a/README.md +++ b/README.md @@ -259,6 +259,7 @@ For documentation on the latest development code see the [documentation index][d * [net](./plugins/inputs/net) * [net_response](./plugins/inputs/net_response) * [netstat](./plugins/inputs/net) +* [nfsclient](./plugins/inputs/nfsclient) * [nginx](./plugins/inputs/nginx) * [nginx_plus_api](./plugins/inputs/nginx_plus_api) * [nginx_plus](./plugins/inputs/nginx_plus) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 65d8d5254c6d3..d5eeead0a8bb6 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -112,6 +112,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/neptune_apex" _ "github.com/influxdata/telegraf/plugins/inputs/net" _ "github.com/influxdata/telegraf/plugins/inputs/net_response" + _ "github.com/influxdata/telegraf/plugins/inputs/nfsclient" _ "github.com/influxdata/telegraf/plugins/inputs/nginx" _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus" _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus_api" diff --git a/plugins/inputs/nfsclient/README.md b/plugins/inputs/nfsclient/README.md new file mode 100644 index 0000000000000..965bcc5240fc5 --- /dev/null +++ b/plugins/inputs/nfsclient/README.md @@ -0,0 +1,181 @@ +#### Description + +The NFSClient plugin collects data from /proc/self/mountstats. By default, only a limited number of general system-level metrics are collected, including basic read/write counts. +If `fullstat` is set, a great deal of additional metrics are collected, detailed below. + +**NOTE** Many of the metrics, even if tagged with a mount point, are really _per-server_. Thus, if you mount these two shares: `nfs01:/vol/foo/bar` and `nfs01:/vol/foo/baz`, there will be two near identical entries in /proc/self/mountstats. This is a limitation of the metrics exposed by the kernel, not the telegraf plugin. + +#### Plugin arguments: +- **fullstat** bool: Collect per-operation type metrics. Defaults to false. +- **include_mounts** list(string): gather metrics for only these mounts. Default is to watch all mounts. +- **exclude_mounts** list(string): gather metrics for all mounts, except those listed in this option. Excludes take precedence over includes. +- **include_operations** list(string): List of specific NFS operations to track. See /proc/self/mountstats (the "per-op statistics" section) for complete lists of valid options for NFSv3 and NFSV4. The default is to gather all metrics, but this is almost certainly *not* what you want (there are 22 operations for NFSv3, and well over 50 for NFSv4). A suggested 'minimal' list of operations to collect for basic usage: `['READ','WRITE','ACCESS','GETATTR','READDIR','LOOKUP','LOOKUP']` +- **exclude_operations** list(string): Gather all metrics, except those listed. Excludes take precedence over includes. + +*N.B.* the `include_mounts` and `exclude_mounts` arguments are both applied to the local mount location (e.g. /mnt/NFS), not the server export (e.g. nfsserver:/vol/NFS). Go regexp patterns can be used in either. + +#### Examples + +```toml +[[inputs.nfsclient]] + ## Read more low-level metrics (optional, defaults to false) + # fullstat = false + + ## List of mounts to explictly include or exclude (optional) + ## The pattern (Go regexp) is matched against the mount point (not the + ## device being mounted). If include_mounts is set, all mounts are ignored + ## unless present in the list. If a mount is listed in both include_mounts + ## and exclude_mounts, it is excluded. Go regexp patterns can be used. + # include_mounts = [] + # exclude_mounts = [] + + ## List of operations to include or exclude from collecting. This applies + ## only when fullstat=true. Symantics are similar to {include,exclude}_mounts: + ## the default is to collect everything; when include_operations is set, only + ## those OPs are collected; when exclude_operations is set, all are collected + ## except those listed. If include and exclude are set, the OP is excluded. + ## See /proc/self/mountstats for a list of valid operations; note that + ## NFSv3 and NFSv4 have different lists. While it is not possible to + ## have different include/exclude lists for NFSv3/4, unused elements + ## in the list should be okay. It is possible to have different lists + ## for different mountpoints: use mulitple [[input.nfsclient]] stanzas, + ## with their own lists. See "include_mounts" above, and be careful of + ## duplicate metrics. + # include_operations = [] + # exclude_operations = [] +``` + +Example output for basic metrics showing server-wise read and write data: + +``` +nfsstat,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS ops=600i,retrans=1i,bytes=1207i,rtt=606i,exe=607i 1612651512000000000 +nfsstat,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS bytes=1407i,rtt=706i,exe=707i,ops=700i,retrans=1i 1612651512000000000 + +``` + +Example output for `fullstat=true` metrics, which includes additional measurements for `nfs_bytes`, `nfs_events`, and `nfs_xprt_tcp` (and `nfs_xprt_udp` if present). +Additionally, per-OP metrics are collected, with examples for READ, LOOKUP, and NULL shown. +Please refer to `/proc/self/mountstats` for a list of supported NFS operations, as it changes as it changes periodically. + +``` +nfs_bytes,mountpoint=/home,serverexport=nfs01:/vol/home directreadbytes=0i,directwritebytes=0i,normalreadbytes=42648757667i,normalwritebytes=0i,readpages=10404603i,serverreadbytes=42617098139i,serverwritebytes=0i,writepages=0i 1608787697000000000 +nfs_events,mountpoint=/home,serverexport=nfs01:/vol/home attrinvalidates=116i,congestionwait=0i,datainvalidates=65i,delay=0i,dentryrevalidates=5911243i,extendwrite=0i,inoderevalidates=200378i,pnfsreads=0i,pnfswrites=0i,setattrtrunc=0i,shortreads=0i,shortwrites=0i,sillyrenames=0i,vfsaccess=7203852i,vfsflush=117405i,vfsfsync=0i,vfsgetdents=3368i,vfslock=0i,vfslookup=740i,vfsopen=157281i,vfsreadpage=16i,vfsreadpages=86874i,vfsrelease=155526i,vfssetattr=0i,vfsupdatepage=0i,vfswritepage=0i,vfswritepages=215514i 1608787697000000000 +nfs_xprt_tcp,mountpoint=/home,serverexport=nfs01:/vol/home backlogutil=0i,badxids=0i,bind_count=1i,connect_count=1i,connect_time=0i,idle_time=0i,inflightsends=15659826i,rpcreceives=2173896i,rpcsends=2173896i 1608787697000000000 + +nfs_ops,mountpoint=/NFS,operation=NULL,serverexport=1.2.3.4:/storage/NFS trans=0i,timeouts=0i,bytes_sent=0i,bytes_recv=0i,queue_time=0i,response_time=0i,total_time=0i,ops=0i 1612651512000000000 +nfs_ops,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS bytes=1207i,timeouts=602i,total_time=607i,exe=607i,trans=601i,bytes_sent=603i,bytes_recv=604i,queue_time=605i,ops=600i,retrans=1i,rtt=606i,response_time=606i 1612651512000000000 +nfs_ops,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS ops=700i,bytes=1407i,exe=707i,trans=701i,timeouts=702i,response_time=706i,total_time=707i,retrans=1i,rtt=706i,bytes_sent=703i,bytes_recv=704i,queue_time=705i 1612651512000000000 +``` + +#### References +1. [nfsiostat](http://git.linux-nfs.org/?p=steved/nfs-utils.git;a=summary) +2. [net/sunrpc/stats.c - Linux source code](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/net/sunrpc/stats.c) +3. [What is in /proc/self/mountstats for NFS mounts: an introduction](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex) +4. [The xprt: data for NFS mounts in /proc/self/mountstats](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsXprt) + + + +#### Measurements & Fields + +Always collected: + +- nfsstat + - bytes (integer, bytes) - The total number of bytes exchanged doing this operation. This is bytes sent *and* received, including overhead *and* payload. (bytes = OP_bytes_sent + OP_bytes_recv. See nfs_ops below) + - ops (integer, count) - The number of operations of this type executed. + - retrans (integer, count) - The number of times an operation had to be retried (retrans = OP_trans - OP_ops. See nfs_ops below) + - exe (integer, miliseconds) - The number of miliseconds it took to process the operations. + - rtt (integer, miliseconds) - The round-trip time for operations. + +In addition enabling `fullstat` will make many more metrics available. + +#### Tags + +- All measurements have the following tags: + - mountpoint - The local mountpoint, for instance: "/var/www" + - serverexport - The full server export, for instance: "nfsserver.example.org:/export" + +- Measurements nfsstat and nfs_ops will also include: + - operation - the NFS operation in question. `READ` or `WRITE` for nfsstat, but potentially one of ~20 or ~50, depending on NFS version. A complete list of operations supported is visible in `/proc/self/mountstats`. + + + +### Additional metrics + +When `fullstat` is true, additional measurements are collected. Tags are the same as above. + +#### NFS Operations + +Most descriptions come from Reference [[3](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex)] and `nfs_iostat.h`. Field order and names are the same as in `/proc/self/mountstats` and the Kernel source. + +Please refer to `/proc/self/mountstats` for a list of supported NFS operations, as it changes occasionally. + +- nfs_bytes + - fields: + - normalreadbytes - (int, bytes) - Bytes read from the server via `read()` + - normalwritebytes - (int, bytes) - Bytes written to the server via `write()` + - directreadbytes - (int, bytes) - Bytes read with O_DIRECT set + - directwritebytes - (int, bytes) -Bytes written with O_DIRECT set + - serverreadbytes - (int, bytes) - Bytes read via NFS READ (via `mmap()`) + - serverwritebytes - (int, bytes) - Bytes written via NFS WRITE (via `mmap()`) + - readpages - (int, count) - Number of pages read + - writepages - (int, count) - Number of pages written + +- nfs_events - Per-event metrics + - fields: + - inoderevalidates - (int, count) - How many times cached inode attributes have to be re-validated from the server. + - dentryrevalidates - (int, count) - How many times cached dentry nodes have to be re-validated. + - datainvalidates - (int, count) - How many times an inode had its cached data thrown out. + - attrinvalidates - (int, count) - How many times an inode has had cached inode attributes invalidated. + - vfsopen - (int, count) - How many times files or directories have been `open()`'d. + - vfslookup - (int, count) - How many name lookups in directories there have been. + - vfsaccess - (int, count) - Number of calls to `access()`. (formerly called "vfspermission") + + - vfsupdatepage - (int, count) - Count of updates (and potential writes) to pages. + - vfsreadpage - (int, count) - Number of pages read. + - vfsreadpages - (int, count) - Count of how many times a _group_ of pages was read (possibly via `mmap()`?). + - vfswritepage - (int, count) - Number of pages written. + - vfswritepages - (int, count) - Count of how many times a _group_ of pages was written (possibly via `mmap()`?) + - vfsgetdents - (int, count) - Count of directory entry reads with getdents(). These reads can be served from cache and don't necessarily imply actual NFS requests. (formerly called "vfsreaddir") + - vfssetattr - (int, count) - How many times we've set attributes on inodes. + - vfsflush - (int, count) - Count of times pending writes have been forcibly flushed to the server. + - vfsfsync - (int, count) - Count of calls to `fsync()` on directories and files. + - vfslock - (int, count) - Number of times a lock was attempted on a file (regardless of success or not). + - vfsrelease - (int, count) - Number of calls to `close()`. + - congestionwait - (int, count) - Believe unused by the Linux kernel, but it is part of the NFS spec. + - setattrtrunc - (int, count) - How many times files have had their size truncated. + - extendwrite - (int, count) - How many times a file has been grown because you're writing beyond the existing end of the file. + - sillyrenames - (int, count) - Number of times an in-use file was removed (thus creating a temporary ".nfsXXXXXX" file) + - shortreads - (int, count) - Number of times the NFS server returned less data than requested. + - shortwrites - (int, count) - Number of times NFS server reports it wrote less data than requested. + - delay - (int, count) - Occurances of EJUKEBOX ("Jukebox Delay", probably unused) + - pnfsreads - (int, count) - Count of NFS v4.1+ pNFS reads. + - pnfswrites - (int, count) - Count of NFS v4.1+ pNFS writes. + + - nfs_xprt_tcp + - fields: + - bind_count - (int, count) - Number of _completely new_ mounts to this server (sometimes 0?) + - connect_count - (int, count) - How many times the client has connected to the server in question + - connect_time - (int, jiffies) - How long the NFS client has spent waiting for its connection(s) to the server to be established. + - idle_time - (int, seconds) - How long (in seconds) since the NFS mount saw any RPC traffic. + - rpcsends - (int, count) - How many RPC requests this mount has sent to the server. + - rpcreceives - (int, count) - How many RPC replies this mount has received from the server. + - badxids - (int, count) - Count of XIDs sent by the server that the client doesn't know about. + - inflightsends - (int, count) - Number of outstanding requests; always >1. (See reference #4 for comment on this field) + - backlogutil - (int, count) - Cumulative backlog count + +- nfs_xprt_udp + - fields: + - [same as nfs_xprt_tcp, except for connect_count, connect_time, and idle_time] + +- nfs_ops + - fields (In all cases, the `operations` tag is set to the uppercase name of the NFS operation, _e.g._ "READ", "FSINFO", _etc_. See /proc/self/mountstats for a full list): + - ops - (int, count) - Total operations of this type. + - trans - (int, count) - Total transmissions of this type, including retransmissions: `OP_ops - OP_trans = total_retransmissions` (lower is better). + - timeouts - (int, count) - Number of major timeouts. + - bytes_sent - (int, count) - Bytes received, including headers (should also be close to on-wire size). + - bytes_recv - (int, count) - Bytes sent, including headers (should be close to on-wire size). + - queue_time - (int, milliseconds) - Cumulative time a request waited in the queue before sending this OP type. + - response_time - (int, milliseconds) - Cumulative time waiting for a response for this OP type. + - total_time - (int, milliseconds) - Cumulative time a request waited in the queue before sending. + - errors - (int, count) - Total number operations that complete with tk_status < 0 (usually errors). This is a new field, present in kernel >=5.3, mountstats version 1.1 + diff --git a/plugins/inputs/nfsclient/nfsclient.go b/plugins/inputs/nfsclient/nfsclient.go new file mode 100644 index 0000000000000..37fa64fef498e --- /dev/null +++ b/plugins/inputs/nfsclient/nfsclient.go @@ -0,0 +1,497 @@ +package nfsclient + +import ( + "bufio" + "log" + "os" + "regexp" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type NFSClient struct { + Fullstat bool `toml:"fullstat"` + IncludeMounts []string `toml:"include_mounts"` + ExcludeMounts []string `toml:"exclude_mounts"` + IncludeOperations []string `toml:"include_operations"` + ExcludeOperations []string `toml:"exclude_operations"` + Log telegraf.Logger `toml:"-"` + nfs3Ops map[string]bool + nfs4Ops map[string]bool + mountstatsPath string +} + +const sampleConfig = ` + ## Read more low-level metrics (optional, defaults to false) + # fullstat = false + + ## List of mounts to explictly include or exclude (optional) + ## The pattern (Go regexp) is matched against the mount point (not the + ## device being mounted). If include_mounts is set, all mounts are ignored + ## unless present in the list. If a mount is listed in both include_mounts + ## and exclude_mounts, it is excluded. Go regexp patterns can be used. + # include_mounts = [] + # exclude_mounts = [] + + ## List of operations to include or exclude from collecting. This applies + ## only when fullstat=true. Symantics are similar to {include,exclude}_mounts: + ## the default is to collect everything; when include_operations is set, only + ## those OPs are collected; when exclude_operations is set, all are collected + ## except those listed. If include and exclude are set, the OP is excluded. + ## See /proc/self/mountstats for a list of valid operations; note that + ## NFSv3 and NFSv4 have different lists. While it is not possible to + ## have different include/exclude lists for NFSv3/4, unused elements + ## in the list should be okay. It is possible to have different lists + ## for different mountpoints: use mulitple [[input.nfsclient]] stanzas, + ## with their own lists. See "include_mounts" above, and be careful of + ## duplicate metrics. + # include_operations = [] + # exclude_operations = [] +` + +func (n *NFSClient) SampleConfig() string { + return sampleConfig +} + +func (n *NFSClient) Description() string { + return "Read per-mount NFS client metrics from /proc/self/mountstats" +} + +func convertToInt64(line []string) []int64 { + /* A "line" of input data (a pre-split array of strings) is + processed one field at a time. Each field is converted to + an int64 value, and appened to an array of return values. + On an error, check for ErrRange, and throw a fatal error + if found. This situation indicates a pretty major issue in + the /proc/self/mountstats file, and returning faulty data + is worse than no data. Other errors are ignored, and append + whatever we got in the first place (probably 0). + Yes, this is ugly. */ + + var nline []int64 + + if len(line) < 2 { + return nline + } + + // Skip the first field; it's handled specially as the "first" variable + for _, l := range line[1:] { + val, err := strconv.ParseInt(l, 10, 64) + if err != nil { + if numError, ok := err.(*strconv.NumError); ok { + if numError.Err == strconv.ErrRange { + log.Fatalf("ErrRange: line:[%v] raw:[%v] -> parsed:[%v]\n", line, l, val) + } + } + } + nline = append(nline, val) + } + return nline +} + +func (n *NFSClient) parseStat(mountpoint string, export string, version string, line []string, fullstat bool, acc telegraf.Accumulator) error { + tags := map[string]string{"mountpoint": mountpoint, "serverexport": export} + nline := convertToInt64(line) + + if len(nline) == 0 { + n.Log.Warnf("Parsing Stat line with one field: %s\n", line) + return nil + } + + first := strings.Replace(line[0], ":", "", 1) + + var eventsFields = []string{ + "inoderevalidates", + "dentryrevalidates", + "datainvalidates", + "attrinvalidates", + "vfsopen", + "vfslookup", + "vfsaccess", + "vfsupdatepage", + "vfsreadpage", + "vfsreadpages", + "vfswritepage", + "vfswritepages", + "vfsgetdents", + "vfssetattr", + "vfsflush", + "vfsfsync", + "vfslock", + "vfsrelease", + "congestionwait", + "setattrtrunc", + "extendwrite", + "sillyrenames", + "shortreads", + "shortwrites", + "delay", + "pnfsreads", + "pnfswrites", + } + + var bytesFields = []string{ + "normalreadbytes", + "normalwritebytes", + "directreadbytes", + "directwritebytes", + "serverreadbytes", + "serverwritebytes", + "readpages", + "writepages", + } + + var xprtudpFields = []string{ + "bind_count", + "rpcsends", + "rpcreceives", + "badxids", + "inflightsends", + "backlogutil", + } + + var xprttcpFields = []string{ + "bind_count", + "connect_count", + "connect_time", + "idle_time", + "rpcsends", + "rpcreceives", + "badxids", + "inflightsends", + "backlogutil", + } + + var nfsopFields = []string{ + "ops", + "trans", + "timeouts", + "bytes_sent", + "bytes_recv", + "queue_time", + "response_time", + "total_time", + "errors", + } + + var fields = make(map[string]interface{}) + + switch first { + case "READ", "WRITE": + fields["ops"] = nline[0] + fields["retrans"] = nline[1] - nline[0] + fields["bytes"] = nline[3] + nline[4] + fields["rtt"] = nline[6] + fields["exe"] = nline[7] + tags["operation"] = first + acc.AddFields("nfsstat", fields, tags) + } + + if fullstat { + switch first { + case "events": + if len(nline) >= len(eventsFields) { + for i, t := range eventsFields { + fields[t] = nline[i] + } + acc.AddFields("nfs_events", fields, tags) + } + + case "bytes": + if len(nline) >= len(bytesFields) { + for i, t := range bytesFields { + fields[t] = nline[i] + } + acc.AddFields("nfs_bytes", fields, tags) + } + + case "xprt": + if len(line) > 1 { + switch line[1] { + case "tcp": + if len(nline)+2 >= len(xprttcpFields) { + for i, t := range xprttcpFields { + fields[t] = nline[i+2] + } + acc.AddFields("nfs_xprt_tcp", fields, tags) + } + case "udp": + if len(nline)+2 >= len(xprtudpFields) { + for i, t := range xprtudpFields { + fields[t] = nline[i+2] + } + acc.AddFields("nfs_xprt_udp", fields, tags) + } + } + } + } + + if (version == "3" && n.nfs3Ops[first]) || (version == "4" && n.nfs4Ops[first]) { + tags["operation"] = first + if len(nline) <= len(nfsopFields) { + for i, t := range nline { + fields[nfsopFields[i]] = t + } + acc.AddFields("nfs_ops", fields, tags) + } + } + + } + + return nil +} + +func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator) error { + var mount string + var version string + var export string + var skip bool + + for scanner.Scan() { + line := strings.Fields(scanner.Text()) + + line_len := len(line) + + if line_len == 0 { + continue + } + + skip = false + + // This denotes a new mount has been found, so set + // mount and export, and stop skipping (for now) + if line_len > 4 && choice.Contains("fstype", line) && (choice.Contains("nfs", line) || choice.Contains("nfs4", line)) { + mount = line[4] + export = line[1] + } else if line_len > 5 && (choice.Contains("(nfs)", line) || choice.Contains("(nfs4)", line)) { + version = strings.Split(line[5], "/")[1] + } + + if mount == "" { + continue + } + + if len(n.IncludeMounts) > 0 { + skip = true + for _, RE := range n.IncludeMounts { + matched, _ := regexp.MatchString(RE, mount) + if matched { + skip = false + break + } + } + } + + if !skip && len(n.ExcludeMounts) > 0 { + for _, RE := range n.ExcludeMounts { + matched, _ := regexp.MatchString(RE, mount) + if matched { + skip = true + break + } + } + } + + if !skip { + n.parseStat(mount, export, version, line, n.Fullstat, acc) + } + } + return nil +} + +func (n *NFSClient) getMountStatsPath() string { + + path := "/proc/self/mountstats" + if os.Getenv("MOUNT_PROC") != "" { + path = os.Getenv("MOUNT_PROC") + } + n.Log.Debugf("using [%s] for mountstats", path) + return path +} + +func (n *NFSClient) Gather(acc telegraf.Accumulator) error { + + file, err := os.Open(n.mountstatsPath) + if err != nil { + n.Log.Errorf("Failed opening the [%s] file: %s ", file, err) + return err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + n.processText(scanner, acc) + + if err := scanner.Err(); err != nil { + n.Log.Errorf("%s", err) + return err + } + + return nil +} + +func (n *NFSClient) Init() error { + + var nfs3Fields = []string{ + "NULL", + "GETATTR", + "SETATTR", + "LOOKUP", + "ACCESS", + "READLINK", + "READ", + "WRITE", + "CREATE", + "MKDIR", + "SYMLINK", + "MKNOD", + "REMOVE", + "RMDIR", + "RENAME", + "LINK", + "READDIR", + "READDIRPLUS", + "FSSTAT", + "FSINFO", + "PATHCONF", + "COMMIT", + } + + var nfs4Fields = []string{ + "NULL", + "READ", + "WRITE", + "COMMIT", + "OPEN", + "OPEN_CONFIRM", + "OPEN_NOATTR", + "OPEN_DOWNGRADE", + "CLOSE", + "SETATTR", + "FSINFO", + "RENEW", + "SETCLIENTID", + "SETCLIENTID_CONFIRM", + "LOCK", + "LOCKT", + "LOCKU", + "ACCESS", + "GETATTR", + "LOOKUP", + "LOOKUP_ROOT", + "REMOVE", + "RENAME", + "LINK", + "SYMLINK", + "CREATE", + "PATHCONF", + "STATFS", + "READLINK", + "READDIR", + "SERVER_CAPS", + "DELEGRETURN", + "GETACL", + "SETACL", + "FS_LOCATIONS", + "RELEASE_LOCKOWNER", + "SECINFO", + "FSID_PRESENT", + "EXCHANGE_ID", + "CREATE_SESSION", + "DESTROY_SESSION", + "SEQUENCE", + "GET_LEASE_TIME", + "RECLAIM_COMPLETE", + "LAYOUTGET", + "GETDEVICEINFO", + "LAYOUTCOMMIT", + "LAYOUTRETURN", + "SECINFO_NO_NAME", + "TEST_STATEID", + "FREE_STATEID", + "GETDEVICELIST", + "BIND_CONN_TO_SESSION", + "DESTROY_CLIENTID", + "SEEK", + "ALLOCATE", + "DEALLOCATE", + "LAYOUTSTATS", + "CLONE", + "COPY", + "OFFLOAD_CANCEL", + "LOOKUPP", + "LAYOUTERROR", + "COPY_NOTIFY", + "GETXATTR", + "SETXATTR", + "LISTXATTRS", + "REMOVEXATTR", + } + + nfs3Ops := make(map[string]bool) + nfs4Ops := make(map[string]bool) + + n.mountstatsPath = n.getMountStatsPath() + + if len(n.IncludeOperations) == 0 { + for _, Op := range nfs3Fields { + nfs3Ops[Op] = true + } + for _, Op := range nfs4Fields { + nfs4Ops[Op] = true + } + } else { + for _, Op := range n.IncludeOperations { + nfs3Ops[Op] = true + } + for _, Op := range n.IncludeOperations { + nfs4Ops[Op] = true + } + } + + if len(n.ExcludeOperations) > 0 { + for _, Op := range n.ExcludeOperations { + if nfs3Ops[Op] { + delete(nfs3Ops, Op) + } + if nfs4Ops[Op] { + delete(nfs4Ops, Op) + } + } + } + + if len(n.IncludeMounts) > 0 { + n.Log.Debugf("Including these mount patterns: %v", n.IncludeMounts) + } else { + n.Log.Debugf("Including all mounts.") + } + + if len(n.ExcludeMounts) > 0 { + n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeMounts) + } else { + n.Log.Debugf("Not excluding any mounts.") + } + + if len(n.IncludeOperations) > 0 { + n.Log.Debugf("Including these operations: %v", n.IncludeOperations) + } else { + n.Log.Debugf("Including all operations.") + } + + if len(n.ExcludeOperations) > 0 { + n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeOperations) + } else { + n.Log.Debugf("Not excluding any operations.") + } + + return nil +} + +func init() { + inputs.Add("nfsclient", func() telegraf.Input { + return &NFSClient{} + }) +} diff --git a/plugins/inputs/nfsclient/nfsclient_test.go b/plugins/inputs/nfsclient/nfsclient_test.go new file mode 100644 index 0000000000000..f4f008fbce0ad --- /dev/null +++ b/plugins/inputs/nfsclient/nfsclient_test.go @@ -0,0 +1,177 @@ +package nfsclient + +import ( + "bufio" + "github.com/influxdata/telegraf/testutil" + "os" + "strings" + "testing" +) + +func getMountStatsPath() string { + + path := "./testdata/mountstats" + if os.Getenv("MOUNT_PROC") != "" { + path = os.Getenv("MOUNT_PROC") + } + + return path +} + +func TestNFSClientParsev3(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{} + nfsclient.nfs3Ops = map[string]bool{"READLINK": true, "GETATTR": false} + nfsclient.nfs4Ops = map[string]bool{"READLINK": true, "GETATTR": false} + data := strings.Fields(" READLINK: 500 501 502 503 504 505 506 507") + nfsclient.parseStat("1.2.3.4:/storage/NFS", "/A", "3", data, true, &acc) + + fields_ops := map[string]interface{}{ + "ops": int64(500), + "trans": int64(501), + "timeouts": int64(502), + "bytes_sent": int64(503), + "bytes_recv": int64(504), + "queue_time": int64(505), + "response_time": int64(506), + "total_time": int64(507), + } + acc.AssertContainsFields(t, "nfs_ops", fields_ops) +} + +func TestNFSClientParsev4(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{} + nfsclient.nfs3Ops = map[string]bool{"DESTROY_SESSION": true, "GETATTR": false} + nfsclient.nfs4Ops = map[string]bool{"DESTROY_SESSION": true, "GETATTR": false} + data := strings.Fields(" DESTROY_SESSION: 500 501 502 503 504 505 506 507") + nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, true, &acc) + + fields_ops := map[string]interface{}{ + "ops": int64(500), + "trans": int64(501), + "timeouts": int64(502), + "bytes_sent": int64(503), + "bytes_recv": int64(504), + "queue_time": int64(505), + "response_time": int64(506), + "total_time": int64(507), + } + acc.AssertContainsFields(t, "nfs_ops", fields_ops) +} + +func TestNFSClientProcessStat(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{} + nfsclient.Fullstat = false + + file, _ := os.Open(getMountStatsPath()) + defer file.Close() + + scanner := bufio.NewScanner(file) + + nfsclient.processText(scanner, &acc) + + fields_readstat := map[string]interface{}{ + "ops": int64(600), + "retrans": int64(1), + "bytes": int64(1207), + "rtt": int64(606), + "exe": int64(607), + } + + read_tags := map[string]string{ + "serverexport": "1.2.3.4:/storage/NFS", + "mountpoint": "/A", + "operation": "READ", + } + + acc.AssertContainsTaggedFields(t, "nfsstat", fields_readstat, read_tags) + + fields_writestat := map[string]interface{}{ + "ops": int64(700), + "retrans": int64(1), + "bytes": int64(1407), + "rtt": int64(706), + "exe": int64(707), + } + + write_tags := map[string]string{ + "serverexport": "1.2.3.4:/storage/NFS", + "mountpoint": "/A", + "operation": "WRITE", + } + acc.AssertContainsTaggedFields(t, "nfsstat", fields_writestat, write_tags) +} + +func TestNFSClientProcessFull(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{} + nfsclient.Fullstat = true + + file, _ := os.Open(getMountStatsPath()) + defer file.Close() + + scanner := bufio.NewScanner(file) + + nfsclient.processText(scanner, &acc) + + fields_events := map[string]interface{}{ + "inoderevalidates": int64(301736), + "dentryrevalidates": int64(22838), + "datainvalidates": int64(410979), + "attrinvalidates": int64(26188427), + "vfsopen": int64(27525), + "vfslookup": int64(9140), + "vfsaccess": int64(114420), + "vfsupdatepage": int64(30785253), + "vfsreadpage": int64(5308856), + "vfsreadpages": int64(5364858), + "vfswritepage": int64(30784819), + "vfswritepages": int64(79832668), + "vfsgetdents": int64(170), + "vfssetattr": int64(64), + "vfsflush": int64(18194), + "vfsfsync": int64(29294718), + "vfslock": int64(0), + "vfsrelease": int64(18279), + "congestionwait": int64(0), + "setattrtrunc": int64(2), + "extendwrite": int64(785551), + "sillyrenames": int64(0), + "shortreads": int64(0), + "shortwrites": int64(0), + "delay": int64(0), + "pnfsreads": int64(0), + "pnfswrites": int64(0), + } + fields_bytes := map[string]interface{}{ + "normalreadbytes": int64(204440464584), + "normalwritebytes": int64(110857586443), + "directreadbytes": int64(783170354688), + "directwritebytes": int64(296174954496), + "serverreadbytes": int64(1134399088816), + "serverwritebytes": int64(407107155723), + "readpages": int64(85749323), + "writepages": int64(30784819), + } + fields_xprt_tcp := map[string]interface{}{ + "bind_count": int64(1), + "connect_count": int64(1), + "connect_time": int64(0), + "idle_time": int64(0), + "rpcsends": int64(96172963), + "rpcreceives": int64(96172963), + "badxids": int64(0), + "inflightsends": int64(620878754), + "backlogutil": int64(0), + } + + acc.AssertContainsFields(t, "nfs_events", fields_events) + acc.AssertContainsFields(t, "nfs_bytes", fields_bytes) + acc.AssertContainsFields(t, "nfs_xprt_tcp", fields_xprt_tcp) +} diff --git a/plugins/inputs/nfsclient/testdata/mountstats b/plugins/inputs/nfsclient/testdata/mountstats new file mode 100644 index 0000000000000..86651d20d26fa --- /dev/null +++ b/plugins/inputs/nfsclient/testdata/mountstats @@ -0,0 +1,231 @@ +device rootfs mounted on / with fstype rootfs +device proc mounted on /proc with fstype proc +device sysfs mounted on /sys with fstype sysfs +device devtmpfs mounted on /dev with fstype devtmpfs +device devpts mounted on /dev/pts with fstype devpts +device tmpfs mounted on /dev/shm with fstype tmpfs +device /dev/loop0 mounted on /dev/.initramfs/live with fstype iso9660 +device /dev/loop6 mounted on / with fstype ext4 +device /proc/bus/usb mounted on /proc/bus/usb with fstype usbfs +device none mounted on /proc/sys/fs/binfmt_misc with fstype binfmt_misc +device /tmp mounted on /tmp with fstype tmpfs +device /home mounted on /home with fstype tmpfs +device /var mounted on /var with fstype tmpfs +device /etc mounted on /etc with fstype tmpfs +device /dev/ram1 mounted on /root with fstype ext2 +device cgroup mounted on /cgroup/cpuset with fstype cgroup +device cgroup mounted on /cgroup/cpu with fstype cgroup +device cgroup mounted on /cgroup/cpuacct with fstype cgroup +device cgroup mounted on /cgroup/memory with fstype cgroup +device cgroup mounted on /cgroup/devices with fstype cgroup +device cgroup mounted on /cgroup/freezer with fstype cgroup +device cgroup mounted on /cgroup/net_cls with fstype cgroup +device cgroup mounted on /cgroup/blkio with fstype cgroup +device sunrpc mounted on /var/lib/nfs/rpc_pipefs with fstype rpc_pipefs +device /etc/auto.misc mounted on /misc with fstype autofs +device -hosts mounted on /net with fstype autofs +device 1.2.3.4:/storage/NFS mounted on /A with fstype nfs statvers=1.1 + opts: rw,vers=3,rsize=32768,wsize=32768,namlen=255,acregmin=60,acregmax=60,acdirmin=60,acdirmax=60,hard,nolock,noacl,nordirplus,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=1.2.3.4,mountvers=3,mountport=49193,mountproto=tcp,local_lock=all + age: 1136770 + caps: caps=0x3fe6,wtmult=512,dtsize=8192,bsize=0,namlen=255 + sec: flavor=1,pseudoflavor=1 + events: 301736 22838 410979 26188427 27525 9140 114420 30785253 5308856 5364858 30784819 79832668 170 64 18194 29294718 0 18279 0 2 785551 0 0 0 0 0 0 + bytes: 204440464584 110857586443 783170354688 296174954496 1134399088816 407107155723 85749323 30784819 + RPC iostats version: 1.0 p/v: 100003/3 (nfs) + xprt: tcp 733 1 1 0 0 96172963 96172963 0 620878754 0 690 196347132 524706275 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + GETATTR: 100 101 102 103 104 105 106 107 + SETATTR: 200 201 202 203 204 205 206 207 + LOOKUP: 300 301 302 303 304 305 306 307 + ACCESS: 400 401 402 403 404 405 406 407 + READLINK: 500 501 502 503 504 505 506 507 + READ: 600 601 602 603 604 605 606 607 + WRITE: 700 701 702 703 704 705 706 707 + CREATE: 800 801 802 803 804 805 806 807 + MKDIR: 900 901 902 903 904 905 906 907 + SYMLINK: 1000 1001 1002 1003 1004 1005 1006 1007 + MKNOD: 1100 1101 1102 1103 1104 1105 1106 1107 + REMOVE: 1200 1201 1202 1203 1204 1205 1206 1207 + RMDIR: 1300 1301 1302 1303 1304 1305 1306 1307 + RENAME: 1400 1401 1402 1403 1404 1405 1406 1407 + LINK: 1500 1501 1502 1503 1504 1505 1506 1507 + READDIR: 1600 1601 1602 1603 1604 1605 1606 1607 + READDIRPLUS: 1700 1701 1702 1703 1704 1705 1706 1707 + FSSTAT: 1800 1801 1802 1803 1804 1805 1806 1807 + FSINFO: 1900 1901 1902 1903 1904 1905 1906 1907 + PATHCONF: 2000 2001 2002 2003 2004 2005 2006 2007 + COMMIT: 2100 2101 2102 2103 2104 2105 2106 2107 + +device 2.2.2.2:/nfsdata/ mounted on /B with fstype nfs4 statvers=1.1 + opts: rw,vers=4,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60, acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys, clientaddr=3.3.3.3,minorversion=0,local_lock=none + age: 19 + caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,acl=0x0 + sec: flavor=1,pseudoflavor=1 + events: 0 168232 0 0 0 10095 217808 0 2 9797 0 9739 0 0 19739 19739 0 19739 0 0 0 0 0 0 0 0 0 + bytes: 1612840960 0 0 0 627536112 0 158076 0 + RPC iostats version: 1.0 p/v: 100003/4 (nfs) + xprt: tcp 737 0 1 0 0 69698 69697 0 81817 0 2 1082 12119 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + READ: 9797 9797 0 1000 2000 71 7953 8200 + WRITE: 0 0 0 0 0 0 0 0 + COMMIT: 0 0 0 0 0 0 0 0 + OPEN: 19740 19740 0 4737600 7343280 505 3449 4172 + OPEN_CONFIRM: 10211 10211 0 1552072 694348 74 836 1008 + OPEN_NOATTR: 0 0 0 0 0 0 0 0 + OPEN_DOWNGRADE: 0 0 0 0 0 0 0 0 + CLOSE: 19739 19739 0 3316152 2605548 334 3045 3620 + SETATTR: 0 0 0 0 0 0 0 0 + FSINFO: 1 1 0 132 108 0 0 0 + RENEW: 0 0 0 0 0 0 0 0 + SETCLIENTID: 0 0 0 0 0 0 0 0 + SETCLIENTID_CONFIRM: 0 0 0 0 0 0 0 0 + LOCK: 0 0 0 0 0 0 0 0 + LOCKT: 0 0 0 0 0 0 0 0 + LOCKU: 0 0 0 0 0 0 0 0 + ACCESS: 96 96 0 14584 19584 0 8 10 + GETATTR: 1 1 0 132 188 0 0 0 + LOOKUP: 10095 10095 0 1655576 2382420 36 898 1072 + LOOKUP_ROOT: 0 0 0 0 0 0 0 0 + REMOVE: 0 0 0 0 0 0 0 0 + RENAME: 0 0 0 0 0 0 0 0 + LINK: 0 0 0 0 0 0 0 0 + SYMLINK: 0 0 0 0 0 0 0 0 + CREATE: 0 0 0 0 0 0 0 0 + PATHCONF: 1 1 0 128 72 0 0 0 + STATFS: 0 0 0 0 0 0 0 0 + READLINK: 0 0 0 0 0 0 0 0 + READDIR: 0 0 0 0 0 0 0 0 + SERVER_CAPS: 2 2 0 256 176 0 0 0 + DELEGRETURN: 0 0 0 0 0 0 0 0 + GETACL: 0 0 0 0 0 0 0 0 + SETACL: 0 0 0 0 0 0 0 0 + FS_LOCATIONS: 0 0 0 0 0 0 0 0 + RELEASE_LOCKOWNER: 0 0 0 0 0 0 0 0 + SECINFO: 0 0 0 0 0 0 0 0 + EXCHANGE_ID: 0 0 0 0 0 0 0 0 + CREATE_SESSION: 0 0 0 0 0 0 0 0 + DESTROY_SESSION: 500 501 502 503 504 505 506 507 + SEQUENCE: 0 0 0 0 0 0 0 0 + GET_LEASE_TIME: 0 0 0 0 0 0 0 0 + RECLAIM_COMPLETE: 0 0 0 0 0 0 0 0 + LAYOUTGET: 0 0 0 0 0 0 0 0 + GETDEVICEINFO: 0 0 0 0 0 0 0 0 + LAYOUTCOMMIT: 0 0 0 0 0 0 0 0 + +device nfsserver1:/vol/export1/bread_recipes mounted on /C with fstype nfs statvers=1.1 + opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=5.4.3.2,mountvers=3,mountport=635,mountproto=udp,local_lock=none + age: 1084700 + caps: caps=0x3fc7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + sec: flavor=1,pseudoflavor=1 + events: 145712 48345501 0 2476 804 1337 49359047 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + bytes: 0 0 0 0 0 0 0 0 + RPC iostats version: 1.0 p/v: 100003/3 (nfs) + xprt: tcp 871 1 1 0 0 181124336 181124308 28 1971647851 0 1100 807885669 90279840 + per-op statistics + NULL: 1 2 0 44 24 0 0 0 + GETATTR: 145712 145712 0 22994472 16319744 532 107480 109969 + SETATTR: 0 0 0 0 0 0 0 0 + LOOKUP: 2553 2553 0 385932 476148 9 1695 1739 + ACCESS: 596338 596338 0 79281020 71560560 2375 228286 237993 + READLINK: 0 0 0 0 0 0 0 0 + READ: 0 0 0 0 0 0 0 0 + WRITE: 0 0 0 0 0 0 0 0 + CREATE: 0 0 0 0 0 0 0 0 + MKDIR: 0 0 0 0 0 0 0 0 + SYMLINK: 0 0 0 0 0 0 0 0 + MKNOD: 0 0 0 0 0 0 0 0 + REMOVE: 0 0 0 0 0 0 0 0 + RMDIR: 0 0 0 0 0 0 0 0 + RENAME: 0 0 0 0 0 0 0 0 + LINK: 0 0 0 0 0 0 0 0 + READDIR: 0 0 0 0 0 0 0 0 + READDIRPLUS: 0 0 0 0 0 0 0 0 + FSSTAT: 1698 1698 0 250080 285264 6 929 951 + FSINFO: 34 34 0 4352 5576 0 5 5 + PATHCONF: 1 1 0 128 140 0 0 0 + COMMIT: 0 0 0 0 0 0 0 0 + +device nfsserver2:/tank/os2warp mounted on /D with fstype nfs4 statvers=1.1 + opts: rw,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=10.66.88.239,local_lock=none + age: 2 + impl_id: name='',domain='',date='0,0' + caps: caps=0xffbfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0x40f9be3e,bm2=0x28803,acl=0x0,sessions,pnfs=not configured,lease_time=90,lease_expired=0 + sec: flavor=1,pseudoflavor=1 + events: 1 112 0 0 1 3 117 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + bytes: 0 0 0 0 0 0 0 0 + RPC iostats version: 1.1 p/v: 100003/4 (nfs) + xprt: tcp 763 0 2 0 2 39 39 0 42 0 2 0 3 + per-op statistics + NULL: 1 1 0 44 24 0 0 1 0 + READ: 0 0 0 0 0 0 0 0 0 + WRITE: 0 0 0 0 0 0 0 0 0 + COMMIT: 0 0 0 0 0 0 0 0 0 + OPEN: 0 0 0 0 0 0 0 0 0 + OPEN_CONFIRM: 0 0 0 0 0 0 0 0 0 + OPEN_NOATTR: 0 0 0 0 0 0 0 0 0 + OPEN_DOWNGRADE: 0 0 0 0 0 0 0 0 0 + CLOSE: 0 0 0 0 0 0 0 0 0 + SETATTR: 0 0 0 0 0 0 0 0 0 + FSINFO: 1 1 0 168 164 0 0 0 0 + RENEW: 0 0 0 0 0 0 0 0 0 + SETCLIENTID: 0 0 0 0 0 0 0 0 0 + SETCLIENTID_CONFIRM: 0 0 0 0 0 0 0 0 0 + LOCK: 0 0 0 0 0 0 0 0 0 + LOCKT: 0 0 0 0 0 0 0 0 0 + LOCKU: 0 0 0 0 0 0 0 0 0 + ACCESS: 3 3 0 600 504 0 1 1 0 + GETATTR: 2 2 0 364 480 0 1 1 0 + LOOKUP: 3 3 0 628 484 0 1 1 2 + LOOKUP_ROOT: 0 0 0 0 0 0 0 0 0 + REMOVE: 0 0 0 0 0 0 0 0 0 + RENAME: 0 0 0 0 0 0 0 0 0 + LINK: 0 0 0 0 0 0 0 0 0 + SYMLINK: 0 0 0 0 0 0 0 0 0 + CREATE: 0 0 0 0 0 0 0 0 0 + PATHCONF: 1 1 0 160 116 0 0 0 0 + STATFS: 1 1 0 164 160 0 0 0 0 + READLINK: 0 0 0 0 0 0 0 0 0 + READDIR: 1 1 0 224 11968 0 1 1 0 + SERVER_CAPS: 2 2 0 336 328 0 1 1 0 + DELEGRETURN: 0 0 0 0 0 0 0 0 0 + GETACL: 0 0 0 0 0 0 0 0 0 + SETACL: 0 0 0 0 0 0 0 0 0 + FS_LOCATIONS: 0 0 0 0 0 0 0 0 0 + RELEASE_LOCKOWNER: 0 0 0 0 0 0 0 0 0 + SECINFO: 0 0 0 0 0 0 0 0 0 + FSID_PRESENT: 0 0 0 0 0 0 0 0 0 + EXCHANGE_ID: 2 2 0 480 200 0 2 2 0 + CREATE_SESSION: 1 1 0 200 124 0 0 0 0 + DESTROY_SESSION: 0 0 0 0 0 0 0 0 0 + SEQUENCE: 0 0 0 0 0 0 0 0 0 + GET_LEASE_TIME: 0 0 0 0 0 0 0 0 0 + RECLAIM_COMPLETE: 1 1 0 128 88 0 107 107 0 + LAYOUTGET: 0 0 0 0 0 0 0 0 0 + GETDEVICEINFO: 0 0 0 0 0 0 0 0 0 + LAYOUTCOMMIT: 0 0 0 0 0 0 0 0 0 + LAYOUTRETURN: 0 0 0 0 0 0 0 0 0 + SECINFO_NO_NAME: 0 0 0 0 0 0 0 0 0 + TEST_STATEID: 0 0 0 0 0 0 0 0 0 + FREE_STATEID: 0 0 0 0 0 0 0 0 0 + GETDEVICELIST: 0 0 0 0 0 0 0 0 0 + BIND_CONN_TO_SESSION: 0 0 0 0 0 0 0 0 0 + DESTROY_CLIENTID: 0 0 0 0 0 0 0 0 0 + SEEK: 0 0 0 0 0 0 0 0 0 + ALLOCATE: 0 0 0 0 0 0 0 0 0 + DEALLOCATE: 0 0 0 0 0 0 0 0 0 + LAYOUTSTATS: 0 0 0 0 0 0 0 0 0 + CLONE: 0 0 0 0 0 0 0 0 0 + COPY: 0 0 0 0 0 0 0 0 0 + OFFLOAD_CANCEL: 0 0 0 0 0 0 0 0 0 + LOOKUPP: 0 0 0 0 0 0 0 0 0 + LAYOUTERROR: 0 0 0 0 0 0 0 0 0 + COPY_NOTIFY: 0 0 0 0 0 0 0 0 0 + GETXATTR: 0 0 0 0 0 0 0 0 0 + SETXATTR: 0 0 0 0 0 0 0 0 0 + LISTXATTRS: 0 0 0 0 0 0 0 0 0 + REMOVEXATTR: 0 0 0 0 0 0 0 0 0 + LAYOUTRETURN: 0 0 0 0 0 0 0 0 From b6f043c0eec2bf324fe32ff71cf23b2bb061eb73 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Fri, 5 Mar 2021 10:14:01 -0500 Subject: [PATCH 267/761] Mac Packaging / Notarization (#8878) * initial commit * Updated config.yml * Update mac-signing.sh * Updated config.yml * Updated config.yml * Keep the .tar.gz artifact along with the signed and notarized DMG * change to test temporarily * for testing * Updated config.yml * Update config.yml * Update config.yml --- .circleci/config.yml | 33 ++++++++++++++++- assets/icon.icns | Bin 0 -> 508472 bytes info.plist | 16 +++++++++ scripts/mac-signing.sh | 72 +++++++++++++++++++++++++++++++++++++ scripts/telegraf_entry_mac | 13 +++++++ 5 files changed, 133 insertions(+), 1 deletion(-) create mode 100644 assets/icon.icns create mode 100644 info.plist create mode 100644 scripts/mac-signing.sh create mode 100644 scripts/telegraf_entry_mac diff --git a/.circleci/config.yml b/.circleci/config.yml index 97f9319a64f79..4bd732967ab27 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -179,9 +179,31 @@ jobs: shell: powershell.exe command: | ./scripts/windows-signing.ps1 + - persist_to_workspace: + root: './build' + paths: + - 'dist' - store_artifacts: path: './build/dist' destination: 'build/dist' + package-sign-mac: + macos: + xcode: "11.3" + working_directory: /Users/distiller/project + environment: + FL_OUTPUT_DIR: output + FASTLANE_LANE: test + shell: /bin/bash --login -o pipefail + steps: + - checkout + - attach_workspace: + at: '.' + - run: + command: | + sh ./scripts/mac-signing.sh + - store_artifacts: + path: './dist' + destination: 'build/dist' workflows: version: 2 check: @@ -253,6 +275,15 @@ workflows: - 'package-sign-windows': requires: - 'release' + filters: + tags: + only: /.*/ + - 'package-sign-mac': + requires: + - 'package-sign-windows' + filters: + tags: + only: /.*/ nightly: jobs: - 'linter' @@ -288,4 +319,4 @@ workflows: filters: branches: only: - - master \ No newline at end of file + - master diff --git a/assets/icon.icns b/assets/icon.icns new file mode 100644 index 0000000000000000000000000000000000000000..339a8daefdc240792904062c3acc42f1dfa3926d GIT binary patch literal 508472 zcmeFX^N%i0@TmEYZQJ%4+qUNn&e*nX+qP}nwr$(m&v&!gyFct-aC1AU>PmMfophzE zEA_Owp^YN|Vt~Wkkm(NqfOQ?NASVt7jSUR|0N^AgM3nyP1^!1M!T-}0=Jq=P08qTS zu&{!purQ&5gRP0Vl`#N78Sfo8DGjWMI>J9%04ZD$hE50mM;+TFGo{=g65Ig%FI`4z zVcsEQjLO^}vqd4ZdBa7^`4VSqohG<|0>}cLUx8ZLF{c_sGg17uEP`F$(=A7SzfEK( z-d!XBbcQ);p}r4acb_Y7pfJJKoq5sjvH&00w^lA+e2t|T<019pSlBvxp^=!xf1 zfD9MAb>j%fq=wbGs#aSZgLE|SI)Y9F8~fzNIV?#8rb!$C0m=9%E))>)KuJS5Oyn4O02i*a2o7H7>6ufm{fU{Q72c}N^`iI88nbuI z^`C0Owz;P@>Vy}|%OVE;H!$V|b6xS+UZYSm{8pI$9zn%W2{34znt^q!+-&s+N4 z@0-@$TRxE=wI5&QzBj%UU*G;|5WEnYRf?WlkStI-$Am#U*!rOem|0Zi0Kv*GQ6=>% zs=tf)t;*~)4B-H2m=hTkrks`lB|Xdpx_+A(4}TkSKdiP6>N6s~$=(B_nA84ht=5mu zi%r$c!pf1F4RnOGgd+)wt-U{Zhk^Zi4tJlLyyRrGG;aBm$ff6)sS6e=Xsaq-+h{d9 z&FMOF;@qjG=E;enM@PJ^U(KeGNmf8S=c025P;L*@NT$(qdCs`%YN=p(>nJ z0(VjR*qM~rKB0xA^X?rd&D))YNuK9z=IJR6Z;!W<2c1LNj!q@8NvINnyMD^}04CxUXqbIC7gcW&QN`bd-B>bh#hAPK5BJ&5zX+4 z88j6HmG>3UpADto+2nrHGk2J}n0%-91~GRE`KWqVhSIfJ%^r3HyY&$pNA@Oj8G*y_ z!jo`__g#7VWE|0PEy_oNEcH<@*>12%0s$t040{OR0VyHU)b|r3wrn@=O=BS%*00C2 z+we{>9QE|tXLDDeLt>070N4D zoHX>MRIl>*vJe1%A3-v#$8J$-kMm9IlV48p^vQ zdHD7~tf;^k)6P@Q-=5B}#F;~)oVd3lJcf`g3q}=0VjGmz6zt_C{n!aNw@vzB1j6-m z<{^^-UR?JOH{AlwDxsK?WtslZ<>BiiKk#}Er*AS?tb~6@@GTNvd(7hkRq?SBj#D;A zj3StkjQa8}$a}B&)`?*r7K`(jYvT%<2~sk{f^i&*T6E|qohDUN$!H3avn>);b#s%U zm*j^BZhJ~nj|#f+4O{VT&|8^@7$j)ZSplxGXiXD5K)2`;fcYw5vKC67z83R_k+C8r= zo{5!0Jl7Mx+WCWTI)T}?OZjGI9s{T7;lL@-B(}4iY zT_a(g1!TO0qd%mH$jHcKRG%{>G_Kjn4HmI7U9_SpGG}Dpo}Ehr$zy`atBYs(lRb<(@i-=FLXt$?RRVpx7HzMMiho0@8(2BWP0uhx&E*Pyi-EzS z^)}v)fNXE9Icfr;hTkKVw-0on0Nv|>!w@&I{xuA^i9r4^%jLRGgr*@cvPt&e2}Q_T zJCNPQln6J+DX^5vTt#{^{}QxaMI)ZZox)c!k`P|mCN4KGi<-9+JSm<=mZjH$FZ8Ui z0O9NQ1*$x3RE)BVP)`uMJ|^Rx+KKoeTibH?ymG8WU~>xbN3K7oIUNr!h~z4S|HPk| zpCFf5LPE>SD-xqlJ_YmoiGYJK#b9_5z^9AMM$qLDj_5&JQ?`?6UjHp>O;~n4Tzycl z+vO=TJvL~!!us9ws4dbn2uba%>oAb$Bjn_4s-K`24O#-gSvgzssA&eNm_v%G(UzvA zpkWPP;+Zx}FSvHHLs~Hy@kO`F!4(b{*eCcc477p(W#8BZcW%nkuY<^{p1; zxVXT)#q&v?Hxz=MXBFh{g6;^?2w9cy4Odupz(X7mNzNMVnAr)QG2VE!1* zwpKrA@0$-l3(nnBn7$@&ju=xL3?F;YTJ;rCZ^IIUa=nG*qmzzisl@*-f88OvIke;a zsaVpH3@53R4pdI|dM`QAm|WY+U*(2%gS&n2qrdhoGXC?6sYD;_ zvr!B%q(GQ(^OoqnRLb^$(FK`S8TN)s(K5MPKe&KK+ZH4BBk5mqM(Pc4RYJL~1*|P! z{Pi!*EV?NzFJsbPF3iNRsR6YY;;Qm(TusnnG{I06@Jk|9-Zx-J`_GY%Dw!0gF+ndU z=W_l}0i=5L)1Mc~!#G+`UW%Z1pHXymApf?@ZZJ)KDPCdnUJH|U5hR1UzXN9LAarr9 zmt<7xC{Nb*~;i?nwlOVGUF>+>{C$A&X; zt{Dp<8JoNw_Vb9MAg;U%a>lj5*MFe_)99OY*0Q&Lvqpw)HLC(!V0l$P$=$~-IvV6V>wo^18^QL zEO)D;7g{NS?er36+o|>5D-FX1^i7vazkWm=deK564&-!w(GX3t1c+&|vx#Ka;;JB! z%);nPIP|qK`a0*hhtXFw6s${UH3&9c@PYnuH?^R94wyD^FM*uB!XUOsUrgLOk|R^V zBUk+%m{n$RLEL~SrgBG7?!kxRwV`v5$|?j_G&0WAAHK`Abqx%%{EO$_i@G6*;Grsi z34lZjpd;v3?D>i7&Y3+$B2+PNJOAUPxCMdiC_eBoN?gjFpF?!jKcPldX_df+;E;?B zdQ)8u^w+gcG5p*u%qwY9__I3fOTy@-K9(ee;$p*?rpR!0-*I6J7^iywqJ7ZypheG# zOPg+6&&i()S5(%@xheQG&KIkWqns}P8;-~1e242oiv^vjx5mg+htZzN>t0%`!r0S0 z;HmeU-h3L1a-{q)050g@taR^2mnNhBbyg`ey)1}9&xDv?0YUx_>qDlmg$v{smT70# zRqo%q8p$Y`@&|(^ZD|#YWKFFK4i&HWQOwWZFcumj(NRH`PAny^SvG((De1x1IPfC%-h3hvqM9R?2X&o?Quyb`q*0MO-gq7U~&06 z3EiBxgA1yNj{WXU6DJT%TrNMg>@X&Q0gepo8#4IgtVF?FH=XwZf${ufi+jXlT8~Pb#$7IAjY6NZmcV0U@FOTcc8i^}|TtCuB}TlyQ;KZIt-YqtlpN zNQ?f9F@dNF>*~r+!qmjHdf=hd-0^-Lh@u&k zU=J2jyDWP_F=Pi{eQZW#5!|$>t7Vv%86-!C-Ldq)A?F|14SzIjJ)EXy@iNv0Wj@_7 zNrinn+rjVMDii$tb{U|?B?q{Xc8?H6H$n?)*D1C>Cm& zKDZx7n{%nk#Q)Twe1ifEY+TaUbZXBL?{U{cav5+2gFL^0uh1cnfp$J*pN&>$b#jL~ ze#oZYh~N#?T3xRCMQCCYAzjm#H6i5!4gr$}R+|iCXaNH9kwITSRr<^*>R0^!_+gNE zX9x<@!Pl@$;7wFNJW)zgga`X=>=Kg51icCxPAYpg*s08zgD+t1IQW-biWrVaHRBFr z!1$zybQ%GYkQ2uqxWrUQJf!#Nui1;K075%eQ41!1dHj+=ZcF{pPZ#>~F zEU^cQ7RDQt#A&S*G;kjWjEihdGUWSNk4`q;p~ca|nVoeNQVA7`I+cSvy?f6GyEjla z2(N(TYrzybYD&n7wbQssRZNtaRK^csy9d6IIhk>6UDSr3yNoeXEBy7E`yyeUte%mF z9Z~eQPtJ-oK7VCIaUzuvg?ffo)>>R8`R}_y?&p03HyyI_hVTz*0y+b4Yfqx&8+RDz z3L(k#5H;D{47W+wK9WykL6I3&Xuf9|EmhmQW+?}IKM0}3Ev1a9Vw1T9IWw{>|Pjb{%B4OB7&%ehuVu{b%}o|2T5Y~#6o<$z^+TywBp)+bFb)c-(f&t zj3b~+`pAjt`DHMVqyo^?i2d0XeKm+_WCGdf2D^4$`1y`TF{RlrOlP)`t_q3USfy-|t~ z>qVID!o%Q8hqprWRCS0YTj=TrzkFEkU6yv3O!(<~4zJ3%{@x(F@`db>?wn+{aqZ2F zJf8jM1oGo4+C?=aw1BJ~#uy&%B8`LRu7Qtm#vEp`BC!`-a#Avoi#njpe3h~tiK*Fk zY(J3auV_4HYUpB*pQ8=hc3{OzB*f!w+nIt)6EYnxCw?>D=7uV>MDGJi;5vQn+0@<^^cfGRm^G+JO01ugoBmPXz>_7 z3;R291qlXa3RXS-74g9@z5&Vd{!A)Wha5>f5;IUmbLreiV&=20)(;OYn(x@7hU(Aq z@u9Y3YU^Yb=m`4i*A=NKYq@iPhPY42n+m*thxy&l;swaWr$~Cb)A1G(g6KV8X~^-w z0Iq1!7-2p_6z!v!xKQjQXzV8IwSI4~BdPc^qp8_8zhd|cg;1x|Z>drHwRtWAWWxMK zon<6`Qoyzjo6`0aRF=QsqB>p_^$X<~acf262;JJ>5}}8b0k&;t=aMHmeHj3tFOmA5 z!Yc2IThM_3)Xm-9eLmEK&l@3LqBr<X+G&d?g@B`6gjr zv5oMYsCUyr3@_{A7FB2~HPL4g_jTsS+0m1KlR> z$6JwKG5}OWGyrofa2bQYJO(jQSO-4n#J}&+HW`%{4IIrm@M?&mL^lN@YlVc71nUT4 z^#H7SHJN3fhrdgJH)#Y5b0Jn<%)%0zxd}C}2Ks~9d_F*X50B%Ie1L^0tVx$WA!3k% zTuKv6hy*f}5e<(CLl8|-^}?1Z`I_n9QQFCmz-r4Sl3)&-zyDf(+z^paV$zAPqoB${ zAq3AUUqkV+3D8^@nUq;NyM%PD?G)**F)nZ{PD3%1kZII<$Gq2az58StawHb#3T`JW zrP#A#Xo*kq4V{_byhPED;XdR-DmoZ>UzmuwHaRP7F6MPM!;Mmfjf0Z!+W?>#PKcNM zv4Eo=u04^POQEcA?w&q3t<~X9S4qEPvS3xj>(g}A+Zc=ZW@X1QsUJ>hU#aJm1IznP(Kni~{{(Fv~fHLNDJ zPrA#xZCe!klQ)$Kswha&#HU_Dftf(2W(hxX{Y8<&@v{YTBO;k6U}4(}49jam0XG2I zz2TnF_)>6Nz^Cev&Z@IZFJX@_?YaU2lxGS>O-J0qF)RLhaG9Jm(6SN|A^ZWfIw2Yr z;&DO++j-=ykr>ewFYv2*>sipT;2oM)88AyV(x=po;i)SiHT#qL2L_t~@l^w}{uZw? z!n(d2XQmH=gM}B}(?lcet_cqa4ntzFNXfj#nHo{R0V1J5mrw(_P5J~#bN{19WLpGO ziD#*ZhQ|e5#7_irU>k(d>~=qqQ&u|~`VcUaZwTg)n^3m99?S6&xH8`|67sun*wNt{ z3E+_fzmQ)8npY8E5?Dm! z>Wt&x3b*S3E0so>=K~Q5hi(DHAPY!74t6g*_(nYj0!W2CsV?|Pn}@9KzxdF9XLdSe z{R&7#{{@Xr`U%H_i$(e(a}UqbXsQqghA`RyH5}Gc`X~>9wP6jf3!mEeUna;S##NsSyfgHU2j9zVb~K>Fl%)TY)t?LJ^+&a@=k zpWYTVh=XbGF}j#MRKx2M}#(#G@a{*H>v+Y(NZo{fWK& zNa(V2SU7@=gyi344p<RbdQgCN0z>gD{X)9EeD{5f%6~R*kk-WTit7WkZrl8vtHczj zJshaY=MqD2kN955JVTF~eyB>yH)}Cu0W*k@Y4V^$7SRVTt))FrF)TfGkPQ5eP`}nU z36OVk=KD|@Me0|fRH;2xH-dXvK9ju+d#nr@pIPS#kOBZmF1eOs9{`<3=Jts8Bm1Q9~xcXPZ*Coqc`)i z{>JCahja6I$_&qO3V>uh37r8C6hs!(6j2Z|Z?4Q*l*)XZk@I-{eZ9)`W`0~Cr(?4) zciHJ$)!Eij)%l+$A2xaZ2b|t;?|-SmFrd-U`o-<@$7^PcQ_s!B-b%`Cs_nlchjx*E zG*6YKFEyLn?ybKyAK5@^@}2f&i%3slc@2G3Te=9;{Xc^t=F>5#v6H+?PjNuF@S%`2 z`;l*5cSh?Svzcn{Q1~x6AYSe_)l|;1bOZ=A`MI39GgidtZSQ^L3p(fLceiJF>3zp` z5eke?)2t9HzrR0MO_jaW1gV@~;iRpn`wARcRq)i|M#LW=Z|BxL!TmBerIzPMU-Qnz zs^YD$Y+jvrAI4MLoc6!^|0R;qi6!`_nGX^YkUSY05?5_muEgpGdt z2Ljr00Hc|mfQst;!Q*;neDbtC*pdsCpRNqy+Zpj~q+;);#pM3iFA7CRbgcS7O@01= zx_mafYF1ADt7n%Aem^?&(7vN?1HKo;`;dtzk%X}VbcfS?7`S$_o3jVex6t1?t*QXj z6^de|VM4t=i2KN;bWc!geX+Q<`7BxWcH85!x+du%QQ_hc;hVigRH;Y= z>VyAhsh{1l3emw;@%}0fIRzQl*f+5FX^xSIav*qFwC1n7FJ;?Z`ZvywNlw{+{1U-7 z4(AZ1jg(@5A)Z1X30*GKF&agUt`s={{m{$Od49qhzf{?;O)H0b_s)tZD(kR^G z`cljI$$gC>W4H`^g0%hJ@S(f{ubp$~Tx^{W&1Wdxy-)hkNj%^|jbZ^xrNT`z3@N5{ zDE6R=cRFs`Uq+aRgA7eN^%Ri^Hm1zc<0SV0KZ4(_$POMkPUv6)v^scesrx=Ttp3)n zX=$TW>%BAQeepIJ5;OI2%+88UVbS*n4mZ? zJf@!6GxPLNlf(Y#bgPMxlpsNPXSqj|mcYa6m;p-)(m=yhbxzYp_^Apl;c<=6Zu1co zjHA4Ip>L#1QTbE@2t#;XYw&+j1X=bKp`&rqXh$$e1!w&A(jv0(cgyvmq`>>`fLg_Vbk1ji3-KQBbJ z6a*uqQIZ7_Jq;MIIDl8;v4Et}iDY~!^|aeE@ntfYrezAQ zf=)Qk(?jBa!I;x5cyZqkAxI+`=;#skmm2RS8sBij=GV|c?V+T?CQ9^K>F+4zU>m-io5yOfM~)W3u*aQ(wH<_?ALJK<8C=AvgefhE zAuUe>-7m#p->;2O+qX;*oRf!joYM}fv8jXE2c!riT`_wc8@RU z9+F=-Mh=M;*yAKo9&(ewMy8=zr%Q>QL~J)VL!1jCD(Y1J3Bzlzw8UOfX$^g@q+|ek*DQ| z{YQ^Hhp+YlmBiShFfd&Q{i5y%wVpV`Cx+7n%LECCt~u6d7&w6kCscn8X3CPn+SDos zUYiBnusQbaoGVfL9mRwU5I++(!s(PWC{Q+-C>Ke3+8A|{bnYU75q$;#v3dP!e~ z7C2&$FvZaaLc$_k@%deG^wOh6;jwwLSX6{OdZG!$VtKaAwGv3TdAOzcl_Cd$eaBLi z+5=rTd9RnT1Rmw%6}Y*(Ozmwo${p#cVMH7zc|pyFUiP7j^DQR3AE5`gyP8gw?Bn~H z`JgJ+fHxOv{?W2Dv|792)aWm@A?HJ8Q444>Z1IK+Kaq$(;>P|^GSJ}pKfRa!d%{S)4SBUdkAx-kr<7O1b2FkKEg zhzC<32Cty+;_8Of99EB4iupoI$@KI~AdHBNM4jcn~-7Zyh zx-jmvEf%@e)S5q@XDN+OOuOlc298!f5pBqzYNAf3%GyZz5{D2J;kEQMh`%7lm(~&> z{Nd0;J+57%xPiznIFO8BX=qx()RXv82Rbf%@+bwLYUm|Ie-3d(-SK zgJu>yHNRQcK>}#F8h&8ORD=L@4(3K-dA>o0hJt}U_=yvun)Wc_?~a-5hMGkU72l|U zc}DTfcZO+4DjuK~H=equ!O5@9L?&cuWjsrp%i;L0P*(Lo01Olj3UhY4%oe_A=xV>5 z7)%+aG?mPdnUTxig041zYA~+o7S9%K*RxT{igTUzJaj?#m`@plxlJebfOaC|Qq-A5 zlkXR`s=U(%-N(hxSgrT;(yb~kkI8DjvxgMePdTe){yT)ldKzo+gCEV>a()(D3md#x zhnXAR>pno$O4C;3JQdKr3dr)8nC%p#h5| zCoIRJAGX>Ol<&4;*inuV?KgSKXcbNr$CcalNBwIu33i-^Nd^T>&xeU}o8%?yIh=fr zX3EJQ(nmXe?qPIxf=;uw^8~T*ljDyrM1!Z6bWSDnMZQK) zS!PKh(y=BYs)80?O6rTiyk=mkvap5-7kki=qWr)3l+J0*EG*>8uv0H#KC?}R=vm~rr82AnCkHo(FO6(>uz|y*=Mwi-4b(??5YIkVYn7_*O=;zqeKZgsuX85t9 zTdVubbF3|voE*6VgK%t3&=5YD)Cfl-f?!BF^|8)=UoA=^&~K3ot*X8qlf z5ulHz|81ev^|I1zkq8j1vpl=4_rZl)JMG~+>B6KBhZq6(cgBzIy>)8uu^!>D@Yz$38rkA^ z#x%i+Slp2-^TYlbEisOm)lFm&J0TxeoYraAq{YZ}tCivbdA!a^tUge`T?*?>UJY|u zdm2v?XiD>U@iv$A*8o>$DfPdh^hnVW;vF|TG`K>u>m7iGaky}f2+)gY)j)V5IY25U zxoX3@mR}4+(+7YNQ?QXS3XB@+7{+R

af~)-5NheMNfQ)=;ref!@=J_fEyC{e!sw zuZZ=cU7c6CI|R(fT-Wu5{en`Hn?&~-v*uExnJ4_U zIRIRRSZ6UDHuN7(0ywADjZ><}YIBMVGOwQW0V(A~$Ff-MlZN zT-TXe#(*Q1!EIs^{&{*Dxvw!Yk1Dk7Cq{^!&%s45Z2AU$;&Fkv#dDFUSi1;;QDQ

=)I*1ixCkxf!?w)^_s7&-CJimUfaEI149gz1}7hZ)5ztm~?q>3^6)GEt9A< zJ=h1TA|&WK9=LVmt>=T^^zh%?ddMBV! zU-AxcAHOBk!5C~C$qX{iU5+8TG{c^#c>+U~ z2!>G~d#IiONB-6C@3^oncKGH#XBeL-GijrC3K3>n?&(;0>nA^GstG% z)gJjK7fj-V zdcd*-5}-C4?q9Npetxr|;(;)3_rl$918#G!HC`Qijy-%iOA4GgysLsX)wtLa$4)}^ zAvf%?Y4ncHQ$bq`ar8aMLVXK@5zpc};AL=Ukei(2(ot{jmglY$I6)xm`=t=(g0X!Q z574!0wm{EgMUJ}M0XIgN+qmb6;KgC*EJFR`hT8t(GVwcoNZEeyIZxC#Wc{nnHw7DB zZ)CJXH$`}z0CC<^@9z282u|<7K3#UpAWo3BEIt{6P!3%ecBFknrdM6#qw7VTK9@=V z(->{ou!adX&hEw6U(Jj>ZnnI=1M-^OIHHC9V5!-|!m(B~d@&{ICM|K>6A)I6J}wT# zC`4bje46DrlmEU7WO+_cr2py5EyB<1@{0x~A!iPx%dIC`Ef6l}UB7Rv8s-qDJVLx= zdS%zIOfv^ADL5b0lT$aQ?KyYj@>MzI`kEe`E~-DnfI`O&!<|g1NpS zUx4kHftkf!Q_u$=OajtipN$d{uL0-T!TAB#LgRw znq=DGsDzGs|SzY&fHfFr9TbA1V{Z2>n5g+r|Y1=(t$YRdzG6x&Bq2HWGm^wECUf@*X6&qm;!jL>~P zO(i}PpR-Hwbaq;NKZL*;V}WKV^p%ly-qv2F2IK4!8$xeTLT~+3mb;p{Pt0n*6uz2a z*knMRP}3d(wF+c%FJM436uoG$bNMHImZ@| zh}r?B=J-7Q1#nUF2UGRbh%gnH9VU8?vI|=TB$C!cUT*_D6FB^4S$|JXr^N#tN`1xB zJ&SBr$j5-tRoC;_g*d5$R*D$bKSzlN_S$LWoW+vg4}Y<#?+9&q`JwZz@IW^M2vjy{ zx2(i{bZI@(g8L+`VOs)AWYauUO@oMQksmf(8SB3GpcNN@q1N(C=Cb7zEMc3r4jWVg zYDA^#BPRz@c_tBZtDZmcy3pJ zj+@trjz%;>y29OwZdR|PRg)Rh7VO$c#IXXqMhUi^s zEY0JEfwR{(hzvv_K&NIm%p8cvhU*uZL*KDKp=q54{%sX*;diI#Va}JXVFkFb@`b8l z_R2?zJXcy-!KfV3B$%~3FgelhYHnqv0&rQogz=BMd9qk7n-24#|gqYq{AXmL_B$FbKZ=42){vk8_ zfQh+VUyzmC9z6;AY|L~+YWO5VQ!PX=(9PkBrT%YZ zTm_w2HicGFg8zsFlg*0J{W!x8#R4<&Pb%v7V4h%?sqKQmCjS+BD!TMrc?>`m6rAd7#=%j<9X+GTElI7V*glzc^-q!+zEk^8E z5=-sSR-yJyyE-sv@(=K;d6lknJ>{t#UaS_g9oYDU9Ji#~iFfAV6O!nnos4S4gQ{^DrYRQdavTGBU&S%0!;>e}av;u?^xJY7~jfbW5VR zru|0NtRridG7{$fb&rK6da_soZ9=hY{RBvS*fX#^&YN^Wft?82Pm-&&*ngHl8Px4x z-prp(r(V+yDUK!95Xj=!)?lW4aZdq%dElTWn1))pXHEz}`eqRREX)}(9Y&0m6-vGv zpJ4!5VGz_nkAqNToJdJJ?=St;8B;B#(aVAk9QQ_AZJe>WI9cru_lX&zI%y9sZ)u5y z0=f1COzCN45`lPG96Xcc8B8}GA`WmmAiaLN&-LJd#E_hNp&*zHxojpLq22a0NaAiN zqd)F#jV~TVpXv^m#K2IvqGj!dwj#)X3OpZ=?Zxb3QWkC-jHG4cDGnyI8)#=lJrDZe zms?Z2yMAZyp|1i%7|4S(Oz+`^fr^{n&Ct=g=H_y4k)l*(bfYPl3k`P z9zgz@_)dohVG1KHYbyJthMn?*WXuc#W!j&&g%k676aDE-3=`opxgZ<)t>pAn{CYN( zk33QtLU?M9^qY;9Fu>>R%k(0;CzpG-`<5qJGD^!GB`#BLjjLP*Y#m;dV8Uk>+hHHG z*g?+}TRC=t>e;XFFk=iq#`01r<->x1`OL3GU7NX*QbU@OjWAp&3h|#3)d)hdRnD*r zAk15al>q-SX&8OnZF5dNY3W+BhF)3O4L+l+Bc?!_AyH8ukTNMx9$Kw>pDtd}3u8f6 zWN7EL!784FFh>+4S7jnaIy+g0Ww)JifE^b*MKQ;zv*`LKD)!1K8a?8_h*bxoa8UoR z@n|3+S#eYo8*wyj3L3+*C3agn)F6Fld%jjJqQ}^9)d7h#Jlns2;dcky^>x9E#k>cnJK(Is_HH1J zdBLJLdl_iwQOJ6FUL!lh2_@CKr&*K*SB8%*w@7x++U}V8K~r1PAVSZ2R9Q%CX$#jl za;1kEGi>T5*Hyh10MRtR8(;zi>h7(xIpA9Lp_-0WXk=o>2tbBAx@_Q89E#3Jx63XtwXJ0;46?d|nCvSiBrT;;q1#M_Cx)GPnt&r} z;ardlsveB+pK3QyO4&*%S&DKQ19V+)0#sv*&mtl7Ke1b;s#5}kBFpi>ukUC!xC~%x z7r~?2kqiv5#oRzUH?lG;so7-by0DHfSgbZ-8`6da!i_+a4Klu37!*@XAI$@MfkjU0 zLrj9~h{lWvYY~C)ZPQc0l*@2T-w~zoNg^*VmLZQ5AnL6&*gb{860;_43lVa~`A4ed z2DN9#@{2V>Q81{?KnBO>EYhG*oWVE5kFB5rWOk#f?}zPT?ibRFDcTe>16v-mj@7ap zJb<-MZgb31de^NQheq{74<2PV?(V-oxwVj$uU_ok_-okTdGI~2nvxGv9f=N~s)rb#zA3cgW{M){&e1YKIel7!erv8D` zf$J`0DF)69G3XQMIV3H#5ahOrogyb}c1Y^ASzTGSoh@SS5*!(d)Qj(y4fzOkT0sQL z`kZ9&a2Ogy2};EiI&e-{P*QtjKtfn^D_&#N_WkNX4D2f}XsiR4EX?%+dA2t+ck7(4 z*Xh;-!izA46^0QN`MWf~z$g14u;8Ys>l0aQ9q4|(Zm{ov&Q0tWmteDi2W&8Lwzhp5 zaUrF!iw&EKE5&5C+)g!Uh`mL5yv@Bj^c~RSqJi(7^Jz;XTM^i##Uo&*-5QZfs@cMf zNx<`N3EA)rc3?#qSuCZ6paJz=!wqVE*rOmV{rGsE`}H-WEr7m;&C4NLqIDdaR}79_lkx{IXgn^pQ1wLiBONy zBF-*ULu9KoMaYYdhzG~xTAA8C?m*yzEkS_QK86lAEtvW=^VAs+um;y0{DxXa%gB!c z^`Lvtpvb-{`8LdegAswAj*sz@& zE5X_+5cGJZ^y=t1ss}atNJYR&2k{ymj1`60cHBzDQL{biswMDa*{&mt5w)Xm`l9-0Awc!iDJjtSzrYY4SB?!}N zYQy)h^B`hII%?m=t2Brano~3?*kQtig=k@H@)L&$PQlRi!Wc_sENzs5 zkn{(EO>N*3QjVgFK{PoIh><`$E*7!%1wM*V2r}a(Qt+Cp1HJ$X*>9|)`lqOu$XBLw z^!l$DOoHkGx3|rp4Iv0Yi(X|TaHofg!dU(3{&)w>g5Sh}9*d?8_W9gY6MWygQxV*w zZz67gRDu1fRZvjzKsf{qv^JI+r8d=?YQofoDcaV36i3?7$&mEN(S$Mk7ZN2a{eW`?&Irt49knZHBsqy688$;3x2*!#w zIBN^m#+<8Q9~Qr`K0CD0XO?GN$(@jvjgrqYJBYa!s2WQ4705X|ROGzr5J zQBxNubz3`RKpeFT{S9A2of2RJHGNHboo{+#atBX2(aPzAzT9%{&t0zFlcIODOSPtd}>~uf-tI^X_8RQMtWRn?FW?lN=?+ z7BoMb0JBNv{$^C~tY6G`$3JY&Yas4$vD!Tle*YJH@AO@X7kzu4*tTukKCvsdZQHi( zN>Z_HtCCcVigQx2?WDiI?uYI%?rT5YabNa7u=W^h?=?SjuG{yTM#R!+%21`%Hy3Mg zIr4T%n2~7nO3I(E#HczDMSix2>Z}bzd|yzln7LfA4#)6i@HHtTuwTn*Ho**0RW~g9 zAqL9x@**WnJ}SQxHDE@4dMBc*(4cE;Oz79@dLWce`3AIG`jBIpl-X{f@=>2Fok)$h zVI$u$k=K9*8>%o`kaJh|gC`xHNY5ZtoK-4i5L3O#nv?s#dOC=7O%l6E4`*7?%3KuQ zLghO;dW;_!jqPcC4jf%EdL=-<{I4i5FjqQ1V%>HeAuW26+W&gE*9Y8`1EuMsJd(%= z0=3yOC~f%*%<$hY@6m0oLyFYQ`YZfR<$uRKa_4BXBi0a~j3i-NBp`EE` zw}YuLk~L#FP*^ftu6T-8X^BvY%BcUr?;82uYFofSZnLOC8^=_IA#{kNd@u%6zwz6N zHmka)fc#})QfRwx^FT!`3Z~JWncR~4kgnj`ocABIKD3HG!JI+!WIWmC4>!?>qpj$f z_vGaWF-^|y(8CqQc=SIwA8GW-m3`?O6I5GpOw*KT3>4M%8zj`#pc7VGxJCyh0?a}1 zor$W$Y`5EzT0=3EQwa9KV%DoWh`VDntY^<7fud3={%|@%*r|7psD%Qxr`bm!EuQ7l zFGvDd2aN6?l1sQ#0CAt=(`F=*#jVx8R&#)}im~q33F6FgT&+789h9g_mt~}N5*a2O z_|h4pH}XAGe%m}AJsBIhF0ztJ!Q$f4}1|Ya2$fZwVQ(#8_fbByNH`FwhTa5%oMNaTgFgF3a2Aw@4csXP0*yT=&ADBcr6SJI8es&)a_1;2ckY~6x ze?og{PM&oUOrzQGxQvNenzP#llJ`a@6jJEttq1n&HR6WQ;lU*Tvae#b;TSW5fDFs~ z1T@qgVB(>02n+D|f9wx>6*BMM6y46n2X3YUnptT zv>Y$%Fxp=Sr(af#FRCT9J`hBop$QyrOsRyVE@jK#>3vCNn-NymeeG2<`Y~#?&%g@K zK*mSa%)*VHpgji?8p}|rt;=jeM2yJt%@S1W-&Z9lF4eh^4^AXE={Oj;(YyiCLWUv4 zZvG(r#W?YF!^s-Lln7(0?MOR!AGXn$K*}!Njd)Y61n2kz*{3^g`NxOlKQw&fXl?eL zPj4uDXiO=gjmcApS8|12FiEIi+(4%2cfz=Nvs>F&dYcX_&C|x8N2=4=0cx@0_ac** z)&!r2Y^;mph-Eu1%p8E)>Gzwb(@W7UjRL|#*xZ6&SPl^4zc7dRCvAotZTHdaDH8*s zrEq!X)vsF-+Bry~bCPH&f3%kB*#Bki6=f$l_clO2VYGRVb<}g;v@E838jm9NGmyP( ztg;U$S0MaxmrZ4JuO;p`*#ACrcPccZu+}!(5gS4k1b<#^A;#@bV)(liqeL_Qvhv87 zGKDwh=?F%2rCdmLnxEI;(UORea1m~~*Y8c4O*@(c^e6TOtu%=jx%|;rmLMQH(yM0% z5T1VF(HFgzH(*VZ%8S-SZ1wElcr;aFxt=cn0%-l!C!SYX{_Hs{$C?}JnU%UHLn0-z z>tIZ6qA{rc)o3xWO;6pJEApkcH2NYvK7bd>xr`k+sn06;d&wZj>1;31XY|Sj@%e}F zex(%%_NVyOF^K?CEN07x{5E++dvId@S%-yKIz!lwJK~=rPyN1f$a}io)^NMz5$rO~WH0ZJjD*G@* z>=oSfv(C?%FC=Bi^m(m1cQEeEdprv7l_9kWW(-s-cFW{38CE9?kQ(uJeW*cUl!_Fo z9Pcj1Xj}VXvOJ`PcT_2kF!_Rcl^JOe!qa~oG`b^D$0LWA=1MlqnrgfA1P!857&SVx z!nX!`DRH*%+RGxD@|~#-AMT0F;~!9>>UPcSAO=QJDijQNK9^Y06>HuicpR#g!Dxe1 z0fX1_I7hQH@^90dtjr@(Cl8d}W8(zkjrZxBgczFdUpX9Oi9v|<6B}$U+<;6*$CW!` z!G_Q%bS*H6=G48p;|%H9GqyK#SY8IDE6YAk2Lft#s$Znty_>Scx}U*rpdF>a7C|9P z0JvA=*z-+fEPPFYt?l@6?ngHKyebBgbhics^YPpyeAaK-$lyF?L>h5#L+O~le(5o~pfjp5V$PY$z zKp;!kc&Oge8LZMM^+U9VHRncF(AsZ+EEew|Ng#k?&l?nC1R~{aP@!K~T8(GB)R@na z=iw#Vs$&j5A{cL^_raND%ef#2 zbajaxPr(DhvI6H_9$TZjnPYWMO;DdqDYWoh04qhLq}z+9T0DWGu@C>sv7)b$&2N4^ zFZ{A(hI!0}br9CaLrv;8q)Hr-G3!bNL*$ACtI3KlZI}3zA|??mGz?o&?oi??dSiMn z9ljQB@-xWrt*O|RsyxHq3SN3ttkIgpV0EV3-RpbRM(CtM+dQjm+J+4_ZVWGSs>cr6 z`oV-^OPwxRX!&w-``D=e#T4$Bqw6O8@nUH@@MSObhto7o#%mWmdm~|c_L8@Fsv}ks zfku7VM`aY%<)HTH5z(U~soM{pm7_43$~e3DZGjx=iZW3H;R@IEp{ z2GU2xnYfodblwX|KDF>~??HPE5&h}?B{>n1t-0{w$qWgtG?pDyK%?S~uVEC9-aOQ7 z?3cbp$s|h5LH(*jrjc1S>$K(d-#@Xj&NeFxirxc*6Rbu^?DzQMR{#F;T&4aA#()i) zhE$jjxi^m5e8_O7BAFYZPQRs&sc>_3vCX`OWgO8jjF)s}Za0Id+=GRE#*CgThCd*E zyOyL};Cul9O~5^gL04^qq^lm-YoFP8m zMc&ks+pgxn+oc}mE` z&y`Kc1GlPqJwk}0pZY2r4dN+MW>dsSpiMet(YE=HEg9?L~HpH z|3@3r3kzlKn-4w{er|*p@f;u;;v$ZCLDz2^Bvmx|P$Md!)ztq^Yl=tDH)v~gug7yE z%^gI2Y>ajkoarj^$acdhhxoNRS4(H4g&wgWj}tHqQ0lmQPByX`?#&d#XjJ_fZjnBR zP9*h-ZSIaKlm8nEVkCl%D>d z!6W!tYfa3${WvUd;b4{JJ3>CMOiEFHOq5`RG6@(+LOr+Ki4Zu8hp?7^9Ph=XCH*xn z{|(XIiNe}&k#5t?_BUf$j!GED9?`ND~U(-rS76L_+x!{w&f8*7`!yTQmW3Iu6AL)+yxL5KLcTh=YO>c z><-o}yfHu0K{Wn#*F@wScG_5gI)S}~-WipwDdcy(Ag#HcU*eb)e8$mum`Cf!DhKaL z-1Arh&Vng*B$UE`d|Tu;;SfLZK{O`ZqpM=C&ROZ!gq$$ zfM=Y8I3{;oZtBvECrRw-zQ0jOnCTX9QQ~h`(`d57!NC92)`?MdrbuVipQ;eA0Dm~gI0y%1>YpwiI|Q9A4=Yokb!4LsO%QzACD``7KB!hWN3JL#n-BnCpt zJKwwCikOPG_(&y_P=6&n$?q(Dhxvnb`Uaw_SzXjjJhVK&Tv3?-+V7?U8v1ZuAqOlb zJQOam(8jfAzbj+T{cB!@`<-~bbhw&}>y&E)Z#{0kinPHW=|Mu$^IfX9v>Ga|H{$eu z6ne~At*%RkY^BtDfhYj?a$4NAKhbO4oKuHGc*BD{#B_8arK9DtS?OSf{GEZ*)ju=* z1eK1PNvZj2fKavsMmfw0vVwCHGGql64%&q^z-}YGg~&NPi%})RK2KFtoxhhWuCm73 zj%;oS8_XEaD-Z*}!yId~)p@f(@2TtnfilPpn6RrGyJYZ@x-tNgjh}N_{Jxp`3SS_f z?C^R4l?-B!4@UBCM6<2X7L1v`?6oxM zh!J^y3=^wn=_Ypa&gAk+Gf$jjf-4IKQ>#`h?u$nTfs)WUg-EQmM9%{(mRg=5sNUr|CXs4pW! zX%UBRMd$yCyaZ7COqWF?p0`qe0!WU4Ee&ce!s9CbpCM6!X2CSdnWUAz-Vo5iLXyQI zjY|_AsV^$wc6(Ue{Yf?Kkh@NZgVww4vfKfFP+2tztsN{RP>;nx=u~wcE*SNFUJWO z^cT+Y9Q+KfE8W(A{>uN4-Ik350PyVmU+uR46Uzw(_`mJ8`mU9h|Hp3ozm44%4DkQj zZU3jr005Aj*X@78bpIc_?M0uCJE4{@wYS@D<23TWe>J?_KXVs4G z73LPdt%?a0H>Jl@oL8IbBK><-t22|Mmobj0A8qzsD-KyT<|&?4`ei^_OpVmekUj8F z?U(=I4|o6f}9mP;bvehQv|^f_4QxO zrf8Xk*^-6hO_<>tByO zLIZ^3n6XJlQ%l{s_shyI9vFAu4cziCxhwczm=K#%Um#zBSNz!)>ZOeerwd;X`GFw$ ze$5{3H$Jz$hxd}`7l`-@;-EjKcxz9*cm3d&Cg@b$HLn?;9s&b>Y%3y#&uExpQF@6G zu+rocq|dEbLmN&gCG!lZXo7$wy^+;zOaug>eDQ~YLs(HVY@ePvaw9Oef1fwGB(b}7 zFnZ&NFXLZnNX%*X0cbItq9X&3I}cCs9a^GhQ0<5?#t1z`^N)pqJ15Rp8^+&H<#iq4 zDbdyfEzj<%J}gHEO!j!-$w^~dS7!J10PkTAlW6cfFX)^EfBBnXr^|rH>@m}Kz{PjF zmyzYWd7K~?T!;iA9Ms3e?s*SuGZyvd=sDoeg@_0A#dgdj_nlM3XZ|6fVk>;Br2Q3> z8F3IuGPqGTKW2|*x}fRv1zQ))H@cr}XB{257efQkv)*-7UMa?E)F#i;f6p>xt?c^g zNL>Q!e6&}^0h?;0Z1rH7c2m^n@)L!J?p2Z)82_N^wRB|%8HtZM^NN$mm zj0qEK*=~hg@TJO2I5nvQ_reGG5UkpTwx4b zWmhw$xS=!W?z`Q2baRPSR74)c$0i{Yv@J@+(Hq~(b-(JkDiWad9aZ6fcHC|01BcT4 zuTm`Jza}LgzI}zqgY}_;OxYuLm6mbR*I{)R1sgDge)fJby5Qr0kZlyfQ7;wVWElu# z-}tuER?V9~A5l>9W2eW#xex8hxv6X>@I~YrL`*&bz*^wJ@umDGL-HT^bRh8G3kebL zV+ju^=sv6vVfs`s{frp8GyBpf%PG+QhisL&9efnsHOtRapx^t$n^;Nh44^%khbx%Sl(#Vn|)lmXM zmJShUjG_SuHK9U!F@k|DkO2Y^{2o%DJyVp}qxD_f3rRs8CVo)dL-6Vw3P{v$FT>Qh z)wKBF$<|G@iJkiG=OSA{wh$?ZP)EppYtM&2-?}*7OPIJMzowr~8w_(muS%O7uY+b81s`Fv;y?Oac+o;Tt6{C0MqC?tuvsoe!F1D#B)no%)$Tt6_rRrg`Jt0S7 z_G1K)6cRCY=aZ@K7b|WF86(?U zYb@XjDcH8Q+#AT2&nfJxnW7mt`-uLemoEw65F`h7I!H*IIwm8bU>!|UlL9snATVa5 zRNd3x?ZGV5+Z*{l*1XAFM#Xy~9_oK5#C{LXkSR0u>lyLJD`KM!R;{Cg@DJQRYUrBa zvSt};*B`g!JV(hl6ke2;$GvK>zV{R08~?uQD*Qk=k9{H!D$v)*D7~*SHD^O5H4GZbTav}2U|_B4`@D8woU? zAT0X3kIv!+^k`6${&;egAR1${0bk9e`C9%Vqwl+i5!NEs#CfpPn}IXMm%Q%7x|`*h z85@!ljBBKewKQ!fdz{laWG+IgBICD?`#>G}!qW;zgJH3heaE+G|8Uc#V}~aDAw8EB z!(Jg-IM_kGIx6zO9G$V3OPW^=b;tgt7HYLoeF2K#=26ll7vc+^D2_n9+X4=pA$i#e zk}|4DQ&9kb@aVzEx^jgNrPWEtza9L)i=O&hsYAr>9592uq%2V~nbZii!99ULRTtUhxYa5Z zza6S}snR0BskI(KaKYB;Y{IPvBvq%sQhFOhrlpzdHIP}t*Ez4<5*jy%eysQAzAf7UP4 z>x#h&l4@TJw84iPlTi}SP#>kHCRo&b{_4v6V<_^hwtn^vkRGEZ19bT~jH)^wK`1K4 zsPJl&Eg>|u{HB-FCpH25A(V|AZSkG^53$oE=0Pso&@keHX>@s*o|{fd#t)bd;H%p+ zARDK3Qhfaj0a*(biq_DkWx_&ks-b3tJ_65$q^xr}yrpK;cw1#!ZWv7EOCT(y24igwo87`jb9CyHch@*j zS0f7K5{(RM-=xUgGyq+t>wSPWer9XK$smgfS%IcQMJT@}+VE~yk|i#j++!;Sk#7FU zVQ84AW$Xbq>}GzJFOo%aar55Rm;R>@L1&I(Vy3IBQ-^WS_yvXO z`BzNCiJL*89o7;<3|xwLT}&qp3~!CDwTRIvBO1;X6Lp&|C7$KD$i7BCQk0_Ak}Y7BVZn$mw?1tX<+MYuhlCy5&FkAJk<6|BONYaNg(@dXJUZEP1&ThiEw?r+<4`DP>IIOrb}#Uu|o^a zx>#({gjw%K%nvC7n+fVn@e*e7#iZh=Wq&;hPHiOBJcS`fT%O3EvjOHkw_$+BSiUu3QiN7M=E6GhLJ_40USNqcrulXjB? z&-BgmPN@RLR5~g<`a`GrfAI%MPRTlZVXl`}9dkwsug)o3$!_3-KK*O&w-z5uf96gB z`GL^RMtc7}vCj&_6dju?cF1xYQ#$d~z}j{uy!O-WTg4Bq(2sq!fODNZN`{GT94O+q z=K=^I_}0~i3uY*qJxqrT)k~iX;<}*27+BRkltliq=TJ}Dm*7U+VuZY%+C%l?6`4>w z;8&Z5?Yt}`$ZGaURv(#L=eB~SgFYn)Qm)@feB>KY@$HW8MjmAaGqC_D7{0z@{*TIP zgs))wtdom4=3vsi9V^E8B_@Iy8xx}&S}t?$%1gPkr7&5~RrR2yaehi@z(IkD5oNVV z1bs~M0``x3eInMZjNrmit3Uz*DOH*OH&XW40IxNA&+SLX7goNv-EiFYPbh}Yud%&m zTtoQicH2B}n<$tI!F+GfEz6Zl)uGV<8=5@fvq~Wgq%&mNdVtVB93zwlQRQZ6WLiFP zPo50$5Eqcd$S(A4=+7j>#*uUv{|f0Dv8}ry{T`$Z292q{DeKq)nh104W!fIiC$oq*j9~h{SWd6u&0H zA$1h<>E*za|NQQCyZ<<6)i50Fqgm`SniEp)$QUlOnDf4GV#$5&wxz4l8>2MeG1@gi z!+8gZ;}9d+nNU$;->&go;Y{quJa0QDQHnF)edM8r&JR8-4w=9+eoy+V(7&ZBhP`E4 zM(Z_PGNhenPE|G77fnks$b0pvoyj;R`DBO?+~dVM{LAl+9#_I&GOpKUGZ6B--^$ai zqoD*GMZYReX3+psj^w^eAu1ZA$@d+ZbTsUKp6Va`q8m9?&S@cha;FH^y*qMkc^N8U z&;B9=mF`%uwRggYN8`YGv!_yYBDSpGT+p|kd)~Tc>DfNYs<1~?j)8Ye%{YKDx{#OS z9vg*V@DSumQ0+nz`Y>-j#}q~xHlf4X*}EWrA5M<-Q(gJ9j0y_4u82MN^i|TME+W{0t3Bk9fn>Y#xgo z3EQm^fh2m9DiyF%vylIuqbV8}mI^WnIQ5JP^dW4o|5BT30`U~0)GHTwNdG7VBd-;& zmqrFaF+QN9MnAa}CY!%}!m+Q9@*`=x0XOGkQJ0%&&zZ<3D_HzIA(mekPCk$Sl&CxfLbNsj*ZL#9; zG5n05$+Lw2)(#NbyauY152jdVx%(V?QH1z1z9ABZ9jM&*5iOB`=tq@>v(-+Za@RP<|Cv0lk-k4TZe&WOBr+?EbU4 z(uXWLE6~T_A$}vf_QM#LaW>!!%)RHgEdT+%Vk#x@1KkTSiapA(9cSM6=2sxtf;BOi zLenLWFV}YklLJ#F3Ft^7%s4-7Hh%0>-bdzR016K1)-N?w-Y!EiW_S4IrF4(oVU*lV z^fI83nKWHs$%jOC46r+-`b=ZCf}xXnAL6~oP?eTH6PRU-3vGYl=!+jImSUmvV30!^ zEJ}L9{*igmQUUL~K<4h2)Ji~tt^T08wPM8z4gBz;)*Rd2(FS-m;d`9OhMIW5*78TM z$6BEgwzbe#@AKrYS4-=|Q(Y`EVc*Vn8g%1_)jN*7{TJ*AeshhS&u_yf*`GAXHc|q; z0aAiYU1#5K@<@z?VeCra=P_3gpVI{Jy_*n_L^JZBPHh^tfs>o?_$iSzfy}#cQR`9`@WwLIzJpTt1sUq?IP#vd zlfZdh#fSGt(p5`)t$>TU2?b2BPV@uX8*|aH2&I?;=Kuzor*m}Vl$5a5xe$My?dT$0 z{B0s``m5;6e{Z-qMDe({stl6!Y{-fr6y`PY#BRo05cwDWnw)%Xg`^uURlhMQE zw=b}1L}4}wAuN}q+K7=Z;3iF_3C8hsI(5AZ}%=!5(I4C^0QkKhbW ztKT^p72jS-6ORlhm5zb)-!8iSpKm=2-;kc;ygR4fKWWm?gI9KbHzX%rTl`y#8W*bh zc!OI)r@_}2Zs?G}-bR`z;wBaA4}o1nlM?C`E$A!A0Vj|u7)Kp+Xabk_D=7RpD7 z{(g=t_Qr?rQOe(%Ya`cHr7#q;W1c~qhg{o$jwY2}4XB&ERU68Nf32FAa4vlTiHZ9A z+i5Zmw<{f*vhQtNg`eNV7!WT2#Is_)$aUUNzof-4(9&`0v4YJguLwCR1 z$o(~|e&A*a=~B(d;)+gcUW~tcQ`4$ z9b8?nd4OIu{n!d<_pZ(9M>f@d4<5O3J!rzdi0}Q|^X*M!+4VIWX5(lbtvx=g_!ibzKHumEyauX#T+TwVK^>DZHD#<> zy5JY}u`}@#5P@i!pprfC*s_^a^n}6QU!^;;Mx96)Wwj2?9oTgy9Id%gc>G4=1X9YQ zw2id8@p<%dqA}dy8DArKckO!nk!|Nmbi_e0C!Ip|_vF3F(%`5zCh11Bdm{B2hI|Vf zy(_Q&IV(CIaE3v})#I1c8+}!HUrJf><5>i2u0L~5dX$Vj>EV)w{a0dX#i;*uCe-pl z$o?=u3YH|9pbNkQllgBb5PD3u=)Dj^ON3Nn>TPG7;571UZs|*{lZ8ceRXMXL>E}|R zFAw>%oVm6u)i@K^$5?yixm%gl(Y_^tI~X#p1O!|FHm3aDXy8mKEa}1fCOLOy?h>T$ zC)+2_zoG)suht^@3LpOQWrd3l=Add9bf*}Z^dc|PU%^w7IhZ3*EhX4WOJoxW zJ%abBi>tdy-tT;6Yrizeh>8P;5Ilo0L8GLt?<26g1w;4CZ}&W0$zi6eW!S)rOL0ny zwKOpG2flgyqPJJ^fa6YiHpg7@NAwBKiHkuKuMec#$Pz#9+5XZFGB*NYeR!SK)U%t# ze*gt*%J>b_*MK^up3CSd#*sjeS)r9#!EEb0{=ZWS1AkxpE6%2)cdIJAZL$cupb0@_ z7gqzFU6g5t&>`#2XzRiGKo1to@Nah~Lw_MtjDd~{%t;%Zo>bm#l}was&#c(bM;Ujec3TWUN32da$5-WhJbH3h{7-}1#LT|_wl%dJO0|AA&PLoAZ6oNrq-vN>oq??2 zM<*t)XbDB$&wKeLc0+b@13vq211_aok|GwIT@e*1&%8u=IApv%=9WwUWe%3FO(&~S z7SwRg%Q$qzM^fxTE`I?+Op$>zjB9^Br0GzWlHNI&Jpk53ve!*gAsvpBkOp+uZo5sg z2)9=^>{9p}cxs%hK?1aMU9I(FRlxODOHTW&)nDk%->nJibLEr(YtlQ%IhLrK1~P7p zJia>$b#1;BTVt!IB7QS&=|5%NENQzr!gf3gL09DJKdAChY_OvA;Tj}!*Al&aN7saJWKZRrDa$+p1!t6;*_g^-4i9zXZ7 z1AP*>MX-!<%)TXO{WMsQ+r;vD|7ws+WVm%OX@pHKXt@SmMh6&Uf+$y0;B?|6qv`a; z^#a-TdNqsusnL$Fp5%}oDEfhVqLADI>U%)umQ6{5BUjiM zmal#M!IcBhHj*R9mIzvu9CP<4UWGYn;>rumG zTDDQ{w=+V< z#KcMz#?pwl`Gfy9_D_)^*Pwkr@e0mSu61&k&y3&bz*d5O0@BQak;cHuZ4vx;@b)Jt zDb?4jLh=DZ&=``}3lu4nbGD-C_g-X&JPcop8ac#q%J}o?Q$HCVP-oC4jXMe3>Q?N7 zSDKfqc#@yuce{M9cw19MR?Zwa^eNa0rAT4zCS2S^VnkVZsF(d6iu69Frnx|E<6t)VM{o9mg|zYXB{>W zG8&oB6*4D4#hhFo>}eHVgbF^fUqN{gOBN^y%&+3JTGBbQsOWh^{W2R^gfiZN#JL`wJjQ`TJZ zXU?cfG$(6J9ZUBvM&xnxw0`X1868$4fVH4QmJnyb7>18o5BJn`W*h>nz=*VJu zF~Pi_8ulnA8pAcI!WtMmNZ#W9yuJg0y40_*FjiXB0U5(0K{jQzDKEJ~ee>q`V2Su5 zIwwf|&Kq4uF-Laq$WFSp7eZ`;lJA=zk+`YIJx};0bxi6SbGR4IT~iGK{BO>Qxu+{% z5eInruoFRo!w|LI{zQJYn|lkdnc(D`{1w)oivDJ-Nf?e-`GLkVAbrmzy$G} zso-Ze(9L~_vC_r<)Ejf=ZY6771o@4|f0AP)I491a(4W4-xONU3H5{;FBNs}0{HZdE zODh>Gin@6g5vRm#WQix~(V`|5q@r(K?3o!+Jj)9i_$BEwy-fJdpG$M$c6bcXy;!PzHf(RE(Hl^g0nB&8(()aeY1b?ivM z194BsUasq)?TP^%DGfz7!G6R2}?Ew4Ze- zRykT!^ui{L-9?zk(0RpLD=v2d6bWxnG>pyduE8 z-6(nU^s%GiFVjI^*H8yOEB}o!e8K~%9c!9FWeJ9%6OC#>b~Ce@Pglv!j}a#re-M0r zRJNEG1GtwWr%gOe6ZV(`cKDss!m*w}yt-evo9F1jtKwHXAK8YR5pawMJ+uzmnH+4f zAx8O%g(5@C-y!6Qp5}qvQ-^3WVU(68WugIpzws=Al>jzY?7ziGBJr!tTejzuuNy=7 zTtlJm;QMp+gx-?kn6zRxnvspbPhf}6nL&QHL(rFq);-y$k|lDr%aH{zvW(@IMzrrf*Dc8QFu4W&G{qh zgTW*imDr)L#qc3dXj@l;&moFP@${i|;ihHOWo_5_iw&VS`2RG{COG|WB>usjerqS` zJWqB}JWb~}d5=^_>*|Auzhwy=y@hh*!Zm=6bCO!!`)Fa?WXcNtdBM<=pKSM^u4ovl zUxZJ2>+eEi`e~ z_=~sX!v!JWm(Gm_P?9}Ce<4IxKM`mj#ZI(_AqlW0ZBTVl3w52>Lp%C>c#9)N*^?S0 zQuZZ)1+7~DWIssQu=!itZdx0y0#7)^SL1bT2jRJrLNL;Cncdy zobz8?kN$9p36_iKP#cV-pz;-cMj)8}9Bw1$s}Lq-SCsWIo;ut{wLRcKw!5jrv$Q(d zHNf;-y*Ma~EwlE9g`osG0_K|+Jux5kNJgwL*L!%yFZsNIIPkk%592lG?!D#9hb_e2 z$hLCyH1KDsx}}&>t55l&y0#=wEo4ATV2+5&-#?PkU?s_V>bwRwP0`1g>CeJ3MO`)g z$wV{(x{2qr5(VbiOoTYG5Z(H6mdWbXZcQG9kB|0QO4 zB55xl9CeDEJy!g-pI$z0OTHASN!x5bWs-e*l8IAug`=#N%Z4;D+397|#%ZT-3}&pO zekd^ZJQasIZ{IFA%%**otp)o z-qi?a(C|)vWd7QStr);z9$-B72(XrH z2|kXaCB8!^)Tx|Of@*!d7EMVx-juUIS9$#Os`#KKXs7)}>Zo$uh!q0AgtJ-qvwT@c z#W9UndTaWL$XpSy%8`T}4^g~?f)%vNbW$ZQT}0(wu?7CF3`0t#l!{gjalRYQILXD| zyq#Kl7FpwrRama00Vk%8t}7ZObO1JFTv`XKPeQG`jiMAoXa#0MR!}=oHEuPDcyUB^ z6)9F3-C`;G14;X}2SUaOsi8frac-DI_jM`rg)lfF5c&^4Pe`(DvW64=B<`V_>0 zSZ|Sb^P#&D>-%QQ3)oQDStbkV1uK6P`@kbngT-ruv1kH$%)jeRAO8Fv2}OOaNAVe( z-G6b_L=5Hc9tm6Cd5S+&r$-X7-3O1;4c65d5IHxBG!j(_Z?|4VCI6!enJ=_*hQ1Z& z6Yf&Et^7;vk*0vn;t;0z3NXZzWs^GW+FBLF@#cp_sCUw(31;+E2ivmKm@T@PsG|YP z`%Dfe3WJ(eGM{s$C_H^8?oi3e*^i1@OHmn)_76UL9QU_A8l{2iSF?xkn&~$aOf9V7 z2tJ3+$6E-j%Ykil`r?fgiJMD>Q+SEumyGiDAdKloNQe*`?IJr}^-Vk+&#c^!@B^WgFf zId(pLAusAKfzcw{9b72|hWJ8CY4yY6m;`E$;6O}+FrvX9#J8#kg=|}c?*?`;QX&(iC^9g zZ8!fBIFa-lNHl;h|G!(y>54`}G3v3wB(4y8aMsUOWxEa6Ow+B~wjBC>6UbRcG#4-9 z5p|2xsUV*hBHW+>Q%}fEDCLQ)(C#zo-VGrgbT0!va&ijSolM2*%=-HB67HtbLT?@zMe# zZwk2*wMWxR6(X~%5r%Hh*Ydxf(R(mBxpQ)!yd?Si2dUqH#0JfCFLQ!)IzeT9o z)K5!AL)3o~avX`^O?M~USV@yn@b^E;%Xp3CP+%9~j~$$5j;yw#H4+DUZTn;ps^;-- z$t;3YyWNW;pZb~)v~ZOZ0iR5}hTTch1uluNxsF$t_5L`E0c|y2ogX1!x*K zc{A_gcoQSVG(eX~gA+4OMv;X@fV#--4(3A*j+&e*o@ub2%q9onS_%d=HuW5j1C9L4 zvk(R77^zQj-Eq*_F2+xHBW*h(z%_rHOx5UhNja2Qx_QBV|Yvwhd|Urr?#% zgC((zZ*&YcQ5XC-+Hvz^CBTLd4Udt`SR;88U$u26EPEMNowZxh%7ft$)r zqaUo(=sAjnX%GZGo5tDkybMq~uQ7<4mKm8broSopCbX0NDERXJ8CL~ol=y;GW0B~& zN;Ap3dW@N<2)A5|*7)(X&x09NjwAG!B@zkr&Q+222*_)`(PYRg@;`17MriV{oZKAY?~c6GNd^Q#$BqRZmd^r=i1h3 z3_uTtqXZdekWe{uSr=JB5ikk#-5F8{rwukn%OEV8r9@ZlLXVMJqFeIcx`#kIpy^|# zB?tndI5=@;MPPFq!CnYe%mof2Kc2}pE`~h^;B-I(Az&gn*ITynF(|BV&IYK zbml{~{?O>MPHY>kQ|`7)1qn|NY`^1kXR^%_{01w-^_CKGTOyLE7r}0wvyV<{{uLiejy z$WCIaGr~5bGBXNrz$8pdcYu zt4(UgZ_|`J0`VUo+h*E_E2CM5{}a-@>cHyk`GdmxiMY1Y1Q;Plgrc;%_LLP1&0Bh6 zQP84;_xoM_#9w$12WM!59ocX;uq6P&bxezEEv6&zVIaD_uVC0fmwbkvAp#0=*g%s! zS2u4g%V-;2emZb}@O50Fcj}2od5F##Kj{6EFyCQNsrvr{m_TR0CiDzs-$TfRw1@TU z4CUoi9Vxy_ZU--hAqaFxB6p$$S_GEC0qjAL7M-}K6KD&DYwZ#9(4HxnmTtvxykfc4%LexL72gM zjbPk~A_a*KK5$l2q?!&$&MJ}FjB8ijSR*

=q5JEy7b)`WX3RlLirRdY}gZacF>U zz|~MMvlvg~{S_jWIf-5w9M#$okmUx)L?V_0IBp49%d>4a>vQ7`@dmeF)&o&bgp0-s ztuG+Zq81T<_+9`zZ*G~XR?K52#RzYws$-ii~}4&uDgDnpXC z0wVC62g}d}kZV$p9125lR}3aPe|tg3GbB49tstn+ynaHo?dcF@SifZ8DK8WIpFW5+ z{w@fB4-QJJvQC1=5Rx@x$ZzCYUTAkhcV(5n3WhYQY9wL`myr8bUDowAYXV^3JNK;1 zW&2cDuW8)uaUv@XPPnIAsL@S~SZOq+`<7W0!MKDVmoE|Qk_wzIm)OzKmg=@gzc=sP zAu6gW#WWOVAKqGA7cJtEf9+EalW`e zF9zoV(pTfr3&_(4asf?vKSx_Ua9)dFBO`_ZOUGY$Os>Dkrmd_9z?uE0&R9*>{{-2; zHJd?bJ==x)^(ZAUF<-J*qgBf^~C`69C9!e%A>!3GWzSl-M(Rx@P+EI#aC*GA3INkuP9XG~( zIDyw713$)c&TSkD%{~w}p8U$WDcgvB=KM-{A-XZppC zd;sKIhVll+q@>fZzypmZq1CpL4NmqD@3mUNw!=$s87%VsX%c{q{V@6FeaOcB&=aV^ zhP7$M$!F0cyo+;V(cu0+rJS9~JiN97$nOJuk* zv;;Yr4nB{OJV3Uaa%h-7Z~zklyQ2{@0!zdGVPiNNzLIR?brKm;(I1+X?( z2K`vi$KH#RC@=O9F%2_{cmm}d<36LE0Gxdk%M*8t@vwP{zCH%)wJ{V~yK0?Ks(Udtufp z1I5g@>`!@g8vf_hWNV1saYNR(1ch$W1Iqq+h%Gw?yA(C#J>uzy76IoSsce1c>~+=i|RQC+=Op zy`ecqzAXSL$OU{p5R_hjYSS1uPCM4+XlrgX${Plx^9wn(XLmp$h-4&^I4Pbd%1(pr zzj$XwuFnL-9hMnO7wF~p;rkzohYyB@51dmnAnSkg+Z*B^e*vE~1;8a=j4dW79*Wxy z|Hy-^%xM$<9I4t^b}a`qpp4#uw!JGYdIzst$cEjq0bYL}qN7gy8Ad?3DCJ=Le{i@$ z7PC^UGl$ltzrXPp=8Gs6oiO3oA>ay1Gi(>^5PCmR|`$oL(b$A7CT@ zT-=8zpnu6LLs?_@?5K3YebfrK+OYWf)j5O|$9o+<*H#9vKbMS#G@Ol$4J3R%FoW&{b<&)Se03s#hMnV-+m3P?Q%O`A*bu~2h2$=a6h0*bxC z-hSb6yQSGR80Zi#xLix(fVEr28b7>0DDHmKBPJkX1wdexz%~??Naas?{Y>$;9VNz@ z*kxqS^eCFrs6jvkI%bN9^LpCTO#t*XuIJtz$Ugh@juOF`>Oz6sD73pP5HOvwP);4| z=@);6zozb1(R!c*ZoeJ!(a{G(QYO9g#f^kbG2o~Vg*WGMDpGr`1mG1sA?Z-!5Y@6& z*T&zK_ZIpyqXawaoMId%@*1`MbMI@aK^U9+KUg%ikDo`_9)3;yTQLF|EAb{%GU3B0wk>6A%J& zhu|6)gt;&gG?q9-kU9d$#&rBq0#A*9b)-rh?|=glKI?j6%7*gvc@8K10BrAn{>e04 zm>-Aba@;Z5mnf>DbLou^jnGGgA{HHfB%mR#FO{(oApgeIi{8=zho$cys1z^mEPMqM z&YO;Amj}J#qh8}{>SBk?W0Ul}bMxHsbQU#j4&cn*)5pMK9LD)*DDrcVl{ne>M;WP* zz|)qlwfr|zSX+bV8~*eF#SRhxHS$x|XCl-(Xpm`c%VJo1hLhF3ET7GaLJZ&vEdwAUXuG+z#V%DcfjItxqym{+b5@%8*SpD-T?N(Ur$pDaVYrv_=h5%g0a zmw)w;QhsN?fJSWY?{t(&q|5VC#*L3}k`|O=Dj`>m+6{cC-LRVErmkSbl&*JR+5{kO zWevE%&Cys6pZ`XOsE6{e&_E<>sepTLL09GRDv&FB1!9rCSO)ZXXh}aYDaO7=i)cUw zpIq3zlXkB9(FXC7@g#M~C{-*#g(;JUoWQ%4IZKQ`Z<^s9_~~1n&!km>}h0zO0`|$xJ`m6;-1gBeYU?K-5@MI`wp-(QBJZF$iBM2im%w_^|b>%?1L(L8# zU&<|()2UTM{;$Pas*9TBw9_;ir#^>PlMZkJ#*4lt3&izqi=*;S-`sPmJ=t)QzMa#Z zryb2scNfIdKZWaA#_on%1K=hh%YU#D&LhZWpL8Hv4dmb&^GH_0x4jhO$+sJ_aymr! zT|j=;`D4|YO@{w+Do#-J!!1sk!krvJA%ML$RX9X5EKIf{`3m58AItLlQ zKy*fV8;R%Ik~N*Q-&#lHS*tbtDH=%0N+jz<>o+Es+=CR#yWk#4U+H21j)Fxs5AvlH z@Z_SxZ!ZYr?gr1Ht>=^7*S4zL=qC$T;WTqmc);Ql;h`AD=he&{Ghlz+KuM(}`=DAKnUjt<( z%X#GsxQ>h~X@dMuPEHx6n6ELU+YkL3801xfH6mUZ4a8yMa-wzRQuG8oBFj-(G~E`mZ!OZV?pf>-Jp7|#LV0jo4$-iO-nr5m!pv=fkz~rQ$TUqNWl5sl>_$dXu%yio% z1Wyvs3Q&W!lof^W{xt!YldX^DGpdWFNSM;Zcm!mWYAG*FO#s!9Qhp61m}v#E5sZ#x z-hzBX-nAnx*xCXJ8D&VQ&4SQbY>sSSd@dG^+q4|)>x(;KLb6nfpI@61^N46;o(aA* zJ;3>q$`h{{W1#hP6};!VzyfxmwjIrpPZAL4DdT>3Z>bo(52Lhq4)q2rW$WskB!v0e;F0Y(C^z5Ye)^}m6@Q7YyDA6 z%UGg~ky(#^g3d|tIWHe)1FPZ_l-lZ<^ozYsZt!C+aHddZ3cUH+U-$c6W z!O6!t?$TK_nml(Q9-YyAmr?mvnItpzd?(n}SK3ik=t;x=^$C&-*i&ak#UMArg8j&M z_&8plw-E75;jn)Ubs%mb75dncU)Dp2t&i5fBmgdCM&5x?=@adxa3`nZkx4j6Ns1Vl z|8W^f0s4>-_8(9iVG{Kp_&p~@Ov0u=Moo%~iIyYp1b_h{EF7_hxOaivcVR?w?X7o- zRP?)Z#0=j1qykaW0kg)wdOON~uZd|80Oe9}U3l(S!YlNZ4kxN1 zS!Iz?rt)HXG%x;*AlRMeJtv3>%c@bs@4;UK*?z>?Q{ji7iWZo%j9HS$ksggnU;7ai zIEu98UgSH-RTMM296#IhQNr?J33nXU5y`P6>yTCDkwkBUdEE6a$J?tRG;)j(xt(B@c;3tn1h! zDkPya8k4mmle#pOl}nDwj#BIN^EXRTjfA(Dfdr0q6Gsrep zSt}XUlT&o{=(Nq0v}XF8dQ7aA^Hhror@I-YyEewddx0Q-cViZwgY}}lf@R0(5{;8T z5-p^{wm1$fX;fdpe<(2c001lc#YZdHU0(6w<`YoX}Bnhr_+-iV~l62?+on3Y&rfoMS`yv?EG)lQosyZwNS&x!bRfc*XE^ zG&4>ss;Zcw(Jh1hm}aTPQ7B#Jg&f_6UqeVv+J!(j-ZbG!$2gewC zVv-PTCg5D~TBOcm46^=jkz{sgc2(S;-1z=;Jtw@2fY-P7lMnho-pFRDI!;OeR(mVI z61(PJcDt?DL7fea22_n>!&h1g&ff`21y2{kg}>K{(%R6H%MH2V&f@;U&2Sr=`8F&E zL{=rpXfPnsv+XQ`LViLYu(eUtyc@y^^C=3Ma+492kXTr9C9*fXH2$mXIRJu?4oXF{ z_pOf!yU8RT%d5mmBD(_|P$k5+r`tT@)ovGR6@p7hV;bZ_C3d(V#-(4oi%LFy zlX%}~s||<1W&$q=?Tj}pK1WsOFZ!3D;LF>IpV2ZUJ$G)2Tao>hyM)bV{?BMVuOVGz zlX!B?;WX<*HrZ($XRAm_tQ!5Xeo4O~;K!e|-x!Td_h2GR+81=xNA*toEY>o`6(%?nJn(UW*4e6J`8+@I|0Z zNh?I5fIEvsU=~{XyHh4HfFhjjcqR0Aptz6>$N`nXZMO}|)JYJq0EmEcr;?zQLBqd* zB%>4=QZ$#WtDOBNEf?IQcpoEktK!Z?P^LZ~o@Z6(jqhAJh}g;~=d1iZo6l%_0Bir{ z*_-Epv#B8|2f)W~O%1GGo9z3`!*yMMjIa5N=unM)4zg`OD(3b)^x*`842dESQ7^*S za;hFE@K+zQ0sl?!q8J5>u6FRC?7Odtjz8|SPD|xfsMzZrUlo6IeO7Kf*IdW38>BvVk}a-61FDAA3Y#-nzK@HfE@ z{?i@EfE=I-4m7yLOYr*3Bu;%94ly5Go0r~W1z;^zGuilc5y;>IZ(j6cHV&MYT;P}-$xh8&ob3yzQ*!`;`D^Rv-rzDTfhlkak z1`k}zOTqQA2<=*K2!Ie5u1)n_jP;x9Um|o`@ZR-vBk*LJPwQr#H<4nZbec4WF@Zxh zoM^}Ago5g7RPWa7u9EG1({M3^D<_ zk+QlDy~>IWXp!(k3C7@-bb*T9n8}Y81|`JIn=C9!ZlKUW!cK9Hir0R4^05Tk4E7;K zGLm_%Av&;+`L0`$;AI!Y9p)+EG_~%{c79;}FGEc>sHYvPo|~cKg%iAK5yLk$Mc9mF zo9xi|TA&rL5m%r{`Rx8O*eRF;f$+;|-lwdGRA%_07LuGI&wt@wP;S!6k z+DodZbN4j>#l;k4;@|WwOFO)>Cyr@%gIjc?!f!){S&)hM+;GJTc$Hv_)o&~LXc-zl z0fIR^1B60~^caxX({1XD=GolL{=>X~r>_t!r zv+D3gD*z>lII`?(i$VI*v^fG!SzP6en#K7vd^hL3*{Y%K#LMixS12RMOd zrgK7wpbeN)Q-%N>C;-X&a`BbG7eP}CpMNj~a~K?N#-V-R-4GUCNIXJc@qC>c3|OIl zq8Cmd>eGSVa=VcPwE^b8Y*LMpkhn0XMMVITay<(DnJhi_x-Q_H=nY9^`EtZ6<{%%_ z0z}|WqeyiNib7_QX8G}a=zDw+WvG|94-vBu@AD446X$SpFRS*S9OU|cI6j(pnTl*T zT)Ua&hx4NRPhHT>3BMFoV{=nJo8{*KPL!W`ecq%_KU#h0d{tm=f-uNCoP&5Ztx-lt zt6BZm7*#h7*7+7pDfQ3+5OMj(f(}*qP8bE7NacwSi^+|_KPAlKH~!t53V@Y_`zm%L zs}0jjO(q}3M$0A4f(Q-46!rk&u1eqrXa5Hqk)jHx0XIaZKpT6h2hoVsoFzMkGVLJC z=NQ(VNlX({ptZp}kO@(ivBlJy^j;8vI_!fs=!n4Fh{lj&#dFaJ9?&;K}+=XDc+ z_xdi3qU_LNcn1z*{~78X$kUzi(cK6c-3bpcZYC?(<~?<0d@qCi6^4UFP+Wj{Z6Qhh zy&z7EtEf(U8`{~U%(#k8^?a-I(F@6Bq0)Kg8X~-gA+E3}rV?oLt|>*XzgDXZCQ|;U z_cPa|9WvzuL{e_-MQ=;Fxt}TC%Mrh!7icyt1reav=Y4ojGEz`U-zF9VIEmpt42#cm zt+{bWA@Wbdl0hBR7x1p64vOPQmH>vxPZ8#Kc2|jGZ4CXk$c#?P7zT2N_ol$1B7&4I z@5KbEZ8aLBy*1Y#I9%I#2}jUV-~tk69MyO<gQedNr3iC5t`Tk)K?Ec_V z)OLTeg|BuaVMVP&x7}YoM!6ms|8J2&rdT4%Vq+{pOu#|Z0XU35MosDkIuChZ7Mw*; z*fiv1Wrb!eF+zkRa108Gqs=HemIW839v4$_4MF_8_An^^9;Urp2;7-n?_Vv-#hxXJ zV_l0Q&2I7ffimPgjFxZbyh-q5N*=cT5ROhC;?SA_cWOLDuA;L)gz0Z6leRgJjNsj# z^OvCN)Gc!)>x?g%6mkL!fJgOJZ5B7n7{nTpCoqkty484x3S#Hn=@!~>@IZiHNzj*D!nF_9QDLXvp{jU-~J z`c3C|5@;sq$TX)RNd^&!iBY7;rs|X~!&5K`(&Wo`n=P=3s54JUj6rWi4L_C9(@^x$ z!%?e?*ch4ays5$}JsFwfpc?y-*@(GMmB9At%{YmC21WyV8#kNGcO))m*my=9K&-FB zb)9GMxjpvXsDH;6R)OF)iSTq3Q399Oh9kTko`Cys((Hj*%>w_V2Ne1f05=N}^VYae z^o+pk49Lou0%jme$73x)vyU!QfAQ|sd8vF_MIXl552B ziLZwO;;+A55TC;A%!o~O(3ZMc5~#ziCF0*4Er(MN3y-jXjP?PG`EL;6a??hrx&O#z6bH* zW7vkNdr+g}%ezV>TTBi>eLCpvD|SD}Ndop@3Jy?^S+zu<*uDAqm#?P`qSdKACDOlv z&Mw&i9fEMk%M*1J?@0+!Zfi{zm8FtRFA-pj2Ay|qAU)fMw-&^=!(RAptj39L%0BtI z6;-UCZgHd5K#4d87w!fq{74KEY)#3b%tKlIF|t72#=+yug}fFqoxMKYH>e(~p6|Mq zfXc>^YwRkLhLmUbx|J?G&UU>b)NzUN3FvIJzLn7}{E zTWUoTTPJI`3J!oTgc4b`18f{VFxQ_O{qd!y3serRzz4p6CJ2X>jcCY?Q4U=OE5O!+ zr8NrqV`F30U=kGuOHD%bC?t?TlVf877b7AyDJr#qpwo1AZ6umHjC+EB*C*xkK?FjP z4Yz?CN@s2PIITxhVDBFyKLgEr_rPT=!_g?#w(LOhALu1z{=sVKaA?+J)@FM1)$n~P z?$i3n{-(h8v%iJh1Z-UjS=O=^!L#B3IPt?ZUG3n&&+xO6z>Q5EKvTI>+ENp70qm2= zMp2;)O15^GiD-u%g$ajlo9p`~ZfFeFVyN+ToCK4I-)19jSR3hWmM&1f8;05MZ*DBg zX<(1g98)Z4U^y+4RFdk^w)Pk9?Z7g^mluLo|-GV#+@ef ze>a;Wf0E#qj4$)Fp2TT=5DpbAH=SzX`Mb2of?^+TM3!L>l1t)wBwdvjfkXo#h3=p7 zOSTZ)p!7|5fQv|VvQTT>gUGyI#E7jVYa)g=f#;ya;2 zj~1X`+*%RmZ_P_<4aGL)``Xwt4gs75zl&TA--Su?_jbEw2|LN|t8Yy3oqC`_cn$vT zwFT%g5_1>{9#8A?+FJN0w8o`%bCR2~CIEf2eW5*NZ2=?=c{@oz8U4hUL~}oDypxSi zN8ZE18x;$XZ;PH;%(f$=?!eC_V#TOjBC(+Zh}7ERbRkzkc9XI;+f+jkJCaT!bULEF zSP69WIRNG4^QeBz@K~+qmG@`*nd?DB2_pjJHV9ZERyNc2Q$5ErTuf*F5|m+0_*Y|@ zQZi)?CA~R_+b1!yE`A6^Y-%H-k<@{jdV@J@K95 z|8YROy$5@Sishpy33mV`;#p;g8ISZ7?WB%^q2QO`!cHzjO}OGZ)_ozefX(@Tj0ILN z4$Kd1+<3a%0$E(@QCa**?Q0d$a^@+Y1#srRBr+=-s~WpewYYC37kBY zV7n3eS&p1`2AZpt-N?^Gek$Mg?{Ff8ngy(K{-WmimMh_rNF7Hf%(8~Pbj2+B4#{EA zg_>*4XhS9P4#G_rG#=V~wKO8;Q@$sApUCDPKegzUn5Cej+A0q1a3f{6Rcd_IvFFO?>AE=x z?KAWF^&;;;31XEiT*&x~jY6@2FZCaKS<1;LU$*jD7fuldBK^z59fkl~XUvZn^9~s#6FOQTXhrb8z&AFQ=iCj4-@V9smFkeWmzw%esQ(1X78Au`#IsorHPz z-qf1-HZ3Apq>0MBr#$H%(G3Y~7yefYi_NC6R5bMbZ$O{1{8G z>*b&Uo>ojBMq=Q^sZ6T^%XQIgNOtY-ClzB zeJ##>)B;4@-{6Ga!kM>}Wn2%Qgbc%_XCxhEas_-J=n6>V5l6r-Gdj;G#h=!g`R?cs zI>9PTIN6zJZv)`>5V$&4RQ`5|K|A2!z?Dsb-IobcH{2Zk!a305vlE*AqwwTo6=)WG zVbq?q>GOsRfMR4HM8d11KJu1@t`j{7mg3x*K@_RoPGsqfs0 zk0<%vNC;yaRDms{lL*~{^#b21WpkSQ>YyYn$EiiV89fClw~+{_2>}lqKQ`Axxc4sL z^no;jt%#x)*o42jvxw(6J^?Nijxa9qEaDI9GmQ7wJ1^c(B%!W>sKWeo446vg(2_|% zd)}4X_qEf+1AFT$h3LQ=SW~eAiQ(~9*0}4vMbJ$+#DUcK1UW_x_vWh zFA-2YBl0)Gj(>EAN4$hM{he^;;hdCughbCLyH4Un*8d7pfwKyb(SY~-4$^)7#K?9H z(*YPcD6O{WrA6eI<)3RZr&}Zh^rYK|v|Ky^`dZyE*U$@$dFc}CRED8R9)rwz$-D&f zlvyeZNg$L^Uq&+|lYK@@o$)u-Ey~*s|4i8xYD*9VAj1RXZOH);d0yXNB{~qYO@s-o z7eomq3C=QN7%lMpY?woc__R${Sc5Q#p0J(!chDA9#`VGiql;ohPch8w%q-cXNO_xG^rO>-pjrS9iaJYYBhID7m2 z0Fs78tT{OQ7GRzmhKJw`QZqG_7(=@$v_Gg<%)1xP>GgkSi~zKbZ=r(X7~yE2>rWIduJ4Xi+6H?|Z&D6C(c z-s6lj%_Vs>=}$~g-Bb>_5FUgF;Xw5H05kf|OE*r26sHT+N=uDpx({r^`?wl<)}&BV zBy++*tnYS}h&FiewIQdVWLR=AAnGBgfMk&rd^8k5tCmsX>-7)%FJ$fZt2MBk5P)E> zXT-E;<&eb^zJyK@PwrqIk5~z(DkQ zJoOL62ccDq_W=;kLap#hd)w$ApMDJa> zTbcEd{P4i33y@(>=p9(9`wzj?_69PsG$TV$w&t5;eetD^K(X@=H(@D2&Vh%}5vU>M zBBs1S-C_>EV?N2qCXq-lWblaIF5>UUB-H0EW&7XVQzrki$w?63nf8@6C`1@m{@<=H zAdOkpZu#+@n8ZbR{hf^e!MH0q5dn-FUgDNys%R*6xM6qTl+G2feO9tP0&}4k)Lu;h z_&eDsby17hcx1v6+*2$NhC>sRYG^S}oC`htG65%)sR#;#uKqF5EElWasQmo+4((4` z_wt%tYnnvW<1H6(m zMlKRP-tJ-HdHOfQKfs90K8eOhlOdzQv~YuV@s4Vz*oDwkE#R}84(ccSf0>8+Vy$Uo%;7BNf6?f6)$Qe+0Gz#X{w$J) zJTO@zXVjf%`@J8IIYi21Z4TjtCO?;u=oPyW5x1|oM0}?U!2%j!g%wAREXU(hcby0c zE)T$`e%-rK{S0*3Jf?|c_m1`*@~xqD3!)y3) zlELHx!XeYypY@#E`~)BuixX>e3PD0vI0St&8WJzI=a!Z+wezZmqafu@cou^IB_a{I zB}}okFydbvtiq2|6*15oPG2M1Ho6U$2cp*!2CR4|96#N)eW)_wjP zxk^xyubFYV9VUo<5C@bHdq)Z1lk(;aKhw_70x-Y9o@2`Sob&};kvNm#81+1s7XrYN zVr#;5S}I?*aXsl8I>wJ-F`RgeNbg2WbY}bQ`V06e=NS)QlC%=cWV-WMPw^x z#bWL~V%Prrm-KU{fDFUv5H~;vqSN#5gQt9JuXa`%UF|yFC z?#q9R9jqBBDvg*E-Mzm+wPG*!oI?FzMAK-)G=d!e?FKcLRx^3w063DlU}PM5%unK@ z;r`4tYh6Jpuu4n+{a0Mer3;laaqLdX0z^#0_=7 zx&EGa0+7oI4Im%f9i-{*o6Zdjn9H#jqHSzC>G>CPQQ)bz40ET^r&}TBW^|te$V_|( znV0WQ8eihe=&|Z)uOI{fonFA+JF91)>D)_?tPy1Ny)lM5?3*w$E1fxJdk^j8G37Ph zfh@NjGYgXaC!4!j!~m_epi`dFDgf@K9wd1g0TEDFe%2a;-fP4K;)LGkjJUrb1mNt= z^B3spWW)s2ZO}Y(3whv~w%c$%P2Jfh{7yfg7;U5(t%iv(*WLbz_)MX_83+K0mllw? z!9mbCz?#tOkE!WtZq5Y|7l;$NXggJ#1t9>b29?e4evto3cnDq@3gs*@W;Ww1^s|%R zfLL00X(ZlPm)m9W4UUV-vw5svu9Rp-&fSH&i-`iTjO>ST2lVRscm2V{5`wv<79hSiXQj*?$Gz zZJU_`h|a{A|8hN8M(id^do%F|_m;tca!%qW5*G|^INjz7Mgab@_oKnk>fE1!4ZVl0 zXJ{}r<+R&S){H_QK&^Hzfk;GU+GYWYMllaI#xc~Qx({>baSA;77eN4c>J>Z4HBYea zMvb0?k|oA^P(arB-hFQI@45r}2D>kvxKtT}Y8rTjdF@@akq)8ul^j94qs zEzRVfbkGU1uA2cckG*s0z8~I^#@qe5=ehTZ0|pE=<0cA9061#t^UTbr1_Kjx-bSgc zJ1EdNGMAf>O%9o;zZuYhiqXH zc_$g|M-jm2y~DlEJ)i9ujd;E&2mn2xu;C0Vc&J>M%P182dEbhx0jRvb)jm_69f2Eb zOaVx?e+QChT2Tw33F*))QFbi`EG&|fQ!my*4*#W}gEvbNfTGfGhy6JH{gRj%KuO6+vBBUf3YCz?~S z0-}J2tiU=vTnIxC5c{_E{=JLpzBk9bs1{|YfvPAMvSM^c{%dSVBtl4pYMD)R5pvv@gG*v!g(j}d z+Te+(e$-SP7wlit7Xb1oY@9U`?4Tj8zp;sTxWHO1K;pxZH z%^%-e#>Oso^>(6JWPY9Y{UuN^2C|R<(Bwx1YAB5+|75k~5+paZLmxmGMY7e&rI4T^ zY%2k6{XJu-J-NIJD@cYX#U@*@q9OpSD75YrVL18a#=i@b;KVw6x5?g9R>P&}Gvf_Q zZ#FhWgs&~N3m07gaf*_Isc>6GZCO+fAh)mtae-;piWNw!VjPQdj1<+V2-FK?{aJbV z>abU{^t;~n;_pX=ER*=JUY|EFslMX-*S?~V{IqW<#8t>ii!ekne~mTIgt7SMh1%V zET@PJrEAXM)|M3S-*BLOylR3{vI=jOQ|8`-Q8Z50&<5S!4N zUmV9S{37<<$bUcXT@t=Jy?FUjE%@+-c{vz^qn6X;S99D*7@if z9jN%14JzM63fc>gsNNcpj#|5qL?49urF4jU;*ids*)3M86mC zdQlR9_xjE)f?djhkxRXj<14G;-gE$U{`A?o{BQ@Q2VK5^o@gw9ItMm98Mw`ne5mai~bvY@PYFuuGdrl5?Y_>u{5e)DkZnT7R(*=z|VQsIX>esBa* z0@VSGX>h|On6kAGiUTSRGtIHczQ++i-#^O?O{?M|KI>U;<=C`7GX?{qqSjBkuPq0# z-dLv@rbIRvU|Sh47Oy|8scqZ^T6Yxa8a<`LA(4|DX?Ky@>9hVNY3j-i$Y-@0Y_V1FcC(%051^s4$`YVz$vbW3*nNIc&q^N zDGKnQwxNh4oed2-v=gpy5VE5W{9`)6aSkC+jMkxPh&o#l(p#wltxU+WMw7`UqxAt~ zMOo|bKJ490_)u&#fbsd%&@_ea5UvwAX#35gu5_qJBr zM3o!izX=?d=vY&S1R=E^o9rJnL@apG4p{glu*m6Z$Eif0g4Ka`b&iLThaUJDF#I^z zz;~Ih2OlFwndx^Hhe1FOg-d4=Q61K$^*x-<69u`as0qMd_WbIq&$sqEw#{P|wn588 z$DnVASEO<*-HmNf*D0In;3*AFJu44$Lr);Z*=G;@@J$Dsn!vx?zG2RI0m7zO)JapJFjJ0m)39HI-R!X2<8RG=C%9@Amw1=!g4Hr6%h-yQinoG+kb?8CFXo30dQ0&2kfFKsS;#Dcj>oT@TN^x>B2% zUJl|^MSME6`6FOi@0HSxH_E1?cR^Nm0ms<`5CEn{pT>zsF#tcwwBL6a&%$ zp4fb52`0!Hcuoeu%F{ED773@7sj=Z71fuD2hESe8I)C846cecJA76-$mwhqPum(N` z0z+dcPE0P0$FBTDzHhlc9rgB&He$(>*X+Zhr(v@H#cU4mk0=j-qd11 z+!(e>iQL|kV!Vwa^F&H`P-Gp(hfyUrnMK(O)wVGzDGc=RQM z08A=lQ2JdSTzSN$B)ebFZ&-Uj7XKc+{$AJfT%qpWGz5U2R2IAVQy}QuMlaT<4_1T| z_)+EY#Ie>!_ob)3GP2HK_l2oi{QTOC^vx%tktP4^!II<@B)!5lvfHtfj7V8mVi#{c zT`qPZ>4%H9eLWrH-U^)9B@h`JU#LLkWxu%8zkpLSD$^^QiuI|y#=gi!ytcnmJlE!g z-a&u=%J%ep4q!mb_~W6TXwLo9FiXNhvfzUffHxWl==!IjfcP^5o6Kobn}z_;lj?{2 zPUR#+1A{=ALAZZ9C=N7jl&y2R`Y9Uc2BuA(rn}?N36Q;?02@BVXfmOj%is#A=wlXPH~a|P z;8;pPY|219C~|NNr66Jy6O0C?Fl4iAS-B1onopB30QC1CnGZnwkC+GLgT@WeU#wn# zT2tIK1b~B&m?8@%lX(Ph&M;*l+ocI;1j?&k*Q(gt-~?OEtHlA_W>(g|gqQ#{=QD}qW4O5)``3Gd2b zxH&&mi86+a(PSEd2BN5<{Zh?qQxSmafcej>tfpUEERpM}CZ_jyo(ilg^gLMBW>^G@ zi3o5m4UosFOnbJ~EvCGsVhjXp8Hy|dO?;K^Xna4p04rp8qfcF$2P>Q=(NT>pgOf9~ z%KalNPovJAil~yI%iNMC%mBkk(Xg&Kw^7e1;)H z^*jXU0`Vmm1OTbN7P&`@XgN|2Wm6FV&SrgqFaFViQ^pps!H%HOW)!D`AKrm*hLfjR zYz4_~=VH^u+Sl_hZSVBm-^w6Kj|TlAPoxwHVrxXsU>fW{)58U_%o-eP^YOpbCYXjy z!5nxLJ?cz5koO$9`~BO2va6_qOo zr1p?QsW@;!ia@G*=nv>mfD^fq8&?wqs?-Y-QWJrMg49Ez019c`ME$}^>taAr8{*jc zu)RFbCMMhUI{unjdw1Sf+IT&l%*^}D?7Q>veyIB75%pIqv!klYi?W>K5gui&3hHv* zp^KP~&MPRYp@<(IZevprA+owLgKH>F5_BNoVBM3#?)qhz_Npkh0A{_F)l@Fz!8P90A)ioz<|X7Q>-3SWON-_x0BC!KYn> zO9u+jgrpZ}YW@8JXP+9V2u0|-nEuB?>o1J|*vKkT*101;Kg8r zTLJ_K?(Ux8?(XjH?vmi{PJ(-a6Ep;O3j_%g2=4B4$=>Juf&1;&o$3!gRb&0OO|R)* z^~8QPoK3%wI9Mo?u*0Em?V_@*>mPwIh_Anj7xKA_$_Z$UD|dpeellpP{}A1RxIS67 zMQk=n7$%E+phWf+7sxuy>+L#V2ENJsJWXQ_eg0m1wr`}(lTQl6`#AIQWT-YH# zuh-^JVSNy(3!+?W1o-1QSj)hslFSJM2_F|e{yTB>?nC#0E`ZsoXV<{@%YXq7Xna*{ z*n`~B@15?U@GW{ic#i^4SD{|Io*K@3g~WC1T|*tub{N<*6Qhh@dul5pPyv&?);(~vys*?VXxFyW=;=_@q6 za}C6BY(__OPUX@?-hTsADSUB`L*mJTNiQFGfe8>aKHpB&6}UEY4ZjxRxj(T$ngW?Q z5>HQmY?>u6dL*I)<=$e5Aw_B>dLKh(pO}a?_q`>LtY#OzZJ*#>{@h4j_PX{{j#n(= z?4HZZXETf+Vuf&mvQCk)Ngm$vZsFO+34ewYw)C(hZf`iKSxhIA5$^d2PgzH*0-XMx zR7wj!pEvO;QdgJ@I|koz{P%Vy{N!K@`F*a;OfN0Yj=rP=N#C7fy)Jkn`v_4n(oF~ zN&RN)oRVU0J_L18(W_*2xZ6{;0}QOneCnh@@d@CU${c#%c`i4IFAxtK&Q~}X*)nQn zSLP6PeaLHltEr_|$k<`gsN8;aP_EA<_q)&G zFpuTqtt)r(=PSIAQZKip;x|;z(gM%ozmE6%ne0%zZlC%Pjqx`ImV7WV-z&uso-bf^ z*O7>$$n+hn%l>emA)rnAZg7>{sZIaZ-}zyLFUJ|vZP?Xz zB*B^YafB83-pp;`f*;2oHczuz%wMe~t10i#^W>}&gT>E=gfF|2u`$9J6H7zQmD&#s zEiBXXtIWuX9YFApk0nYI2?dIaVj~pDSWXD|Oy7+Q-JL~wvzhrNpG&&I!h>IcHvL^E zhRcQ3RR-B0fyRZ?v%9LD8AYbDPVx#;EO4vF!})?8hY(F@7EUIZxk_<}UI+>FkrXJZ z|NRt3h11F<2@vs?ygM8c`Rrz;C5>qvFWg_hlhSov=jId(6nd^-jvQLpw@?KdXr1Wa z9lnBD=(n^tZF=sVVwD8K*;s5sd#b>{?!qcB6!{2)jDqUUe?IE{QGyAr7NGM(cII^(zisan z_>k#>Q|exSx<~+1juW7=*hGFoG5t+qdW%&RFn4uhedTJ7fH{sIBf<3Rart3;I(7^& zoz_Dp&}qPMCbD)~*H$iKt_b}oi?xJRIA5^26y0hN)tITMik!gvUy-WAW2tOVq7NF4E0>y78b5RrijU~qncRgYOnQJrN7G2!g( zRy`ti4<=jlf`Tw20f6B2jj}N6;@T%L`w61daY__W+Q~Oqk5}5T<`gVeq!eO~v`)CW zz#Q9cvAk!o;ITHfSv07XwQ`LFWBbrnK+&^fUO>RA{Bs1gXW=a;=o1^5__>2c#}OwS z@O-Kzx9rP6GOeJ|e5oSP+7-&lT6NnoxaW{Z{z=sFJ!I?$4-9M>RMnX}%R@6Au#*CF0>eh~G+wW=UwI;I2 z#l2g^Fo@>n8QtGQrHID2Oaw5nEVdFOkc2FtZY|WUg}Sv+w-)NwLfu-ZTMKn-p>8eI zt%bU^P`4K9)efQtTButKb!(w+E!3@ry0uWZ7V6eQ z-CC$y3w3LuZY|WUg}Sv+w-)NwLfu-ZTMKn-p>8eIt%bU^P`4K9)efQtTButKb!(w+E!3@ry0uWZ7V6eQ-CC$y3w3LuZY|WUg}Sv+w-)Nw zLfu-ZTMKn-p>8eIt%bU^P`4K9)efQtTButKb!(w+ zE!3@ry0uWZ7V6eQ-CC$y3w3LuZY|WUg}Sv+w-)NwLfu-ZTMKn-p>8eIt%bU^P`4K9 z)sjDWzkw88^+Q4m-+0El?WQh~Q; zFWDtA`p=ha90kyeg1~740ODu_1ZD&w7ywivAb?s300Y|#bXslsiWT+dI{y9CT zFvnZh$T-;0Ur%r9aistIBxFEG$G|AqKrieO0;y?-K<-0y3@rfzqjAW?+0ek%9c0Yd z006Wip9WgLogI~x?>xt90wy{=kbdii$Hxl$1xTN+p`LEU*!93xkk|a^<;VwJ13=gH zXG_tLhux1)Z41si0N|iezc+j^Iea*LSg)ZG44`N`OsrqbO`WbyIq8G|D8f1sfA21@ zZ+4<}Ekgk$1H+o3;o*@6W5X~21ToMH(FW-m1iu6u9H#dY=D%PWLMTDZYz!|Az`;t% zKo>$AL`P4@KubqN&%i+UPF6)uL0OTHmVuFhnt_qdF4D%r%s$$JhLM3DV4xTN7!j>) z6ch2`JuL$Vz`$S|{UOlGG9)b8=H+RCS~SWs(IU`aB`7*Vf)-$;HA}GI)sKwO;CGF; zpkv^n6SWPu@O;njWF2ZPLdOiya61~R+VHZ!wooy2;-UcnN@`tS>p(l}U@JdeYKE6! z={RgXLLKdc-R(JP{{>4U>Kz^&66P&V$MF&@1HG2Dm6eSiBYijk!$2QG1EQxF{s4qg z{!gSDAu=FIMd1(tUO`$$L0URkDo9#JN=i(gPgYu5N?A=)ONm@o)N?uw*%Ft3@ zPtU+gUrJG0Dp*QJ$*MoPuNLeE4|!ARdiUqwn1kW?_#(J)YueXXaVW2Er% z0Wy+0rut?E`sVtkI#U1IRYKmt*uv1z+}Kb->R+f*DkhfZ<`$-^(l4Qcq-C^&4PXs* zWTjq0m68dO0?A1I3svS{s5WM7oB)i1UzD<<6dE!C@(UV`CL=Ab`qIT;9)bWc!b^7P zJ$DZPFiAFIV#+dNVx-EhPL?+I763q-6p%P72U2-8AT;fvC}h!*-8{Mi5^T&FPPUTbD8LEM^4vi5Fg z#Z4%98QcBc8Ww<@Z9`dP68OHe$Ln2~7|F`1sdA#MFKtp}*dGB%Y=QuhPSDA+ zWLV%W>tv`X`da!rQeT_@;V~^p z2N5so8 z!qXoC`+;(ZcsH6nq(x?hA+E16)!V1rXuZmmU6QMYD#~A>rE)31vN|7Y?=EO+qmm_` z?Bh6eYfW$TDeYV^JZPDJ9uH%yR;&HECH@LiZ z=2MU(H%gLOwH+8VxN%JE0p>*@-DlVu*dbjfH6C*$KeiZ`G3(HJx4H9H8a*P|>9M9- zrWjr5$I~BDOyM3jz>$VCyr+HpxA*bPwS9%<44RTB4KLUo29)nKAaT3nzgD1pvuV)U zf9k%MwO1*_{Ahgkpd?QpUmZY_-<^5df$`*BbL&n!!Fc)#_t*doGYEtyI@d*rlIjr3Ec|I2743j* zPo=D2po0uP2A4d<-|~E{Kly8c;2i*o;;K;gj=u@oSrvGRa2@|7gomXoD_<^qp)xSw zXP0^%O7ce6ewb-y+5US>X;3S?Rh|Mpuh+MgMVX-QRTm)E=PLEF$6%KNua> zJ@Ff%!il166GSV$XW>n&RYxOST}JMH6MEu`5;{obW3ZLs`Je4N&b~D7>T8v|nRBUV z>|fF6iNAk+!+0sVJIO#&XPCy(plvzw2o(8_?6+`?6Bal9=70~bo}6i$}WgX zZ=WV{>Uf<{+w<^ytM6t23{O7|rZYh>O(Fgt27FW*hSZ--}Z#M&&`@mubkL>8=g)v(sle`F@VF~Xz^pWsbj|{dgI0b ze+sj%B_}&fI0jQXA_?YerOwi`>0x{fsx?+9Y~Z)eX9Rzk12jlejG5V)`E?L`K166} z74i`un0xx_yU@Bu1V;oHj{^5t9pwYj?fo89WVSH6)_vbT=#?kb zx%blT*GZj8;kHnMZBDQ3C0$_heb!kVAuIYEE$D)a0etG1&Zs@gOl3K-FSQ;VCR~Mi zI?ImLMg8Fvo*Y(C?Oy)fOP|(YMPI**hr#cWu;VmW_pXT1Yg4Vs)j}sJo3Mjpy=f2Y z`T8+f><%;Fqx094nB|M%Odo+`nr+U`G8c##uNDv0f6I>; za{aFzuGew*s<6KLCOsE1M+6mTjG|45^t_}(7EEUqAOObC%V}S$vo-lS zRxWQx9{3I4bnw!(f01HltUI40#;gh5td^YMh-}UDV=N<|0n1Dq*J`(dM!))$j>P^=*Jx_>e)wU>R>WMeV+eTX$USED|5q!0QQ3h%h01MclvEx zElcCC2!R0Mehb2DJvE*`S~&a`B!asK z!341p*F8;s&GzzQH@FiPyX|24;mmVH07nEn#Wi)^y!kR!z3TN-Ief6_B!4RN6##kh z{F^v8LW6ne1A3}d5w>us-}CFcxi!FmURvuk;Pm*z$@ahb)RrE_Wm5?e>0M^3+v?lX z5|1BVn7b@+sp6=A>fl5+#c{8^SdWi?2M3!0-(Y?cUuO+duU>bY3m|PO$ekOxhj&Nx zc(u6xrzY`@UuGQRV7!D+eqDStCXnUm2fVW5;%68mMIQhE&dvEEFEi;o*rxWR|MaOI zVNv1!(2midvymKX;+uJrS5J1#u7vuM~@Xo}4NRe z;aEE;k7~7wUhY3{V&Dl3NiW@-XNOIc&5@AE3fjF3+!-D7-EN?3c)*DZtk`=)^aBT; zBIid)axJo)sCfe^Eg*84v4=H=Dax)$I=;zOck^{wg`f#<2YUXe z>UZhkWSiJnj_+`K2dq3OkSsgxJ`W=-t?a+eF$MBf_(loUqsVl-RMcSAX!Uw;>qeI^ z{nd7OcxA9%J-k}Z+ckfZqkZ!b`qx`|!9+^n!;M`MgZov{65RHSg@xzsc~%%xp&C06 z!!E6&kDyQ&q3~bLNnokw$XD1Oz)DJcWRTt;-V;qJ|EG70N^7YTvJx@b?sF|YMmO^Q1cUvScpR_g?n@4embJ#x;#tYYS6i|!))l4f{>Boy z3^onlf4AV~koSszspp<4QDY1#KTCOu!EHvzam`At+Q4r$bY=;$>V7vfghiG-(}K0~ z#ng6zN3qn?AEWeUZyqe-)4Bx8L3X-qB|QCrxUGa9e(W6EUh7V0$~a}$&qIqp8}m>7 zM+G@{)T)(Ckr4Ub4&nrwXML1+Wi|6YG-H+_TND3+<()4d4q zvGZ@W`LdY7ZBZ=WjM7w}ka{C+4RUBsP#xde>{`p_H>!-=e5Jv)g(}>j_c!ZeK#!YH zTpj*;ta=6UzvB+(Lr)-%bgL9q6Y)nkF+2}_Be^-_gis}HAdZPau~_siS0QIY~32&#~LH(JEH zhqS6H{EMu^Jjqd|S+c&hrZM~+vG?WGUHCUWECmafJvFG{wo`*?$)koSPuDaVb*#Ut z7Sbp^>EqIFM zo}7Jcn*KCB{;c!WXe}H!QR6zh3jyjvo%Oa0BDiZ(Ol-*Ml}}I4HACdXT^+^~>RCom zQWF3DuNF)QRZ0}1EY8Cs;m+Fa;cQL;ZZ^!A-p<<4j%iklEycF~?~5pY=i55w$fdh! zHsDD3sLd{1aQi9y?yKR!XV^S9f!92c@=>Sv3;mx+JOY35Oaz%5Z#j?u`>}fs)&>Q& z`f14r==?TH2{*=~>>1DOo@QRbvgXUh9wd7;3a-_n_~xyj8X16_ zu8PcnCD5`P-eR%fd%`x63mOfji!lt`?x5pB95pr#_ix#Hn%JM0{4&-?7Dj%$s8!{j znSHArY%9SaFydG{%O##82;DA>+4sj=mGU8E{DUsk?L17ZDRg*Kn)v>P^!uP&p5V>d zqMlvvYZre^r0Wmw^4}HhDXm1fP;=^_uaa;cJ3}<=22w9~%Hg8@`aFb;b5+~ep6b>( zC+m+5=7sU)G+@Dl>X2T%AT%cl?s%nXV#|Io!4vIGkki9!yD9=yWde7}*D5^XL%C<2$1g1xjss)^~}`}}vL zj49vy`eN27zpLm(JJ(O}U4~bso=I}VSS5M&lXaD?=c|}Ty^5uxx28h@hn?;x9CwX; z97)_rDyD2Po`xH=dwMPGTyO2#UtdHZ3Y02OM^H4n&u}!D$)qR@jP>s|zEN6GD%YGQ zJ$6bR?;Ni`KFUdB>P*))^4Las1GnPV7Jj$D@>iZYE|!=xFu_j#Yrp~4^YtS`#)t5y zFSC0I1JXZku46idFd+soEuVP~K4jl*x(?ePTvV=i)Eg`Y*^ZvH8F|OrkQ5Y*SO>%{ z($9~T#)tA&Hi_ztsgD}mJcE?zoC6>=-YbxF>Yu4NO`2JA=P`(4T{mjHj$ZDl!cJO( zCF6lA7>^+s5rNAi+2XcdTEn!i+r`AuT35eQKW6S9u_ie4D4>g@sG{Uh!PLx#oSy$M z@zf7qg2-R*-z6_%E{l|}3m@&LmsUS2K9-_F9CCY+?K=FFe(xR%?RiZJC%#BJk|pHv~7Rfs|w8oB3Bi^-|5 z4{(PGS;3k;Yt>Hrd_cG1U`O;V|H{O{Y7jg7W)|stBo)rfPg>2O$j#JWS@`G(-P42B z(U

R9oufkZy`l%8XT)-EPMwmpDD(UR}X(?W!oB{)$+0pJlJ>HB5b&*WNE@jKd*Y z(3lJp2Ek})wznj);ok|b8WPNMi1>|}xLAOavThz2c2ZtT!IdoS{QRfx>oOs1huHUW zibaT|mhjA5_*lIY$b8FLw&hb#^_V&`LuwEO8Msib!1cf7Xs??btu%hxU=3j|a^ODU z36!|(dEp0#-_m!XKkWq6ejcpoy|X+_FlZd+~nH8a}vSVr*FcWj9%iAA*=tIs@_kITbC<;Ag$9k z&ijF7eDkf$hh>c_?#}BkwBgi9Dr;JmWV&_^l4R1}3j6s*I&|{lgZun|Rs#_jGDK z!z#a`e-MtLSDvBOc*zbNAhK0ZF4<(`mcGGH!20c{YRAtPN)o&c_gCFc^2uouXX=OD z;Rl7sugzXC?t0rXdQ}=#@!h^bS`V$Z(bl||(cD03;qQ+=7QUP|S(Oq;%fqOp)tP*G zN;(6M0WG%N?u<2QEZrNZ_qZJAV>B+x21|tBp&gqiJ4{R)mI_ISApK^bc%Zx9 z*IA5J`Ls=SFG9jZ^zdb1Ik|T*e2ypj94AtX!0yd{Y!V+Zj`glaI#Rl?$EzETh!t>s z#L*|QFh=L}GQjVxP`rtF?z<{q;xpzaRld?y6_0)gY$(#0fBZ!;e^tx3%OhZ9F#2fh z+Squ#?sgC`qh~iBA)pbq?P&Y- z9d1nR7Gb#9!s@NVh9qzf)3XDsOnjC9;CQmi57q{A_5-PA0{yAOfrcr%va3_%s$99; z`n=i8;GPh6O z_38IA_IKYtjXoq(7xPdNlF%xuNCiy6?IHavHyKNePKO5gQp9U)?z7BmwU!0+aisSt z^V3QXQBQ3ur{kSye2*KkWdR6hEgd2_U%)huB0VH;4}Zv1-1!(f+Y06>X;ZuCpDZ%? zlQ_P5M7c#zku}UEDRm_Rd2tO<)Y0!^hPSm< zyqz5e#LKZ=oa+|`3Q_fM_l{rM+xZ|$J)p?7?|74V@=-AYEOl3ldvr)OO%upQO_wZs?FU$G5& zmL@b`A*t$H9DZ;OWV5pqaqeghrnZMNgkQ;U27XGxw+o&1bzha6j*veexZ3w+lpwKo zuwoCy^TBic{1X+Mm7XqpJC~bB*(*BsRi4P^NN*i46jca$okzg()tf|ayTwYMLU7Dy zaz<8_)@{-$qVbEEabPAxX&f}f};P#iDJ$s#~s|>;} zU*V~y^f;xze(w>4LlRsmV@%`JfR$pP9&!1FDh||V)}G2~MO`&dR?AL|GvoLheP!D@ zUmt-TgOZmMQ0j8LL+hhpN)tjP0KiB~Y2Pq$azA0^72+sy0t)fDZSj%iuSbvUfZ(^W z6XnH0W9XgJ0JtJ*%E?aXo%-!|SWWaI7Ob!~#ZpLf&UUZ=uZ*6LkqGL~d^J1%?;T45 z=P!NG5UGjANHPsi?L@eOjevl*mjLPh1?awSSzE9X0z^g@Q<3iL>te@Y5=4@`c7DhB zx^k1xOEH-#$-)Bbkg3A&EO#xj0I1YXWTs!xRAzz0cn6B~*I5dF%Z6YU&2E^{m^Kr4 zkvt?)89N={5HkvsKM)_&Nlb@z~stJ?*W0u|lESq)Bj!G$>DgAs$U zzoRj)5b+jpw_JHN9fdgNjXs;v*b+)SMiNnQp@yoA~o=9=*yG zCgb=<^w81D!-w)G7u5j9Qg=N+ONA4%=PlNCBY`-!gvbz+wK!yAGxDuWjw!`Y<0eXQ z;RPQ2{-L_AS!l&*Q*;T<#?>D0#b&0#`A4x2-Ugd^?^v$wYFq~4b>DII3bLoE^}g#J zX9KVdb7?WqhEu;#5uI#Xj0!SgctpOc48ptX2h=oxUOK0t0nS=m#iKQtF# zdJ$yIS?U(vcegE<@i!#}{YPd?JQhc(I(E(t|JpX6Ev z;uzrtXA(W?!(sVz*2F2IVDeruQag_R65Fc(g5%s855}Le>8c*Re)p$c^@2FR#hbj0 z;_K&uHxi`UlQ)xA z_+u!uZw}_XJlRcfsj-((^7E5=5P@u^%P|wCNBHjSI)-`#oPNuS%5u2H)K?}d={BV> zych>d0T4LQYW>ut(xa~Z-munN-_8Ei4P`8Y{Rw^#ukz)1uTA{m_^``c_sDE)8#E?i zT8)Ji%V7D)3)5u#sK3-Iqoo!^uLcd%i*oM883A-9ZO0<$MJlt+}u$2DcuK2UgM=mJf z$jQ;eS5@{y_4ta_q+Pcc626LWaPyP)eXhwnTnIc&c{O}_x zYVwE^lS&$^%~Q1U7@kk^NP5PngK!qh+stV=1>GENe!FCGpe^&YHKFPib4`YVqZX3E z2ol3ah%!yTKb-VyQ7fgRF_J5Z?s6X&1i`e7EHUwqE605!W`Bf7L)e3t*<~jphAyIi z;19|y#z>W*I}LQqWQBN@;#I*UA$O~47$PX}Z%{G0{7##TqXs~P=f>4SJ7SDV>!f8q zMW4P-Qvwl1pU7)C#vmDlyl5_ zwI6qJh!qen8E9BD5Jc~2=Z~q*{7!F?`8RFf;FSH`bDfMrOp&G}v0{xhH)=vx;Ic5|fVi=ZXV3@C#@J%&*y87(i>Y^&@+5*dG_U%5s(u0+gJlE$x;CJMpT&ver@7Z+ z3bE`H!QJ0FZGM3C_Uu?EuWCIM6bjV!mz}IK5OJ9E-dy!?0HWLnC8~R#+;->4g1T7E zyv0+B55Pshmuw}?uFk>Mr!Isi*;70)#tt`FdD6{Uf9op~wtPcrFsit`E*^F{=rwvc- z*KvL{xXfAJBpHW%3=fs>L63vIX`T#qoEmu*&z~Q!@6DWlZP9ITgZW3NoYhu?nJ-fGl`bSARmJF%bC@gXl{6)q(~Ylln=Et5IXA`l#6U z00&7Y%Y;t3rV=d*VV{+3uhaIHo=-?x-FqE zW3dw-_(aBIxItz8bT*Phw}_ulq+lhv>Q|#85&5I%CBOTsirc93k#=Mf9lR`hG;K|Y zgkV8$%~8V3VZ7|OSp`@_msA~i>_&~(4Tuf2UHXV zJbCi{fMWm5b@&{J=w!H|31Do$#1-?FliC<5%-xnTk~T}V7G55omwl-+p+@O%;qv97 zKn*0BxTfHsskVojbya_@l2b#s)hdkSGa(upY+?sryA87gq$d9)-ij7gkpJ&E9n%VC zO@N-72zw4;#A%E)5|YRlMpZ#j@2U_cCz9{3!{J4_EsbKzJ$Keu?3J3I^-=Gbm$i@H zE>13+0*#S5uGh$JS5A&GFqmh2JSpjo_|}|Y8_GxY;^im=w&3!+eGZP-EzR4m3eV{M z$`_a`@S>C2>wKUTIJVOnX?IKf8YKyF_#?Q8o_SXk`_*$RWrL5gGr}|DbKoa@ zE1jDQwOPq?%xvcOz8>p*qTB@;1mR_2`4%FWrX6Q@Du{MID-S9pjC5Z0sHRJpXqpBxz%5|{(DkjIL0@DLVLN&PJe!2}vK z+`7ow?%2aeMgql_U@`e2&(ERa%qk-BAyuOOY#5*4rdsv4p}b}Re|bX*o*7IHzC={z zN_I{XyqIB>AK?UCmNYm)s6gLBI@j!$$nFt$ZBxd95-xq&K!9p;{7u4}P@6XxPv{rZ zbO?Q`U&e((Dl8zhdM98TzQ()~Sn0;qVH2%GLIMvOZKFqe&n>5rwV`a}8|4lwM5UmE zzd_nk5kZsDi(7KOKlGLRizVZP$K# zlWC;HkvKCDIfZz+hg9C3rbPHm}ES(6uuuyyAFP;ivF>{ z!R`^D{T)c{2_=GI@$SEmnU}k>-$u>P9YuGNw{Y3vFMEdBu&F_R z_sjIg^L%f0*r}=+3}6&l4n!qC+eS0;CcEC9`gxq=MQZHMiCgPoM@gxwRG9=F#Ya8N z8Y6s(xbPAGcAg!{gNihRU!hvbj<(w^aVh-)^Hws>jn^3+l@5O2jDJaH-Mr+z1*33H zEMKVK=GaZ~k>A5syX6|u-zCcvw&I0 zeZ==a7%HMvjv6KpVZ_Rx5Y)g8L-~guVOZ;T4bx=45-IA_2+h-!Ok+(8n}OSby+P6T zM39Yso$`QR)Q%yxub$=S#mPi(K;R{ zdG9<}n%5#^-z~>y@VI|C@AclxxI}##S;MMN$a0fCh;HE_i=rv{0Kgn{$FJf9=Y!tK z^5De7R>@48iQR$E+RM0tXmK#@0_79~y&r!x?ggo#u3yZW@wutih~2V1j4ek8xVRaf zJr%M0eBGhGeD@*1w-fT?YyF9HZ$G2tC9+~0C5JA>cq1oS*c9i;2k{xm*9`WS|J@`( zG+DtS-u#ANnQ#@k^2)cPi9&aFO#3bD6lyBJ`$QUH_pdLd;iBaEii6!XAMWG7#lMFo zjdi0hVCO*g$yN#UJKW?Yy~@2r5^z&OWy1_u${a!I<}6C2CF>c>a5*?_vVGfa3>?xi zJ-z(xK^Gwm{{GD#Het(5c(=$FWiMY9#_stp7NoG1bcDn#f+{Z8;!M+m1S3tT7_}8e znognz!NZW-F35->zZdtSu!*@{pBAFPnXWUA^c+EP^?APT3(oRAB-F@rfvPxT3RTq6p1 zk@AiuWRD0?XVeU`sZZNaBP^i5(%F338dkm-ktn&+pAn-*)gG1VTv9Q^ygZKe8gKua zyGYDb5=U$lq`q4*$nI|E20t0HRlK(%H-}v=k|ccXY;5<|^Yn0Po50)94Ag^@GxsgD zS`da>qME~{eiGgygbZ0aJ`N1(7BlBf@_$#Jea7(X)z_#fvczjGK5#i&0vvUe@~7yY z&m@)WRz1VnyC?&_rM)MoR(F?*@@T4idQ45vz7D4GnxHQox?$gIhyLOSwNIs>#-PbUv3u1~--@irq{8&}WT+8l# zH~a_ucy=E_XWaxto~bKOdFUi6uTr$B7HaTTN~uwRvJLe8L*=#R$!yidbnsV6!Gbw8 zH-uao^0LnG9Gpn z2K@wx>jD@wFOC&U8Az=C3#4pfUGGONq|LL7df*=pQ+gOA3_FMifXM9(3;8^Z)80qNr>hU zch{mz=ddaEC9BNRWM6v7p}FL6R5CYD&&lzzcBVuS z`g67X@Ci*vw*A8D&i2eX;_s2&B?s-`1BIs3bo}g)?^iEZUSOuY+1`Ad`tH|t)Yp^; zjw~K1PYgbZ8PoXhE*u(PPg51JOdydTkG*dSi+e0lsw;-$)z*Arn1ZeH z?&eGQ5x(mM0g84Zu;55_XAu$Nh2pHYla(WdeOuFwPE=nOWxtFBCKS|8-2}HfQ;Jxd z?=rsWElty6>dqxHq4nK@E%ZR16Z+m?QUKV})2_uU9d$M1Ai?=A z3KowIIMW&5eaqhf{0k`WSE*-y}U?Ps8Dk3Fr~HF zpt;gV1l1TgeM>R(jY!QHS@i7RkKzwkYBmDhe&?uKOolmZ zby>9F$RRm38@(M9#U-2ri`u5a?oYD{f*BTF#|#S~?&rZ!$j=9OtiV4@bgL8s-DnSf z+3RhubrCj5q_XD=F zT{s#I)#2J7wv&53e0>apV1bw8Bd9&(SFVxCS4?L#c+ZidpT3w;sE|^jnW9i>vA2X! zCUL4|&$e;`&DN+?AI(I3);-H2qXOhiv@jDL(ryEapGkXZ22;M3Q7Z6(=gp1c$%bu# zjm&TGN}L$K8cML5_x&2NIJFZbR{>V}+d%nY$@p|Q0Y2vg+~v>_l0TEW)yK3sYr2cn zO(7NZBQ7{cBPs;q`A;!(2~Go<+mb`ujrbsW-J{w&m4Uf9JD>Mb{f!OXJdKEM(WbQY zNfP!kZ-}uXf8&2S;J|)4pGqc0NvG#nOJMXLn<4kc13Z%VyTsraX1JYm;2hGg$4(`^U>_Qf$-7m^Z>zKZ^2 zyw~Wns=-Z->4!nn;2Z<>RWK;zi49G2=PFlf6R2Q%$1H=v<1!=#Hs$wkFuI6u6)+(a z-+2WVSz7>#KMxDV#}T(IQM-Bw!q^NI_91+%bC6O!UipC-inYvZ<~l3RAXlDWe>7N| zGA)Ds%4p^eLqb0chq8k=w!rNbsBB1%-#e$V&C)Wlh5G>B|}%RKl_8_QovE17z3uEqR4 zSt&{D2UVdj_EE4?dwnTvUUT;uigB_i6ipBr{6*myHfr?9Hg=W?!e-p;4DCUu{4w29 z+dY+Gd+-2mezx;1!E_soto*(5pMn3J&+|IFR*Dmypx~eXgrEMef?@+*#iOk4XnsB| zu74s#jr~DOrZ^uWUl+!>wE%h?gH|BE zu$e_FS%E`EQ*j8B-vi(Y+Je65@|ef>Zo*KdH=M3FvGIllIg z6Ve)oxZCT%kIuy<&)**L>N;%xxHvtDOn2jcFBVn;NJ@A}C2~6wuaOdv!9Dq#Yc4&R z*%Y=WiPV4JD9T_Vk*y?^%ci?_%(d7*2Ys4a51JiCocZvzC+A%)_7ATcqE%8wi<~Cq zY_L40vPo0?N8L&}hP{0ZY7Bq=7baW->YgD5w|$U5eVgKX2Hd7D0BDb^5RA3 z&T19@7Ko%FGfR&B{KmbBjmtwyJzf2q0j^4B9|kNuWBJi;>)=?MoWa5?$2ADs{KPu0 zChG8n{sz<*=wGmK{dCjImBg1r$~FYyXII-VpW4wgNV-gYT^*4$z0A+s^`fxAjQOfuIQY{>U?P+MCuuxqBf(rl z8QY-peT&g92ENzHoK^9_>5Zo$L$a_*{;}iSSVp}B&L6L<=XA0^AS1Xj7*NB&EF-58 zkwmML%1zLPy5!eWYr*pI!sM>(S_|uIU`eL6i#K`KAC`lUZgMs^v$igOXrzL}MuEvl z@E4m#0N&H~&a1Zb^!$jE@{wsFUVTHS{>K8XshzvdzO~RL|To&wt1?G*wHFB9HJ=ssKZ@sW^tFnVXlGAwtBgR2JV=VBiEi{ z6uc)py&@2DQ0(2(C~>XV|M&2+vNIk z2WIxJWu4%?PcPQL4TtMAmcZ%y%gNsfy#P_brPw@dK4j$a+;@Vf?tm?4mSbKjPTFjJ zDWFmavsH#{>VjL|*4gZtaI*F6O%L3k(9S@{&76xA@2MEF=s0a8Pqn_o?aKYhm@ls{ z1IrYaxu`?h455 z`rF`aR{*2Xfdi*FM!g<+WO-KBR|K}@ju10#y};MDhr2SwTZ%VGkmHncJbK@8OXjI0#` z#C}YQQ_Uz-6}bs6C|MD?3LtQ^WAgGB{BZn3%B3CW{Tt4os0hM9Cr1=oDXIkQ#BWMf zt2K}&prr+{P<4L}f`C}~Q@)pcN4svp^J;m)RSdvj>_Eo=9JHcseuvj;-18^2^k=mR z1|si2YG>-6lOG-FDJX<87@N1vWomd?sKP^%%!;5Ou~w)sC<<3r>>Rb?jkvsWIcByK zL6*8_@&z=3TM5A{=#S{Xsa52ytu7N<2h`+9{A6FuspK&0qh$%uN%-1u2E94K5^(0K z16x0~d7yxS2qw>cDuq`bIaBQ||smfTQ?b^b~G51j?MMimLfsty28i%}-V5 zFj_6rWKqCX_5!GS7t20I2IXljZe zQsAQMx@J~3=2uUq72@YBmc42oj#aHQ+mfmalEWS;*CN*@i5;hLjr`@TSNzrcyEYbo zS_z7%z(^&=`FzSEZesh<&O}AHAi4o(NeKL1X)`cuka~7m>V6XZJe0PC-%8SUGzkv7 z9-vST{*c7yWL}mv!QZCv714)DU6@qyCBX*M3Gwf&k!vqP;Hf!F=A~(AP2|(^Kx_0+ z>xEWe0NT7lSbv<%CLS0s@tMr5|KOs#I=5<$0q7ho2d*d46WLJ7`%cEVl&y}2#OVa7 zXO|M%VhOKavoR#jj3wHSRI>K|rkDT`Je=G-NxcQ-1#U&U0tJ^*p6g9n*Ka(aQlZKj zivfTMm{Vj1+4+c27?)`vEj4ZwXk94vZ;R=9q(n2ne^|0+5zHD7HcF-{#sKC3vp`GK zpYzqwg0K&BisL&!m9_p6m}BNANN_~Rro8<18lD$i<^Dbofh3%f{^#Yy`jQxcY3)U+F+RkpV z1Vn!bJHvU(ss}AH1@X4iwRwNce-3=qqjF1(| zVZzHcOe?OG#0AU?(lX#=NJ1g}64FX7Ro0C|mAD;KkJpQ_hVv>ao*zvJ2de+=eGY(V zHCk=V2ZGLPN`LT+0Ioq@3c*;?enwoYHH%0i!2Wfu0R;9yB)-+ct}KjCjsUcLG5Ag` zQ|l|$;GUiQnUEagE0e178p(~JlU=*MhBOL58+sv;l-ekncu;s{^M5yAn48^P?D#92 z3(1uCR=g@NP|y6i9;(Q0RPX_a(-U&V?tf0+)Kp3%4jrQAP-^|15=fi@{6uTkpAc7tof>ujtTZ6l`p-?G_E$)zG*y@a z&k$#~a8te$f-`$_vsh`0Np1d4?#w}gz))%cMBnE9TA(&c#rp{;*)K-cxcJjhCnfg#IwZtT{+NdlqxVvmbNE#o3g( zcp`1zPl@nbiB)?R;%{5{`fL~O(;*N@X}`>A!?GWj#8~C^%*TtKn72wx&Y6dF@4g;T z2n;0yAR_!kZRA%;=Gv9!DHCG8R4nE71fB6?z9Po){Ev16Zz5q~SI!Bsl6j60Di{L& zcdZ2oSYosXw~V3$a91Xbl7uv<@h73ulHOqqO0o|?*ddm@)@%F>Aw>m%Q!y&{ir#Dy zPDc_QbwUy$(iRgbAnw4foz45+!8K$E)Tz$M_mXEJ9ps1ROEejsSnD4EnVw-I*;BYbkJ=o0eUV0;Es)e_dZR@?D z7DDu6{s9X};Ea-|oR>oo`QOJyq(BI01zc+n0%%Uc>U-y=zeg|uZo!I^Qt8G%w~{Jpk-pV zhnpd?INgD)iQbUL-8#da7!x5DQ3jzSiFdNmA2xqV39l%g|B=ETNr$|mB0kIyudI;x zw{z=2A|<9Dd%slwvQHCc34?=&Kl600a}aFP6M|y;!P#PHu!N=5O0h!`~4Gpyri}2`{-MWFS$}WMw4rDxF$g(GM zYt1|JgT?QkOQloms!2m0e70{d+Y=LXGH6=>06+jqL_t)rvUXB~rdy`w1Lh>1v+r$P zx$$>x<^SK`VMiE%Qqf#Xrp-@eot&0;`8#@Ul@`$)V-9UUh@{f6$omDo`R$c zBFYfRYjVQ7y;87t^z$=vn$x|IsJ|xD07!mJ+MY&|wCztsAC*5&et%Lff(;S;4S9%P zTi%j`ALWp%mN9?{(Z{A+2oGNIL_aTc1vL*K{;#MCB24IvoYDyI!_kkdl#cK>4c?Yd zgh7SbvO<7fzkV`nO#vha;uwtNKvaJ9-cNY7b(vv*CUfjZs!qGrGBA#C!JwLepPzC^ z7=SA`Y$c-4NY|JhN&GAAv7?muetwMZa|a**QpL*}35!Z;q_gEc2*I`PKmd57xD-v6 zjaw3z)alZmn~{P-`$X)LD@CdGb#K<71)|ESVVpgc^*FGbC#2pP&tiTsvZZ4xkZjWA z6AoeLx}vBpdejNvkS~nd0`<-11SA0#WgAYQ)T%k~iu5RplldjD^R`AC)u*glSO5JxKel z(OB6jmI>2SN10Ys->eG>f2_}d@W?YY1sZj@?{`oTec*hEPXNw`^d$jX1u6qZ&t}fk zdEt)bnv%{(qNmE?hC=&xG!~oCXV2e&bJ}U#1A=eW{d#5ISq%m|za{$Ghx#fQOE{o* zZse|M8v+n~Gy!Oeh_v7{hh6h_7ryP$xC{FMfuUvqgk5s7aR;g*<9%)}Eq`FHn)^oq zw7gFjleNVAwRr7}_=|<3^wrC2$E9+;ypXl(;rWS#3WK0>b(psoYk-3woG6(C&nrp* z`}vMivYpa85Kn(iRrB!3ChOlldHO@hTWt{gdrHcKkH=l<(wCNH77$YZug;BH{{t%M zQ93tZ{wP8uTfVNjv4Us6O7}i4=?Kak6KaUspFZ1F45 zXhe(v66lzi0JflYplo%*?)$)|&V>(B-&e7T0r0TT$kQ5?A2z@s?thSG(8MI8 zdI}7H#)ia;1Wd!>Eh*JE48TKq*NZ2xtzDiBEj?j*w3tV}fS~f*+3AWUu=Fo_Zto+d zfXy?2Xn#I^oI zfc-cwEfks|(k%1Y?mw2Nbsn_;6LKSH83M!20JIE_;kp2IUPjU&&Q&?ZRHJ$&OwBs} z@uO>fg8<-{1Ge?UAmuIZk%&v>lmSFgd?2}e7T3skQQN=|c12j15RVC_np4Q}avZW00#{#8{_ zX?n^nDVcwfN}iekJ1ypG7h*D1QE@T`{`Siq%K(!5VeVnl zItGSF3rE5`lKL?@;ONI^u7w9^r9u2h#91KOe`Pvlz9LV5G#vBF;ZN@QwCob3oemg< zR(f{lb`t{N0QTUB^rT;zNy%R$W_?F~b$iq1d+6r9JJq);1itOXV&9RQ%bk=W(bVr} zo$C9besqsTWqtWD=EoL-koWLuI_T`^f9Dv0=&w=xBh=o+6CCwFQa@4|uY;7GnfYIW^V=n>inmn6*@vQEO+ttRSyHVKE}q{h3{98iHHd`$^Ft>Ax!CZ$GyE zhw(8N#*0%K38r8GChRMYfPt;Q?HjlYy9NQ)%{ppo@zC6qr4_7LhZRQF|o;B*LFjt>Li`%*~yum9-M_ZBL(*q!pV+)}d<0eJq;rM*Sf zIM2(O>#{an*EiZB>($cN;Cfr>Pik0RLC~&67Xnt2mKh)>&(#xSafJ*f6@M?4>`u(u z<_q#3q(G2pc_5{?AgW{kCy9@02|a06<^SYUlW(+#*hUZzV)oH25KekRMSRFGHzzf+ zjWdUV(MdQzx)yB+utI2W#-trYbClEC!GK}~DD7#5qCJCSFgkLI`DRILbw^IKcT4fg zk&^%cwd2D87_+EcO&3pL!Q>|I}PJ8_jH;DYH9kyzKj)od2`{#GuXwXt0^9 z`*#ku|Ci@d=B(navCSf#?Zf~58<@NEGY|;ZWJK$;ptV8`e>xvGmrti{mcKtP%S&74 zfm^EudFt;zi4tgm{@ZIF@06a={&0P(re25z2U6lm*)Pt@WI>k0KRX%hu@U*2z!X6D zNj~N1|1(P+$qz7URAC%Z5p%*v|2dx);rA@V_-MP-vPmeClN|IxDLPkd|Ie${_)n`T ze|2>yvD}F%b({gvUMl#$2X~^U-&X;LUZzN_2NEwW%sQ`8jZwU(R8w*6CYzrei7RO< z=^oDo?pRG<+TKgGqW`Y~sC+tVk$)bfhx?uZ|RF@BdB%{=Hs%nx3&UlNkJ7c$i7L5=#9RDGO;o zBmNEkG>3Wy0kMUklDbtDP;|_wP|n`6&X5^yGwgXX|t=~JP zqZ$CXuJB{=M>$El5rHp(aSoylXv#0A*GerZU!{Y;HVl z>G~&|dVe7HeAm1iHy2h%^<0eW49Q~AZXxnTDzx#~;)#=5VGy`$g9QP*mihx?#zrf% zS`enEjVLXxEvQ4%p6#+E^9sKy^iQ6Cm<4DDZs_2rhJV9*vu!XvV2a3z_>V~X$59N2 zQi}MD>od6}TpJDuu##wT&>s4FrMe_$#EKk6YBB>v+Y%rTt)MC&W4&V%Ba<=*Y?>RB zTg^Iq*I7l+7XMNHf8VQ(3QocQIu{JePn~7}B0q0!VoY7HXERoEd7w@5zaEx6BDuId zpNb{^TWzd4Ayh&?BHMU)?V&%UT)i-rvPoEQ$Z~BsAb^xTF5du(=e%-KIsc0ZbAKyg zmX+*hS2_G|D{lbKf-nJ8b!&$SU=IDrrmtPNg&) z8*ASzSW^5tvPl)LQo8Cn0UF5ajJ_*8Cs(d|*h0>b5V46aGu#(g{&l z92z6t;NDTk5`%qS-sF?A(c;XbuUs2&2tbe_{ABVY2o>oYsqEfHRSbX%`N(^FbEjs? zC6#p5T&sSYMgiR?u7`uz@bPrgd~I&TO8YO*Dyv^c`%4=vtwjHf$L*^D-&6+-AN)b3 zP*%54j++xQ1#HUXw4;0xMao4ELQ^n}9*rMsldcJe|58J~b(CR?M&zY{dq;cz+v*oh zaPBDmMd{k($N)gcGx{S&z#p&F4%cD&RWI%(d?lK&CV(id+Y4g>Arp3mx}U6TDG`38 zH0SyM0;mS}3$1a0nu9dLd?H>Oa}R}kU=CPwPbvUq_if7#VEfk}2myO3E2aaD5+S74 z&aaKOm$zte;pj$Q7Xo2v=7iRq^_L>R?gelR$TQ+{5CasEwo(+XF?D_r!UHaKpEZI>ez%c-=E=WYr z2VtL5snwa$q{6f0tR;J`RsV-ib417OhSeSfo-zP9d@ARDIc8p(O`C~a%{#8i zTxUTW9Qv2%c{B?6;XHpiQ_r)0erFPGEgO03KE+utjstIyovT_?3i|H{_`GpZ~ONet*AU z?r)UMrku7Q;_bAWE-b510O5z@>6E;{#lb854?^JD@qqwHkF1X=IeoCtCWT{gFs8&b z)gvR{Y$^@!Yj)&VF@5eY~ytp6~gbJ#(w(dmsPq(H{?V zR6pu?x<=SOx&BFM+w5E~7OTJ0R=KYQh$7RN5|{XO$xp8ekSC4eJhLW|lBMgSP~Bx- zB}#RTNjmD{alPPb5jCkA++H91B22Lso6j=fX2Db=z@5klf_T#mJjc40U#h$Os zfLn^8gn)L`aiQ_FnmvOp?vDp5Pk%0zN}ZL<<#^oq)55EJ9-qzj)t~cF(Tx(Htpf=S zM!dD6c06ADmel?UagB@Ym_casa{POFHlrBrn4JGq zl9eY@Z7@bZ2oR3;7|8)}cfD#owb!HpC{@M6g77+km^VfIf3vh{Gyk0$1rAAD>?bc( zFg_-(9S;Z~RLoDrUni{E{0vK^`XcMw1w0{~Q{DiS_Br+4S5tTssGla#C{p44D>p4u$uacYIK zZ`KOFS=Uik1|X#WPBrYjD?4RLc{QaRu31fk?R&5ukxqf**!F|Ue^jBMpvJshqpAA1 ztMt>N|3ls4)kNMTd?o)O7fkOsUTIybg7r$1($-NE)1lWrB zV@WG#?Q>xXR@8N_9S;bE>Q@{9cNMa-D*{bsz-AiTBs?0OPmf?DObn!i<+a)SH0s;^ zK6nry6x1f4Qivd$fdOjiy@;=~GD~CL9jL)~3(igh&@ISzv0rR*c@^y@>)R>i>Ry=K zi`9lJaBb)z0P)`v!6)4BPpa1T`JG*}qtB@98?}Q7|6y!H_wQn@2J?Pzrc@ zgfLSAd#D-U_FN;F55w<)Uy&!g6F1*HowZRXS0C(}hnpmx9)#vEs=B7C)0S5<*~CXG z?(-2oYJ)KHZjRA00NsRcm-b(IF<0AK=eG+dV-lH3byT)2w}XaUeea%#xvrR8Hp3Fu^^W^$_oq(Jt)yzKa| zYu~TSr}bB+kcf12SFg5B_ZfgJS;`Z)Bq#h*n@G+SwMFGN9taQnUM)ha_dyLek&UZr z7xDfI%aKaJNn@=-ad6Mq17)55wq^g{-mCssmym0!Fo$IK<25JS5eNM|*i!MJ27UuD z0hk$RTLn*{fi>$HPR$7zPuGqQ1VR&lV$&_|85bUcK`e+H9e{`c{1`A?oKL6Y-+piL z&6Njb^J|5yKgq6#=l@*VTU2T7ym0De+5d-{QRq4YAT`UjY%V{~X0v~E{J0PVYy`POZ|FxBS^}^_Z){c zYDG*asSkVS{8Um$%G&ck`C`qM5I*cWzK4aszge=(L#389w;pE{8DPw;YNDOM5yiD* z0|81v&&do#=;x_yZSI{)j%%>x`s!a!16aL1>0=q;GNp&`1T#J#9+Z z2ATtVf%`L55MUpHbJ%kT9x#-}fxf?+7dpmMx^W1TT6oz5qe6Ch@8YaiZNE}DwyRaH47U=Ew&N{<)VnwYJ3 zO_r6P#7*)}VRv`^%8gIAo4Dvmr#J@SNZ{+u#XN8jdDLwb<3RG~{eZ>c_6Ar(dKVH@ zp#j*H+Mc5z$sapxy~e%?oI_FDN^>Nl4dEuOE8g7H4;a<^vZ`O;+=n$Et$w{;LGC-M z3-9R_GX!HIHL#4O!>%pW1YHXuz}`Zu#C`%N;S8E8lN?A{Y+wKeqWRbZ#`p39*b(zt z55O~KfgVn!=-0fZ|8VX9yxpsNItHLukZ$)rJZWmE+;G4FiH}MBFK5c&2lsdAApr3w z`OdDK|JJ1eAU}U-ldn5@_KbB9r9c#ET!YfC`pPyW_q?k7fv(pJ75PT#O^Cb0*WlDA ze^C&SArN&hjL4=udSpdV|ACLelM~&Wp@#rM33~})g!};PjSW?ODT^@;?B`JjfhQiu z|9U2soC-%OjzIQ*Ux}}e2Yz)7K)CV|ePtVR=ovGoWW#-RE@ipyF{R@?9$Pe`3)7=v zLHt!x+j>~vT`QS?S}KU>Qu(j32Ob1|ZY*VMxz9^odwMJg9YuW`m)M@w2KwfF)`k+I zy8Uv!WIg_w+9CAcR5YH=a@iDXo>|&Rm}?IzGAjhy1s93ZqfHTk+aP~Xd<>zas;Ge1 z=10Ut*32K5cg^*O)d5Kp?9WP)`q1Z$n~Blxtmh zL~0frV&UjmouvjisQRtPt)#dSNpU3qamcm~(1z$I2r(+vva;`>a^EVEk~b*VTW^}| z8w7frC~D7agcGTa8rjnxA;IOOQQ{-}3S?drVYiV5hjfmV2e!oeKpg@Y5i|g@1Xu}> z`#R3_5a12j*Q_;r2U7rLf|cLJWTg@yQ>nMdpR!+~+aUHnmHwVKJ7BwupME#Q?|RAr zY;0FHCdLvU37`F=KzvSn<88s#B?YRp!7?KHbE8RXXQSxPgetMYd1H5VZTKNjQl{01 zTS3Slsd&lA2P04~dCwkww}|-6M8Xz}!ugK~66+JN2R6>jYA<2^M^kWNx@rmf-Swgk zF{}q&J;QQFte6{*b`^<`R7FbJ0Mt)Pbinzh)Pvmc{bq9mjLIKCTFLtxiZDqfq_L!K zgiKYDW&nn7En1yXPZ@xZZ+^N?4ra5Ph?9v)oZ@#hV8;N|SvBlyr}Jr>^$#RHAzND@ z#nrjU{WLTXAP@ijO-b6S_P4yHvcCnX->r?c{uGg6U=mL&QsnGp()s`-A@(76px-%X zMBhpHSC0Rrd1*$fIr$ho-mNsj0Qh16$}%5RBt$7Bv79w=I#U1Gh4xvBdwR1jdb zF-ayR(FOZ0s8bOu@ymg`XQ<|Xw8`+4dE0spd7QQ|lOL_W)~@a~1JK&&j_KJvHaKv3 zdZE#2?6nW}{Kr{-<-=JJK4} z%NN038_#&!XMq}bER(RN0GN#u6@U&wN!CG8_ah_W&DFA5l@r)}K5qRERC&#@0mHvd z?F`W>v4<$HGb2sX#aZoJ?S-v-6{{6ECSYiHniMDcSZu&e;r3fQyJo%Jj@r4-F#zpA zt=rd#$n#Jk{S&i&N_JdqxFB>MG}ne70-@vImQ?aLS4v9MBip|yGU0%Y+J3VUS^vhx z0MHKGIxv-aF#tBbbW(xPdcU%xJOScD#D5&v!cj0BN)g|9T(V}WY%;A9)j@VRw^I)U z-B5l>L9&E6#Ti8eD7xMztGd3jYaYvI#kIN+*iiDVkBY_WFUkDZi{wB1N8=cP{m_WW zdk`lc+emUF(y8KPgQqR3S=WXi0uWVfr`Y-nI(S9Z4pUH%{@d-LKS@z^Qbms`-+xp7 zBDzNS+a%@1&0b^AaV=i{+luoKiT(4C55LDDaYtGJd=AJBfGL2nU{*Mc>3F^`_j&Lk zfY1V-BBG?YXSM{7$rT{jyMsT)F3iC5^>6FSjlb)uO3R@Hwt(T@3f*ThVXMc>y5R2JpLef9l zfM%LaEF}UC(|Jbw4i$coBDgF+gpkx9+!G;+kGJKVSdN*;lK$6rl_FOCOzbfr2VdpB z9X1HC(}+;QR0-RN`AOTtY%6iqmUR{#wiyrpd>jK1!8JrBJEI~SU!Bb;JPWVdAd8zb zszz|Jx-cZv{Oj`izq=kp0PHB7s)QJ&b|@*hk|$O4*{Qfe{ZID4W}3ZrPkN(N z@RY($zg#aX(a)|?Bx2o$#Q$isY_>%7ABh2Yyq%N=LdAkKj$~kcsC&KpaX26V^9kbz z(@Cu^%rTc2w zAQ0d%nJcw##-xeLrCc@bL4x`%B*#fKt(1 zlfTeUv?0zZX!Lze^Ky5m8vWiu*kh?=;P@vs?D=WRw#IGvU-U=sMtD>RU`yYULmeT1 ze|lUnpWoS$H@-^#*30^{cT;lyzcQDR+BIQjl`?uo2yQ++U%9Cyu z#?#6^SToCt`Da}98c_pFiUuGg5c9*h7?EUL<^&M~_O@%o4*^!01Ht8MvniDemNX;n z)$&GJa-PgsVgXThPLO!J}^sz?B=n!0F+$A1r?Vp6re5jx`Ltw2`<# z+#{0aDEh-I(w%F=4*?KkSx(br|J#!EPkG*5GyqLZ9m#z%OV&Tx{$+zrK>XVvx`7%u zK;vX~Bl3iCJ?5(h4HYjM3lbJl$s7kpzVqc=XrJ#U`e<+q_w#^2Kxj<9070o}OhJ5R zJ2s=u8jvX-ZJ45YD!=vFUtjy1TSx2YXjk`*0XW*Qhx2M2u~6e?MfhO^xX|O-3);Mf(wi(`w?>zZCf1fVWuQ|g8S1`2sj3yr{J!Cvr&0X|Jmt` z<(wBLg8ctB`(ORb+^+);fsmkXsIbm&zbx3QYImhFB@~d~!ru>?W>yX(9OQ|3X^OPJ z%EK;%i!VUCjhJrQxHJlv7c%ndugU~aQaWN3xIqhF_dePeGzGg7{fkOpSXFJ|J8N?5 z(d&sZqk0xWUW@(byZ6Hj0T@%XN0@%jPiJJtirF&O*BF~lOYBO^TK!jEV zP8siOoAgJX|2WdLqsEQUl-y8SfIwN&@?#O(RUOv(Pm=UOgm!dt$h!G$yT*+(Ae9R@70#H?9)3-GBXd^`0tC=XQS+5B z=1GMF!dNUW(5}i4DglA^=V~sV4yPre*`aQwEGXUo5?6SGE{XwPXsfLvQx(tRN3Mlja228K+ z58)?$CU*Zbikm$*8KiE-<_dG*+HgYv&v~2dM-nq0^fw^!t!xSYlr#y% zh?CQz$hFd!kHgb zOzbl$MT(a8HFsgx&gwe@aK&7ynycnMGgO7Z^P+jjEO~TLM8j5Z6K9+hM_`?}oS4+L zO&_I>DX4?f9FjG8_lwd7kXWbLW*w}AO>ovbg4zGj+rgmXQ^0-55}1;MVxXC-KSoW- zngL>}3(tJe7$Ef>Gm-C5xE_}ukYS!UsEYH$7m&{|z81yR-$ug*Cj5Nw|MeIB_59xt z)V?zS`v-Fz--BQ$rCud&npZx%c03>u5PxO;lgb}QJ6<7_jT)Mvv^ENRg%B%DMp+@( zZuyJASmfoPTC-Y-Ak$k}c>)OWJ5)=tNAQ^sc?2G9Rjj#UB$+&tgJL7txZfiN0qvd8 zCxvpslgd*;c<81aUe~1%uQ-mduM6fFfWCqsMDCeaW-{{LRstX)^Q>~Gb_56(1=YQ?~u31u4@V1a*Gtxidq9>%9C$wr_38PL+y^gJ&TI}6u_@i7@QWvxo zw`8XHXH_TkJ^U1^rl9d3)Zk+(DkV%<pT+>oAdUBX31P#IchK-6D66e;P$ zkl?W4;vCaz=C;1=9Cza*K>z|7mi*n8{U3=4&-q6A(jl5iFqBxEL93nWyNHG8vQ{76L#P5Y4R0hQ?XrIhWbm)>?)8EyAE5_?Tr&{eG zb8M_OM8@I7{w_?9?vCnQiT-T0G*agx`PDm0760S|dvC`)R5iUY9Pl6rfIN}JdByJk`b#tu2(Z>LtY|yg$SW#$oe;v?Rdzu3!!c+|dGCGv+eg=LMP;s$(g1vN{S)FM zm)^bb-OcPsVqJR#L1U(zm^Wl@(+s`tOEAC&F(sW2WZbTa! zHdOzp%_;mdB1B3KfL`T`fccl0!G zx~aXWHy-y}?;#LkI)tAKGb!!qsaBcV@5Uw&+PG1rOFfIn*w_i$=v?bpFO`kydVQNVJn$kb)?pFSnR4lnx zy*{=brTImc0T99y;8Z5qCJ^;=LUZBXi4y!;8VfVP(GPWhN`I1){B5zH!Jk@7a5%On z3%8*P54`_-BP#3|~~rai;Pw|hs@H)?knMpW(Zw?yG1cq1Eza3L+22(t5$B3GUapj@x}B$pGLC_}=+9KHH8Zew|Lozb(4?9WBh@ zGXUD~r}8N?BTwiV<;NaVb~O;W@TqIZ00LCXd$^^Fe>*kVJ*75R_)`g=fI4$$Z{UGX z7B$rV=cnT~0Z>29M%e5VD&9q%02MVcSxIoexw~V-8tq&TnGpQ^wnTsC^f*~D_XUHp zU@|eHq#u1^XzGC8{nSMW2&Y@zr+`p%W+Ej`KtkCf%jUk415qcywcbD=k_-S-|Jlt? zi7+nwaPhk^3a|cm%=n3C`5Y$prhVJW3~h56<W$K`6VRImiyJ7ZiMO}{v+BiI89MWEhWr}83_d1&xXPuEdUH`R?>S`;Q>Wq69_uT zK@(%HcE0*k(m5Mq22#4FR82fFgYhZfbIBKK4_R?C(PzHT;@906GuUgf7mz?{$UOdYU~+^AO~T0)ePmNHz2Ohr4F$f%XsD0N1((0mlGz4YWbW zAafl4rewbzmmN1Pm98uI+hw3fsCx(alyjYfIbYK;$JF)p(QI^g()85c|R7wG5DcA|)~=1I!v!8*IWYqI(l@J@*9y zAwI;1WM)ieu2jv8t_x50Wi-d@rVhqkT5nJvj4d`UI9$}5AoM6oa9skyvF{ceKLnqo zH<$r9njvtw!J$@%_!Ax%{EIL+VN5hU_BP3_Fm+{c>T3eKkQ|7b@c0B2^h&y5n-Jz; zz9GUua6>yP1jsRA@ukI?GI@z1;5lG#4MxL_(Oc8ga9eX(s(Ix|+Wq&#(rEzpL!*Q5 zL55?gq`ah+zEvL5U!9doR~s5Nu4}^&0h0c#DA~`tq}w}6`TM8kLO|q{0=H3vaFQna zLLRg8{>_(4bhKSxhf5~J8!H8i=+_E* zPNov3C{sXCD2t*)y`GJ`U%w!LkIS6)dm*cQ89P<;Sf+qgrOLjyUL25Wf!?-kdmqcc z<)eg&t(1!9TEAe~?0p>r&@7}{JP*WWC4Uk89BN!SNd+lfTkOc4K9CSVvZd}lS^u_# z$ToK?_H#3nVeeozBGVoPfKahFAxt;Zb=S^8jVlelA^}vWXw)}Ez979dYNG!#jRZ_l zUS^IR!7Rh!FjFJY4c+f-7aI;LW_MKfq%rONqV~d|XifFp68(C!Sgii?KStd7FWfwl zf6V0lpUvxDM^N6Kh$gg|Ji%D-pI1+K=TBH!C>%CCYkqJ zRb@$4uC`R}*376e-55Xkq1C_uHXuDr8*T{x1^wuspa(znFA)4PeklX?Q>zDP^k^^! zEc8gNYL`n@D%CA7m3Q9nK{8^#ZylaYCXx`_o&`vh~gMQOZpE*l1?lB^{)0}QHU$6`ai+X{Nj-!m(@oEG9Eh1S!(at{7l1+bMO28wH>7e&aG%HlXGkynRz?&feV9POX1{J!PCS| z?H~Z$2WU|pPyo|*Pw5|B^vc{jy|=Ai`{Q?iFuS_B^~ncc{+C`Bz)|L?P|Cf56X-wm zRdd(4&vo9Hx)uZW3{+p}ZFal5K2b<6jrB*6(`g~(dxp`mw=Uvs(tY%aZxRWX`hD^L z;tSrpz=c^JVj#hUCsGM7Bcy!Lg-y*r{MvAdujk6iz4>x5HHDmd&B8htDHCnl-xaos zz5?#(GLovChUQ$E>Ec$wTD&fg3?${P99tjnBunLO=5_DPzCNNAtR=sBQjTC*ue^W& zoifdn2S7G`@FT1gY+~e@v`!D*7^{pk1RXax;>bp7&kb&imf?@zCdO z(4_cnJNc2O{Wz&tz=6dp$2!OpEIoh#nIxY*?qfHd6nkDm_N01P6+u!5t>@ z8+g}G`1`{ezc}%)y50hi-{~K(|ATd>o%35ejz!C!rrk^UrGW|v-+pX}lZ!&>-BLAP`!?f(LpVe#P zh`18kKLr331PmNKElh##N3g5qimM2BQN9@FXYGSV#*kS6ED3vilJdYIHFcN4&uLWK zm)Tp*a0LNilQ4$`+&o$aw%FfcYp(#nz=sE_$uQT*?5&kBkS7?Fc||^g@pFMU_1tR# z0U;4CFkoWpEGQHLQV3!7m(j^V5eop_yG;jfW68WL?>f28H1_1K7XTU?Mt5B08g`ps zgIV-nX~I>53t#9tT;RdImw91GX)ZjPq2}*wEC>Fm zi$4_hYPnOyFsw0tH$0lRgctCs)c{>$VW2#&p5;wkC>+`iEH``-un=E2kWS&}$uei> z)?tz0JbDTAo{aU#%S?;fd8(?uG)%vY?xUilIc;l$K;;#6d z)p}4^IM1cRRiI1@)dgm$X)pjm+PV=`2dslf8SGE^Tx1=*|L@=XH#>bh$%EBTNAE{b zyA$rz{>S$Ofc+3T!rw~slMuaW3ja*Psjg2BP$RuR7V=3Dup6Ri8|s6W0jW(NcXJGjjP?0yC78sM<#IB+x)lWLFrsBIv%^iI{@LZU=#S=atsU@z zQ`JJF1@=P-Fz;M0Bp2EDqI!Rao&R=y=QT#7^=EFa457~@_viDN$k{b>BuVZ}r;M`3 zPuu!c#=b4z{Ag)AEI6M$-vE!d5n$QLabX)qp9-?_dhs=L@i@jv#xZhZE&n76=2`} ztvZwItNsNO_zctKPY3SY`)NW&+ivT3*9rh^2Rz7jz{P5=GLo)=?p_A=&c&@ps_9b` zmJ7L&Na$`uilg)=2p}(>%TPx(qE$4xl?^m~G=3}LEyMhk=I1bvt1WL)5iTR3`=6Zy zsC|9-1EC6KjxB1>mn%ezQ#ZV?N*#KRH6(-`OwLB#$2`C_Q5sX?d0c2C;pf<*4{C;M z2s`HgYZk88)Rx5^i$uvd)&9k!6R~2{>uiJHJ711vi#^e-XHY8FB_L8S`+ z0D?e$zq_CeaMXkS$-*p$JD_@*A(S9Z6XnWiS59JA3IIP!euOHwd|4$rsr21JzQ$YD z#cg+LMc5)pswOvz`Tr!e(36Ie#51%b&Wo5wH(-YP!JlC^4WB8Jd@#pw1*AmuHI-@4 zAo_5r42}#X{VCz|#r8H!yKZ!J#b-VINe$>Ly6eXo0Biz66M?IFDEqRgJm zjyy2PGpWVmAm>!GXKS-CbMHVeHZgavaO0=cO(+7V5FGif0hMX20Jr4t4h{chf4l6R z>%ck~DhOKl6qcPM1<77JIt02>0Ki-_oaCqe7<~V8)Vvv%+)RkaM;~uT?%<;IP>8>b z8T$7xVV+h9Kzlha3)bc6nq1n|meiU)dbB|V{8li-*dmq-GGeY$x5G;IceR$X%>VlD zy^#zKmy(Hz@nn2*EXhkyi3*f?F3!&-4;Pk_oz0D;^6-9AU0aQ9bKk!_7z}lKxDZT; z$8@a8{5PT4R#-) z$>Jw!$ohlDBl~+6nwcBOYGJw^C96TksX5zm+YUnq;c3}qe;_W&e1&fxX7PSZ!R-`` zd{)dK&(P|ib;|0ET5&8;q%}a?ZnTBl#<``FdG@yN=NK9tN`@5xO2uRh93CDSesR04 zEn*C|%E@+rKMW1aM}8p5VKZLAwYAJ{cqIg)$NLuG^0E)lGv3HRctL0szlTx$kC#&u|HGwn@(qE0YEn{OJ{38g3SKR+xV_KTg#owE+xn zTUg5_Up~XyN7#K*VE==MEc(lV=OF$vT>D^~qLbzg!;8Lua&pAs~Y&?G%I> z9piCdr?}`?i<~bJT>b57m5EC7$YN=Yc*l|L>RZUodS?s_(A;Ed_$_ z?!WG&{_Y6?ZGgNI>eX?idkFYNn9KxaEY;-B`T~_SN+3=3y1&T6MCl6SuQZZd|(KYimenp7c?@$V0etD>FBXt%evlDP*@ zlFhZX!b@L@fn|_V!M!|JyeJkeA?>!oB&M6vy zv4k53SJk)v`FqKgH*O{u5M(c0o(>!yC=6i1dMSkl?&1azk2i1u%!0dX2m-(T^*<$_ zeaPPkv`e@{N_|+lsslk|j0?yUV%C04Zg`}}cA@J;tA}&0@KdQ2!-aI0ywU}*{?ihW zG5&7v`@th>?o&~Zd31n#QTVZn=%+MT~kqNA3zRcK&C)c`V-S2`o z@M6@qDUOa^Hvs?Zj@##}{hR-7(4Tmtb>ir~s@v^jy+AO_?=;;zMwypm&H*F>-VYTH z1Gapv5Mc1=023EDwF}#$Okdoa7hMur?1mXCfxDcY!)DC>PMY)Poc1$7@z9tR1zGV} z93C7Rir~KjLhgPcHt(LuyPeY2<=Y~^$z$=VAWUwquO`S$xB}=e>&meY07v-Z`Dl)l zL-$dn0)r2}SD2T+C`1MLx)&Nnx`-v9G7ip-Tizbu&dvuP|9q7$pJ>8WJy$+P6y z64n6vqbROfS85VWh`*i3RvW{NEo`k{|LX^9$zx2&rieLy2~(|R#XkeRu2J&$+cU*v ziXcFv=0kw2^}|#6U91)QEl$i)55kl{<7E~xvnxE+YOW*kP3CN2tqH6NTAd{Rt*)_b z-j<0k`1ONzaCj@ZH@6i5t5zeF`PZAjy?qw%pPTu~5f0kVT)*H!NzyEG> z;mS-hGCqduYA`f^1NrthHF@#dL+gOC0$RNModUt^ljj6c?r;@C6N0ueH3k`Lc~lxW z3J&k@OSk@=j#9=itjU&G26uMXn1X$s<<>RB0%!aB2m0PE_Vr)AJ#_u+PZz$nMAnJ5 zu41lO2d|UO_2=O9O=c8zF8n7a2ArA?jlz6tS^)>J zQeC}$J^B6r^?Sj*FW(03WCjRh>th%Q2%m^}9^}WEFX>KG5?j+V+_oQ#fnGmkC>pl@ zer&k^zjEh=w%xsj8+DZcP~FMl;H!=?j;^nvETPtPYXrB4RlU;uZNE9YElqh#%iT&e zjqbN#i>6c=VzW&GRw6B`I6~TobfMpv70wHnET4|0{Kfgha3IRrH`*$q}_@p>^fuAKKytwO`{b*vC zt#tQff3pi>Yv5QQ3sfOsQg@kVU8&*W^jmm}S9xkZC=6Jb$8wRA>=~azv$>)9D`Pg& z%#<|hUk0KZOXlB<=AS)T_tPMFJA~O@hCe(?)(YnTfq`O}{j=*DIS0f zZiJC!yS$ZDaA&PhDX|Y&*k|`l80g2gEDrC&xRGa^)@xJ-bSG&gGizpSkh09=m+}!i zFrKh2y&<5+Djx|PV+h+7B$^X$T_pgVFf5vSyvR68u%CI`MsZ8bOCY9%{VWr6vhK){ zmiSbYhl|@p{Ffu@T_WB}Gid4xk^JKX|6RIqHMIT#BB;aX-y$G$PzmoLJHGnHbp(RG zWM^?P`R0qyl73RI2Kok>I|K0205yydO*MzA2ybAT0cuYu7So6s(%}?f780=`TohBo zZW*1L{hR8*DZN;;z2jAz-y%(O469FtKtSue*UITm;hnIb04^u+&0t#t3rz_N3sHmc zPwWd?=@lS^-2N9!MnfYtHoi4bBf{?k$eQ50gu;W4se zZsSf^o}EbMzIu?%&XX~K;Qeq(YXzmS$%>#{075}aPdZgUiXj3UvjQypt?+2T=9Aq5 zyMEsy89G&PXlG}80ziw*0jQW@)oXy34%JD>qc-Tja$J=#RH$>e)trZ z)e1Q5R>e_sfakQ@bT?i9_U&Z+!bGCkf8yeWNcjtGzQvm=0DDZZCfD3UV&Bh2F| zf<)W8dg20rk^jT*t1B=H3sTE#8vmF(Esm! zRjmgRiqVX6<&(@9pIM_+#@co zB|-KmeLSh(Dh8NMA|)Pz1`p{&BY13+xpf_$l2&B1pzDA7ogeJXttHQX_W94KbaDLc z#07xly=OB?uJjlE`Re<*YHpn6*wtl!C192}mFZtY3Vrudx_g&?sk7Q-{wpv!eYX$M zo_=+2HF9y65derYj2XFX^% z8^7lmD-zE*?z8V+Ek!2D68UY`+OO5b!DU=7>lAldwM6f;BIf&C1j98_9LQ9{tXR_n zBK{xF;<`c*(6VsU76E&=ZYGmx{$o>95y^klduG2MAO-dU!GI;=07fRpLa=aytuLOg zCHG~11Hw*C6NK9cSKrj(!R*Czpr#-1sQ2xbApaHlc@%49SR@Q%hF z(4|+&IC0g6@82zMjd<4&e$=L>BjZ=gF$y}JFDEq6>1B+RJMCgCzuBw|Le09Xe=+&NDzn_qOD4n)?FjRoh^5f2wo~>J9Fkm~Hf~5hhBV?gi#j>3yooq9#3IVOI z57EW3ED^hqLuXWQGA>8PUp;YZ-VSIsc8?5a+<0vQVW5vf38$)vR;uiNpQ*f(-|0WG zp!X9)nIGKwL2+s*nSqO3XUZ*?cq*Di+(_m$6)82}1_?jIbXrm&YIcyf3ya? z1X(Zys9Hqu!=T$P0CLZ`x6knY{8#C7Ope1b$p~^pmTLj92eq2NvDB|eBkm9Hr!`p3 zSE7|=i!}1ATKzcf-#HgDE#u^@hFB`3TSAv%boJy)zqg))bFj_axm~dGo6~z(Q`6T~ zBjaym-VVFZfuHl>Py(3fOu#wtx>i2q-rh3~xxbc1nLRMD#r_k(!rb^B1R}}|rC*$r zSBxGJ)3e3C33%25Ji#RhwG91{}^{j zy(Z%yBV}ov!{D`>ll{r{iQRhc67+N0jGwuTE5!XX-nry%i2Y%{{fc@D^)z#10e*+M z_nT$oDUxJnlrx0HxPtv4&E{>9Xz_n~xIxVda2N|#J52xC)L8^*kW7G*1+6Su-9~Dl z^-9g(7?yta%IG5k&SMAjmF0tk4nD!eMWNYG_g;$xM@t6QS=AII}avYwPmBFEqG(z zzr^#%iwoW-u@3DRqNN!%cKK^3PxStV2-Dn?-Dk<)bzPXXU?ATDrsOKx{}dav)fCRg-^U6$!JmYr<)+`o_O+kCD0SKI!0MJ0+y5fVL~6%Lr7h-<_xL zTw9~8DPgJRro3j|d!{4~Py3snWkHt~p?Xe5%3)mZGo<70%2qFa;(eWGU04QiYF%ij z_MbgSA;Tl>c=qYh&ocH(McnmBixtV|<58R~jR50F>#z)41+#H zoK4HND3@^&8%UHp%=am1?0J)W|Igm}0l1%Ae6m^n*ioG!r|$PT4Xuh zCqARX2LM9mp9ugn6fJq@B0I!^Gb3&bDnj*Iz(;}TGG(;KsLOzdCMiAYhgoM@{lNrW zuv@cV=Kf@9n~3;wWO7(lvAD+s?scZBYUz8%E(nw7J;lh(B`HL_h@XvZ)*br098R-+0+ z_MHpvsnJiJhwbD!sR7ce?r+se(gA-=b<0Ki`r$}%-Hd?8$JOti0MG!WT}m>4bGL^j z;uND+t81sfx`A#S|0NP1i5moSg_4_E0C*e6Q9HwJQw@(B?nvMu`l-3Ty^Ge;zuwzt z#MZ%k%%ovT#-gE1U>yQ$q;)l{00srVB%k&ui~Ki=T(hgegY}wsFm2R;(F+Ixp3j5T zs2*JQejgSR;9_3)!aDx$An4$j!1Fi&%zh+J+LjT@v;a6AW_Ijv3JJOw2$~N;Kw+R0 z?WF~vy?2lM9$*1L4v3}&U>hO2pATes;&iCQe*Y4M+uNxwx|{3NrAp1DaLX!C>ZVgL zI2|olL>x5>4xA^5ldQPo?Dw$>07m{_D(2qBG&8Q*gVlYkgKZVCB9j4JS^+$t9^`6u z#Od!8pk;g>@XuiU!MKlh+gE-qf!kc)NY>d4T?U*M%6{dN*8kYOxA*t(S$ms*Y0dDE zcp?kL+(xXx?u8Om)6WbZ(|_!_5fgG|+O}r776yy|XffI&SM2S|xqfdpfX&((7l^M> zXo@{1&~CNj)M~~7SPvdAd{}5U+2cVN8+YM+6Z^;K>A5Ygs{=Z-gPUn%bv@ZwS&I_D z_PJg9w`-!jz8S|3maM7^(S^AS7g1WC??>(`tPmto_lk4C|6k08V>JfAbY5Mw#+L{S zEk%v2*B3WBXK#*G0GLYFqv%hL;6Elz$)~G}{$#iy;eKa&kO=NVqyU!4T{;gwHnpY4 zOBI-&)ze-^{!3ii)zr@Co;^=W_lJ^E>Yh(roQ@K{?YO_p+j}q1FUHwt?QIfIYr$9X zo0`urb1#FHQ%C@6q%w>R#w|1Y=t;V5tiFFYKC0K$L~c{0#G*g9F-=|^F$)1Ub!s$| zdLwMl_XNhFH&!7sw27FYsPF5CX?4 z08|S@dBRjI_j@aq?7Le=Rg=)aRDfz5^k>lt7grjod>}l})Q7S;&1oKzoK7qwckJ6( z+f3G$*C3vQLCn*V=SO!y z5eBHXueD0fm(mx_|Easi^I`XhhacfZ3&8r?T2fpsCS_t-)Y|*nfWvl4=DxAM&i$!4 zi7*hzw|5}C_s`T-m*Q(4_?-s&dIXTa_ES+vq1!d#@o-u$ESmA=BpaRdXlM@;Po;B}x65^Rh9m}lzFaT2H~7n@P}4m`7)Zs}rD z1Jjh|`%W~8<;v1hvM@WBEVE+r@%(|Ip%fP+IHi3WM(|h}jL+{Dh)DB~; zj5lNOD*7u{TPy2BR)b8B!}{_9EqLSBx8}HO{#R&x@!erR?tLB^6DaP)N^W4q)F$Y z$p9BIpDl4TVD)YZB%7E|KmG3?CtrQ`MRN1b8*F|0Rs`!=7|5d)r@1MA>&vSVSMZzv z_K(TE&%a7GSBL}PSqC*41@(QBS1p+JlR#3CGVmKcFWq`L@^YqT>>T(<;+{?$MHDj8xkAoCcfFJGi~{}8qN z$JxLKiKd070K0==G5d5j`S09&{@GkId-pNf25|QSnnLU1dNp}YHNbITacT5`i_Z8* z7G$zbk_qtL%cUs!{or{y+~414nI0dD z0ATRnNM&nK^9Izj<5JC0L6z}$Xn1IGS^f#!)LL{{1sR? zSP#qi|2;N0UJqo=Fh&^Y3USG( zege7R#s##fdIZ$#)|OTx5_|T^^W?Yx_;GUoi@V9^|NS?~^0S3xhaB)7?V|@QBmMPL z!v*HMU@?xU@gNa-iq`+Dd+gOoPVpQj0*Py_=5H==kE`PY$z_grFBc;9X!OXfP=-s zBF~^=CRTycNC`nUo%nu>9MmBk3d05znz8cu{=?+%r(Yy@fBQx9#jih2R&X(FE)xfU zOW+_J7RRoP7ZI8)uboRSk#>5jHpi(yxlLUm-dG{<`6~1SWkPGNH6iLO;ijI^$%|bE z{r!Dcin;zvZ;#&o>NoRW9&ElmVkbIo0hmhWib;Or530Goe+T)TM8fI%$p6fYYg*C` zij)>8|HYQP~)!XT5;+Of_b1#Hen|9>-2 zfFfUPHo$=t8pkj{UY!_Ze}TT(1Hk>t9b2z827w^l@Jxm8n*`I#>^_FMB@x_r_&uE3 zB?!E8P};~*u9?!Xn8P=W1%V-FdjcGOht|Id%M${on)kDB9w$}Qs<8_bxB_OiG){oHuom2Y9*h5?tW$VT&MckaRH!H9q-hAl^!*vNqnx27m^Wdw}TLJm5_7S zTqJNrkK zZh+;b<>dL@M-c}QsZ-g-hnyt@5d&cGV6H-W;0orq9A#!HCyH6tT8y6I%=x3g|7CK2`io@h(hSFR#I%e~j^WZ`q;J=+7DX{Bbx_g_N+zZKZ-}PtuPZo&jRjij|5dielFQfIS0WLj! zo_viMKYC$2x$*dI{Ha69IKsgA;6Cf4vlFk;s&fI9~$4T zb)nuo$>g`t;-m?4FdF|hr7X;;d{~QCsBH}4V>IQ@y8pV3tN~y#pPXE(!&mzl$@=2-hz&G+-QU&zdHH*KzMBrDL2Rbp9+ zY~y?B*0to~t*c3CXeizfjgT#{+YDG;(NabIfW_yyz<&Kt5fr<&MDG96M$`dtBcNVu z9s=qKGZY7!VqUf@X(?{AMXKOJW-V&|u4n5P9V1cZFb==BI)e#r27|@~#Ml+wy^Y@4 zK{b8Nju!nf`d=hEm(1Up5+EM7?4179k=M}BWZc}HP^V~p?18?rKombw(L~0EjXvAr z*9(@&k2XTuQmm^80qdMwX_|@4aEoo8p$5tS7pDJuj3cxQcZlTBZ8U&Hpdi_YA+Oh3 zgMe#JL9l=jFh+48h0tZJ0M=I!tO40Wz{^xdFdzltUbL{{9CH1Oi;nmvZf{5Z56|An z;eJ8@H4CK_ru;Icf``Idl|3rHU%dBZ=}~DMvL_0uBAW$W|M&0x@72CSe(A41{5xX-+Sc`s2>@HS zpW*x({5M$PzDU2{ogh=dlJ9 zFyE~Z%|ADU3t-QjgbM(l=@t?G5h2||KNUREC~?xr80sPx08Rg!1fDhGH!h-`Ud3HB z27#HxY+WGuNI|mKT8BUeJ}m*)$m_S3f(3q_EV*T~X?>}+=Ab876|WpwB`?2~FSm_q zzPxSCcLuT=&!QjSAOvdyEYB{5Dbr-5x3GrhlQ7xD8Lox41U>j z&gT<9~$5HAYET&rqS9EQ5c>|TNFoqwJ*!l!5=of!@`wxHd^RIut(XMWH zOaK@g?jNJ`KZKzED^{M&ziR~mwOe_dnw)va7e|f$PXqr>koUD~j$6Nva6y9eI1UTI zK^-k`l#|)}Pg2c>`Pu9L_j;K@8VIPXqBT>gO4d3lkMl$dbGu}Ybg@l{x9>gbHJOwJ zeEXiQH4s0XwLzqdHh?a6@%#(Yj%=9ffr?GUHoZh?o*jh`Fz#U(G=Vb{L6 zpv`aMd65%iTv;G=6W=b)}j$ZXmLeE=`@0yZ}u(L?jA{dd*{+IBO=Ub z?Y(uwcaypAR64y>Q=gzw|I%Db$JX-^iWXXj)VeJ7Yo>@UBk|}QqfEo|+USMC*o=T3 zTr&y-2IuMqSb>lqW&_BWeKY*C(f%}ZozI7O(|AD;NLy!cIq(ssENzWcOuf7)G) zBws{aiPK=PuIp66-{M~BlXi&Gl-x!{i{g2DDjF!}n*kx%|}(3i@5%6XN zLBI?E-$#7H*#%X@;Az)|uFWW=%-J{q*JuG1R|{RQy@Y_!P-oEv;JF8#m$31jN}e>{ z?V_`_s^EX#1*LPX=4OjVpP9s5Q?BMlaHZFPfQv(75(K3OlUHy;aL9xtDry%!oOuO2 zZT~}f8>18J6u7%)#Y^FSu2IwRbEvRr{?}cnY_vB}w(XN~if<^cxNfzQ7`qd&;xj+* z=7zA6Q>h*p7`XYT-}%9HOYFC!zwNaEw4+DIcn^JLQ$quR4A*f1yfrfrNTIK>*HX>P zS_p1Ztrpg6FCieYy-L3SzqmOVRsXCt-m@CLgipKQ=~jsh3ne0al6j z*L{=u{oDQAZoi*Gk!7-rWG_wPf^xm4DX87v^W*PdpO&NkYI1F|5HMO`m(MTfNV%jg zK-ONq%RcFnGr9(w8~sT>U-@2NCHKu_ar-Cq?8Bq>?tBXXK+hB|cWg7bNb|pefO*VB zy_QC@yQ7_ll5(%Tgn$f5En;T0KP*S*u~hU+XmtLa=!d5z$x&;eLJEL;ENHRY+&SNf z8QMP}aD8gM3~Lltfb$KN!|^d&w-+ebr2?q1Bn-P=fJ`%sZ!xl4q`BVB@AS7NxV|j{ zz>nViqw(Ql;oa?|asfcv$y1>(spFRZ%&+;91*W#xZh7mJ2$I!FsssL*sDMQ#`md7rp8ZYoakA3NoU}y%&>T_8SHDjO zZUy3<5@Fo6GKD5Fi4`CsL>T}yrq^~MAX8IP)=d%tR{}>u>m;9&Io=&g+fJGcC%~66 zwEqz^Mu3kkeQn!gdN(=%0$Knh;Krl4W`(`$_B^lI0W1It0hYNf@V*y^ascA&`$$@* z6=1v4hk}C+&FWnw9zfhW9W7TR#r}P1Fy#2>X5Y2i@0JCCsP+HO@(sXtOLGJqp)FB~ ztq!Eo-vt3N7z9E-j49W0zwSKd-=_!z8XfM@t^-(VTrL)g%tVPA+L&Dpo;d|oG>|Q%P!LFAFZ06{LoJ4WOt>2a1QHK;E?Qt7FUCSl0)ug^ zBCgY3Ya9ZSd;=$4#d8&2|5e~dxyKL9@^l9|(> z2g%STAQ+kEpVe>=A9rv4yn-gu6AL1ZAlls`a#oFNfgSQztySJk&ipHPJ)M6TA0j+P z5gOmRSPTtxjDWNr${hcEs=*n=C6T_N33s!aEK?cq{v5mTodfrjIF}ul*tg{OrqVhT zlt}2cCLmzYqh*%rR}>##Wa?z2KlkX-5CB})Y07s>-v+S+ z?~q}#agM;)vH;K!guCgY-AT2McoE`l%Ae8yJ*!#$Nphh?)H`;bjV-F(DMj(wi)~r! z-{MqXA&v|gV1=+~pcVgv+4OMd_;29O%5b@|HO?Bt3wk=5{dD0Bgvm@z z&O$C}klX1P!O8Vfjf34ZBf?mdnWnNLxz`q9Im-fo+N90;2i6Za@?Y0cnvRx!*Lyu# z2|P*;+TyA?=k<@@X$5xp(V+{UPGJY z6UKxoL&jFkt=;-qekBj)LF0dV=LgmD=Oe4l)&po-04Th>dL8T2pX96kmw=C{f2X2Z z;qTH&Ar{WX(KLVCNzaJwa89R*#sVTjkL&(Mc|Y6b1>!Qw^;1*d5&wfY)skTDJh zArCk?(W^s0>a)-xQ>*FR_LcxrG~GnT)3{Hzs|*M1~f$>Phw8 z)xS+XN$xf>=IolL1pvGwSE{+f50RU0Jw-8>guza!6gc2==;Aw@s?42xd;Ff z`Ch9H0YQ`%7%>Iz&8dN8iKy!hDrcQ#0l;8K3n1zD*Ytm7Ol>O-2CNOS-nYjE0fRG9 zf{EDS;gMw{Y5dtJSkwpPR)NB^Hv(4SI>#ugqmJ9XJS|r{$ z?jF7Eo~d&#L`^j}*Z1aNLZ(V3+&0-fb-4yt6PVff#5?1aM$uG9I1I{FYt>wxjkD&B zdSBLr0ANf=YYP`K3Uvqsl}H5?yC87g+NwGB1gPqcugaU7?Pk*gK(h}x$?H?X;2!Pz zaPyJNZWTAHh-D1zB>Q(7pRFb30y~aLd~OgpGrvwP_Hre8bjD1s&W)*$Z(hV*gm2Kj z=(9*`@v7Il3jzxG7e`VdU(3x{0n$cIfc47hE?T88dKed!k^VQ?vD6aCqu`>&f5gWg z9cvFB8e0*DBa)H7bx#aoER`P7+Y?)kzObMKMi@hS!l+k{)ElEVLbt43H z1I&!{B~zHADis7aOjX<4Nm8alAYR;V>~ceWa$F<3!Kb%yy?uwmHCauLh(+mj6Qm|UkbFJPT*0f1T8wBrHaiUKwv5b&P=z19K*gj+<&a@GtvjwZ6T zZM8rwEqt~O0aiHB?0S-9H=EyP`;d)K`7q75unJz}A=*clMPvFz@Ejo#>#UU%jN043;*Fl;iZn`o@){}BV=Lf^aCPw5;U zd$)yIm|0=|HzwQ~x0)<&lpWhtLOryR>RlY zv$Y;Oo;CA8=h?MhB^pXbd1De&`QT2nKv`Ee3v9GyxRc>cc5d|d?YItIl+AqzRL1q> zE`Q&iw$?!(L8yg@1CXBiuh)(afh{dY5UJHN7EQ88)^KNjIa`jp5^fMwIpgI3&a?pF z5*Z*ZNM9L=5+U8KZ?WYs+Fa@AxO1-WAU`w(X$(!vGqP3GW6wQJSUlUYsDk;QkOYvC z$7cKBxJDqlLsYn!B=TRgauf^E0?=z+4FRLLZKE*HH>T4}mETbuXdXh|OcQt1!j)c6 z0(~8Z8ysCCtHwGObY-GQ0t$^2Bw`wQrF!2QK8Q4MJ4GW)CLz+9R8r!8is((GL4)i zI(gRx;P#qoEt=BSF`+3KP%sdIV0Z6;hVS0oh0;S!rqL(oHmD@?&H``70<4QjbE{>d zEwmD;LEKdTV6SyK1a_xLca3fvO}bGg7n8YKUZa^RK?}H-As+>}aWD#0LGPi-IL>VV zh28aV3tyk9t5P4dk^Q>XCKRwS|2r1PVG3%L;cmmT&+{#UCoqZPhYUYv|S$!%>mJFjP20C2Ic5(%UNu)esREPc8ix&PDT&(2Up z!?dCAUMsP{(i9uhYB2syktyw>Lx|z&Un$=<&E;I#! zPBn?+B}$?ixMyzoFan7}$M2p|9@bpm2AhQTo#`S)WeOqG6c7-5d#pDIye_%Sl+9}t zZQ9t*MTysbY6Gle+I_N8j`)jM1nD6UHlFlXf7PpdROaOT{ z*O1_KvU&fs%yYU)iuE280GbWl2Q5PL*NP$W?FodPoVCu}+c}IXpWNOGpqVwGQ5^I- z59z`f?gRyZxxx-iA+-VoP8?AH$QFe6DxiQAITTu_*>A#>WixH2(Eja%)~x7zymSU; zXZ@~G(ieK*T*}-x$PigW(|^32*8N{cVOitg>%{#3m|k9vI?cxb*Th^@x}QaIFl z6;!%YssL-Sp9MDH0@8$;Ps*wNKdEk$mc>N4UAu+==VY7t@Ep_Qi=U^bS6;n;9J_Cv zlYVTWac0_lwx&}}rZWC?&J6B3=x(pvI{xssYJ9p7vh7v13c3G5=S7#-`_ud6Xb{j{ z<6IgPC^H=!OtVs4+s<`8tnmA|hKsEhs=shtW{hQE6CognfU9Dme^`bQ*MroeTi~{z z@IUY{o(UH1{>?2|8R>Ugs$}p};H{P`g-%^*mYcTtl(@(xDf2%XNY3rAohRo-_s=?- zzCnMQ`sY_ubN?y|ipM5AKvzJ6B%^EK365#@U;IBm|JhP2ojTJ3z(D|!Da=zUbQSeq zv+?Z0X86~xQPk%8ElWO@N--UF^<>pdq1F;PCq= zJmVkkt%d2|pwD_u6aOTd*CZvPjr_kfKEVA8FX9R6{l52o_d%c!iLQV!`Zlhq%b3z1 zE^Hq-J9y z$Q1}=B$mkb-hI#)X^26{;oBF_0!Lx|;H&X63IGbOe(x~~PDktk@42McT7*Dl*WCYX z)sp2fedg@bryJqMNnHTWA-rH6+N1E6N^UJa?ezZa+z9|mc$Nyq%@dOKDvZg6*N136 zh?!3b{4#!QQ`=(ox{`Yfv0)5XUQ#130yF}dF}W#BRY+^f2jiDAN;FrgvbBU@;L6h# zW4gBA3GThkk0qiW`b#6Ii-$-&htck?j`x!ug#aM&UVX_nEeH-6d%2zWx%9vWby z4d5vp6~@-erV*y@fGsKXo`osn=cfn&%nQF!>$f7GK~fTStMqx+N?7rQfY2E%0N&63_S)HifDG2=z7M6My`hVj4GF# zJ5&*+-i0`#1u+x`Zo5O}bzNPWnFmlHj4F4F>BARW5`CO;lOG|czKA*2#W@9&RV(+{*i17AHhHfQ#);}}yV}?8?WEt(-5{nC zUN<9>u)0g1rc#5w{M<$V^(>M-H+YO|VI>N{)VBaAI1JEFw~GEYb^k56BBAegntTuj z9`xC9kK?$X_d3?S{6xlT8QowC#QuEiVkx4(A1rR74Q@uT*UPJ4f472Z>Nk(pV=suC zlN8|rH!Of-DMb+5Gm z0r8&YABD)I1B+N?KBJhAv0h)zZie;8&)wNiIF$_5V+&S(FNb$&1LNl<47toL@`RfG)xmZcO1TV&Tt>rU5=OG=Ilsz1AQE)TlG9 zdU(%obPuG&SyzEURcKH9q;t3DgodYt5{_@+l)!f$_BQ8a1?k?+iI(!N?M1Og!dNJxwi z@H`c%ZkVQ_0&1kTUZI#24o$y8LxY(50^G6Yhft8+C)1V`%slo>UeM+c;7sdTqyphA zSsj}H6zb|VEeI1OEITlLtvmW?^~J_>>ixKl-)n)gdBQRsX9LRAR=m@U^f>A)7(b%N-mRY|h$`=m+{An@?(q_o|#gwqoM_TjLLe%o9%7WnMS zPVx_5Z(*uq2RtI$+(qyLfji&1HiT)B_tc4>Mi_{+qkTkg-)(z;9smJd1h&1z^q>;W zFK8`qUcBGE-v$JH4F+js_NPzRao=nP<8&jBdToID{JpEgFot|`d#aF3an@3?w?Fmn z(T?wfnM2JCFf&+5&2yRL9=uXfQ0XyyEBIN3aaI^t_?o$1;uczuOpN!MqqU@DO}v!h zwv)li;G;f5_7KiaV?P#f&jnQE%(U$#E{hSi<{hCU`nxS1tmexYH{ z?c4HJ#(ifxAG=pBZ1m$BE+lyv#|j$-0J;O4pX(wtKm60D<>WE+;PPmHs7r z_;}!rdLOnG0;2c|)>;L#-#y<+QYg8wCEQ_4};=>UE6-0k^et$8xjWGh(NI zvYqt(VxTy%_`yg2x#|4>teZy?03ehbz<7~`!ji6H-8~yzkTHdt1-(tSK{Sb89&`0T zo`Et%%SGDsKab+ymhce|6r&G<(4z_gpGIHyVokH4bAihQ)c~y2rW?nFe;84Jaev-8 zZ(}fhqyHn2(9d36&}i>lzfNXtKg$v;-fM9(P|5Kuv$70kl+dYtmifK{#j6nLDC2kG zdEVR|yW)=ZEEFd8HVe$Olex;ot@W@MPA>!iCbGa9F-SABIykr61jg-fqycZK0H8U2 zcpx>WyKu5#xU7V8jG>Kl5o=VfA1lcq!iw{~LmyUs#;YLP3%>YDd|CmwEW^; zdxo>rNwRMLIK$H-gKR?K4AI=al7Y(0>C+FF8#-U{po+Yd(^SEE&znt zq5m=S@ga+S98&f!oskco$!6e4PQ*h4o5d99S-B?&3j}YxBoRvqnL9)CKnmH@17Z3od? z+1g5;u9bt?Pf=iGhWy|QM7FzB;_F@0Zx`G53iBZNu|$UNt> zVCsw`m^wd@uw}F@JNtq6ih~!=w(RVChvQDT?H*QW%|2jn+ZB3W!+RdCJs@Nd-}A=) zD1^E9hVF#7W(K3KgS$$;cc~cKxx#=)yP9)r`dvR6Ja4d7u$%m7S$VQp4u(?eISOBo z$Q%d(2MOmJ_PSUDcgzy9WN)u^4gxHbWC`J37uZ8ei$7vd7mK)kMo}OA{2>sqP+Tj{ zQ?E4#ftCdT9#j3R5C3j~^TMCK^Ml*0FO`5Z*+i=toJhv8iAILfpY)-%UN&V!ntnx|T*rr^lOc8)Cn z*5Rhh!oI8VTerZ7W#bprWYl7^%4F^6MyEz|FrBUSwAa6rnXa-pthsT4v@JswwVYX# zJ!hF`Ru$9pV*RuO96r@KH#-ssqkU!gmKMG@ zZ>}#Ubck}jCb6Gkey&dpM1Hq(CZmxdb+XwrO_`5KPm+O1ync%oW2%ywCaDV})nq!^ zk8ZiQ@Ngc>WN$oPE(4D~1R)FPP&JZ_jO$p|u8(W2VSbdh+tjo?xN+5mVejjDWgj)F zQo5Emb_+?>dXz4iuja}yAZuI{B9PJRvw+rGzwLcj8v@eQ?cLzLC4yXZjTsc{reaq1 zw4AR3vhhByp^W+0zU!I!c9ZaxU$8*_ftpiKHp|I_c8shi0JI3jQ~)se4AXPpPO^aS zRz^QFw!`68viZm?H(*y_%w0!E>rNK9WE{n9Jq_)@e?MrrTSX zfuo8b7F?}1oowFS!;MIutr)CC90?WXe)E{ENYQGv2B=^LLF;T+Iyuay?(JCB`wec? zjiYNo0l=6JYcd!NXthUo?X;Y3yW}%10G8sl_)|agK0#IwCI4fBHVv$(1AkpN4;ISF zB0`E8Kx(PIK)|M^{?LoH-zn6)PUcu6kCs^9`1(J7vL5SO>x*;5*IXm^q%AuidgHV4 z#U=#e*E>7;zuw7L=NGqg&)PApwg><@%n>XCb|b2q5im%{&TKGX26T~5->eyf25oKd z5RMk|aiPPE$8UO%507*naRI?A!t)N*G;uBkYX4EV@XE*Jg8-jp@!o3{S z0^&djlQ7q6!d^nH1;Db-?j>N=KQ;eOPlKjq8GNMop~&Z+TVB238^LpmMTAYyi1JvC)SW=G*(acM+U2wxrh@gMjeW+%G)colnb;(foJkw<69$E1AbK>&E>;(m+^# z#Q#>>UI8)$f!1%;%Hv!94RCsZaQU|grcW6AFP>Gl-Tt?2tt|q;&ZndIx9+U`v|QPq z7^Gkhz9v;-5FX2zcGlRC>+U>p9$0p6Ocava6aB%gBYpGb>i6{d{`OYxr{(h2y1SAHU(e;^Y z`d5oKC%BQ>(Ud%lsn?u;12nBx=V!;!3|dmNRXCL3rF}k*C!eloWa}))n~3_x)V{_% zJKtrj0R#1 zx}xUHUaJiO=hJyj)(8y5Wn(~?RM4uMGTU}BSQ*7I={XAg7jV&C8DkeogvuubXx59( zkdY|Ne?7NJtU`bC#x&vyaXxf6tbVmec)q?3*BwKSwZL3v75wg+y@PV31wh}wFxTo+ z1_3nrx7t*wCRpQfCe}wQtjfZJ`Hi0Ny$`5u6##zr`Oh|geESc7vD(-F>w$rRoB4e8 zdpz&% zz?Q>M9wVG9gda~(P-IXd#Fx;Humt$KUV9Y+n$p+M2Z$ zUuLd5wf_0FwEl&a{QlcLYxk@DZ-qs`wIaQgc6zkh(hb}ClM-`rlNb+$yr&3t((<15 z?Nz9@RRHkTf0gVr9nhm2f~EK=f`EDt~ZX8{$5yn z$UK)|rsmE|OzQd20pSbx${>t{@RT@Gv!LI0zB%9a(@YT$1({x}4FMq}+*~}uF5B;* z-h9`->%rf;{#n-36)r zz0o<`w@6=?z{AJ6ErN$S)Y>ipbf}*ly{$;$Mlxm0Q09gY50r>LCRhxU@f$M}Aa9l6 ztulVxN-&?V<~9Opo9+}3+9Fi~7fJG*h49O}5Dk#@HxDTs1oDcs-@aVr`B(L281WwD)pLX{ww3fWQf4O>*EEA{}f_=CDg+8xSGC9tC^YB z=6HIoaR}%!St3A8p?QMT&rZls3fh+`JX8Wt4Ai{OT3=t+42|2#O$-ide8&z4vB$w2 zI|}|IcxiRPw{Zmw4^V1Y#&gi`&iH2vP#(_PtjwOZ`e0!zd4LO4w~iSwS*o9cyw@E? zup2_VSHm~6>f4l&o?s57pI67ya_k>H*-E}y-U@+Z%f12s?q7nrRe<<+C<}kn*O&ih zE-AGo=l>wo>j?lt#l;LF>gQLpgcoB4KzoERln51815t9-pdrGx$~;6onZM_47}|Vw zJG2c>vDL6r@Xe`Wm^$r81@XC0yXU!Xz=4Hp?1U}=Q-idg2%maQ0$jwRq5x3Bf9w{t zY6Y#%U%lrY_C4JX{Ibiw@3qDuz;X-A3G#Wsu<>6uccoUpan!`h?q)^08B%s}@O_-d z7i*b`T?8jl+Ys{D8Rg)goU2cTf#!BM?9tNsR6jpflBbydBW{Q9rFTb#mE=(+YLIn4 zMV5~#*H(HoEz`AD%@LykagShbk?E)pJcd~W_4F`3-rU^zXtT6A_p{G`UdJKTdZ#)j z0BE-8%afZ0f1Q1YU5Q~C{(ydHO=p8rn zsyCiaf3FEq6QLFWHE1KR707C8(!z?40m3i`juYE4!aQbm-~H}5n{#e|Yxem#J9L_! z1>Ay=d+t!bT~{)a0vz1nUu#8Jj#xZf!vb)a&=&pSMWw(1Ht!*RYB3bYvIz7Jb-Sx> zs{$pQq=zzFVPNcwwfhZz)e>M3uWQ)#+-!^W%u7ZCGjqQhzm`(X{$tG9D90c)Q>%_N z*)6hl-|D8JiygzEX2;XO^``qRvgsZXFr8~1696<@P!;GWmCDXFr0E|rb3bGlBb^&c zd+sc9e)@Ei0v}tL1q;b7a*)T=9Ip%pLh0*!`@%pVm8Pw`b7>jcXKMtqv_Rqg$?7&{ zF(iVu9fAeF+zN?yiahlxq}U5cwC_z1B8cdkNDJAx2wV{7p{=rTKY#8%-)3YGbhWV+ z1f-VPP*JU)`mqXd@3M2-HR>=}@l7|&al=hg=e8YHh)^8O)4uaC%_lxat$k|!mBAct zxDsn`Aa-c{7N}GBdrgbsrE0eYlTJ%KWScxG))f$w6`y&89f)>krjBmXM}>jf`R>+MetB4 z`xX-mvMgaH!@JDa{(O0Fx{_in?N6G9swmGoqgaIn1YiTle z5AXt_{S=GaUCK7MP4z*lgfS`Y&SKiKiJ+}C7g4q-TtKEFDZQ?3=aW_bHvQ@Lc~t7ZP0Hc>slOLp(gk?G{ZO{O9l5~*^`!>2T@(LmUc-KP0xLZVk8~A zj}qm%T}=y%t{Um`#nJu{ur5(qbp~ZgnrW<5r1tVmEiul+3K<;+4)@x@5a^fyaPZ^0 z|5MRb^Oa50(W3SIS;!-lGqO&I1VJ;QZ4#f;^~pjYd6g}IT?FnbW{`_(Ul)NU>}H$J z1R32r&vrrGpD&}qV(J>E>evvj0+4+SX*Z&kSrjM2*qR+wlh*^vfL-_y39_I%5Tn!bQpnK~xPMcjb8{oK8Ahjm`7iJRuP zf66w`GN>!#))Uc+sE}U!x3%uuP2b7XSCIf*pAQzdaex>0x zsR1t;zUF*4?RtBfOdgOxfog_B1?&RKm&_koAU0T2!pc%?SJ-3vYl{A8$z3z|_OvvT z%_xBQAIT*BW~b73Klx;4z0+Oq2>|;bEb~#5H%d?c^CIa$GJB<7n*>QSl$6M`0VoEz zXsQ?awH_GtDnjO4+_u3!=w9$x$LzgGYLbLWZP-nr^>y8ttfHwXAxF2)+>2A-+BJR7 zLP~SMAPuzSi#koq$V90S#6tl|3&2S=F-7fFA)p@#Kn=6k8izmzKVf(rLay^O5*7f{ z579P>38?VkzEyit8~3CW3JcG%+KIn1Ny!FXKQk?*O^l7qB?J5(qU@-oxXD#3!F>k- zCY|%tVy3_UB2^kK*rd7t(in;ulVm)PX}+&CI_DfnH{3-~E5K>u{~$ObRqhvm{qfK4 zzA~;3{?T~>05b-Ob$6MnzsEmwfhF-S4mL6Zy1&VQpHU64TK0O^Tj#U$)61+ZD&3GD80&a)E&QUuLG6}2xO+dKEIdt2cXrn{@a4!SI6e81k zIb;wg+R^TOxHMh?&Lf0?W9p{9WX;bPi61fJXBN`DE|Q6dhxEbEEq){JY7SpQF)(=5 zqGEX{t+;ubn(IxRb?r&V9GgmrRu0dPm&(EX*BGbQw9+z=2%Df`gz%?l-JAqfVr@0O zPeDM-o`IUW2lfNgz8Zs|l3-ugh#S&{npp)}>DzhuO~(ZQ(im0=1pM1lsq|k7lexhJ z{w08QyWp;G7OMEw{k162a}`sdTAWPXbQ1|v8aO2LPteG%B9jn!QN7Nv{(lJ_WtF^TEY1yYn5 z9)7#7{>DZ893u0@^D^!f>#~~~2_*u)htU~nf+dwT9hy1u_Dot0%;^7)4P003`wcPFDo$^$cl)fNro$3Ib~Eojy^mB%VlB06m7wI(vEE!YaApH^EIK6?8h7JN?(95mEI9CgYCNs_B^g{-{m3 zddT3llo`CM_OE-@H~`o3eulsQnRdf$D%~y~(vP~?Li=xW4*xz50pJB#xBAvL2a?i< z7QZ1Hn&dXz0o}Y^@a_U|F>8vpu!ZKlQ4q8iW0A-!sk*sP#n^yx&O1~Tgc!`>S2Vay ziFKJ272Zax$KYN7araI9j29(wiwCK37?88Do<_!+`ldFl*(`He97O=x|NKt;Eev7- z1E*C2i_|uj4g6~WI1+2!A!>`01pb-2d622F6aB7rPYIiXhy+dlWHuMIeskWx6K2kx zg`9!`u;yY5=a2&Y_IR31$GJ0(#4^9)m5fE4%+~E;Jux6~9{hC&(1{X)!4CMy#og+f z6E|IEGIV2~v|P9??<1o6ql7O(kkUZijw&Hu%m1cZ%pNcL@k{vijbWPL#q&58fQ$I~ zHTMr0JjpddXKPG()^D?>6ikd2^4x0tVbcRT@(-$;jPjEz835!md?=4R@-3UV!LbSe z$;UI#lJ{1BvR%nx)Tw@-+0`9zy@~!F>!qw=7Ce!pNWYr%TmaMfqb`sqyjLUG+o_D{ zK<|@VxCM5Ic6f8DABlM|d{n=CUJ28b8vuR$XeA_REe*{qrtDhJk6D<{*p*Di@+ocs z7nUaQKe#>=rnEPw2V#>G*O2uKy1B{x=P0P88Sg8$S^ebEdc2qD)@u^#_bv^xrK`bz zY7j;V*Vi2C-S?>9Yr34mEd$A8Y<i22zu0+at1Ftw83dzaGr z72Yq*{VW-2tSx+H08J97Jz7p z$VH-<20_e;q4Y=Aw)b8>z}*GqaZ1%8U3=p?&3@Tjv|HT7ew2+e=avQ|E+DLEeb7{A zAe;h%nMkfFk8U(YtLc+DiMi2(KpxZ(2~sxJbi}W|I&S`l#bpGhucoFIKsX#WH?Q8I zvwvwgcK+cVFaR~5G+@)YcD%L5*4dtQ@7{pI}}1+GuW(R#ny`AZq7xad0DL~S;ZPT#o#Kt$4$ z(GWC-^i`REvrQPr8s)`ttDtx`m*3qNEwMg7w`(qJ}lWv;vY-;{rvg4lxJf3OBLp-dvj@9x_2~v}oLb#e9*7vW^ zxEV6iSqRw2v0S=2$ut$r)*(>FHY{N6I5mn&XF>GJ-CZx5+pn;L<25Wg*0}(LC&oAx zps%38k7Aa#8s{fZbGY#=7F2`L?5@WxIFx4TuV<+zLQIhLM63;Pl1)A?gB16kW8#bIpDf7@b8e$bkWD4&I3sA-!kUAn^Y@4sjl?|fCeD1Rz8cFeSxwJY9>qg z?)xCb53d|WQR4Se(?b=*@p9%q~Cea0QP4r!&W*=oD){23k>GH6&cp01=D^?Zqo^Q+xlelr=g40w&%H_GRZr3dfJ4B9Hqal^r4mC0G3AxV1JY3le>%CxsGl3Td!{? zCIIB})g6#yiOE{RHa!f6vCO-(iBS5KB<3ayg038 zLmZc$*s}4ae|7#f3*IxM3GxuyzUF={Z`RzHg$_jo{gKT5DN;MO0x!}z8NcR!OZuwx z7_2KiwE`F{-Ac<~Xh5|Jj|aw!5L#Nf-wFI?vs%eL`0J1V(}NRu_KQbCHczG@WIh-i zNPY_AyM^NVC*aGilj%ugPmMLpixIM#smC_*$r}@eKxpsKtSMclV01D62^K6M4d-Eg zXtG!o^xM0e)f}cRec_8Z##=FK*=P5{c3~uI)#PKMFZ8i#4KPs9O*P({N`0-C)r>t3 z=nwyotvN31{F^TI3~gj?Io1C4pJt1^R%a{;`UbTSEEy`ZXm%cI-Mii284GY7v$p2Y zCs;b}&E~@V>T?#3m~ zG|w$IzCjzP(KN~}!dzjutnYj>w@Mxq>rPwuU*Z+4EBvy@%2L4+xsm zZ2lZKg>ec;HTNsvrooQn@nh|f@oV++kiJOgvhlYn00muR912%YnS-xqH$rhxIqS9N zAaG&=z|N{1gs&i<;ZtquaNd>-zCsEyB4@J2~@A-=gYYD*<(C zHA8ZLj&$>p^xl_7`eILkuVzK;GUjllThayN=}nHA4qJGL4GklpZr(i(AUv_nFbmP3 zCd&$^5;_t)rcKA`aa>J;vw$B@3I6#if)6eWwWV+i2=F>>3f2EDYK6NGxZ5SGWp!h- zS6U|&0A@z22_YC^u~5(nqT7v~KIT3-cLvMKM8?USn4#>exanFs>83X8!Waj~+-Kqf zII8wvR{#k72B(RlUqJwvCGKOC3YBWWGRjUbHkXiV$sjq`m)ibk6!rOx{WXl1U&Neg zEI?S{c!}#<1a_GFQ6w&e&%ZDE?zIw?QTs!f8-vzJd$ibPOEfz-`#6PVHGfDkN&|HA z`9{PmZ0-s>&ED@kUw>i(Kx71bo-9!u2p2$W{Ovd=F5^Y|0M}SJ?hvK~t;r0W=I`d$ za<%~U)!QnV>(i3L=EPzuLrYI`VWS#p76y2h5W_f$D*hJ zpn&Z;BLAUfb(Y(KJrNv1_jsd#7I7|o5GH=Ca+>>7p*$6IOn;Qj`MaYw`-?7vn@}2z zaKi@~T|DZ&JNtXw-bvpxC@N-U1m*!8BP=NYt9?Q~w~P zJ|PCJv}{pGOw?kHG`>bP!4i>UOSlDGNczO*Fxz>E?5i-1TK>nw`XO;pa7orULj?Zm zQJO8FiEeV&Om}sUnNB9PE1w*8%a%U3ACALfBM+&w{)7l*g%Lli*JSilrGe0(r^(Zv zAV)kkE7tqv$$l>!HG?|O2D#nDbunDrp@7py1Y4bK?@s>dYQ(oE2cl?_g{;i(P$w7&t@;mNq*^0+t(K{MKAwF;{l9Wc&7bH8iJ zGUPIQE1EvSy(S%bj+M>4{tyysb-31dOiJdikw%!YAxzwT)5G`E(34@TqzVTulE&A1 zAc*bv+^(fi%gQ!d#3`|&ErTTgnIU|QSa`Qw>0f?rfXDnnPXK5V;4)oz=aYHPA^<#J zwd5}w&ymV z9;S3@Y&*F)z8#V~`2sN5G*HLI-HE3CVVeMeb*SOj3!BN;b8Nb#l|xmkPSZ*;Ln!q) zk(ieuGSe1`!K8W`EC2>z+H_sqnI1%A+6gnJ!H?&?iED0iZPA~LFi7K4x@}GepqRSp z)=AZQfS_#M3ANC57GpixRcqm0eD2Q>;%`j!C+}Vwhzy+^7PnU1i2Ly+699Zpx)pAf z;yC5lS3{xS6l3X^76JPjE3N)=+y9vOTP#+qcl^@`YnuCwkCtA2z7kR7=D~zS2@11citn*|pG*xG) zPzfTem4h`ME$d_YS8K1rPZOlXGY;NMoU(Boc2~Qg$x&B{<#i1hQ-JB#607^ik-^US z)zmEvgst%b66J{0-v6UB{I~EJfC1`4GXutX)uj`4GekHiyo;LMQ7AEQT`kubnmj=^ z^X3KCS$(cMaUE(A)kW3D2Qdm05)e4u$}n9o;*Ezn{A$NmE;V*YrC~W@h%oo_oxy%u zA)0NDERNlN%J_vrnEPS)3T#>_rBkUpE_KhfTXo3Bsghugf)%v?GTh(K`F{!;1@No6 zl@GrBzZSctvnMV9%q68~m#Vp+RCkhb#NhYgGJgSZE_Vxn4(o>t>HZ>RCo8M$T2-J> zNE-2~)N91>QCqg?%oMnS1wa$m4ArvULu;^T(3b=WYUaB~?(QRUjFtK~a3phTx>-QT zqBLtPC{3=9=F^hGM%KSWxz3^gzrA;fkt;jzJkO0+W@ILrOg`&DvPx3vQI>6#yX_en z?U@l8wP4+#B&7^5%`ljeF1epL4$V`Hm*{=TGH* zdR=HTCcVS9kku5V9A)Vdfq$&wg_v>dRy-d1!;@k9pF5M&4vfbGpuyr0Jj5_UF(8F9-<>1hQAaTVMX_byv-0r>3$puF1xo z0&`f2Q0xXp#te>>^3~CaEyaqO`D4wm7iBO~U6%EeHYh9~Lr9 z-lU)9{egvfCN9jX90J(3H1H}0mj-N zK<9LHCv29%$3#XW$Lxky024-w?&m~^&D%soN9)|9t%fls#%4H7$ywFIujN&}$l%qf zX^ggb)b&R{^^y40zmRSCh-JQGXTCt586H8QO z3DF33Y@5t@2cIMtYRGs*2q-EBh=5(m7yvn&YJ*Vobe_=c12{JFT^6>!A^tR zJNDTvD?RG9#jTdcg4nTtfmyyVKjqHpfC#;yG_l7-gdkEdleCrCl#I{&%i6i|RbhQs zuAB#Qhcpz3!MO5!JEW~)9o84-c}xSD@$~u&Q{Ih+rHfS(Y3NSI9F_~1qINe`8%Px% zB?lrw5X*)4D_9MFac9e~1~{wj!rA^7S$Z$`8n?r=kLC+=zplB?a=t|+lzOD+!3=rN z8NmRdx|ag>(;4C#v{FTZs$Y6>+Q(&_m1_n84T0dKf*+x8xIa(=23?p7STk;@&A(iX z_6qTvowXj^TT2IB>-he`ruy#vu-SYs=Q5A0+T=9;CJAQ~0D9~N1|5ERHmo7l=dz^m zx-?^wybNqWA^_;QOwM>i5v_t#W6>#sj(5LGd1 zh#gu6%=WaR?$69erZ+N~!MdRV{D!*iTXYe0iFN+?C&`fx(ZqdnP~jP@4PQ`$$r6Xj z+^?xW=!XgZ9wET{RcFwwyep>ophPmC z{tXoR25Td8?2MKwqV1nw!0eCZFA@SeY2!f{bn-o#KL;5Nacw+M^gmVrFw@70FTzPw z*xf8E^8d#P3jT|MOOyc6$kt1RAa_}s&u#VXIe~Xp6GE&k*#&bp=|0ieNaqVPWC}DEZ#;bw0m9|6B#ndM8%&)f z?xCG*=LtbXD-b>O%!|6U7w$psaflefN}9vbefOyoxA3NN5hIp~Ey(XrCN-ky$EDDB z6x#QKEGqcGG3|1I?>_KEKTOGAKN|FhKE-tWf|$#3E%(E({Dk@Yi&D~{RyFBAg5VI( zA8x=T)kZo^$6ic|uQd&PPJXmmcC_=WL6}FvfO(2s3}Z*@=S&U|0%j2amX$YStz`=5 zUcD3#wf;aq(Cmwns1ZPqi{Y~aR=Ar`F$u<-KQfad3E1eqqwdlFgklY?={m$ij|pKVMU9Ww#SIlDYrhnpPX=H!y!zi@*${;M|?H>+C&Z z3)=pfxheP3v4S`Ee?~4$zxA=&bxL#2&p`>Lv7j2ayR#=8?kbc8_6WfpNdlu4MNUH3 z`f2evTn*Fx>qH3v7e4t3>mx6J>$7jp7iV&Rq=AN&J@JLXSklC=YKvdYR7_@4?hd2w zXxZ>t818&a*f`pV*2hCcSO-inA0^5-);*y97y3hcNW1y4rlm}J=DO7IlJW#>No##c zOCcfOGx^9|Cjm~^EB=MbwTi0aEdXdc2oglVlXqah!mqu*QEg+L@q(7k5h*U(G&UtB ztoT@8lvaGCAm6Ucx(Igx7>!Fy8K)d-S5b{VeKfCSaAa|eVmZ>7|)HI*+-;tQ24Ka{PINNt7XTxv=0f^Jf=-Q=nM5^{9)!l zA@>^V`OhDhP_KDLedwoAN@d;xAg!OkMgpyIw;~i+=??;l5&)hwNm-IILGFeI_=jR| zFKf8Ju2I<9Mf=HB$$6pi$dJS&&OjwYAj{^+4(ONg4Y{W9T<;&(8LfiXnDExCkz2q| zBvBrS3>g8LWW5yclhjR+&e2Y0yzrxFXjvfg(in~^t<%%xL4lsd&V~(1@!H=&*LI@0 z5b1wXTB1)hqL69UV0%HPO|a;t?n(TOC%6)BSY@#w>(S{W1+M|FDyS28NotG6q5gaG zG8!ce|Fl0xaU5yy%6PsN6u80ecc(KMc|=Kb1VrwuEZc5YzFeWQr#Xe zlCL1rGH^ru=3&`uF$fQ6G6xmb8~>}9W;~u}MWp=haC%PKnZ|DI#tcpjK$HApq0AK7%Qxxy18rMtM0tr=&(1W$h#; zh<@|?ZtT-I=D-al^TOIgdT4}k{P6c=0V2T{+j&PjMn(z-CQ)Y1!87OChXOGNZaHEv zUY?h^Uvuks<+>w?@u)O^glw!AaUklmcNp$Ftf~Y`!u0I{ToAqW;0xr(H7e1$nZFRQk5YZ3`bfcGU3 z;X(#xEiXh70fKuRR3MD3HX!p@UiXIak0bL32igVkH<{5>O(1LlH> z9A?8A#u<;(B-g)_y3r0?8)(zysUWdddkoB8_h23{C+KrxiLlOr%euD6UJ=9d$$VN& zf3JIV`Z;|-J#kqu|7SI}h*OHwFS9*)BIZ_$Hb5Uxs9}Aq1Q;^_= zfbxfF3mREQaR z?N~Q%|6Tj*ZtI)W>j|JdAt6mUkL1cBaB{|*l<%%q-5njB#7-ooZMcQbElg>g6}{R2 zGg?oM0E$aYdhZhp5%u+JzQy&@+NaE|X$h)ll-Nh!8+TON^{O;^ves36#Ow3D61SBz znK3_Bj(dvFF`@~>=kYcEi!S~UEAyfv`+>6W6O8Aw>RKIKLe)t71d2kspMPK=+xM7Hfk@QPh9%=ILWCB_F3R7E$*e-Pyx@dm|SU>W*v1btZ@ zu5`cRS>jzouRxa%OuPUfq4>^p!M&|mvoiwsD{9{#>qzd3fK~@!&b!SX_ZrA9Bz>(S z;eZO2)@!C1{nw3~3Q5!J5)mj2UFBO5n#Ri3# z*d25qpZB}0c*;`f{@+hARide=)>M2FiU%q(f!>iN1~06@dw0x2z;_CDw~TEAZR+W#;D*N8uW*|Yivm&-X>&d8N<_Jnrs z(|g=5EV;yr7Ux_@8pMnhmQaJ;eI*zKG!A-#12`vk%X}dm_)?|H(u$|p7-1jlb zMHnHiF7)<@OqV1uVgk;{e#x?%6-~Znnc*rT(`2OU#W)Ko=l-$bp7#e8u18BOOMoT` z$X2cF1FR1E-8g=a@Ku%|hIWWAG)?Az`Xe&uM$6c+K;6`VHHdM*IDBS2sN=oL^vs9c zC0&K;!_0^U()8ca$icJ)D;uVMHYPful{deu9R9>zoY5A`EV~5zf}{krey&ZT_K)zi ztv}ncp0XU0)e)rq9fknar(QwxztU&~?>4gGig?#jpYA)b+az89(00K{Rd84sJ_u!i0H#G!l%DQE-~e6`bx7Gg)kd5KxxN2|$5 zL^DE5Q?3E!D)EGkSOVj`a^%1z9<%6^CwtVrv0O2ei$Q`!P0pivSZB{3rJel95b33*rf4WgI^ zveZ?ceEk zqWeiY;oiq4!=03s3paokTDFeSzB{SpDxtxTaBG-(*~s^cB$cJIrs z^+2l1<)vmFr8O1B1b_i`69boJc6CwjhnsnKd!?Z$=+H-ZCshD|K#Q&@OPkCrdaZnn z$`D3eK&a?sHm6}j66NyO5pP%PO}2f;Fe09<$xo#cuLATLfcD$%;3PZVwqKdZ*VLD} z5W9d9S;+xaYEXTJ37=Im#8iAnKK{?1Acwz{3oY&C%=ZLbGIbCZl3rq*a6jIY+mwyM zNUe_PAGc%teNx*Yh;P?x!P{v58<~9h(v3?d01TwFqUcKY3()8eXEid!u98{eK@0D~ zcguQs%}wvijiuV8PEMk>DV!RiDlMQ16lC*}a*eHc^_jHNni|4qw5J!hfPcsye`5R+ zKl}HnzkU2H0#Ew$*^lkr0SLr2f*HwtJbeXM9Bq*9Fbp2tLvTs3;KAM9-5r7jcNipC zaCdiiCxg3df;$9vhgrTod+#skK3&~iZ#`BwBh6;_lHo@9;sn11sXoN=Md0$|SlH>v z$f5go{t5<+zfH>x%Fzv?>T-SYcD7;4XzV^aPWy-gh+lB&do`61aJ_`GePrL5{-xvB)??#4ZMsi=E_R(m5EgO4bTRsU~~Rt;)@$Fy3d$w}%;jiEaesVuq(0vw`?Vpvo>b~GDswD5 z>iAo-C+uJ&fr#n_?j~+Q!s>ZUCeX#ZDgP5-M&pl0E<9lXP7NjfxOj<29zCaO#&n!7`|OuDvHW<_ON+ z6$_yw2YJP^{X?W&6zFJGU9T@{NL|oav)QJ)I&17DvnhN9I$6uBKn0|lPLFFKHQ&m- zy7xbGX*|7aGd0tsnj;$*+*kAc+BD?gB zP?ZGD&2~r`I;&9Z__V8Sog=zLGw6yv5U@R^ifa)%z zyE>V}96KG>^~bGB zK{-|6p4N8!$TuI0ufc&Os{Dkv{H1u)L?!@Q5EW(^)t;e7bf)D~h74I_W|jA~J~Csx za^Fb}c`OLTzBD`+u;DL!DN1!r!Gz$g~4Co@b76)T1-zM zWz>J>so^F`rUoy-P2V*p@o%&p3&?!4TOa1pM+>Sm*3aiP_Tlrav3xc!+0K1{BoWW2 zJg^a%L14Fex;O)D;>?-KVO_U;Z`vrCot=%?NwwRc(f5gVgTPzHrxUum5gtXha!bC`vMec_Gx=T6^9gpsXZj_y4m>^Vyv-PU|J}PX?iu$-Rm4Ef zrJETZz?kIxP?oq9<@_JYomi{(_b2&44EVzogkJ3D(2SKn9dAAvi2WyJuP?wwC0 zV+h~YQ;iV_xi2hU_f})S1qv0UJt_(pd+O6oj%^nUD)DaoDX0p6B7iJ8B-4#z#;Q~K zYFG!(cvH+-=F{W#nS$sXYp7qcCM;I_nWjKpeW+KVSScA6kF)dKY1_*+Z?=C zJSBX#>~n4_db8HOV~+cwYwcK;3;sd>bXS6Z{j1{Gd0&EAZdQK|%T9&T$J0u~P|c+N z>IN~BmX}9bD{(q_kt*D;_Lw@IpZAW5C^i6i+%Ipesjb-iJ>S+q;!y%sbi}?FLWko0 z$XyJuF@C19pXRV2)#NO(1|~FkwvfpVA1Ek1emd}uTsjcYXwCB(U<3y8a1B!LJ7;~1 zJNf>gqnV^joWH`zrv)(3{_^m;d$6}mPM(-_h6%n@4*SLJRB=+Jj|dunZJegx0QbEq zXAp*J!Tfo!^b}1KQ?-OaY}8l&@wirj??8eVP4yz`ZjJc=vH%3$>wqP6QiP_}MYr3v zxcnH;&i-wgoa~oKzoWxuPJ6?3(pbLP1}Kmw!!Zq2MuRsJR}oz~a|q zKtL-KZ|nS5Zj1m8+aP$a=?K>_v7iHgf;kw$k{eeazqipF9}%K37_t@8bivU;#%Cfs zXjBwHY0Op~x!U9B-nRe^pwytBex(@vtr@;ay+-3bN3n3{-&-#Mo%TJ#{JLJVdxLtL zp~Idk5s?3hfwWuHP>0)1s21cz{bg^%49DtIa%6-FN%)I@q1xee=htmz$q#C!^LA-A zc^ZJy^XhIJ3BA1E{C>6|fcx!y{PWemu8bI4lJl0PnZt9Q63+@}A-qdLME_s01&2NNx_K;K9!N z*!@Mx!JFnfH-Js9KY+c*J&DRpG!T3@cc}~CqW_%^>5zC{{hsJ{yd%oCRizxEZ1wD(3_Pd; zK&&?w#kQ3ftt=G&c2IDPCSnr2@f@BjnEJM~2agCmO<)|YXc5R?j|5PjWU_cPk{a?i zQGN`H>!#H(I<%E%PyRIRsPTq?@YmN_gf ze3FF`64}rC7wFyb_lG6>;@jtmCe3yU3y`1@?|4U11l3+7FH3?W6Y+#~hQ3`*MPD!& z(`A@|4=WD~N`HWZxy~Z=TTtG}_=l*?-qDo=r!*)Q3j!lT`Bh%-4W0Ao>`rFD;c+b9 zl72+khye~fI3#JmWU%7Vv0O1uy>LdL0i$ zWjR||L=(H*E@E6EgE0PA%0b8L%$gr_`3MoPn*lO=$rSxG+m=%Pj)WG}Yh6IjK>lJQw=@Cc}K?=AYI`u&S5U9b@O8p^53hstfGV|f^F8cC(+qnhOa-1UxkTvk(gPuP404D<7$bIVJoYX}kH?FBJ z<%5wCGL9%0(s2Ph`tS7$>#(IvV44cq@@AEMZP;I{h%kK$`OS;xpEJ+K5)hNbywg5z z!8Gq|FWgaYeuWV|B25xM|2wVG3$t#*c<%zS`vK*Nt%|Ih)!FldxtE|yzo-rC?S^Xm zM(DBYsmJmKAykDmLS~fuJ|O*v=({_K#OWbh@r-y-=YM4td;iHQysI$)r~y0 ztQ->Cz<}WmTfIgy91}lD9W=()efeRdiWJz4OQ5B0E2RDg=`2`p$3VH2#3W}wT>2dwpBCe>tFkHLQ>zse!&mL~?~H9tVNU_Lz%CrSx?`1aN33f+|G zaRY7Z4+`KkN`B0Sq59Zw!gkI9#>>0u-~O)vUV(ywFXTeSALv86jWLaI_8O3*l#OHD zu9O?uyavfL)NmWot2|-&0Ta0zBXEzr6lP9c90%fu(Gr8LzSD#=n&U}a=)7?a zHlu|s$F< zpCT#(;K8`hDM2|ZDIE$?3qT_kC9b1TA9xtKg;W>NFosOU8>&5bSG6&#V(E9yVbn1fc_q~iAWBBlajPu?zry=7@(h(_W3Y7*TcUeiA$i$(8$&Jl7t z$O5s~y$h;HZt(GEcTU>{&m;RoFo}~;gx0BAsvGTk78`DVhB+K58vOUAV!>?b51X&4 z89V|S>w9(e&^*VeUNXUu75dqyy?aQFK9nSdew`S+Y5o5%tm2|Sk8hX5(L`^)I4@Hn z`^%eg>ml0pR?NSGbxdOycJn1ug2uHm_XUev8mL{9Ebt3{*PH3Mr$KYMLjj{37>l{z zZ}4OPl?x|OjB__%Zw{+lUi=Lk1$;a!bA_482P5=1>pv1TY%7%2I~%cYpHvDbN5)Ij z*uvDZYHuQ)!iFu~ikq_pRfKHS>c{&zV-16BdVuZnpw-_jf+m-pNfv8JA!PTpJ^##) zGF{&fvT}XWFd_^l@1gq{aX4F0l@63H=okD)NM|e#Q!S`UPx32ro$PvJ_r_p;HXIv} zNciKr!@*}KsCMzex^H31T>qjaaNBm4mxT1p*ADkZi`Ryj-eT>qr!{8vX0(E3lZa5& zAL*`yb+U?$ZcHJB3Tr#VBISG8dbcC9FRvy0R|5Q}uxa-0(d2;$(o)pFnu5bhE(5;J z!>_xRc>n^bWnW3`7T1zGR#X%|>Od89uO{P?=l4OftZF`j$5$;udTyt^OmBW{WgNt6 zT{H>+jM)F<^ln#b{Pp$NNe~*)-CFsZ%j)It0M5?Y)QFf0|>DOOM-CDBJ*1;2-lTg=F!BrV) z-)DmfX&gW{SnIzNr|@OHTvm%X%GCFkZs(6(%J%c0HK`B8<$}`8imHlr>7;n)SgR>j z%Wo8J8MznMTWG8j`dNF!xUSb_{4u~dJHHsm#KyR3CP++Qd(h=S0D#nA0p}@;$k@oH zsDo4ITsIpB&IcX+3K(=L7l8}nsv27E8p>Wlew&-FD#)&XRW3*^{LvW1en{~xFFu( z7tg!e!DyfpLBd6@0#IzelS6B!XJA@ZjYehP7&!#(+xS4f9c$&4*S90#0mc!yTJ5#9iNw}*mCXED zs8B`ux;;)W?)|l@Yup7=jZ_DrO>FkcUy08a$DVkXaMEn#1(Fg02rJk-3~b#(ur! zz7F9L2$=dPb6I;lihi!yno;zrPDmob2&+4{s0?!Vxw#nr4W(*Et5ZZGg4)tPkp_UL z5)6Vnwtx36d~b3)?i~`~)|;*1n|_jS1TZU^A$T~mH^wizIV=Iq*G7*^y;1Fjdje8j zs^q=_55p0ET3t9f2~bZmP7+sr>Uqg=6juhIF#w#Azlev~esLF4h3q-44Wgh6jQna; zs)S#T@1+7G5t}f3B~vzRO^L!$ZRnIW`arRfrA9OCfI`T;72?V8xBFXU0_V=Nvpv3S>W?^v<1VcU=>d0)xWSUQZhuS5Xb2#T2oaKoYx!q|C~rpg7DA zm4)c<DQ#frJMHRR+0? zAmiTWR%Ql_Pw!GPuF=+TMqnlUCK=-oGneQ$leYtsb%ZEXj^Aln2+;top`mWl#U9vU z)SkQDJ-X25=cJghy0`}x;oneZqclV58{d3b=yJzj5FoE^zEX1%zHN+~A~$F=5~o-Y z7s;oHxU}%M!MauJ##eP8d+=NX>%aYX+ipfsy>6NmNQ*?qq8ar(!xs;j`rOzEeKZ*) zUI^d-A407i-%PLI%y2JS7$&N_gf{81*eOk7u+>db2zy4mF#*r%gLj&Y%4&lcdUw-hlGAodtPsza?to%R0uUUzCm03J=l&x$CViJ1UR=!QrF_=}kGf z$Um?D+CGFnap3|1U%}~xmvG|FkT;PY;tcZ!GHW+0C+UyY8Yy|?;558_NOjQC=}#8n zuU>ZmE{YYwWkDj~7Lbd^A28@Uf-sdR1(?=xK*D{=zSjr|0^ntS49Mic07$_3G5~D7 zVRFwj63HScs}+4n(CF3##dq%9y6`&XFE%7-4Dlv7VT72|0n;C4@oNmtgw)=-F?qGK zf5T^fm8iG)7(co0-7>!n(?iFl5&$8~_|l%K=s-0ny@D+0$#XUmEcNCe-D*TQ{2d7p zSing>VEEwN7y%G;P?d$D{C0EGGNt$3;VBxxPeAGm@eJjR)RR{JVXdt~T@jPZITcfl z=D>9_rnUC5wP~^WqR-FEB*zjcWYSUG5K%XR(!l(gzB~VJYtpRVUc-1GL{eqgP?UdwOv1AVaWDgqx@@Wg5bzEejU%$*X*zn z-uEio?X)MvmvmNXyiQ^W(U|?xkh|-&XToeq0VsvR0emaa3F#3*9Pg`+wUGcJF|v7$ z@`KXYfaS}~;H8;*Qxh4SN63r|I%kCdJeeDc91wnISppm2c4Mow^4+X{K>TORYNCB1 zRmTKd#~(5Gm+vkPrv41U*+@J|Hiuy^KQYd}(VtJ#NoQ?)xn3##k@se6oz1m1Xy z?`xwOoHMkB4T}v4!+4v(U;w#lu^RF1g)u(bK)BDH#_q;WZ%GgzSgHlbrJ47AXHl*R zmg~z_F>GWWJIE&>J*~@jM%2|Td6jQz2p{Wk2bvi~Rrx1%{!=v~%;uPvLJ9W4AWAv6)J8B?Li_(uA{v>r+oI~oWXa)ySS_(W+R=7mi zqnNZAn%7q`W`rH(F7A?mFC_ED%D$v*7Re#&OJ{x+$g{nMD+WefgH!F} zP>2>oM@LaQ2*xS8pVBTk>G*J_r}*2mr>w|0Zd$d>^j);yH(`9#UmS(-DsZqGoNJ4> z@4SFG_k8s)qhgWw3fR*M52a&);!1H15z}5Na$a{FyIWZ~e#HPB;4Nk*FrAtR z$Ny5M0ly2v$5e8csNa{}NQnlxiT35~O}kime>~zJ=Shd#^la=y(PXl_xXkH#+c)); zbg<6<+FI)=!{&DMIF@jUfKX$eiwqe_l{wt`sgIN$4m1 zhMifD#XVgAKEOzCk^vIHA!uTKwXuW<(H2nR10JEKe`EQnqCqos02L5^BT20HLeV8Q z-0#Za^UAAI+$Ph-a=G3ULS{zkd@FB751*k+^ngOn6e4@|bSNF~o3&og%N!LT(iC#j zAWs8039_)6I1C*{y@)`xq#bEiBmn#z37>z{dfO;puhm!@1(D_2hZtjLQjbC05$t62 zWB&M?*@gTqcs0dL$J*H!`m(#}{9fZqL`BU-_BDp5coy{;c}xl^XnB@l7j?G-a-nQx zOZGPyqg5}p(*Vp`izQMl00xk3UDO>elDt{k4y%xGj?Y`n z1G{UzCBbF`x_xE!LAy%az*N{H{3Fzm@7o?F zRB?O{_cq7Z1ZLA75w0I*{CjI=HdAW^o)V^{ljWoJH2!-qRX5cK?6Eint~zCwMmp^Q zQ<|Rg3|uBe?aESP^Qim%eqx^7VsYP3ZShiUetV0>h6dv@V4Nfyo^GfV_^VbMrAbu*aT|coEmetIB6$AXkw0JFk18;*dl5%m^ivIY; zvLun(Dcue{aB`c?57^!gg1Jq0=|+rpZco15xWvsqKSUS)S?7wB*C*qSXyggwHkpO< z6ie_SIiD)~^FMtrbgfN@0CY;%6uUz6J^DQvS~C z(ly%qoF%ysN}kShe{^_|GX>?Q@*bjjyq{M?!oa-y$362h!DJ)65^3+d1@gNMW<@MJX{~&nf%PPAi7u*Pkl%UqT;L{ID!`Y2nzMas zQK@;R&br;-)D`cXp1=i37riX3QxJK%xqc}Q?c2s>_$hy{v1a~NcCd=esyPW3lNn}H zC@iyM^DFzRddqOp|3cQdb(l8temy9W++6eH7YCn7wsx1+zW*TxViu4={S(Wg?oZ)D z6pcui{hz;rPnRw}w#B-1>kWBY_SA#jw5D5o7uc`zEd;D-nX+rHhATW$k*BHJu}C@# zWYL{h`2EMOF-ROg$H^=U`cGA0-@;Yo;%~e@oZHisYarD`A_0)jbNe)y7C|lIwM&f| zx-{5NO)GOMt50^Ii++)Yi|~=F+9buzbMju6;>{ruo=Jv0B5`3;N>p|~om9cs$QY7) z^iB*z!CU&$KsKEg&H4BKD*#x1!uXIoX%b(BnOK3=C5c8i_!&0eflMN$h7jo7T)6~$dNxp? zJns(4FvO=Wn>VXZ^lDW_IlXU?a1>01UM1fd;+T(*)@9F;k2Tz<^&y7pU(8ioCE&?5 z%Mx*%E^7GZvrxdu*$k21X{F|jSmO%yFn)0|JbUHW2QZglFMl@rP0(t0yQTK+voV;A z`H5{5;YW+wWkBFNe0}KY>A=WUJpl@1Baje2Y-_?|d5ghsS^FF_nqB61Hs5%ZrT*G* z7S%8y1tygE$>Rerf?bweLe=wQ-xmw{RR^zF7i&L^{PD0Sx#O{Q8;$|xMot7BwHl2E z;1X^Zi%K+`y;z8q#O_#pd)*HYD`F6Pl*h)fp_8&GmgrtojSCZY$6P7E#uz4TTfIB8 zqq6Nf%~6)6D-#)bmo~v(`^&I)HnxMJ+Cn473RKSRa%wqyjP=d|! z)ZgbqpulLc3t7h6mSb4@^}wA&8x{wepJTM93_ap*?4q#4T*A_b9oFKD45`2?hh0Q) zgD}u;Ue=}J^$tlRiF`P;*5>u`x283P4BZ>Fx?XW}1;i>go!Ibv>f|XS8k4Boj}=Jg z7q6`aJ0r`8n>JTQ8#w1r(3U#nms!dTjZl+KTdsr1!=#X0o$ zO^dwR{sh-YYIDEBm=zR@H|?Y1AY8yaO5SGfvUu_b6F?({&=}04cvTV>*Kfg?P~*yQ zq&_!yKAzvUWR}@*L4l7u6?an_)WbeuV`pZtfV^W^d~!B;O)-9m}+kkuz9(y<9Yk) zr@qr6GL{^p_rWqs)FQZzWb1UY$^yrngdrAceyN~I0IvDawY5zv@XP>J>!rNQUZ zuD3vPd!j_p^sMSDu`t#Y`0Ow#FH4qyB4+AS;}PmsNps8LQ6C>zKoNqyac;C%uk2CW2-!02{C{W)Lx5wl|(i z4i&{C@uM!o*zIQrP27gOn3+Q=EU#>Vi&}TX9@Wy$du-{^WEh9MljRXmK6};lso%b^ z-vPcMvIv?A(!L{ZkQ>e^Infy{di9D0U3`JJ@Y9DS(a!9`at*;(#Wk6Pnw$@uF;LAjgZcHb zd_A5VUW2_pPbOFR6$hyVsW6a6G9c=Zl!lT?9YH{`A?O4Njy)(zsBmj=SX>bxvB9K& zb7=?0KmTg*hHKOeZ%n~pUuSI*{>FkS`^sIV+mB23Sag1SsOrq~8+F2)4&d_A5dGC` zxN_7kn5?bP@y4j?h>_mWM_Q7z?w3l`J3SM6C@`Z&Xo&lv)Pq?$PdhHdZ>~#p-hN0? zT{$fLb7A_U)T@<;+#N=5)@u5>)v(oiuUglLVHTPOi@_$6s2J?j2prh@;{-cB&F)XI zpW2f9C!pSLjku^N3JpE!BH*Z77~iS;5J`&aK#*6+WVGaL%}F#E|K({HwL2eN6vpGx zQ0Gqr3_lX{oqw|?M*NKWbvupcN!#OVAX*x9`o;6xY4HWsg~aq_>$R{y>6B@Rp^jbT z%HJtuG9`*XtZsyouD7~8$*+$yzst7DaJ4G|wX^g|B`}0Pl5q{$P5YuJF5jm-^{!) zbQnlj2RvH`glDs_LKiu%t97RR1 zSR1*hZx!@7n$FDCc9Gf>{;M9$;(}snjWc zloRFpz=^yxOt6Q4)tMi4;KX8O&KG4A%!_SQMFQBVEf$8y=k0&A?s8K(IT~(!CtLhD zQP$^E_}=Oa;MDeb62ABv566P5N&s2FE+rD6k}e>W>_!P;6`p~b&vq;O`|lV92>lYm z4*mr0R#{<`9e#he>UgMX;p*zkmxBMIcTyW4ra1HF-VdFXkIf}eWz>gX`Qn$_YX0p;~>xp9L) z#9QSk-!+-p`JU}zoDs9>{sM0$SjJIrR<`^%zh;BgpWB|q_e%)4uUl`spUBS0WMCt$ zZW@)Q2&IW6LF@+#j(%RDTb=m^mot~x)>hu8yl-1)`1M|5%wC*iA^>__cH7%90fBeb z^8G*6(~*zxMDPAbgkLX$RQWW3Pgp{J234&#kF@u`|4csz%3a7$Tsj=>v-4|=aja50HPZh>{~Aya++0Tanz`ZYI7qDdsz8T(R)GS?1j0CQ z{3Eg==uJT%K)G`^Ne56atJN^~6(XY@VGIDMsh4lI6O7c5+I9@67a=EiUp&r5UGzGwy|1^^%22;Wi297e}l5iTd@dMXJuebG|r@+Aq828mzoKHvM!~e z_l7s3o~MrDWiW4es!6rk)J~By#?y>@z!za0O>do+^~PQ;Mb=cP@9fL^83ZtPFt5j8 zhRG#;eI4nQIBklG5d+1@SR zl)hDI5gl1w*nUUOw=QrKjmr}xE0uUDkYP;!T^rb_l+y}moS>aWIc{jJjR12THlorf zKL6c4tYbjdRngYOIR|r4p#dZPhtk|8k6b=wGX6cl19OY_~-Z%Ak3=AyP$T9A3!@7A*00 zH}8?(M+T1V+!MdJw5Bwcau{PV6Uf~>w9R~DaoYFDbB<-vXK+jGg?IPJ*p% z=gar+L;i5#LklsO>!0#}pa;K|*P7cpF}uBXp*wDDy{I*PSWRb7j1L*ll2DT6%OXbm z8tNVK-qrh<79bW^g0b4J@^ZV4FM}nG1uL0hhre_O*vDwfq z^DqwN0!DKcciRjynoKx6Q0wegpv;vk_OQKecv~-*yhIV&>ZSN)cEO3EQ}JJrW@lad zJs9$Tkwrx8#B0-uc9V+CffIoE>bX7QCR0d!$T_#-yp<4@eB0vK!o(p^mM0a9l=%ki z>RtK*Bm`d$ zT&V?@ugF-wUO%rG)E4Iar;$Jn{ZAv&l{v;Yc$yyV^|>RN20+!}53j=;CZgR>ok*KZ zeD=*8k19H2s_Gv9B=as zL}Vi|dZYY~K^JQISbQI!C*FKQObU;0Nvf$DHW540?_dLnzu}w_F=4hoPjp7ls?{;~?;F zVOnQf3;yxoAuKjA>OA_aU#&lN+Vkmpf%li*?^4# zpbQMy>KG~+J$9QN^$xRZ%mF+uB8B|&+1`5=<{OL~kSnr^ zk=kYKgNO~wGf@tk% zOCf#FIU&uG9eq2g2{GHuXGMD!{y+@RebOv?8EnmTLu+VBBN6F+Fkvqlf;N?9Yk}8f zW^&ZqI*RI z9>{AwG@|`jwQx+b>r!59iVVVPQ3m#F3l1L9=q76B7)KLT+Q87;hjAOc~y#yE6(M;Sh3lfrq?)%0C3wFO*J@5rpxgWeU zflD{&IWdKYbq176*GaZYAGDPp$`#qhz32Eoga9N=US<(*rpW!a=f!R+PmfzheCtw6xd0mJ9}spydClq4Ns zhAHkLX@=mamuQ6pZ}?ta=P1M zSEZQ<6LHkb>-8L~_phx%x!uQQ)HiGbc0w$a+3py*z0P7857eK&#kLFv zGD(%D&n9gej^-_~#py@9&slBwr@@a?QqyU1v3=1Ymit#&Zo`mRM-(0_&XMTa-w!-e zU!+8hJ4-#YA7w*+1(NFGZaB*w{wkJ}T((Xt`>P%>(r}I-a>(EIP)+*}9|+#ieHSlO z*M3_1B@ZB2%0A84&we;Ww=@Ij0T{Z{v9~TZTpeqqJJo!qZFWw|xjxf`K7-QAq+SZjR9GkBd)6neY${t5GE_)OAu21o&L*W#=Sf=gt|R`=-}&mSC}#9{;n`@ri0V$ z%>M^3&r84lLxBz5EA=ptCWI7!0&A{11_7gvY&c@7@+1cEk+%4;mJkl1cibXL7iM4H zh;$^GiqeDP!*zj2PTS<>@x;1UfOSUDgpUI4aBMecFaWZ~h)>hV#s}*V<&;d*TX-pG z3X}U<|Cx3Ky@M{qUf2?(pr`0{ykd~31~y`5>?~d|(9mKSMjm*`Q06gaw_;L~pVbQ1 z9zuXE?(5ZyzEhlj!*P_#j7i6RD8K-zy4h!X`PU3hCklO**Nz}%%e;12jpI*hb?01xF=id=?>9c_-0IKqDm@K+^=6=+Iq(_kIqdz|o#q|q4Zd%Mr;rtGi~;|? z%W)=orZCzrN@BiIK+*i3YQ1vUuE*jkc%~INdXTkIG*@!_$WtYy5$aF2{MgJY#l&q@ zmJ~o{1nd?va-FO}Y(2tDBJtqW9p}ej(Y?LqM6~39)#aH5llXwqFlVhXPQ=$WV!+Etp;zZqOHn4o^xAG~ACgRDJqdad9|QTO8^34M zK=N)&iXQe`%8%VCe5$tO@ZgHgzccG%MjpnU(+tkgTg=@wHLdO3e@ocaye2H0SagPdmO_dNDW_ z=J}XlV`nQmxAD%jntr3$J9phQx)!s5@?qe>!Mj4x?foxgC20z%`7a-hmcILWz~r)I zdFUjQBGbQg$j2NIb2P?i`mra?>I_;AVmk2)<@VN~(U-Ydf;0*~lL4MXLh*~%6$`d2 z#>e_&)Vp^+1y$8-+w6#^AfoO}LMg@g815|5K?D@8NMWCBv;Rm3JeuAO-MWZAv#c)S zjWR>kDxc2_1Ki{{{+n+cl+HX0E=0@*>1erQ=lP17oxJ1{P$c{Ph+!GQsS|06}A>>KJPA$;Qh<_OTHBUSG958=5N38dThK=+>CR z{iU7~k|_qZqE`*i$0Y8AF6&ZK(p|BE@T;EhwI$eO-^k8wDw;@7_3UG80s*e36MlYO z9(I#F7RyrlHmq+jL+)k%pAS<9u|JaZZyFp26<;rf82Hpc^(Xrp;^rrWiO**}x4Rcc z)Km2unYA!VIuT@T`ZaJ4*e@+{BPmR4t!BV(Fz?j6#Ts_i?^&<7+y&5vS_Yt=P^|(@ zM}R@zUh0yHO49;idPAARJhnM;49$QOC1VPT{_|yB`x;aj(QZnNDg4RHx+%SINW_$P z-tc|u8z#;7`tG{^M15$xHc#tPSLj`v9)Kd&hB4*eGzsqrIxCD(SQn)0RHi}vK70S4 zpv@Viyyp#-|LqS(#q?Z(7R1k{Xg?t$QyJ|Jf4}LgelyQ_suhdI2N=;@XVCTnac031 zLfw7PxqLIENIv*xdQB7ALIr>g36hBarwtlrj7n!FaZ5V4|Ruh|%>W{nRkI%IE)=n@SGY!Ji|>#J9KyM!bF z>0@HDmI_`4*VQJWO~u}=3cY3R$rs;k{@nxvPHf}5NP$2L7y{t1T?}Fq!;hf*|42#~ zh0wfA9V`q)M&Z8A(%qzaiA4IU$3H7i{RSU_1;T*A4W#RsHi3gI@tjMR$n2<;=_lyGnk^|h$I^bMQ9)~v)tj@7((I83ypvJ79K zO^gIUFDmj*IFiG3U9OQ?#d*V>F9!}x3lZN)(R3(reoJ-;Ki}FL_F@tf$_q2SM zTj_v>BbFXSGC(8gvq;Y;n+djkGD*EZ<})R)1B^p?uB{A&wh)@%-?S~MpO9E(BLb$Y z{hNmH9tG3aSzwIVTRZ-fpD4DS+kAykSU#bvMR7`~B~_Djknclhy5u3EM&!CKOpLoe z#ogR@T}>v41VG~Vkve;cM(Gnr&Q42IMax=%B2x37&;w14qJgBwm?I`x*b79L;s}yQ zCR7AvT;ji0{0-Qwraj-#{oW^UWvrWUx`*-3RvU#GwFs6|Um9q5Z9XrbHR8xVoGwva za_kQ-*ij>tTQ+J630J_xYspq5kjL7BjMk}Owl{D6;K%_XXXpmbEdDp5Kqt+yJoculomUY6 zccQ_h|3ltec(oO5Z=fLr2=4A~#odaxxI4w&-Cc^iJG8jFwODa2S}0PaXmNMQ3-|u+ zdw;}ZR#wg;Cpl+k_WribeCN*A-2b$#yXIMhIiX1rA9BLZH2xcoisUo0shHqc&hF|bQMEnvD&hdHwFPsT=w zp zEM?7c$V3A`<4$%7EjrfB@7-->!{Jq9J9uBKP4&f3F@tx_sbfp6|M|}T zYXFBJ)UFed-cF17l%{<5^dAn`(BwkSw%_(+Md>MzO?dBR(*Is()$tp%9%_;{P7odO zH?d--72#1C_5N?;RjD{2NuUCrZFvjQ!qsM1nI~pq+X=yPv*%qztQx>U(`!uv!x1~AIstw%mSP!BE z(gh%?xB>5hIvSOI$h>Lfb8BD_=={XvaKp?|y4vwDrZsvFcouXQ?oWJwKPZhEx$5e* zmOj<*6j_k8qUg_~0LZ$hboK^N$W*6SwVMy;pn{mC^;5{mK;OeCXJCK~FF3VX9`kCk z0;545DkiLd^lQJ(eHcJqIr^1G^O8I(!Sv%P{=@Cb2g*;n%j3E`5b=DG!82$?L-qTQ zu-u%h07~n}VB}Jo@Mg|J4>7uLJ`#|+Az+{0PKB@!W{|O!WFN+FxDao)V1pjtQ|di} z9tcYt>|(pk#Z#)Lo?|6>Vm)X4pz}#LEchWB}>H$23 zeN}qtZ-x^Any}_V%Gf&4X!$r=M}=51I$$}3!OsnBBb@m{c?>~i{nMf0NxVCen|srn z^6yMUwvu%H|RrTu!27!OYZKu9SnK}qJ5!8ucpP^R>OS`ETFkw=ndi%jEP>Q z&t$;H6Ppy14A$`K!MM;45a5H(_gl&H+mI!a&`dWKacAnVPA8RCt(H*nVccL)@SXf} zD2<^2oEjXQhJP0#0SGJ|He4UwQ3T5v*XII~`ji@(2!Rp7lu53j#WeDrZn@5jts2uD z*7Xi^VhBxxq#{7ZxV%NXl5l~klOpINJdf$;gowII<36QTJpx zLg1eo;K6Z1agW?l#-}nG{2@IIkerpFUmUU9LTl6h*;^&k&nz{{NkY4WTqZ?JtDDP} zC{TLRC^A<`YWGnCaZw5zd1x8BTNM#J%65uW8@9Bq3bc9H!i8h}w73$1FjIG7y>#E! z4+;*yb2wQ!&fjc}Z?B$uZ2ar~6OQl>p$R@N9soF*h<@1TTDg%)bGElO=^shgbZP;c zbI+Bh!9+oSQN=v7#yX^b_r&$39x40T_T;f91)wX-33UT#I=wX7kxwNC47}{1>?1+d z-607g7@ZxozKxsxdyKlUWmtwU0o4f0v9d<~>vssZrIhTfVC%l^(W{?b7evpXr}Wpo z$Dvl2e^oAyku0zZvS(WLg&X$V&1Vz1=lFGBNMZB8XQWh}=_y(pJ~bXm-+uYe>_qhS zVX!0j^%b3*#dp(g=6tXGzW#iBWE4b;&!FfmhzB}D3-9Q~2beM%-&G&0Lnodcgc1Tn z2@R)$gIiaL-X@Tp5FCyRN8dO$El0pD|9R3kM>BQoph=5a5s+1J+BW>3lV-d?bz>|K zT3Lu^AXtaAPaYqzfqqOp>y*xvT$k*6_$v;9SRCB^FMHJtMf4NOVe?nR}za6 zWUjMiY1U-y_?uql>Tn2Q9`A8cx&W;3{*L)?FM{7>pbe>rD-X-#*N4)MuRI}V;WwK> z_}jfDWip~Q92ot5#$Rl@;Qv1w>GLk^wfPE4s= zxN9kuj?>{<>!4u7lY3P@A-W)a9$K{C&Xd4S9T#Z_u(K`eK3vGlAwqM(_}(DsX#lZ9 z-VhT{&+xNvclq}ef-xvmhs6nw7BK)9aL;&HdzjaW@P&$X5B<437MbPR)TTRwZf z^n7_SZM>h4s4kp<|AGFCqzzd~(IzG`v(2vHqTe5X2~Dd|*x^?pthK#1K}pC(J73h} zZS%{?ozmq|Nju?IOKKxcSY~3-)+Y-r9S+ckV5rKyrPmu&@@Y-=VCuz3DzIDM>B%#YvD`eme0k+PYjfnq)0H@%zwCjD?Xc)d0jP1R}wr7aeFc50G zhQAWmV*W?*P*6g6c8|R4)olC^Vs3^%E3;yPtSwNWLTHR4=wgMPBVL<=eCb8gIAO6Q zOnvod;iuhk_kWZ-pAsYdvE5}WHHdwxm#a_BO*2TXvb3x-Rdsd-_jJPiuAm>Vj(gQB zN?6;b^&hM;2D^trC+AO5dWxOhlI7U~)9J|@=4$K;F;=!|&c9(8jHGeYTMkdEA@ZgI z3cTA+bw;1&uhMD;KAxf-&EAr{**Id5%r*9!TF!+~2kuCj?qU)>SO=vKo8kELe_=@zjal3>ym=tT#m^bo{1S@fj)ga&~xgmAlfhUE6^9JZ-|| zE&tk8iWX>@`G07p008a*3AUB!2xbgCw5`9+!KUK$0eyx$dTVX|GwS~vhrf2j%f6Ku zJFI*I=WSSih3bZ9%`2fxykT-Kg?hhGLtd#dw1AnNWs{<`o#^#R5A~4g1|d4QH|Jvx z8@BGFgV1ve`)=TZ`__NxqbDJ`zB_BdBUOBLX5&JEvxS>3zNb8}$BO?K{;8 zSO>#+#c$@&>22iR(E9pK$3%iHMgLz=o=XTG0+e(k_*L}YUb9AidG^!8ulNH@JTR{- zB6DsjBzTKyr8SD)={4*%Ww4pYJ$h`EWx*L6c@LjYFjQrhi1p5dLu6QL5=@wDWuX_D zVFa$QNim1v;bLDS+W^Ub8JHE%7p{2x`_RYT>wag%){5TqG|ab`+d4e^h8*9{ojg*T zyVpho&7gN_7SgA8(BGaLf4o|UQTNWNvlJQy>HSPI8t|&~Z>Wl> zeWR_h8>`QL8g5LjRg4fT2ajSgz>29ZqRz`?p)Qo|2)Yzk_f}tVRoBb?m5dHdU@oHr;v(vJw zM?`JR0pP&G_FiB2EM5NU_nq(?7Z$LG3d~h2`3^;dhjeBKwWS~%>T+~s70hx$5?P;2FxblghG-UAl4=jMdib*eoj zewMr0^MhtDsURcm^)m?^IR6N<s|?u*ODX*T%Q$8?I+Cz@15h!D@?^KZU2B5SY$0WjufS>s>>VRiS>2yW_F^!3CE zLXHifvqx#+TY`3%o9>=RBG?qj)EjiXE5TX_G5d@_o*YqBROGCs1v5b)@WX5%=RF$K z5yCgen|(~bIU7)LJ0XYcJ<`?!8aT(mS;)pIenFXSY~TORWwU$BKfvq{-EBB$Z?btT zud!yXe1ytk5*etjGs5p}qXhBP@yQBQ_kX^q``SA3kb>V*MWDs&^_FzlXC#*UpP4;$ zul4Z~=4cIXSq{bz6ks_I{7->*heNrHj-NHVr{CgdE3uJ(o^RQij%OAJqOLbjvJ==M z*!Si&mB7M~UnGthUGb~LO>0q;-n8kVy%kU+KD#3oszVp;ESuir*AaFdbDkpD_7gMO zM`$i%WT}8cry3Z&3=ir$>{a@^`M!whG~bE!g?_&U48ISI3HC(wr_FSDEz}dd78QP3 zFofcnSPCY^kXJL=b0YMS$wQF59bdga(6B=n@F6n5w&2r$sd`EYTi{^BS*yMFX~cU{ zaw2r3Q9~M~DQXygOt-RwSKF`tSeqZ6oQ<^IEHDy;jv#Dr#8kItGCVQ2f398!lwDI$q;85&a*q z{r~=+SpqF01U|iNfYuwRh(hbk*{FMuTgj` z@SNI2mbn>~Az*k@cwLs9%;KVL_5_U?PV0FI>shmTyV)`i2ZI*W(68XH1`x4&#ZmV< zGizzGjy&=Wk^MC{(co1qcaaTD08)+(O|ePf`|dt3kuXW}wR2OIBspJ(C~&2rKKKh zAM8rQHUGuC~6U(hLz$@&YP~8%JVViF(+BQnU0()66bxHtl9hb1mFbN$+M03ysdvJ z*&opOnH3e+U|4xWzC@Po2>{l1em#`35=a=h&TVlLZtkYpE5W`jiA|d_a$&Xv?I6MC zD`(sYBDp)jw#HZlkm+ijWaW^DR9X5hWqnTdqi97NtPk!E=&PQ~m4|pQ7~EBCNv`HZ zBpNxwdHuqtxG;du5x<3wtfitNfC+jG2Eatx06@?q80d=#`hren5%7Qi0RoC(|G(q6 z4;k1qdjSA3z+3p_4RhSR>}0T%f7W@MjgCGxpt>L?*YuU&QbTFy1M7+h<|ffU;XxQy zV_05iA#u1jIx<>(%DR^fkJ=Ysr@}bu1i<|Nziqaz zrxsJPW?kAM+xw(4XpzBA&$TtOC1|1pFMla8#QHb@j`ZB&ec$)~c%RPQ+E-f6p(=aQ z^Ml=CfCWy&()auR8-*%&+Xfv&=kEWq|5eGdJ{w;=DJ#(ae8(<4u{xH`4_}lzL$cVz z$}&yF_7q5UYDBik_{~R}1lSX87k)L&C9wU`zb1gZpf~Hh3;o5r_THW0=eu(pyi)@( z%m^Sn$+yVf5eReltue|lx3SAGke-q zDu+(f>UT3frS$>o%pxO?34+mJ-IKl%E1oIIwL-Kr`j!Ygth!o2^_h8xt!PPW%4k5< zh+r$j%fEZ|+=J=fHMgn{bC(}uuztr}CjI&S4IO&t?&QPC-C>$1BeoSt)4zyI7NQ@Q zWg$1lJ5X}XC~na0B(T70(E{~8osl7HuGI`3Ch}L}7@i7Nx<70Fr?vW1d`^BF)7M^n zFplAFPFQeQA4hOs20?IQRS!h9e^84oZK__h^L6aK)ekcO4BrqvhBHwxeG$PaUuBE# z_4c0R^xb~pv=2nD|3^@<+0w*D z@8kCouYA!y7u~?AdCV|M!(%mv$!g32w$|R30N?bf4W9q#01PtB5iCt}QBYeI_e4KF zwrJP=9)FG`LeVx1j1`cd?C99?A5`o&DV!-lJIKK{=Qs8rTwn@(HrX5@YoEB<(S(-+ z1T-+5QTkL^%5&qOqaGY4R*i8!&xzSX_xT)-3RYPCQ9%SM)0*sPn-2*v1bxzW+~ykI zm9hGrYPEUVXyoNT?Lh2z-^2PEBZ5bJozf0hr?Cme24@=KM>ul2%m5M2jj6G#S7TW| zLZ|e5+}-6a5J`S*KHC4@KSIdu+dSNE;{B_}YFr6D^AW`}3erk`EoO}jD#@Hc{VCoD zZAEMt&Z>X_7(XwkL+$R4l-D@<{Cx$$AGp>dsA&JDz{uQmzC?)K5WQP3J;N5?nHxf1 zMLNq(Gy~ki40L;7I71u%I>z}(R~DpOs_S+mTj#{jDKBW?0T$&V!kI3Z((n`Ce}^;# z51RaJ-V(xk(qtQbcIC~u@2q2M`5h?)U7-!VQyATf4+CrRPeS@t*)5-ZRsRT_K|Vkq z>j_mturLA)Az+7(=5OLv8M`lo;{vkZe@efwfOAaYVzc2KN!xwDPWqWAJ0 zwhN>Qf8o_r^EJZ4;g2v0=n)7L#6ed7GToTt<;7|6ASQX=#Wvu~cR~arft}%*xoz8q zLKRNGpX%|GMK{$;xvvn=i|-9Mw?c#Yn4l(84Solk?!DkAH@9XOAau6Q1I|xBpY6Tj zsVx(-%eFECqDWSn+dAoasmFj9#sM2BO$z02JxF|83h%~?{q*!taIhKR8_X}V+w3vA z_1msXA;fJ(`AZ}B@ZQKiuXfk}j3j|E6mg7$@e{xJ^$5_LKvrX(@XJq2USW)s`27Ei z%?096Bo%>e{hs_End%dh5E}~Znh3fW&!r=~TO`MMv16q&rdYc8EG`7U376Et_NE4J ztw5r$P{Xz8uEg=vl2>73nO8Kp19I(k=cqyn9e!~G0Y1n7yM$Pu{6y~oCy%sklK`C^ zHLH-Z^5*(WZp$QK)1yG19>2Qh-#wl4Vd);;^Hi);+5X(RY3t#B`t^QSiYGt%pFOet zpa1R80TRfbvrU^9-`0g|yNZShMC)ic@SHHeSkL#UubUAnl1;xcCsYYbDIQ<;2v zm#5+8>#`0(726B+{Lj!IFoEP+IoM7gu=|ItJg5;ZyY0S?!LO_xlIEHM3RDFqh&7@q z^?Fp*VbvM*`|s-~R7gQO^mr(;j366>E37C>ORo} z|Ic;yGAx%HN9`F4R0ub_J#PQGNoQybmB71hE3EZWxp{FGiAeWkV^Do{sW8c9$ih}? zPp;7PT0_ONp6P6uCedM|?yHU>cBb*uaK(M0z0c@Q!JlYk$P%9mSN^f|sCX61ub041 zIkn!NeYL6d(QY@6#AT#)?D2;MFPDN>0(6}FW+aT!Kl)iJNRRBjbDYwu(yj~qR?BFX z7^mTPH%DA-$u}!pr%*y?7kCm!HyaVHKY#aRk&xaaQ~|Wp<0$1D0!Z0P>l4H+aQy4o z?@pbf>G^eR@po(SW$2_Z*N#rDiX{r7(BDOtNdNi?S){yn(T9H0GGyoHTW9$fF#zr3 z%O@UNi@h<~gDf0C(Zrle+`MeASD{vgtHHHYR0#jp;ulLmO?@c-Q-NkI_d`cai2kJl zVybuvK``SLzr$bb`Uk^fgq5Pyxit3)lzEzxH5)V_8gK+3cpoyv$}BJ=KYaCGeu$|T zTmrH( zT_eh%f*`FJ-psq+WjK%hH*XW_%;2>vRcuFVY5e?1C}RzDXiHR^+S%^e$PqNEir>c3 z+HYu4Y*z8co&C6>~FyPd3AL-?=W!F=uu#I~+|pm8TuT*{U^s_8TX7=n}xZHv50 z;H_5>RfM$n)3~V%YDM(QFQ2dr9>bSRfK$kPd{cmqo60x0WFb;ny%!1d9Zkw>;&!x@ z0jk(L%f+gC`#}31cD~$09r~ekeZlO@^%!kzHwoi7gF|1?3_$C^=d1@pn50gf=xAK@R(AnRtKW{&+sO5HAO2Qh$GAbl)_)-IjXK z|Ibylp!0n_Yt+iaEC=93?4;8!TzKy#=b_Q?=qqf#n-DG^q+-J9{nF4Ea*x2@d_RNC zjdz@<-i#eCSO*x?;is(-p!>%tHQ=&H=r1{dc`yjLsi%N7ZvXDW?qv=Kmc2ke?kL5p zMR=nQ**Aal+{ggjdQ)r$SOKoO;V+j6zbEb#zoOSvzM4eG>kT?B!dB z#V>PXd};ibi+Xk5h1s{dk`kL%)(fq45_q(G$jpYmFy3mvyE+B!M+sWU{=ZWx-cuK>mP z4SI+g=c#paywq=SPdA+!EQ%4xYr=v@G$8%>L8wmBNSVpzYYCLXu^VVQ3FrG(h691# z7G#k;B@o}<$c7qQi6K{thohkis(lj!-j@LWcQ#-}wV?ZV)4sKZoAlnUvBevu|F&8J z|Gs~w=l@x%R@vO|LOkWJ!SmYn_q2>hynN9K-k9|L2nT=^-umTKX6c~2@Z2^s3#n^* zs&p59oyW;O+cu<1?F`JEk4Z!jl@%0g{<-H}$OF=qWza$@3Ey$@HqyH%W<{tfH!Uv97lD#EEUuuUsi6 zH60B&?sh-pdT0^gO6EmWHRXu)G~8nNr{B)WLu&UO_bL)Ws7z%xlDfrxj;q;BHdS$W za_CeWA;Z^RhwN+2FpRV6K9=9 z-f=eMg@xnR0rAUBi<4yuq5M^?61tNb69#v$KxIbf07$L(8YF}6R~mM!R`$YWEP`au zojSjxmph7>leTc_RG=#Qa|n85;Ocmel&zQc7=!Cx30aKx&7ZW0tiu!bL}xxlG$~{? zq+WC<4%K;;g)D8@PIjLugou6LO=3)$#3}eAH!v7LC%pyNM?a-_r41!R^WMuE< z5C%m(z_2QKunml+s&!1YX$fts>!k&I*_gA$(=;{Q1&dv`@;ft;rUFTLoqP(@s>Jh( z(TJntkAms3xs~<-?l2*1nDZAMI>}#;7`GhkNWK-^m^fICVCCG+BmRh@#fF-++7a=) znO!-!m`J_zqxA`>xV+Sr`8Z^l!k4jP*5`b;i4P*Mkew z6z28sw=??js4Z|(mIa+?qAbVz1DWCOPp?{Xj0%X;>~9Egd6f#H&QeRWZ|O}v^5ncG3S#Xm+!q4=1(k#`>CpN(jgwvFNC1pKozES>HWu z+EZV6=2v;K^C|sh+slD)%j&k{V}h<6^&@8#elp%(X@!+!?%-$Pa*xJMwA^4)izMzZ6SJVEh_t)DzTlh}m1fQ5%@uy=}_+%-Y8q+j} zsPO9Z6pmzc2dDTyvrTQ2%6?wetmf^$ZD0teLsZ?+uAgw*L-KvdrkiOvo+En=8(K3Fi731hFbh^ z$9dBPL9uI2wCc13ynd{+)+J2Lr#r<{>?0}GZ%GI^Av!)?mYRB=KPkU*+-_@I?cgu3 z8MjH0dwkER-Z!TDJ7$1*60Pb2wbn~+_y~ccl4ivw2d|8fAQAJApPHSZKqz_e9%xs? z?!$}IH1^DZ-SKC|=f*ZK7Sp90v>V#h3Anm6P+Zb#9>lj`@S<#;e9}8b@o9rs- zlhrYlvYIS`d}ZBX$AETQUiWvk=~LUaDcrKk+86)6d!C)vxnY3cf)j=lQ0Zg(_{Lss zo3*1@fx9zj3Q^l%UCe0Yd09*bc|BUj2TS*6x_>;5i?Nzl=g)thpmjOY4rP5vG>)=hrih!{oh02YdnpBJkU`vVKJmNR?;!T~v z0iTeO!Njw%YfH=RrrS}#oW9*uq>yIVo}=wc!_$UAeOBb94WNgXU}-aLQE=Uh_o}aj zGXr;)Tsfz6Hr$xbEz)qgo!wiP15xM_rf(lsh3ux_$?&sqy6Te&WwrmBIsJKnO7dH3rR6eq*JONdCpa?*}FCe!T9+czBkYlRJ|BSEXIL z(!zAfiKXe+_fGEcPg+TMzNbyAc^i^ubwphpLfd8DDT}YUeszkmA2m`PFgk1|+Ybx0 zZfVs-44xgWeB%eycb!w z>g`KtQ;6?V7iX29qF*{y&!@Uk1)jI!$^+mp+PlQD8^H9A;(g?##{;sJ4?c#@w!-+OHcfQAzGdQpfnv->VbL-R3Oxamo z6Q-&g_&Yl#dpTi%1bL3D%TGmtqO|?UFksRWf6AtgD2O-RIqaycpmdckc1rhcku~Y= zX)XQ`0Q|jemKPaUpchVUE`p{dO=^R&GS(=6TKD@!XY_CEr$$@h7MI9d))41JdDx;Y zL^2EJAFcE15>m!cPCVZ8ypkaM%P&&iIzi!3HaPxDxX{*KWuvqYJq#J=7d@)5UDg0P z=?045an1RbCiIPvG>siDKTtE}{QS>&chqK6+hbYcMoR4AUmxMVi(U-&-c*>5Q@tL! z+7D)yBC>a}V+|(=z;XTh8y%OOks)`#ke5%>FEQDuKw@*Ezlk4;B8s%hCuE63n8a(h zT;)>)j{QpYj$O55k75S#O-7DOJ)iT4Ux%2T`b+SHT-=sMoW&?+05moEyfXmM;i|J| zzY|@xLD)48zFKOZQwHvPk05OF;3`>TdZ%X0R0EC3>jv6*;Go&}G;S-p>P5;rPBQE{ z$3JLmd(H));5pH0__+bfJ&q5kgVZePq6kC)Fyc!3Hw^5&FPQm7*vi}h#e}@hgs2MK zi4!|OFlpS+ijts7wC-5|xH5Xi$xc*6<9;u!Hf9+UR!oO_C8RBPuiyW7W?w`Uyax11 zg!}&Q9ZLfjuYFJv=tw5XvkcGe#Cd{^00EuQ4l=%V(EZS|u5c>^5EWHIOL6!~4=Wyn zD2g1{S>zpV)wY0_QVL75g$3p@OQqjM-bPX(ph_o+m1#*!g$;x*0*vz4T?r=TfH#Zb zG|X&CpNqdr85OUJn@wnroq#DAPKbq)5uhW;_E5LP`taE1O~C!`b-|!ULGy4{2Z=4Y zkVSqrVixmvH0Bp2+W|bR+1ye~O;)w@SoM>S;8d|^R4#kaa;Q>AfT*Gu-$+Mo>9K;ux}a(#d9P^g_vx_BaxX=?PPJy zD1DhSQ3izbYj4mY~isCbk*Zc)XXKn*|q~#65W%Y~zct-P+Zn9%KK`(_HS^8av=J>)9S}yVoxX~D2HvflCs5~SO&RbH@ElZ#a_(b~b_69O zD}ExsRfRZ4dVx%0CVaRof6bdXMHWs!Dn;qUGhJg@4P9|vTI0j`)3n|+p*0=+va4B= z60{)9&n&rp9VV2f(4ktnU%z<_c;y2J>Y~Vs&%M@XhQ4@ezf#NF(H^CS=9AJba3U$P z+fCn1TMdESf1f}bLyF!;IW4+E2}C%%V{_! zsu?zAF#PC8D*+G?aJ^|}TKQSe{@<83sqgks+Lj8Y!QoFq53h>V1g~v^;DoU2d-tdu zEE`l7F$T@0RLfw6s4LSH`{-Rd)rm3-5}XmkjN;r!DS3>7B_wXiOWZl*Y>TbIpC<*u`az1@&JklUDVJhgcoih)1TX5levC}!h$L^ z9p&$3it7YfBfqf)qu`C50FjogRstPlI-k+fx_I4A=EeEE>4O2-XkE`22mTmWpPMy0}-mJlvUuYfY?n!&;lE z=%|foFpkK)6{12vpP((as-3gH=R-ne`9D;K+>q!fU;(Vr;S22QsBv8D5mc zo}^o2Dhhb|UmFjV6n^U`JR56sV7evX3<+`}=Iw;c8WIVj6NpB_cP+r75%&)W&;Kba5 zGAwBZ$CA@Xr+wnjXh~%f_RCVVsL-Nh%0K+h4>UgD=SIC+scc~^kdb>SoE3zpv~M2_ zcZmMI0)`a`>p#!Ka|;iGJ#<_ouPh`&FBa7KUR)!WwP5uzZY1*vBf!Y=OK9h7MMaX} z^6LLLPbu5ueVHTN(MD338P43BsM+mHa=l=KGTrW9u6nb&-DRG-ufZX9fg z)0&3m{#sLt&0wtHU0jC@6tO`A7lW6hu$Yppi+6(&>vnYIXC@^Q@PKrIxxl0=BO*-9 zps!3aE;W#DCxA0#wjWIK38ObH%3s#4j)9ejLxlUb|wEVH?FFY(ak? z^&L=<74a3Q4g*U3vo_&!ArjN!h9&@G`xTy8Qf@k9#4vZ;cTo)4YISf51pJ&URf)CA zyG5(l$3nG`6tddFqt?1UI`;KX8&%wzdL33_Fvm zu&(NykSDoYj6_NI-3H(R1nAcoQ%6AL9Md16x<0YvTxB9*?#@fO2r@|Zcc1&Y61KL zXK;}{1?vysIdaS%+dRrR!#51ZW=Ihky!fW3TwieuG70c%^^!RZ8!vQL5UR!7AL7SO z0VnlYxiY%B(3zFK#?EK`80@nyAjw;jg%?{DQ)nlFX-!tU^z5%UZ%LXlL^{N7;g84A z2=K%8Kefq!Z@Ux1CQxkc;c#QhQX*@BHqvBqEvbk#~o|z#7L;b5-h1O>iIQPid9uSA*5Qup9B3XX`0ngCo+^Tw}FrbJU5aS ze2t*Slj58#d^Pt@VVoP_vZBciL;(ygW$?`Jh##Er*0p9HDdRDf4+p5FB-|wuhT0IK zzo1>sGQtn8H%y6!R9Zk7^v}RH0xkKaure*{V2Z$cPd?0 z2+Bc6yFof1BZH=6mUrZR2R^9`6|a#0aubvJ$6tor9Z|-9t2mN-ii$l(#(1cxrGT68 z2e%C!Oud!&N(SiT!X??!A`TCLMOcg=iMKLLdMA}UwdMdzTk~42Dz0Q z69%*0<)I}%>n{^OQA*#AGt%;vVcEl1r6V~giUh7U^?~h_Jx7Y zTfcwP=@ev9*mDp@%5K?Z(Gi*u_gs}PF2OKG@uhZqlinCh%F(}BWIePMe;muWj(n+( z8Cc@t^a#-T0Z1F<_UO(c5w z?AfQymbcyG4TKTCQ{CU*@sf}B*D{~`F|xhVZE!@ZJVy%_1-gEh;Edbger4tKQDmN- zrdtT-s^JG)-W5uD6t($eE4b`^Nv5o_2Jyl;}Sng^I$>BS|2O=qnc{9Nzh3` z^wYdCd_&}wkJPuzoG3mN#5saWwJJ{3gI?)tna>#aAL8BkozYMj;SS9NS7bNMOW#|( z6RV9A2=&{Zyem2Jd)oPKxk0kKVtK|dZ|Ca?0-IfgfHiX zL(CW?dmLb{j8;8qo<4?=tayP}2RDxv9D9UeZay?aw?j*(YRtm7&C;+;wk~Z4?gjP- z#n_WTwubmd8?dS2l#eF1{#~roco8~eYi$a>ZIY)qn>KvrVN|m9{(&g2WiaYZ2w=8 z1i@qtlWhANK~>^S)EbU&S1YyN{G<*k>r?|WM&ZB+Sjiek?&U5{Vxa7s%yc#Zjc>+nwp#5n*JAm7;f| zDKg0QA^7O>dxe>?RF6_#4BQ&Ml*n5Ia>%bM?TRn6TsE=yo3oxr#XD*m!zAH!7(eAh?#y5=GSYm)9XCkA zT|Rop5pzZcXuQ)3vS~^`OeZd6y3yT+mWEZX#-&SdKFvweq3BF}>|Rkd!?-?;^P2kJ zn72&EQW{TY6r^!bIl}2~=LR<&vQzSJO@0BZLOfaQ*4fyO)bsp!W{=3*&@PvIGV6e-eR?6v!@#df;E>WAe4-!65?91=NYQD+f74p_ zZaY!hoSaQHAnR`(AY;I zCe;2|qJr~I`SPYkViJ!7?Q(T3=5u8)WG&|4T5m-xs=<l6RcW6k#0$#K!KUIc6 zxoDQ(W9v>|pur&7-`8E=U;dq#8qr;)_kmx;xVg5K42ZgX>UB z-_;YN0LvojM2NkuUI&M1-o1#m$ST8VW=94bhW_l}WJs5+fw zdR$wqu+s}C$$?+A8U{lpX?@^qMVGxU;$HkYaZz2!3%#x}r56u?7^Ds=d>3ebLw$`6 zAKYrQxOMee^|uck5jOLM{I2rBKB9ETj)k7YxDxi`m#eQW`*+i@d#TrBS#^U5zpbDd zeBX8b5LYFm$bjj?>hU<@O)s4e1%iTjDEOgIxNq)5r~Px0{Ou-PfH*de!RF z9Yn3|WqYlRZ{wj;5oE z0r$`TPGjhWg2=C7rp4j6sh*PQC9>Uj@#`o7<$njbFeposN-saYMf(h_D`#!w^oxxB z1wUUrMlxEr!cb-DDbO4{NhqimZ>xtIkbYEd5u)h?{`g#Vt93SCeKi~0_(8aELEZSd ztwU8jPPHE1rRAy5tU%*o%?;@1@?`7i6HE>4x?}B4uTQVu?!UPL#!97Fw?p;eKyk!R zh^#(V7l9!$2aIzaJ_VH3v;4Z~qcA%Td>8j{`SUC;6kA-jRoBje;i! z^cu}gscQ7^#`lJ|4RZ4iy3`P{bNK5?5Up^Eq6jT7B^p8d`Q*q~;?vI(a&ejAM8;GGN$z3`3UXSdyGN9KE_$l55kd`p|Hb=N z_QVRjXJ^&K+vOxz^~%TrgP~#n3Vscn;asuGDogQYf*hNF7>iEftu$y{u>*;Q3jvU!M zkYAX6k}_urM6MiKaA#=?*?uBYeFz}|0}9SnGb8#5i%R+|k!vc)64W<*VOWB#@*fsU z1>t|_2LY7qLSVsB8qVV4WJ@L4q|;U7MT0xDEl#uz%W@6lfr*86Gk3uq&NSlI<_GTx z`^(a`S$gwGOc;E3V2eDExSqB~E};9dBx$X`)rP+J{~!d|(buWNFPm^R<08lY@zEnt zE1N9rTxm+(FQS`EW=ha+ML^Yxl2S4LP~Q5_Lm+!jdHHbf?=p)YsMvVOk+ioS zSE2;!S-9CddPW!yRc1QLsemx&P-sb*`cB-abq;rk|XBI z5h|hFtF5$=3gW7}ffou!nq)8~GWJb`NBN}Q)}o?ijsX$Q#aMJ2>i)r`2!rZKi4jl+ zcBN=vF)@bCFgIGgxTMa5;jFQxzWbTcqQB_JFKLIgW#n%1+b-nXtcpW9tXbIXl)2P? zgqorXEG7Yy@!dZ((rBwns|wF(J(NXLm;tT% zd}=t{ezKFkGCCcu)qBvCrkw<5>?masiq}q3#?0?VNIl)C+X(ggU7~2S80K=+XET7K zM&;FQ^!H7aRRLy7zsxHNXIk`}GA{vnUq?bAznIU^)U#71wfaNp!8AQxJISiuw2mNzeY)XX)vQ!rJzMMMW)r}Y!9JH=2p*{ z@8AZsS)Q4|0#nj0lhj@bgXvcAD7 zbEE%mF2!s+^lQQ7)=B)Z4q#Wf4^$ZYkdOfq5^z4kTMZp2|2wT$bIOprp|?!e8dAwL z?t*E-dIGPqEC5t^cirE7cMy{&?O+rqKUNwb2ltszbv@Bk;vrFqTFd=bK`S zFCxJ#2$QF>Lz-Zp1~HUBwMn#0>dP3um@AJvqqDquDpfi}suUgoqg1InenJ-yN*uUW|%eU7_ci{&@;D;7+>#+O{>VM#EZ zM`?<0TA?c&TgcQcm*`%ihfIU&^y6aRsoa;#dmVjhr%0?ZyBesrNZS{oId z$M)V>k>ycFJlI=hi`V(u@%8>Po#k_O^nFlZImMZq&vYzCEuLJ4F^`)USWELK=$k^l z=2fnr=Ft~aO4O$Dp{aH}PPq*v?4th%>Od90dIpMAfZ^)&u0R@u$$+T;`T4A6W_Cna z@2v-+b1*nKLgM#8V+@Rs?1@=1LqS3ut-Ja;uwr3wS<8PR>f~gA`B)KrKUb0_)YP|R zJ|U(bxP?+buxyC~(CNgJF?ZDZmrjjJ3nR7vNf_Bci!mk+h`BN^&KtHCnwMC|Z=TFr zrfTU?$!rM57M_}L_!`l`J82}X=hxkCmU6t*w`~4@q0r!e)5{0ag|Wb!J+2bNclR9LAWmhQ zwNRP}Ob}3EwT^~bs!J>B>JKIld^(cW2_Q&+i~Zf+Q}c)T6Yhrws8eHrq;m_@`eaN* zqw;W5seEMzAc4}_ZpAua!jQDv$WOq&#p8W)Ph*9~Wp<1@4fwAFFtu<9K-})F2Ie{* zS#n`i1si-_CS?w=5f*z|mZ(oe+kvOyJO$I3nA|6P=NalF$gD}ZF_xt-ur&aU2 zFE?$JKuD^>*G6}Pwvmq?r?%A{V-wq;5kO7*iW0Y=dcQQ6)~lQZ1@N>0&X_vYAdWP& z8q~9=F~?B)w_oTF7(K%F#bKTwZU*{GV5=@t;;x{_5&ZV!7U#2mRJI1E9|Q zpMC!EnqJo=b)VP6Wo3(lZql3YJSjPSRPI!7yNL?EH}`$^F7KUxBeNY#q@_t7m$cZI zF+4k>`kz3G5P8)8NK6n3-iH!t{i}!PSEOFZBc+B}Hl$-^`Ij?fRT44Y^yCc)ywXb| zSkNpy`XTW38b5PRNaIS17O+h)U)!aQ0P*fsE2#o=)VHh$M<*OFVd7}Krb5=Di@TD! z0vF~BoRIv9=U*2ywW$M7f08HhJCeU%nre0$;aqeD5Dzd5xzoZskg~ za$Z|nn&@V)JoGqW`e$le@&)tv0033bvL)HVH|H}OGdd@em>oJ?tGuB6q zNMqpIeX2hYxJe<<^EOOsaAW=(&$#g!zy#ugy?!j?oAZ-+(2F68jaUhGQ^f4%s_M^+ zuod)vr%l^6CV{)_MOAT=Ux2g&L?RHP_LZ5G%~S_r%c)cp$fOgDK;xMo{S1gdW)oX? zLG!z{TCs$GS!(}>a{2>iJ2)TeJkD*Y2@a&ZO(~}V=Bm?^l5hp1)5=wGLf)K2bj*n7 z$Bd)pvv6yv{~-)vx&%WAqK-@{xwt(qKI`qcp8)5- z4inIkj(gCFV*ok`KP_ZI{5tu?`Tn>z zTU1B@h>Mua=RK*b<5Ht4uf7P_^0q?Oq(euYEixv8)*h5da6|c4)YNNAcZ;|EEFo)> z*2Xfdi*FM!g<+WO-KBR|K}@ju10#y};MDhr2SwTZ%VGkmHncJbK@8OXjI0#`#C}YQ zQ_Uz-6}bs6C|MD?3LtQ^WAgGB{BZn3%B3CW{Tt4os0hM9Cr1=oDXIkQ#BWMft2K}& zprr+{P<4L}f`C}~Q@)pcN4svp^J;m)RSdvj>_Eo=9JHcseuvj;-18^2^k=mR1|si2 zYG>-6lOG-FDJX<87@N1vWomd?sKP^%%!;5Ou~w)sC<<3r>>Rb?jkvsWIcByKL6*8_ z@&z=3TM5A{=#S{Xsa52ytu7N<2h`+9{A6FuspK&0qh$%uN%-1u2E94K5^(0K16x0~ zd7yxS2qw>cDuq`bIaBQ||smfTQ?b^b~G51j?MMimLfsty28i%}-V5Fj_6< zC)3iZpzRgj4|i^~#(3Ch90PC|WRAi2sJc&&r>rWKqCX_5!GS7t20I2IXljZeQsAQM zx@J~3=2uUq72@YBmc42oj#aHQ+mfmalEWS;*CN*@i5;hLjr`@TSNzrcyEYboS_z7% zz(^&=`FzSEZesh<&O}AHAi4o(NeKL1X)`cuka~7m>V6XZJe0PC-%8SUGzkv79-vST z{*c7yWL}mv!QZCv714)DU6@qyCBX*M3Gwf&k!vqP;Hf!F=A~(AP2|(^Kx_0+>xEWe z0NT7lSbv<%CLS0s@tMr5|KOs#I=5<$0q7ho2d*d46WLJ7`%cEVl&y}2#OVa7XO|M% zVhOKavoR#jj3wHSRI>K|rkDT`Je=G-NxcQ-1#U&U0tJ^*p6g9n*Ka(aQlZKjivfTM zm{Vj1+4+c27?)`vEj4ZwXk94vZ;R=9q(n2ne^|0+5zHD7HcF-{#sKC3vp`GKpYzqw zg0K&BisL&!m9_p6m}BNANN_~Rro8<18lD$i<^Dbofh3%f{^#Yy`jQxcY3)U+F+RkpV1Vn!b zJHvU(ss}AH1@X4iwRwNce-3=qqjF1(|VZzHc zOe?OG#0AU?(lX#=NJ1g}64FX7Ro0C|mAD;KkJpQ_hVv>ao*zvJ2de+=eGY(VHCk=V z2ZGLPN`LT+0Ioq@3c*;?enwoYHH%0i!2Wfu0R;9yB)-+ct}KjCjsUcLG5Ag`Q|l|$ z;GUiQnUEagE0e178p(~JlU=*MhBOL58+sv;l-ekncu;s{^M5yAn48^P?D#923(1uC zR=g@NP|y6i9;(Q0RPX_a(-U&V?tf0+)Kp3%4jrQAP-^|15=fi@{6uTkpAc7tof>ujtTZ6l`p-?G_E$)zG*y@a&k$#~a8te$f-`$_vsh`0Np1d4?#w}gz))%cMBnE9TA(&c#rp{;*)K-cxcJjhCnfg#IwZtT{+NdlqxVvmbNE#o3g(cp`1z zPl@nbiB)?R;%{5{`fL~O(;*N@X}`>A!?GWj#8~C^%*TtKn72wx&Y6dF@4g;T2n;0y zAR_!kZRA%;=Gv9!DHCG8R4nE71fB6?z9Po){Ev16Zz5q~SI!Bsl6j60Di{L&cdZ2o zSYosXw~V3$a91Xbl7uv<@h73ulHOqqO0o|?*ddm@)@%F>Aw>m%Q!y&{ir#DyPDc_Q zbwUy$(iRgbAnw4foz45+!8K$E)Tz$M_mXEJ9ps1ROEejsSnD4EnVw-I*;BYbkJ=o0eUV0;Es)e_dZR@?D7DDu6 z{s9X};Ea-|oR>oo`QOJyq(BI01zc+n0%%Uc>U-y=zeg|uZo!I^Qt8G%w~{Jpk-pVhnpd? zINgD)iQbUL-8#da7!x5DQ3jzSiFdNmA2xqV39l%g|B=ETNr$|mB0kIyudI;xw{z=2 zA|<9Dd%slwvQHCc34?=&Kl600a}aFP6M|y;!P#PHu!N=5O0h!`~4Gpyri}2`{-MWFS$}WMw4rDxF$g(GMYt1|J zgT?QkOQloms!2m0e70{d+Y=LXGH6=>06+jqL_t)rvUXB~rdy`w1Lh>1v+r$Px$$>x z<^SK`VMiE%Qqf#Xrp-@eot&0;`8#@Ul@`$)V-9UUh@{f6$omDo`R$cBFYfR zYjVQ7y;87t^z$=vn$x|IsJ|xD07!mJ+MY&|wCztsAC*5&et%Lff(;S;4S9%PTi%j` zALWp%mN9?{(Z{A+2oGNIL_aTc1vL*K{;#MCB24IvoYDyI!_kkdl#cK>4c?Ydgh7Sb zvO<7fzkV`nO#vha;uwtNKvaJ9-cNY7b(vv*CUfjZs!qGrGBA#C!JwLepPzC^7=SA` zY$c-4NY|JhN&GAAv7?muetwMZa|a**QpL*}35!Z;q_gEc2*I`PKmd57xD-v6jaw3z z)alZmn~{P-`$X)LD@CdGb#K<71)|ESVVpgc^*FGbC#2pP&tiTsvZZ4xkZjWA6AoeL zx}v zBpdejNvkS~nd0`<-11SA0#WgAYQ)T%k~iu5RplldjD^R`AC)u*glSO5JxKel(OB6j zmI>2SN10Ys->eG>f2_}d@W?YY1sZj@?{`oTec*hEPXNw`^d$jX1u6qZ&t}fkdEt)b znv%{(qNmE?hC=&xG!~oCXV2e&bJ}U#1A=eW{d#5ISq%m|za{$Ghx#fQOE{o*Zse|M z8v+n~Gy!Oeh_v7{hh6h_7ryP$xC{FMfuUvqgk5s7aR;g*<9%)}Eq`FHn)^oqw7gFj zleNVAwRr7}_=|<3^wrC2$E9+;ypXl(;rWS#3WK0>b(psoYk-3woG6(C&nrp*`}vMi zvYpa85Kn(iRrB!3ChOlldHO@hTWt{gdrHcKkH=l<(wCNH77$YZug;BH{{t%MQ93tZ z{wP8uTfVNjv4Us6O7}i4=?Kak6KaUspFZ1F45Xhe(v z66lzi0JflYplo%*?)$)|&V>(B-&e7T0r0TT$kQ5?A2z@s?thSG(8MI8dI}7H z#)ia;1Wd!>Eh*JE48TKq*NZ2xtzDiBEj?j*w3tV}fS~f*+3AWUu=Fo_Zto+dfXy?2 zXn#I^oIfc-cw zEfks|(k%1Y?mw2Nbsn_;6LKSH83M!20JIE_;kp2IUPjU&&Q&?ZRHJ$&OwBs}@uO>f zg8<-{1Ge?UAmuIZk%&v>lmSFgd?2}e7T3skQQN=|c12j15RVC_np4Q}avZW00#{#8{_X?n^nDVcwfN}iekJ1ypG7h*D1QE@T`{`Siq%K(!5VeVnlItGSF z3rE5`lKL?@;ONI^u7w9^r9u2h#91KOe`Pvlz9LV5G#vBF;ZN@QwCob3oemg; z7GnfYIW^V=n>inmn6*@vQEO+ttRSyHVKE}q{h3{98iHHd`$^Ft>Ax!CZ$GyEhw(8N z#*0%K38r8GChRMYfPt;Q?HjlYy9NQ)%{ppo@zC6qr4_7LhZRQF|o;B*LFjt>Li`%*~yum9-M_ZBL(*q!pV+)}d<0eJq;rM*SfIM2(O z>#{an*EiZB>($cN;Cfr>Pik0RLC~&67Xnt2mKh)>&(#xSafJ*f6@M?4>`u(u<_q#3 zq(G2pc_5{?AgW{kCy9@02|a06<^SYUlW(+#*hUZzV)oH25KekRMSRFGHzzf+jWdUV z(MdQzx)yB+utI2W#-trYbClEC!GK}~DD7#5qCJCSFgkLI`DRILbw^IKcT4fgk&^%c zwd2D87_+EcO&3pL!Q>|I}PJ8_jH;DYH9kyzKj)od2`{#GuXwXt0^9`*#ku z|Ci@d=B(navCSf#?Zf~58<@NEGY|;ZWJK$;ptV8`e>xvGmrti{mcKtP%S&74fm^Eu zdFt;zi4tgm{@ZIF@06a={&0P(re25z2U6lm*)Pt@WI>k0KRX%hu@U*2z!X6DNj~N1 z|1(P+$qz7URAC%Z5p%*v|2dx);rA@V_-MP-vPmeClN|IxDLPkd|Ie${_)n`Te|2>y zvD}F%b({gvUMl#$2X~^U-&X;LUZzN_2NEwW%sQ`8jZwU(R8w*6CYzrei7RO<=^oDo z?pRG<+TKgGqW`Y~sC+tVk$)bfhx?uZ|RF@BdB%{=Hs%nx3&UlNkJ7c$i7L5=#9RDGO;oBmNEk zG>3Wy0kMUklDbtDP;|_wP|n`6&X5^yGwgXX|t=~JPqZ$CX zuJB{=M>$El5rHp(aSoylXv#0A*GerZU!{Y;HVl>G~&| zdVe7HeAm1iHy2h%^<0eW49Q~AZXxnTDzx#~;)#=5VGy`$g9QP*mihx?#zrf%S`enE zjVLXxEvQ4%p6#+E^9sKy^iQ6Cm<4DDZs_2rhJV9*vu!XvV2a3z_>V~X$59N2Qi}MD z>od6}TpJDuu##wT&>s4FrMe_$#EKk6YBB>v+Y%rTt)MC&W4&V%Ba<=*Y?>RBTg^Iq z*I7l+7XMNHf8VQ(3QocQIu{JePn~7}B0q0!VoY7HXERoEd7w@5zaEx6BDuIdpNb{^ zTWzd4Ayh&?BHMU)?V&%UT)i-rvPoEQ$Z~BsAb^xTF5du(=e%-KIsc0ZbAKygmX+*h zS2_G|D{lbKf-nJ8b!&$SU=IDrrmtPNg&)8*ASz zSW^5tvPl)LQo8Cn0UF5ajJ_*8Cs(d|*h0>b5V46aGu#(g{&l92z6t z;NDTk5`%qS-sF?A(c;XbuUs2&2tbe_{ABVY2o>oYsqEfHRSbX%`N(^FbEjs?C6#p5 zT&sSYMgiR?u7`uz@bPrgd~I&TO8YO*Dyv^c`%4=vtwjHf$L*^D-&6+-AN)b3P*%54 zj++xQ1#HUXw4;0xMao4ELQ^n}9*rMsldcJe|58J~b(CR?M&zY{dq;cz+v*ohaPBDm zMd{k($N)gcGx{S&z#p&F4%cD&RWI%(d?lK&CV(id+Y4g>Arp3mx}U6TDG`38H0SyM z0;mS}3$1a0nu9dLd?H>Oa}R}kU=CPwPbvUq_if7#VEfk}2myO3E2aaD5+S74&aaKO zm$zte;pj$Q7Xo2v=7iRq^_L>R?gelR$TQ+{5CasEwo(+XF?D_r!UHaKpEZI>ez%c-=E=WYr2VtL5 zsnwa$q{6f0tR;J`RsV-ib417OhSeSfo-zP9d@ARDIc8p(O`C~a%{#8iTxUTW9Qv2%c{B?6;XHpiQ_r)0erFPGEgO03KE+utjstIyovT_?3i|H{_`GpZ~ONet*AU?r)UM zrku7Q;_bAWE-b510O5z@>6E;{#lb854?^JD@qqwHkF1X=IeoCtCWT{gFs8&b)gvR{ zY$^@!Yj)&VF@5eY~ytp6~gbJ#(w(dmsPq(H{?VR6pu? zx<=SOx&BFM+w5E~7OTJ0R=KYQh$7RN5|{XO$xp8ekSC4eJhLW|lBMgSP~Bx-B}#RT zNjmD{alPPb5jCkA++H91B22Lso6j=fX2Db=z@5klf_T#mJjc40U#h$OsfLn^8 zgn)L`aiQ_FnmvOp?vDp5Pk%0zN}ZL<<#^oq)55EJ9-qzj)t~cF(Tx(Htpf=SM!dD6c06ADmel?UagB@Ym_casa{POFHlrBrn4JGql9eY@ zZ7@bZ2oR3;7|8)}cfD#owb!HpC{@M6g77+km^VfIf3vh{Gyk0$1rAAD>?bc(Fg_-( z9S;Z~RLoDrUni{E{0vK^`XcMw1w0{~Q{DiS_Br+4S5tTssGla#C{p44D>p4u$uacYIKZ`KOF zS=Uik1|X#WPBrYjD?4RLc{QaRu31fk?R&5ukxqf**!F|Ue^jBMpvJshqpAA1tMt>N z|3ls4)kNMTd?o)O7fkOsUTIybg7r$1($-NE)1lWrBV@WG# z?Q>xXR@8N_9S;bE>Q@{9cNMa-D*{bsz-AiTBs?0OPmf?DObn!i<+a)SH0s;^K6nry z6x1f4Qivd$fdOjiy@;=~GD~CL9jL)~3(igh&@ISzv0rR*c@^y@>)R>i>Ry=Ki`9lJ zaBb)z0P)`v!6)4BPpa1T`JG*}qtB@98?}Q7|6y!H_wQn@2J?Pzrc@gfLSA zd#D-U_FN;F55w<)Uy&!g6F1*HowZRXS0C(}hnpmx9)#vEs=B7C)0S5<*~CXG?(-2o zYJ)KHZjRA00NsRcm- zb(IF<0AK=eG+dV-lH3byT)2w}XaUeea%#xvrR8Hp3Fu^^W^$_oq(Jt)yzKa|Yu~TS zr}bB+kcf12SFg5B_ZfgJS;`Z)Bq#h*n@G+SwMFGN9taQnUM)ha_dyLek&UZr7xDfI z%aKaJNn@=-ad6Mq17)55wq^g{-mCssmym0!Fo$IK<25JS5eNM|*i!MJ27UuD0hk$R zTLn*{fi>$HPR$7zPuGqQ1VR&lV$&_|85bUcK`e+H9e{`c{1`A?oKL6Y-+piL&6Njb z^J|5yKgq6#=l@*VTU2T7ym0De+5d-{QRq4YAT`UjY%V{~X0v~E{J0PVYy`POZ|FxBS^}^_Z){cYDG*a zsSkVS{8Um$%G&ck`C`qM5I*cWzK4aszge=(L#389w;pE{8DPw;YNDOM5yiD*0|81v z&&do#=;x_yZSI{)j%%>x`s!a!16aL1>0=q;GNp&`1T#J#9+Z2ATtV zf%`L55MUpHbJ%kT9x#-}fxf?+7dpmMx^W1TT6oz5qe6Ch@8YaiZNE}DwyRaH47U=Ew&N{<)VnwYJ3O_r6P z#7*)}VRv`^%8gIAo4Dvmr#J@SNZ{+u#XN8jdDLwb<3RG~{eZ>c_6Ar(dKVH@p#j*H z+Mc5z$sapxy~e%?oI_FDN^>Nl4dEuOE8g7H4;a<^vZ`O;+=n$Et$w{;LGC-M3-9R_ zGX!HIHL#4O!>%pW1YHXuz}`Zu#C`%N;S8E8lN?A{Y+wKeqWRbZ#`p39*b(zt55O~K zfgVn!=-0fZ|8VX9yxpsNItHLukZ$)rJZWmE+;G4FiH}MBFK5c&2lsdAApr3w`OdDK z|JJ1eAU}U-ldn5@_KbB9r9c#ET!YfC`pPyW_q?k7fv(pJ75PT#O^Cb0*WlDAe^C&S zArN&hjL4=udSpdV|ACLelM~&Wp@#rM33~})g!};PjSW?ODT^@;?B`JjfhQiu|9U2s zoC-%OjzIQ*Ux}}e2Yz)7K)CV|ePtVR=ovGoWW#-RE@ipyF{R@?9$Pe`3)7=vLHt!x z+j>~vT`QS?S}KU>Qu(j32Ob1|ZY*VMxz9^odwMJg9YuW`m)M@w2KwfF)`k+Iy8Uv! zWIg_w+9CAcR5YH=a@iDXo>|&Rm}?IzGAjhy1s93ZqfHTk+aP~Xd<>zas;Ge1=10Ut z*32K5cg^*O)d5Kp?9WP)`q1Z$n~BlxtmhL~0fr zV&UjmouvjisQRtPt)#dSNpU3qamcm~(1z$I2r(+vva;`>a^EVEk~b*VTW^}|8w7fr zC~D7agcGTa8rjnxA;IOOQQ{-}3S?drVYiV5hjfmV2e!oeKpg@Y5i|g@1Xu}>`#R3_ z5a12j*Q_;r2U7rLf|cLJWTg@yQ>nMdpR!+~+aUHnmHwVKJ7BwupME#Q?|RArY;0FH zCdLvU37`F=KzvSn<88s#B?YRp!7?KHbE8RXXQSxPgetMYd1H5VZTKNjQl{01TS3Sl zsd&lA2P04~dCwkww}|-6M8Xz}!ugK~66+JN2R6>jYA<2^M^kWNx@rmf-SwgkF{}q& zJ;QQFte6{*b`^<`R7FbJ0Mt)Pbinzh)Pvmc{bq9mjLIKCTFLtxiZDqfq_L!KgiKYD zW&nn7En1yXPZ@xZZ+^N?4ra5Ph?9v)oZ@#hV8;N|SvBlyr}Jr>^$#RHAzND@#nrjU z{WLTXAP@ijO-b6S_P4yHvcCnX->r?c{uGg6U=mL&QsnGp()s`-A@(76px-%XMBhpH zSC0Rrd1*$fIr$ho-mNsj0Qh16$}%5RBt$7Bv79w=I#U1Gh4xvBdwR1jdbF-ayR z(FOZ0s8bOu@ymg`XQ<|Xw8`+4dE0spd7QQ|lOL_W)~@a~1JK&&j_KJvHaKv3dZE#2?6nW}{Kr{-<-=JJK4}%NN03 z8_#&!XMq}bER(RN0GN#u6@U&wN!CG8_ah_W&DFA5l@r)}K5qRERC&#@0mHvd?F`W> zv4<$HGb2sX#aZoJ?S-v-6{{6ECSYiHniMDcSZu&e;r3fQyJo%Jj@r4-F#zpAt=rd# z$n#Jk{S&i&N_JdqxFB>MG}ne70-@vImQ?aLS4v9MBip|yGU0%Y+J3VUS^vhx0MHKG zIxv-aF#tBbbW(xPdcU%xJOScD#D5&v!cj0BN)g|9T(V}WY%;A9)j@VRw^I)U-B5l> zL9&E6#Ti8eD7xMztGd3jYaYvI#kIN+*iiDVkBY_WFUkDZi{wB1N8=cP{m_WWdk`lc z+emUF(y8KPgQqR3S=WXi0uWVfr`Y-nI(S9Z4pUH%{@d-LKS@z^Qbms`-+xp7BDzNS z+a%@1&0b^AaV=i{+luoKiT(4C55LDDaYtGJd=AJBfGL2nU{*Mc>3F^`_j&LkfY1V- zBBG?YXSM{7$rT{jyMsT)F3iC5^>6FSjlb)uO3R@Hwt(T@3f*ThVXMc>y5R2JpLef9lfM%La zEF}UC(|Jbw4i$coBDgF+gpkx9+!G;+kGJKVSdN*;lK$6rl_FOCOzbfr2VdpB9X1HC z(}+;QR0-RN`AOTtY%6iqmUR{#wiyrpd>jK1!8JrBJEI~SU!Bb;JPWVdAd8zbszz|J zx-cZv{Oj`izq=kp0PHB7s)QJ&b|@*hk|$O4*{Qfe{ZID4W}3ZrPkN(N@RY($ zzg#aX(a)|?Bx2o$#Q$isY_>%7ABh2Yyq%N=LdAkKj$~kcsC&KpaX26V^9kbz(@Cu^%rTc2wAQ0d% znJcw##-xeLrCc@bL4x`%B*#fKt(1lfTeU zv?0zZX!Lze^Ky5m8vWiu*kh?=;P@vs?D=WRw#IGvU-U=sMtD>RU`yYULmeT1e|lUn zpWoS$H@-^#*30^{cT;lyzcQDR+BIQjl`?uo2yQ++U%9Cyu#?#6^ zSToCt`Da}98c_pFiUuGg5c9*h7?EUL<^&M~_O@%o4*^!01Ht8MvniDemNX;n)$&GJ za-PgsVgXThPLO!J}^sz?B=n!0F+$A1r?Vp6re5jx`Ltw2`<#+#{0a zDEh-I(w%F=4*?KkSx(br|J#!EPkG*5GyqLZ9m#z%OV&Tx{$+zrK>XVvx`7%uK;vX~ zBl3iCJ?5(h4HYjM3lbJl$s7kpzVqc=XrJ#U`e<+q_w#^2Kxj<9070o}OhJ5RJ2s=u z8jvX-ZJ45YD!=vFUtjy1TSx2YXjk`*0XW*Qhx2M2u~6e?MfhO^xX|O-3);Mf(wi(`w?>zZCf1fVWuQ|g8S1`2sj3yr{J!Cvr&0X|Jmt`<(wBL zg8ctB`(ORb+^+);fsmkXsIbm&zbx3QYImhFB@~d~!ru>?W>yX(9OQ|3X^OPJ%EK;% zi!VUCjhJrQxHJlv7c%ndugU~aQaWN3xIqhF_dePeGzGg7{fkOpSXFJ|J8N?5(d&sZ zqk0xWUW@(byZ6Hj0T@%XN0@%jPiJJtirF&O*BF~lOYBO^TK!jEVP8siO zoAgJX|2WdLqsEQUl-y8SfIwN&@?#O(RUOv(Pm=UOgm! zdt$h!G$yT*+(Ae9R@70#H?9)3-GBXd^`0tC=XQS+5B=1GMF z!dNUW(5}i4DglA^=V~sV4yPre*`aQwEGXUo5?6SGE{XwPXsfLvQx(tRN3Mlja228K+58)?$ zCU*Zbikm$*8KiE-<_dG*+HgYv&v~2dM-nq0^fw^!t!xSYlr#y%h?CQz z$hFd!kHgbOzbl$ zMT(a8HFsgx&gwe@aK&7ynycnMGgO7Z^P+jjEO~TLM8j5Z6K9+hM_`?}oS4+LO&_I> zDX4?f9FjG8_lwd7kXWbLW*w}AO>ovbg4zGj+rgmXQ^0-55}1;MVxXC-KSoW-ngL>} z3(tJe7$Ef>Gm-C5xE_}ukYS!UsEYH$7m&{|z81yR-$ug*Cj5Nw|MeIB_59xt)V?zS z`v-Fz--BQ$rCud&npZx%c03>u5PxO;lgb}QJ6<7_jT)Mvv^ENRg%B%DMp+@(ZuyJA zSmfoPTC-Y-Ak$k}c>)OWJ5)=tNAQ^sc?2G9Rjj#UB$+&tgJL7txZfiN0qvd8Cxvps zlgd*;c<81aUe~1%uQ-mduM6fFfWCqsMDCeaW-{{LRstX)^Q>~Gb_56(1=YQ?~u31u4@V1a*Gtxidq9>%9C$wr_38PL+y^gJ&TI}6u_@i7@QWvxow`8XH zXH_TkJ^U1^rl9d3)Zk+(DkV%<pT+>oAdUBX31P#IchK-6D66e;P$kl?W4 z;vCaz=C;1=9Cza*K>z|7mi*n8{U3=4&-q6A(jl5iFqBxEL93nWyNHG8vQ{76L#P5Y4R0hQ?XrIhWbm)>?)8EyAE5_?Tr&{eGb8M_O zM8@I7{w_?9?vCnQiT-T0G*agx`PDm0760S|dvC`)R5iUY9Pl6rfIN}JdByJk`b#tu2(Z>LtY|yg$SW#$oe;v?Rdzu3!!c+|dGCGv+eg=LMP;s$(g1vN{S)FMm)^bb z-OcPsVqJR#L1U(zm^Wl@(+s`tOEAC&F(sW2WZbTa!HdOzp z%_;mdB1B3KfL`T`fccl0!Gx~aXW zHy-y}?;#LkI)tAKGb!!qsaBcV@5Uw&+PG1rOFfIn*w_i$=v?bpFO`kydVQNVJn$kb)?pFSnR4lnxy*{=b zrTImc0T99y;8Z5qCJ^;=LUZBXi4y!;8VfVP(GPWhN`I1){B5zH!Jk@7a5%On3%8*P z54`_-BP#3|~~rai;Pw|hs@H)?knMpW(Zw z?yG1cq1Eza3L+22(t5$B3GUapj@x}B$pGLC_}=+9KHH8Zew|Lozb(4?9WBh@GXUD~ zr}8N?BTwiV<;NaVb~O;W@TqIZ00LCXd$^^Fe>*kVJ*75R_)`g=fI4$$Z{UGX7B$rV z=cnT~0Z>29M%e5VD&9q%02MVcSxIoexw~V-8tq&TnGpQ^wnTsC^f*~D_XUHpU@|eH zq#u1^XzGC8{nSMW2&Y@zr+`p%W+Ej`KtkCf%jUk415qcywcbD=k_-S-|Jlt?i7+nw zaPhk^3a|cm%=n3C`5Y$prhVJW3~h56<W$K z`6VRImiyJ7ZiMO}{v+BiI89MWEhWr}83_d1&xXPuEdUH`R?>S`;Q>Wq69_uTK@(%H zcE0*k(m5Mq22#4FR82fFgYhZfbIBKK4_R?C(PzHT;@906GuUgf7mz?{$UOdYU~+^AO~T0)ePmNHz2Ohr4F$f%XsD0N1((0mlGz4YWbWAafl4 zrewbzmmN1Pm98uI+hw3fsCx(alyjYfIbYK;$JF)p(QI^g()85c|R7wG5DcA|)~=1I!v!8*IWYqI(l@J@*9yAwI;1 zWM)ieu2jv8t_x50Wi-d@rVhqkT5nJvj4d`UI9$}5AoM6oa9skyvF{ceKLnqoH<$r9 znjvtw!J$@%_!Ax%{EIL+VN5hU_BP3_Fm+{c>T3eKkQ|7b@c0B2^h&y5n-Jz;z9GUu za6>yP1jsRA@ukI?GI@z1;5lG#4MxL_(Oc8ga9eX(s(Ix|+Wq&#(rEzpL!*Q5L55?g zq`ah+zEvL5U!9doR~s5Nu4}^&0h0c#DA~`tq}w}6`TM8kLO|q{0=H3vaFQnaLLRg8{>_(4bhKSxhf5~J8!H8i=+_E*PNov3 zC{sXCD2t*)y`GJ`U%w!LkIS6)dm*cQ89P<;Sf+qgrOLjyUL25Wf!?-kdmqcc<)eg& zt(1!9TEAe~?0p>r&@7}{JP*WWC4Uk89BN!SNd+lfTkOc4K9CSVvZd}lS^u_#$ToK? z_H#3nVeeozBGVoPfKahFAxt;Zb=S^8jVlelA^}vWXw)}Ez979dYNG!#jRZ_lUS^IR z!7Rh!FjFJY4c+f-7aI;LW_MKfq%rONqV~d|XifFp68(C!Sgii?KStd7FWfwlf6V0l zpUvxDM^N6Kh$gg|Ji%D-pI1+K=TBH!C>%CCYkqJRb@$4 zuC`R}*376e-55Xkq1C_uHXuDr8*T{x1^wuspa(znFA)4PeklX?Q>zDP^k^^!Ec8gN zYL`n@D%CA7m3Q9nK{8^#ZylaYCXx=++K!?Aqx6n>D{)2wgO`kbSGVU=Vxzn6(GGDL^2#b1wkB*~^+2k4U(hcA| zD>xfO+f8%L)G{7A%UNpg*!)bxj&tw({k0vX19N6 zP3<56+y`h;9Z&$%c2DUaUG&P_JH5B9Ui;&Be=xhcx%J5hU;dX~7Qj*Fs8GtiffMLI z^i^}$xX*Rom%0`M_6$^C=xuhpx;{}zE{*j^kke@)tEv9~VbZPI=8iEk1Kmim41 z|KbbYyTFB69%3NDgeOu7FC(OU(1lISKm6KoiLd9%$-Vh6D&P|0GTA8J?>*SofLasLiVJ3SmU|!rX=}8*1;Vn z@*8;9Px$-88oxO4ue#m>kl*PaujH#%|GO27DqC464!YVMkZ5ZGn9+x;1%O4~O`1&Y z-go|6r!hO=`4J7SFd#8ii^^JPO285W3Z|bb#4`OxCflv>v~Mzjc_ROx8JZ2DaGWVQa4dz`%zGs>v|d$n33^Fpwu0lzBxyg7I^KH}%|W z0RbTqE-+wX>MSS}0#XQJ^_S7fK@kf8-MdW(Zez*3EAKkF&NTMqt``6r8%B3ruAb#hTqqpc4J z=hkC^&77Q5(>qsT{DI5#+v{ZhZgZS9m;G9~FIn!dgd1RSBh~t?c(|~h3V7qFx#F() zoYi_zSUAt6!d0M53)KZ?scA3(LE5?zR0phsM;Yu-_*`Tiy#Mdt`!_p%JIRC9Pe<=Z zQM(iF)c(i!1c3bzIl|vc^OF$0X$t>L!l|xL4p1Y#KNj*y5U?W*r7b@Vh7K{DU#}*! zYcj)1a@RWe5Kc{}j~2_j>2Yj{%c^!=o94cEeajrlt$_g;M2RUCf?&k?4R#PyLpD%u zYA6-DG#l!JmI0|vA9r&Mi;VU8Y9*M&ljU+UySfzw>oB5aFSEl=5h zfm78&qXqUu2r%zlE+iM(_o8}#hn@d+edjesqxENQtqh^hCHLp^n8?{Rb0kUbOs9;p z#!uV&RmQ$8-~4E4J1jV#Jl_D1w-I33$#G#DMxP3@@_O+#bMZLOd-2zfx%M-3jQ8;w zcnp^_-s7zqi-e_?pW9?XX!S8}$P6NXCw`o#=9nXG8u(nsCHBv*4YRIvscmqqClz4d z{jEBa>#P0+6Zj0%FYf$m-g_RhtvMylyk z6P63Pkx1xnLyDvHCkP-fp36{2HKJ8Cxs?qxeKdY6;Vr}bmFDL#kE<7eH4 zfGCz|#r8H!yKZ!J#b-VINe$>Ly6eXo0Biz66M?IFDEqRgJmjyy2P zGpWVmAm>!GXKS-CbMHVeHZgavaO0=cO(+7V5FGif0hMX20Jr4t4h{chf4l6R>%ck~ zDhOKl6qcPM1<77JIt02>0Ki-_oaCqe7<~V8)Vvv%+)RkaM;~uT?%<;IP>8>b8T$7x zVV+h9Kzlha3)bc6nq1n|meiU)dbB|V{8li-*dmq-GGeY$x5G;IceR$X%>VlDy^#zK zmy(Hz@nn2*EXhkyi3*f?F3!&-4;Pk_oz0D;^6-9AU0aQ9bKk!_7z}lKxDZT;$8@a8 z{5PT4R#-)$>Jw! z$ohlDBl~+6nwcBOYGJw^C96TksX5zm+YUnq;c3}qe;_W&e1&fxX7PSZ!R-``d{)dK z&(P|ib;|0ET5&8;q%}a?ZnTBl#<``FdG@yN=NK9tN`@5xO2uRh93CDSesR04En*C| z%E@+rKMW1aM}8p5VKZLAwYAJ{cqIg)$NLuG^0E)lGv3HRctL0s zzlTx$kC#&u|HGwn@(qE0YEn{OJ{38g3SKR+xV_KTg#owE+xnTUg5_ zUp~XyN7#K*VE==MEc(lV=OF$vT>D^~qLbzg!;8Lua&pAs~Y&?G%I>9piCd zr?}`?i<~bJT>b57m5EC7$YN=Yc*l|L>RZUodS?s_(A;Ed_$_?!WG& z{_Y6?ZGgNI>eX?idkFYNn9KxaEY;-B`T~_SN+3=3y1&T6MCl6SuQZZd|(KYimenp7c?@$V0etD>FBXt%evlDP*@lFhZX z!b@L@fn|_V!M!|JyeJkeA?>!oB&M6vyv4k53 zSJk)v`FqKgH*O{u5M(c0o(>!yC=6i1dMSkl?&1azk2i1u%!0dX2m-(T^*<$_eaPPk zv`e@{N_|+lsslk|j0?yUV%C04Zg`}}cA@J;tA}&0@KdQ2!-aI0ywU}*{?ihWG5&7v z`@th>?o&~Zd31n#QTVZn=%+MT~kqNA3zRcK&C)c`V-S2`o@M6@q zDUOa^Hvs?Zj@##}{hR-7(4Tmtb>ir~s@v^jy+AO_?=;;zMwypm&H*F>-VYTH1Gapv z5Mc1=023EDwF}#$Okdoa7hMur?1mXCfxDcY!)DC>PMY)Poc1$7@z9tR1zGV}93C7R zir~KjLhgPcHt(LuyPeY2<=Y~^$z$=VAWUwquO`S$xB}=e>&meY07v-Z`Dl)lL-$dn z0)r2}SD2T+C`1MLx)&Nnx`-v9G7ip-Tizbu&dvuP|9q7$pJ>8WJy$+P6y64n6v zqbROfS85VWh`*i3RvW{NEo`k{|LX^9$zx2&rieLy2~(|R#XkeRu2J&$+cU*viXcFv z=0kw2^}|#6U91)QEl$i)55kl{<7E~xvnxE+YOW*kP3CN2tqH6NTAd{Rt*)_b-j<0k z`1ONzaCj@ZH@6i5t5zeF`PZAjy?qw%pPTu~5f0kVT)*H!NzyEG>;mS-h zGCqduYA`f^1NrthHF@#dL+gOC0$RNModUt^ljj6c?r;@C6N0ueH3k`Lc~lxW3J&k@ zOSk@=j#9=itjU&G26uMXn1X$s<<>RB0%!aB2m0PE_Vr)AJ#_u+PZz$nMAnJ5u41lO z2d|UO_2=O9O=c8zF8n7a2ArA?jlz6tS^)>JQeC}$ zJ^B6r^?Sj*FW(03WCjRh>th%Q2%m^}9^}WEFX>KG5?j+V+_oQ#fnGmkC>pl@er&k^ zzjEh=w%xsj8+DZcP~FMl;H!=?j;^nvETPtPYXrB4RlU;uZNE9YElqh#%iT&ejqbN# zi>6c=VzW&GRw6B`I6~TobfMpv70wHnET4|0{Kfgha3IRrH`*$q}_@p>^fuAKKytwO`{b*vCt#tQf zf3pi>Yv5QQ3sfOsQg@kVU8&*W^jmm}S9xkZC=6Jb$8wRA>=~azv$>)9D`Pg&%#<|h zUk0KZOXlB<=AS)T_tPMFJA~O@hCe(?)(YnTfq`O}{j=*DIS0fZiJC! zyS$ZDaA&PhDX|Y&*k|`l80g2gEDrC&xRGa^)@xJ-bSG&gGizpSkh09=m+}!iFrKh2 zy&<5+Djx|PV+h+7B$^X$T_pgVFf5vSyvR68u%CI`MsZ8bOCY9%{VWr6vhK){miSbY zhl|@p{Ffu@T_WB}Gid4xk^JKX|6RIqHMIT#BB;aX-y$G$PzmoLJHGnHbp(RGWM^?P z`R0qyl73RI2Kok>I|K0205yydO*MzA2ybAT0cuYu7So6s(%}?f780=`TohBoZW*1L z{hR8*DZN;;z2jAz-y%(O469FtKtSue*UITm;hnIb04^u+&0t#t3rz_N3sHmcPwWd?=@lS^-2N9!MnfYtHoi4bBf{?k$eQ50gu;W4seZsSf^ zo}EbMzIu?%&XX~K;Qeq(YXzmS$%>#{075}aPdZgUiXj3UvjQypt?+2T=9Aq5yMEsy z89G&PXlG}80ziw*0jQW@)oXy34%JD>qc-Tja$J=#RH$>e)trZ)e1Q5 zR>e_sfakQ@bT?i9_U&Z+!bGCkf8yeWNcjtGzQvm=0DDZZCfD3UV&Bh2F|f<)W8 zdg20rk^jT*t1B=H3sTE#8vmF(Esm!RjmgRiqVX6<&(@9pIM_+#@coB|-Km zeLSh(Dh8NMA|)Pz1`p{&BY13+xpf_$l2&B1pzDA7ogeJXttHQX_W94KbaDLc#07xl zy=OB?uJjlE`Re<*YHpn6*wtl!C192}mFZtY3Vrudx_g&?sk7Q-{wpv!eYX$Mo_=+2 zHF9y65derYj2XFX^%8^7lm zD-zE*?z8V+Ek!2D68UY`+OO5b!DU=7>lAldwM6f;BIf&C1j98_9LQ9{tXR_nBK{xF z;<`c*(6VsU76E&=ZYGmx{$o>95y^klduG2MAO-dU!GI;=07fRpLa=aytuLOgCHG~1 z1Hw*C6NK9cSKrj(!R*Czpr#-1sQ2xbApaHlc@%49SR@Q%hF(4|+& zIC0g6@82zMjd<4&e$=L>BjZ=gF$y}JFDEq6>1B+RJMCgCzuBw|Le09Xe=+&NDzn_qOD4n)?FjRoh^5f2wo~>J9Fkm~Hf~5hhBV?gi#j>3yooq9#3IVOI57EW3 zED^hqLuXWQGA>8PUp;YZ-VSIsc8?5a+<0vQVW5vf38$)vR;uiNpQ*f(-|0WGp!X9) znIGKwL2+s*nSqO3XUZ*?cq*Di+(_m$6)82}1_?jIbXrm&YIcyf3ya?1X(Zy zs9Hqu!=T$P0CLZ`x6knY{8#C7Ope1b$p~^pmTLj92eq2NvDB|eBkm9Hr!`p3SE7|= zi!}1ATKzcf-#HgDE#u^@hFB`3TSAv%boJy)zqg))bFj_axm~dGo6~z(Q`6T~Bjaym z-VVFZfuHl>Py(3fOu#wtx>i2q-rh3~xxbc1nLRMD#r_k(!rb^B1R}}|rC*$rSBxGJ z)3e3C33%25Ji#RhwG91{}^{jy(Z%y zBV}ov!{D`>ll{r{iQRhc67+N0jGwuTE5!XX-nry%i2Y%{{fc@D^)z#10e*+M_nT$o zDUxJnlrx0HxPtv4&E{>9Xz_n~xIxVda2N|#J52xC)L8^*kW7G*1+6Su-9~Dl^-9g( z7?yta%IG5k&SMAjmF0tk4nD!eMWNYG_g;$xM@t6QS=AII}avYwPmBFEqG(zzr^#% ziwoW-u@3DRqNN!%cKK^3PxStV2-Dn?-Dk<)bzPXXU?ATDrsOKx{}dav)fCRg-^U6$!JmYr<)+`o_O+kCD0SKI!0MJ0+y5fVL~6%Lr7h-<_xLTw9~8 zDPgJRro3j|d!{4~Py3snWkHt~p?Xe5%3)mZGo<70%2qFa;(eWGU04QiYF%ij_MbgS zA;Tl>c=qYh&ocH(McnmBixtV|<58R~jR50F>#z)41+#HoK4HN zD3@^&8%UHp%=am1?0J)W|Igm}0l1%Ae6m^n*ioG!r|$PT4XuhCqARX z2LM9mp9ugn6fJq@B0I!^Gb3&bDnj*Iz(;}TGG(;KsLOzdCMiAYhgoM@{lNrWuv@cV z=Kf@9n~3;wWO7(lvAD+s?scZBYUz8%E(nw7J;lh(B`HL_h@XvZ)*br098R-+0+_MHpv zsnJiJhwbD!sR7ce?r+se(gA-=b<0Ki`r$}%-Hd?8$JOti0MG!WT}m>4bGL^j;uND+ zt81sfx`A#S|0NP1i5moSg_4_E0C*e6Q9HwJQw@(B?nvMu`l-3Ty^Ge;zuwzt#MZ%k z%%ovT#-gE1U>yQ$q;)l{00srVB%k&ui~Ki=T(hgegY}wsFm2R;(F+Ixp3j5Ts2*JQ zejgSR;9_3)!aDx$An4$j!1Fi&%zh+J+LjT@v;a6AW_Ijv3JJOw2$~N;Kw+R0?WF~v zy?2lM9$*1L4v3}&U>hO2pATes;&iCQe*Y4M+uNxwx|{3NrAp1DaLX!C>ZVgLI2|ol zL>x5>4xA^5ldQPo?Dw$>07m{_D(2qBG&8Q*gVlYkgKZVCB9j4JS^+$t9^`6u#Od!8 zpk;g>@XuiU!MKlh+gE-qf!kc)NY>d4T?U*M%6{dN*8kYOxA*t(S$ms*Y0dDEcp?kL z+(xXx?u8Om)6WbZ(|_!_5fgG|+O}r776yy|XffI&SM2S|xqfdpfX&((7l^M>Xo@{1 z&~CNj)M~~7SPvdAd{}5U+2cVN8+YM+6Z^;K>A5Ygs{=Z-gPUn%bv@ZwS&I_D_PJg9 zw`-!jz8S|3maM7^(S^AS7g1WC??>(`tPmto_lk4C|6k08V>JfAbY5Mw#+L{SEk%v2 z*B3WBXK#*G0GLYFqv%hL;6Elz$)~G}{$#iy;eKa&kO=NVqyU!4T{;gwHnpY4OBI-& z)ze-^{!3ii)zr@Co;^=W_lJ^E>Yh(roQ@K{?YO_p+j}q1FUHwt?QIfIYr$9Xo0`ur zb1#FHQ%C@6q%w>R#w|1Y=t;V5tiFFYKC0K$L~c{0#G*g9F-=|^F$)1Ub!s$|dLwMl z_XNhFH&!7sw27FYsPF5CX?408|S@ zdBRjI_j@aq?7Le=Rg=)aRDfz5^k>lt7grjod>}l})Q7S;&1oKzoK7qwckJ6(+f3G$ z*C3vQLCn*V=SO!y5eBHX zueD0fm(mx_|Easi^I`XhhacfZ3&8r?T2fpsCS_t-)Y|*nfWvl4=DxAM&i$!4i7*hz zw|5}C_s`T-m*Q(4_?-s&dIXTa_ES+vq1!d#@o-u$ESmA=BpaRdXlM@;Po;B}x65^Rh9m}lzFaT2H~7n@P}4m`7)Zs}rD1Jjh| z`%W~8<;v1hvM@WBEVE+r@%(|Ip%fP+IHi3WM(|h}jL+{Dh)DB~;j5lNO zD*7u{TPy2BR)b8B!}{_9EqLSBx8}HO{#R&x@!erR?tLB^6DaP)N^W4q)F$Y$p9BI zpDl4TVD)YZB%7E|KmG3?CtrQ`MRN1b8*F|0Rs`!=7|5d)r@1MA>&vSVSMZzv_K(TE z&%a7GSBL}PSqC*41@(QBS1p+JlR#3CGVmKcFWq`L@^YqT>>T(<;+{?$MHDj8xkAoCcfFJGi~{}8qN$JxLK ziKd070K0==G5d5j`S09&{@GkId-pNf25|QSnnLU1dNp}YHNbITacT5`i_Z8*7G$zb zk_qtL%cUs!{or{y+~414nI0dD0ATRn zNM&nK^9Izj<5JC0L6z}$Xn1IGS^f#!)LL{{1sR?SP#qi z|2;N0UJqo=Fh&^Y3USG(ege7R z#s##fdIZ$#)|OTx5_|T^^W?Yx_;GUoi@V9^|NS?~^0S3xhaB)7?V|@QBmMPL!v*HM zU@?xU@gNa-iq`+Dd+gOoPVpQj0*Py_=5H==kE`PY$z_grFBc;9X!OXfP=-sBF~^= zCRTycNC`nUo%nu>9MmBk3d05znz8cu{=?+%r(Yy@fBQx9#jih2R&X(FE)xfUOW+_J z7RRoP7ZI8)uboRSk#>5jHpi(yxlLUm-dG{<`6~1SWkPGNH6iLO;ijI^$%|bE{r!Dc zin;zvZ;#&o>NoRW9&ElmVkbIo0hmhWib;Or530Goe+T)TM8fI%$p6fYYg*C`ij)>8|HYQP~)!XT5;+Of_b1#Hen|9>-2fFfUP zHo$=t8pkj{UY!_Ze}TT(1Hk>t9b2z827w^l@Jxm8n*`I#>^_FMB@x_r_&uE3B?!E8 zP};~*u9?!Xn8P=W1%V-FdjcGOht|Id%M${on)kDB9w$}Qs<8_bxB_O ziG){oHuom2Y9*h5?tW$VT&MckaRH!H9q-hAl^!*vNqnx27m^Wdw}TLJm5_7STqJNr zkKZh+;b z<>dL@M-c}QsZ-g-hnyt@5d&cGV6H-W;0orq9A#!HCyH6tT8y6I%=x3g|7CK2`io@h(hSFR#I%e~j^WZ`q;J=+7DX{Bbx_g_N+zZKZ-}PtuPZo&jRjij|5dielFQfIS0WLj!o_viM zKYC$2x$*dI{Ha69IKsgA;6Cf4vlFk;s&fI9~$4Tb)nuo z$>g`t;-m?4FdF|hr7X;;d{~QCsBH}4V>IQ@y8pV3tN~y#pPXE(!&mzl$@=2-hz&G+-QU&zdHH*KzMBrDL2Rbp9+Y~y?B z*0to~t*c3CXeizfjgT#{+YDG;(NabIfW_yyz<&Kt5fr<&MDG96M$`dtBcNVu9s=qK zGZY7!VqUf@X(?{AMXKOJW-V&|u4n5P9V1cZFb==BI)e#r27|@~#Ml+wy^Y@4K{b8N zju!nf`d=hEm(1Up5+EM7?4179k=M}BWZc}HP^V~p?18?rKombw(L~0EjXvAr*9(@& zk2XTuQmm^80qdMwX_|@4aEoo8p$5tS7pDJuj3cxQcZlTBZ8U&Hpdi_YA+Oh3gMe#J zL9l=jFh+48h0tZJ0M=I!tO40Wz{^xdFdzltUbL{{9CH1Oi;nmvZf{5Z56|An;eJ8@ zH4CK_ru;Icf``Idl|3rHU%dBZ=}~DMvL_0uBAW$W|M&0x@72CSe(A41{5xX-+Sc`s2>@HSpW*x( z{5M$PzDU2{ogh=dlJ9FyE~Z z%|ADU3t-QjgbM(l=@t?G5h2||KNUREC~?xr80sPx08Rg!1fDhGH!h-`Ud3HB27#Hx zY+WGuNI|mKT8BUeJ}m*)$m_S3f(3q_EV*T~X?>}+=Ab876|WpwB`?2~FSm_qzPxSC zcLuT=&!QjSAOvdyEYB{5Dbr-5x3GrhlQ7xD8Lox41U>j&gT<9~$5HAYET&rqS9EQ5c>|TNFoqwJ*!l!5=of!@`wxHd^RIut(XMWHOaK@g z?jNJ`KZKzED^{M&ziR~mwOe_dnw)va7e|f$PXqr>koUD~j$6Nva6y9eI1UTIK^-k` zl#|)}Pg2c>`Pu9L_j;K@8VIPXqBT>gO4d3lkMl$dbGu}Ybg@l{x9>gbHJOwJeEXiQH4s0XwLzqdHh?a6@%#(Yj%=9ffr?GUHoZh?o*jh`Fz#U(G=Vb{L6pv`aM zd65%iTv;G=6W=b)}j$ZXmLeE=`@0yZ}u(L?jA{dd*{+IBO=Ub?Y(uw zcaypAR64y>Q=gzw|I%Db$JX-^iWXXj)VeJ7Yo>@UBk|}QqfEo|+USMC*o=T3Tr&y- z2IuMqSb>lqW&_BWeKY*C(f%}ZozI7O(|AD;NLy!cIq(ssENzWcOuf7)G)Bws{a ziPK=PuIp66-{M~BlXi&Gl-x!{i{g2DDjF!}n*kx%|}(3i@5%6XNLBI?E z-$#7H*#%X@;Az)|uFWW=%-J{q*JuG1R|{RQy@Y_!P-oEv;JF8#m$31jN}e>{?V_`_ zs^EX#1*LPX=4OjVpP9s5Q?BMlaHZFPfQv(75(K3OlUHy;aL9xtDry%!oOuO2ZT~}f z8>18J6u7%)#Y^FSu2IwRbEvRr{?}cnY_vB}w(XN~if<^cxNfzQ7`qd&;xj+*=7zA6 zQ>h*p7`XYT-}%9HOYFC!zwNaEw4+DIcn^JLQ$quR4A*f1yfrfrNTIK>*HX>PS_p1Z ztrpg6FCieYy-L3SzqmOVRsXCt-m@CLgipKQ=~jsh3ne0al6j*L{=u z{oDQAZoi*Gk!7-rWG_wPf^xm4DX87v^W*PdpO&NkYI1F|5HMO`m(MTfNV%jgK-ONq z%RcFnGr9(w8~sT>U-@2NCHKu_ar-Cq?8Bq>?tBXXK+hB|cWg7bNb|pefO*VBy_QC@ zyQ7_ll5(%Tgn$f5En;T0KP*S*u~hU+XmtLa=!d5z$x&;eLJEL;ENHRY+&SNf8QMP} zaD8gM3~Lltfb$KN!|^d&w-+ebr2?q1Bn-P=fJ`%sZ!xl4q`BVB@AS7NxV|j{z>nVi zqw(Ql;oa?|asfcv$y1>(spFRZ z%&+;91*W#xZh7mJ2$I!FsssL*sDMQ#`md7rp8ZYoakA3NoU}y%&>T_8SHDjOZUy3< z5@Fo6GKD5Fi4`CsL>T}yrq^~MAX8IP)=d%tR{}>u>m;9&Io=&g+fJGcC%~66wEqz^ zMu3kkeQn!gdN(=%0$Knh;Krl4W`(`$_B^lI0W1It0hYNf@V*y^ascA&`$$@*6=1v4 zhk}C+&FWnw9zfhW9W7TR#r}P1Fy#2>X5Y2i@0JCCsP+HO@(sXtOLGJqp)FB~tq!Eo z-vt3N7z9E-j49W0zwSKd-=_!z8XfM@t^-(VTrL)g%tVPA+L&Dpo;d|oG>|Q%P!LFAFZ06{LoJ4WOt>2a1QHK;E?Qt7FUCSl0)ug^BCgY3 zYa9ZSd;=$4#d8&2|5e~dxyKL9@^l9|(>2g%ST zAQ+kEpVe>=A9rv4yn-gu6AL1ZAlls`a#oFNfgSQztySJk&ipHPJ)M6TA0j+P5gOmR zSPTtxjDWNr${hcEs=*n=C6T_N33s!aEK?cq{v5mTodfrjIF}ul*tg{OrqVhTlt}2c zCLmzYqh*%rR}>##Wa?z2KlkX-5CB})Y07s>-v+S+?~q}# zagM;)vH;K!guCgY-AT2McoE`l%Ae8yJ*!#$Nphh?)H`;bjV-F(DMj(wi)~r!-{MqX zA&v|gV1=+~pcVgv+4OMd_;29O%5b@|HO?Bt3wk=5{dD0Bgvm@z&O$C} zklX1P!O8Vfjf34ZBf?mdnWnNLxz`q9Im-fo+N90;2i6Za@?Y0cnvRx!*Lyu#2|P*;+TyA?=k<@@X$5xp(V+{UPGJY6UKxo zL&jFkt=;-qekBj)LF0dV=LgmD=Oe4l)&po-04Th>dL8T2pX96kmw=C{f2X2Z;qTH& zAr{WX(KLVCNzaJwa89R*#sVTjkL&(Mc|Y6b1>!Qw^;1*d5&wfY)skTDJhArCk?(W^s0>a)-xQ>*FR_LcxrG~GnT)3{Hzs|*M1~f$>Phw8)xS+X zN$xf>=IolL1pvGwSE{+f50RU0Jw-8>guza!6gc2==;Aw@s?42xd;Ff`Ch9H z0YQ`%7%>Iz&8dN8iKy!hDrcQ#0l;8K3n1zD*Ytm7Ol>O-2CNOS-nYjE0fRG9f{EDS;gMw{Y5dtJSkwpPR)NB^Hv(4SI>#ugqmJ9XJS|r{$?jF7E zo~d&#L`^j}*Z1aNLZ(V3+&0-fb-4yt6PVff#5?1aM$uG9I1I{FYt>wxjkD&BdSBLr z0ANf=YYP`K3Uvqsl}H5?yC87g+NwGB1gPqcugaU7?Pk*gK(h}x$?H?X;2!PzaPyJN zZWTAHh-D1zB>Q(7pRFb30y~aLd~OgpGrvwP_Hre8bjD1s&W)*$Z(hV*gm2Kj=(9*` z@v7Il3jzxG7e`VdU(3x{0n$cIfc47hE?T88dKed!k^VQ?vD6aCqu`>&f5gWg9cvFB z8e0*DBa)H7bx#aoER`P7+Y?)kzObMKMi@hS!l+k{)ElEVLbt43H1I&!{ zB~zHADis7aOjX<4Nm8alAYR;V>~ceWa$F<3!Kb%yy?uwmHCauLh(+mj6Qm|UkbFJPT*0f1T8wBrHaiUKwv5b&P=z19K*gj+<&a@GtvjwZ6TZM8rw zEqt~O0aiHB?0S-9H=EyP`;d)K`7q75unJz}A=*clMPvFz@Ejo#>#UU%jN043;*Fl;iZn`o@){}BV=Lf^aCPw5;Ud$)yI zm|0=|HzwQ~x0)<&lpWhtLOryR>RlYv$Y;O zo;CA8=h?MhB^pXbd1De&`QT2nKv`Ee3v9GyxRc>cc5d|d?YItIl+AqzRL1q>E`Q&i zw$?!(L8yg@1CXBiuh)(afh{dY5UJHN7EQ88)^KNjIa`jp5^fMwIpgI3&a?pF5*Z*Z zNM9L=5+U8KZ?WYs+Fa@AxO1-WAU`w(X$(!vGqP3GW6wQJSUlUYsDk;QkOYvC$7cKB zxJDqlLsYn!B=TRgauf^E0?=z+4FRLLZKE*HH>T4}mETbuXdXh|OcQt1!j)c60(~8Z z8ysCCtHwGObY-GQ0t$^2Bw`wQrF!2QK8Q4MJ4GW)CLz+9R8r!8is((GL4)iI(gRx z;P#qoEt=BSF`+3KP%sdIV0Z6;hVS0oh0;S!rqL(oHmD@?&H``70<4QjbE{>dEwmD; zLEKdTV6SyK1a_xLca3fvO}bGg7n8YKUZa^RK?}H-As+>}aWD#0LGPi-IL>VVh28aV z3tyk9t5P4dk^Q>XCKRwS|2r1PVG3%L;cmmT&+{#UCoqZPhYUYv|S$!%>mJFjP20C2Ic5(%UNu)esREPc8ix&PDT&(2Up!?dCA zUMsP{(i9uhYB2syktyw>Lx|z&Un$=<&E;I#!PBn?+ zB}$?ixMyzoFan7}$M2p|9@bpm2AhQTo#`S)WeOqG6c7-5d#pDIye_%Sl+9}tZQ9t* zMTysbY6Gle+I_N8j`)jM1nD6UHlFlXf7PpdROaOT{*O1_K zvU&fs%yYU)iuE280GbWl2Q5PL*NP$W?FodPoVCu}+c}IXpWNOGpqVwGQ5^I-59z`f z?gRyZxxx-iA+-VoP8?AH$QFe6DxiQAITTu_*>A#>WixH2(Eja%)~x7zymSU;XZ@~G z(ieK*T*}-x$PigW(|^32*8N{cVOitg>%{#3m|k9vI?cxb*Th^@x}QaIFl6;!%Y zssL-Sp9MDH0@8$;Ps*wNKdEk$mc>N4UAu+==VY7t@Ep_Qi=U^bS6;n;9J_CvlYVTW zac0_lwx&}}rZWC?&J6B3=x(pvI{xssYJ9p7vh7v13c3G5=S7#-`_ud6Xb{j{<6IgP zC^H=!OtVs4+s<`8tnmA|hKsEhs=shtW{hQE6CognfU9Dme^`bQ*MroeTi~{z@IUY{ zo(UH1{>?2|8R>Ugs$}p};H{P`g-%^*mYcTtl(@(MkU($0B`Nbi8c5FVubn67MfcA- zn!Z7Qn)>HgQ*-|+3W~=jJU~}KgCwJC;0cat_Fw!zKmXZME1f#i0>D84ktxhmDs&a~ zU$gP-!e;o_u2IzH`s83}vbQGEv^E!?i?G)kgMb7t-8R?MrD~L0UT1O5;a{7@4d4QM ziWO#q#imL2>C$$5wuYNV;ed4-Ej#;%_k^VfAsocH>wFlYKTUw1+Fk6ixd=efL424~edTF#0yGsmqws zA1-Vq_vfLO;9%BXzRN!0%!gYP50YW;RH?ns@-M-V-<~cZ+@@(_?mURHNF5AoVWehb zBghp9WF(fz_TGKa7iow=$l==;&jLqb{NStcG710+u72+^3QkAt0q?n_*II-?W!K#Q zY}JzGFn#9i)2AEZ#z|cO&LO;D9onPtmP&3dKJE1W?A!?eN_dtE#my6v^(u_Xh1Z8@ zK8Tr53H&mCY*X7}^}3RK3$bAgS6)&hF9I|InK8L3OjSs0$_L|@GDR^qz$&>av z2K4N?)oa}iUcie)-hj{Y7qdrUs$VRS)_>3x- zn>$nyrQU@&q6IM&25!4U<#kwV~KZKmc*^F0PL;ZM)jn?(L-C(A^-W z5?(hWlCZitZRQzaK1aq780Fu-D70Uw^lPY3etR)?+V- zo0Amb0XHmwWt{=X=?*NxOgvW>rkFj1~&2EPE$Iso_Pdc;L-Ppq8&eTtu763l@^1mzsfnWUS z%|9A14fX#|r&*K=g2{`>qm=03f}CGV(||6*6mCr6D`Mf#jHUrTGBkh3WxduQ1k|WA zt$KLRZ*&i]E?K~-o^`=oQX=Y)o*gc6Q#;FQ339`-ipWCiKo&54%wSOE53bI^Nl zr;uSPqY>Ed8KK73Yi9!jdo2Kyn4$Xmj7g!!djrGEEC8mDX-eKkh+vPH(A-lL>ETHO zU_72nAOJFXV>~qTKSuL6{r5g*e+9W-I~W2@3jhb-<_vubBB_ZQ&g-IovIGJl07yuT z5AZw{scx92p#o~8wqBu_6b?Bt zELk0z{}k%#H7y7eB`iBIeXTqCX!XU$bL#!Ljo)j5vU$QX9cKf|)K;{VNYuQd%6qTz zy{72&oZ_p0RS=-kPraMX{BcWP3Apw&VWYsbxA|w4`Sa{|&e`5(8k=r3Kh5AP>=m@U z^f>A)7(b%N-mRY|h$`=m+{An@?(q_o|#gwqoM_TjLLe%o9%7WnMSPVx_5 zZ(*uq2RtI$+(qyLfji&1HiT)B_tc4>Mi_{+qkTkg-)(z;9smJd1h&1z^q>;WFK8`q zUcBGE-v$JH4F+js_NPzRao=nP<8&jBdToID{JpEgFot|`d#aF3an@3?w?Fmn(T?wf znM2JCFf&+5&2yRL9=uXfQ0XyyEBIN3aaI^t_?o$1;uczuOpN!MqqU@DO}v!hwv)li z;G;f5_7KiaV?P#f&jnQE%(U$#E{hSi<{hCU`nxS1tmexYH{?c4HJ z#(ifxAG=pBZ1m$BE+lyv#|j$-0J;O4pX(wtKm60D<>WE+;PPmHs7r_;}!r zdLOnG0;2c|)>;L#-#y<+QYg8wCEQ_4};=>UE6-0k^et$8xjWGh(NIvYqt( zVxTy%_`yg2x#|4>teZy?03ehbz<7~`!ji6H-8~yzkTHdt1-(tSK{Sb89&`0To`Et% z%SGDsKab+ymhce|6r&G<(4z_gpGIHyVokH4bAihQ)c~y2rW?nFe;84Jaev-8Z(}fh zqyHn2(9d36&}i>lzfNXtKg$v;-fM9(P|5Kuv$70kl+dYtmifK{#j6nLDC2kGdEVR| zyW)=ZEEFd8HVe$Olex;ot@W@MPA>!iCbGa9F-SABIykr61jg-fqycZK0H8U2cpx>W zyKu5#xU7V8jG>Kl5o=VfA1lcq!iw{~LmyUs#;YLP3%>YDd|CmwEW^;dxo>rNwRMLIK$H-gKR?K4AI=al7Y(0>C+FF8#-U{po+Yd(^SEE&zntq5m=S z@ga+S98&f!oskco$!6e4PQ*h4o5d99S-B?&3j}YxBoRvqnL9)CKnmH@17Z3od?+1g5; zu9bt?Pf=iGhWy|QM7FzB;_F@0Zx`G53iBZNu|$UNt>VCsw` zm^wd@uw}F@JNtq6ih~!=w(RVChvQDT?H*QW%|2jn+ZB3W!+RdCJs@Nd-}A=)D1^E9 zhVF#7W(K3KgS$$;cc~cKxx#=)yP9)r`dvR6Ja4d7u$%m7S$VQp4u(?eISOBo$Q%d( z2MOmJ_PSUDcgzy9WN)u^4gxHbWC`J37uZ8ei$7vd7mK)kMo}OA{2>sqP+Tj{Q?E4# zftCdT9#j3R5C3j~^TMCK^Ml*0FO`5Z*+i=toJhv8iAILfpY)-%UN&V!ntnx|T*rr^lOc8)Cn*5Rhh z!oI8VTerZ7W#bprWYl7^%4F^6MyEz|FrBUSwAa6rnXa-pthsT4v@JswwVYX#J!hF` zRu$9pV*RuO96r@KH#-ssqkU!gmKMG@Z>}#U zbck}jCb6Gkey&dpM1Hq(CZmxdb+XwrO_`5KPm+O1ync%oW2%ywCaDV})nq!^k8ZiQ z@Ngc>WN$oPE(4D~1R)FPP&JZ_jO$p|u8(W2VSbdh+tjo?xN+5mVejjDWgj)FQo5Em zb_+?>dXz4iuja}yAZuI{B9PJRvw+rGzwLcj8v@eQ?cLzLC4yXZjTsc{reaq1w4AR3 zvhhByp^W+0zU!I!c9ZaxU$8*_ftpiKHp|I_c8shi0JI3jQ~)se4AXPpPO^aSRz^QF zw!`68viZm?H(*y_%w0!E>rNK9WE{n9Jq_)@e?MrrTSXfuo8b z7F?}1oowFS!;MIutr)CC90?WXe)E{ENYQGv2B=^LLF;T+Iyuay?(JCB`wec?jiYNo z0l=6JYcd!NXthUo?X;Y3yW}%10G8sl_)|agK0#IwCI4fBHVv$(1AkpN4;ISFB0`E8 zKx(PIK)|M^{?LoH-zn6)PUcu6kCs^9`1(J7vL5SO>x*;5*IXm^q%AuidgHV4#U=#e z*E>7;zuw7L=NGqg&)PApwg><@%n>XCb|b2q5im%{&TKGX26T~5->eyf25oKd5RMk|aiPPE$8UO%507*naRI?A!t)N*G;uBkYX4EV@XE*Jg8-jp@!o3{S0^&dj zlQ7q6!d^nH1;Db-?j>N=KQ;eOPlKjq8GNMop~&Z+TVB238^LpmMTAYyi1JvC)SW=G*(acM+U2wxrh@gMjeW+%G)colnb;(foJkw<69$E1AbK>&E>;(m+^##Q#>> zUI8)$f!1%;%Hv!94RCsZaQU|grcW6AFP>Gl-Tt?2tt|q;&ZndIx9+U`v|QPq7^Gkh zz9v;- z5FX2zcGlRC>+U>p9$0p6Ocava6aB%gBYpGb>i6{d{`OYxr{(h2y1SAHU(e;^Y`d5oK zC%BQ>(Ud%lsn?u;12nBx=V!;!3|dmNRXCL3rF}k*C!eloWa}))n~3_x)V{_%JKtrj z0R#1x}xUH zUaJiO=hJyj)(8y5Wn(~?RM4uMGTU}BSQ*7I={XAg7jV&C8DkeogvuubXx59(kdY|N ze?7NJtU`bC#x&vyaXxf6tbVmec)q?3*BwKSwZL3v75wg+y@PV31wh}wFxTo+1_3nr zx7t*wCRpQfCe}wQtjfZJ`Hi0Ny$`5u6##zr`Oh|geESc7vD(-F>w$rRoB4e8dpz&< zzGrsbpE6z<@F+fF_nW^#ir5JIcFqxD{&dOUOC&~*CVK9*-q*@)5(=3Bc$j>%z?Q>M z9wVG9gda~(P-IXd#Fx;Humt$KUV9Y+n$p+M2Z$UuLd5 zwf_0FwEl&a{QlcLYxk@DZ-qs`wIaQgc6zkh(hb}ClM-`rlNb+$yr&3t((<15?Nz9@ zRRHkTf0gVr9nhm2f~EK=f`EDt~ZX8{$5yn$UK)| zrsmE|OzQd20pSbx${>t{@RT@Gv!LI0zB%9a(@YT$1({x}4FMq}+*~}uF5B;*-h9`- z>%rf;{#n-36)rz0o<` zw@6=?z{AJ6ErN$S)Y>ipbf}*ly{$;$Mlxm0Q09gY50r>LCRhxU@f$M}Aa9l6tulVx zN-&?V<~9Opo9+}3+9Fi~7fJG*h49O}5Dk#@HxDTs1oDcs-@aVr`B(L281WwD)pLX{ww3fWQf4O>*EEA{}f_=CDg+8xSGC9tC^YB=6HIo zaR}%!St3A8p?QMT&rZls3fh+`JX8Wt4Ai{OT3=t+42|2#O$-ide8&z4vB$w2I|}|I zcxiRPw{Zmw4^V1Y#&gi`&iH2vP#(_PtjwOZ`e0!zd4LO4w~iSwS*o9cyw@E?up2_V zSHm~6>f4l&o?s57pI67ya_k>H*-E}y-U@+Z%f12s?q7nrRe<<+C<}kn*O&ihE-AGo z=l>wo>j?lt#l;LF>gQLpgcoB4KzoERln51815t9-pdrGx$~;6onZM_47}|VwJG2c> zvDL6r@Xe`Wm^$r81@XC0yXU!Xz=4Hp?1U}=Q-idg2%maQ0$jwRq5x3Bf9w{tY6Y#% zU%lrY_C4JX{Ibiw@3qDuz;X-A3G#Wsu<>6uccoUpan!`h?q)^08B%s}@O_-d7i*b` zT?8jl+Ys{D8Rg)goU2cTf#!BM?9tNsR6jpflBbydBW{Q9rFTb#mE=(+YLIn4MV5~# z*H(HoEz`AD%@LykagShbk?E)pJcd~W_4F`3-rU^zXtT6A_p{G`UdJKTdZ#)j0BE-8 z%afZ0f1Q1YU5Q~C{(ydHO=p8rnsyCia zf3FEq6QLFWHE1KR707C8(!z?40m3i`juYE4!aQbm-~H}5n{#e|Yxem#J9L_!1>Ay= zd+t!bT~{)a0vz1nUu#8Jj#xZf!vb)a&=&pSMWw(1Ht!*RYB3bYvIz7Jb-Sx>s{$pQ zq=zzFVPNcwwfhZz)e>M3uWQ)#+-!^W%u7ZCGjqQhzm`(X{$tG9D90c)Q>%_N*)6hl z-|D8JiygzEX2;XO^``qRvgsZXFr8~1696<@P!;GWmCDXFr0E|rb3bGlBb^&cd+sc9 ze)@Ei0v}tL1q;b7a*)T=9Ip%pLh0*!`@%pVm8Pw`b7>jcXKMtqv_Rqg$?7&{F(iVu z9fAeF+zN?yiahlxq}U5cwC_z1B8cdkNDJAx2wV{7p{=rTKY#8%-)3YGbhWV+1f-VP zP*JU)`mqXd@3M2-HR>=}@l7|&al=hg=e8YHh)^8O)4uaC%_lxat$k|!mBActxDsn` zAa-c{7N}GBdrgbsrE0eYlTJ%KWScxG))f$w6`y&89f)>krjBmXM}>jf`R>+MetB4`xX-mvMgaH!@JDa{(O0Fx{_in?N6G9swmGoqgaIn1YiTle5AXt_ z{S=GaUCK7MP4z*lgfS`Y&SKiKiJ+}C7g4q-TtKEFDZQ?3=aW_bHvQ@Lc~t7ZP0Hc>slOLp(gk?G{ZO{O9l5~*^`!>2T@(LmUc-KP0xLZVk8~Aj}qm% zT}=y%t{Um`#nJu{ur5(qbp~ZgnrW<5r1tVmEiul+3K<;+4)@x@5a^fyaPZ^0|5MRb z^Oa50(W3SIS;!-lGqO&I1VJ;QZ4#f;^~pjYd6g}IT?FnbW{`_(Ul)NU>}H$J1R32r z&vrrGpD&}qV(J>E>evvj0+4+SX*Z&kSrjM2*qR+wlh*^vfL-_y39_I%5Tn!bQpnK~xPMcjb8{oK8Ahjm`7iJRuPf66w` zGN>!#))Uc+sE}U!x3%uuP2b7XSCIf*pAQzdaex>0xsR1t; zzUF*4?RtBfOdgOxfog_B1?&RKm&_koAU0T2!pc%?SJ-3vYl{A8$z3z|_OvvT%_xBQ zAIT*BW~b73Klx;4z0+Oq2>|;bEb~#5H%d?c^CIa$GJB<7n*>QSl$6M`0VoEzXsQ?a zwH_GtDnjO4+_u3!=w9$x$LzgGYLbLWZP-nr^>y8ttfHwXAxF2)+>2A-+BJR7LP~SM zAPuzSi#koq$V90S#6tl|3&2S=F-7fFA)p@#Kn=6k8izmzKVf(rLay^O5*7f{579P> z38?VkzEyit8~3CW3JcG%+KIn1Ny!FXKQk?*O^l7qB?J5(qU@-oxXD#3!F>k-CY|%t zVy3_UB2^kK*rd7t(in;ulVm)PX}+&CI_DfnH{3-~E5K>u{~$ObRqhvm{qfK4zA~;3 z{?T~>05b-Ob$6MnzsEmwfhF-S4mL6Zy1&VQpHU64T zK0O^Tj#U$)61+ZD&3GD80&a)E&QUuLG6}2xO+dKEIdt2cXrn{@a4!SI6e81kIb;wg z+R^TOxHMh?&Lf0?W9p{9WX;bPi61fJXBN`DE|Q6dhxEbEEq){JY7SpQF)(=5qGEX{ zt+;ubn(IxRb?r&V9GgmrRu0dPm&(EX*BGbQw9+z=2%Df`gz%?l-JAqfVr@0OPeDM- zo`IUW2lfNgz8Zs|l3-ugh#S&{npp)}>DzhuO~(ZQ(im0=1pM1lsq|k7lexhJ{w08Q zyWp;G7OMEw{k162a}`sdTAWPXbQ1|v8aO2LPteG%B9jn!QN7Nv{(lJ_WtF^TEY1yYn59)7#7 z{>DZ893u0@^D^!f>#~~~2_*u)htU~nf+dwT9hy1u_Dot0%;^7)4P003`wcPFDo$^$cl)fNro$3Ib~Eojy^mB%VlB06m7wI(vEE!YaApH^EIK6?8h7JN?(95mEI9CgYCNs_B^g{-{m3ddT3l zlo`CM_OE-@H~`o3eulsQnRdf$D%~y~(vP~?Li=xW4*xz50pJB#xBAvL2a?i<7QZ1H zn&dXz0o}Y^@a_U|F>8vpu!ZKlQ4q8iW0A-!sk*sP#n^yx&O1~Tgc!`>S2VayiFKJ2 z72Zax$KYN7araI9j29(wiwCK37?88Do<_!+`ldFl*(`He97O=x|NKt;Eev7-1E*C2 zi_|uj4g6~WI1+2!A!>`01pb-2d622F6aB7rPYIiXhy+dlWHuMIeskWx6K2kxg`9!` zu;yY5=a2&Y_IR31$GJ0(#4^9)m5fE4%+~E;Jux6~9{hC&(1{X)!4CMy#og+f6E|IE zGIV2~v|P9??<1o6ql7O(kkUZijw&Hu%m1cZ%pNcL@k{vijbWPL#q&58fQ$I~HTMr0 zJjpddXKPG()^D?>6ikd2^4x0tVbcRT@(-$;jPjEz835!md?=4R@-3UV!LbSe$;UI# zlJ{1BvR%nx)Tw@-+0`9zy@~!F>!qw=7Ce!pNWYr%TmaMfqb`sqyjLUG+o_D{K<|@V zxCM5Ic6f8DABlM|d{n=CUJ28b8vuR$XeA_REe*{qrtDhJk6D<{*p*Di@+ocs7nUaQ zKe#>=rnEPw2V#>G*O2uKy1B{x=P0P88Sg8$S^ebEdc2qD)@u^#_bv^xrK`bzY7j;V z*Vi2C-S?>9Yr34mEd$A8Y<i22zu0+at1Ftw83dzaGr72Yq* z{VW-2tSx+H08J97Jz7p$VH-< z20_e;q4Y=Aw)b8>z}*GqaZ1%8U3=p?&3@Tjv|HT7ew2+e=avQ|E+DLEeb7{AAe;h% znMkfFk8U(YtLc+DiMi2(KpxZ(2~sxJbi}W|I&S`l#bpGhucoFIKsX#WH?Q8Ivwvwg zcK+cVFaR~5G+@)YcD%L5*4dtQ@7{pI}}1+GuW(R#ny`AZq7xad0DL~S;ZPT#o#Kt$4$(GWC- z^i`REvrQPr8s)`ttDtx`m*3qNEwMg7w`(qJ}lWv;vY-;{rvg4lxJf3OBLp-dvj@9x_2~v}oLb#e9*7vW^xEV6i zSqRw2v0S=2$ut$r)*(>FHY{N6I5mn&XF>GJ-CZx5+pn;L<25Wg*0}(LC&oAxps%38 zk7Aa#8s{fZbGY#=7F2`L?5@WxIFx4TuV<+zLQIhLM63;Pl1)A?gB16kW8#bIpDf7@b8e$bkWD4&I3sA-!kUAn^Y@4sjl?|fCeD1Rz8cFeSxwJY9>qg?)xC< zIgnb-u3pQcT>b53d|WQR4Se(?b=*@ zp9%q~Cea0QP4r!&W*=oD){23k> zGH6&cp01=D^?Zqo^Q+xlelr=g40w&%H_GRZr3dfJ4B9Hqal^r4mC0G3AxV1JY3le>%CxsGl3Td!{?CIIB} z)g6#yiOE{RHa!f6vCO-(iBS5KB<3ayg038LmZc$ z*s}4ae|7#f3*IxM3GxuyzUF={Z`RzHg$_jo{gKT5DN;MO0x!}z8NcR!OZuwx7_2Ki zwE`F{-Ac<~Xh5|Jj|aw!5L#Nf-wFI?vs%eL`0J1V(}NRu_KQbCHczG@WIh-iNPY_A zyM^NVC*aGilj%ugPmMLpixIM#smC_*$r}@eKxpsKtSMclV01D62^K6M4d-EgXtG!o z^xM0e)f}cRec_8Z##=FK*=P5{c3~uI)#PKMFZ8i#4KPs9O*P({N`0-C)r>t3=nwyo ztvN31{F^TI3~gj?Io1C4pJt1^R%a{;`UbTSEEy`ZXm%cI-Mii284GY7v$p2YCs;b} z&E~@V>T?#3m~G|w$I zzCjzP(KN~}!dzjutnYj>w@Mxq>rPwuU*Z+4EBvy@%2L4+xsmZ2lZK zg>ec;HTNsvrooQn@nh|f@oV++kiJOgvhlYn00muR912%YnS-xqH$rhxIqS9NAaG&= zz|N{1gs&i<;ZtquaNd>-zCsEyB4@J2~@A-=gYYD*<(CHA8ZL zj&$>p^xl_7`eILkuVzK;GUjllThayN=}nHA4qJGL4GklpZr(i(AUv_nFbmP3Cd&$^ z5;_t)rcKA`aa>J;vw$B@3I6#if)6eWwWV+i2=F>>3f2EDYK6NGxZ5SGWp!h-S6U|& z0A@z22_YC^u~5(nqT7v~KIT3-cLvMKM8?USn4#>exanFs>83X8!Waj~+-KqfII8wv zR{#k72B(RlUqJwvCGKOC3YBWWGRjUbHkXiV$sjq`m)ibk6!rOx{WXl1U&NegEI?S{ zc!}#<1a_GFQ6w&e&%ZDE?zIw?QTs!f8-vzJd$ibPOEfz-`#6PVHGfDkN&|HA`9{Pm zZ0-s>&ED@kUw>i(Kx71bo-9!u2p2$W{Ovd=F5^Y|0M}SJ?hvK~t;r0W=I`d$a<%~U z)!QnV>(i3L=EPzuLrYI`VWS z#p76y2h5W_f$D*hJpn&Z; zBLAUfb(Y(KJrNv1_jsd#7I7|o5GH=Ca+>>7p*$6IOn;Qj`MaYw`-?7vn@}2zaKi@~ zT|DZ&JNtXw-bvpxC@N-U1m*!8BP=NYt9?Q~w~PJ|PCJ zv}{pGOw?kHG`>bP!4i>UOSlDGNczO*Fxz>E?5i-1TK>nw`XO;pa7orULj?ZmQJO8F ziEeV&Om}sUnNB9PE1w*8%a%U3ACALfBM+&w{)7l*g%Lli*JSilrGe0(r^(ZvAV)kk zE7tqv$$l>!HG?|O2D#nDbunDrp@7py1Y4bK?@s>dYQ(oE2cl?_g{;i(P$w7&t@;mNq*^0+t(K{MKAwF;{l9Wc&7bH8iJGUPIQ zE1EvSy(S%bj+M>4{tyysb-31dOiJdikw%!YAxzwT)5G`E(34@TqzVTulE&A1Ac*bv z+^(fi%gQ!d#3`|&ErTTgnIU|QSa`Qw>0f?rfXDnnPXK5V;4)oz=aYHPA^<#Jwd5}w z&ymV9;S3@ zY&*F)z8#V~`2sN5G*HLI-HE3CVVeMeb*SOj3!BN;b8Nb#l|xmkPSZ*;Ln!q)k(ieu zGSe1`!K8W`EC2>z+H_sqnI1%A+6gnJ!H?&?iED0iZPA~LFi7K4x@}GepqRSp)=AZQ zfS_#M3ANC57GpixRcqm0eD2Q>;%`j!C+}Vwhzy+^7PnU1i2Ly+699Zpx)pAf;yC5l zS3{xS6l3X^76JPjE3N)=+y9vOTP#+qcl^@`YnuCwkCtA2z7kR7=D~zS2@11citn*|pG*xG)PzfTe zm4h`ME$d_YS8K1rPZOlXGY;NMoU(Boc2~Qg$x&B{<#i1hQ-JB#607^ik-^US)zmEv zgst%b66J{0-v6UB{I~EJfC1`4GXutX)uj`4GekHiyo;LMQ7AEQT`kubnmj=^^X3KC zS$(cMaUE(A)kW3D2Qdm05)e4u$}n9o;*Ezn{A$NmE;V*YrC~W@h%oo_oxy%uA)0ND zERNlN%J_vrnEPS)3T#>_rBkUpE_KhfTXo3Bsghugf)%v?GTh(K`F{!;1@No6l@GrB zzZSctvnMV9%q68~m#Vp+RCkhb#NhYgGJgSZE_Vxn4(o>t>HZ>RCo8M$T2-J>NE-2~ z)N91>QCqg?%oMnS1wa$m4ArvULu;^T(3b=WYUaB~?(QRUjFtK~a3phTx>-QTqBLtP zC{3=9=F^hGM%KSWxz3^gzrA;fkt;jzJkO0+W@ILrOg`&DvPx3vQI>6#yX_en?U@l8 zwP4+#B&7^5%`ljeF1epL4$V`Hm*{=TGH*dR=HT zCcVS9kku5V9A)Vdfq$&wg_v>dRy-d1!;@k9pF5M&4vfbGpuyr0Jj5_UF(8F9-<>1hQAaTVMX_byv-0r>3$puF1xo0&`f2Q0xXp#te>>^3~CaEyaqO`D4wm7iBO~U6%EeHYh9~Lr9-lU)9 z{egvfCN9jX90J(3H1H}0mj-NK<9LH zCv29%$3#XW$Lxky024-w?&m~^&D%soN9)|9t%fls#%4H7$ywFIujN&}$l%qfX^ggb z)b&R{^^y40zmRSCh-JQGXTCt586H8QO3DF33 zY@5t@2cIMtYRGs*2q-EBh=5(m7yvn&YJ*Vobe_=c12{JFT^6>!A^tRJNDTv zD?RG9#jTdcg4nTtfmyyVKjqHpfC#;yG_l7-gdkEdleCrCl#I{&%i6i|RbhQsuAB#Q zhcpz3!MO5!JEW~)9o84-c}xSD@$~u&Q{Ih+rHfS(Y3NSI9F_~1qINe`8%Px%B?lrw z5X*)4D_9MFac9e~1~{wj!rA^7S$Z$`8n?r=kLC+=zplB?a=t|+lzOD+!3=rN8NmRd zx|ag>(;4C#v{FTZs$Y6>+Q(&_m1_n84T0dKf*+x8xIa(=23?p7STk;@&A(iX_6qTv zowXj^TT2IB>-he`ruy#vu-SYs=Q5A0+T=9;CJAQ~0D9~N1|5ERHmo7l=dz^mx-?^w zybNqWA^_;QOwM>i5v_t#W6>#sj(5LGd1h#gu6 z%=WaR?$69erZ+N~!MdRV{D!*iTXYe0iFN+?C&`fx(ZqdnP~jP@4PQ`$$r6Xj+^?xW z=!XgZ9wET{RcFwwyep>ophPmC{tXoR z25Td8?2MKwqV1nw!0eCZFA@SeY2!f{bn-o#KL;5Nacw+M^gmVrFw@70FTzPw*xf8E z^8d#P3jT|MOOyc6$kt1RAa_}s&u#VXIe~Xp6GE&k*#&bp=|0ieNaqVPWC}DEZ#;bw0m9|6B#ndM8%&)f?xCG* z=LtbXD-b>O%!|6U7w$psaflefN}9vbefOyoxA3NN5hIp~Ey(XrCN-ky$EDDB6x#QK zEGqcGG3|1I?>_KEKTOGAKN|FhKE-tWf|$#3E%(E({Dk@Yi&D~{RyFBAg5VI(A8x=T z)kZo^$6ic|uQd&PPJXmmcC_=WL6}FvfO(2s3}Z*@=S&U|0%j2amX$YStz`=5UcD3# zwf;aq(Cmwns1ZPqi{Y~aR=Ar`F$u<-KQfad3E1eqqwdlFgklY?={m$ij|pKVMU9Ww#SIlDYrhnpPX=H!y!zi@*${;M|?H>+C&Z3)=pf zxheP3v4S`Ee?~4$zxA=&bxL#2&p`>Lv7j2ayR#=8?kbc8_6WfpNdlu4MNUH3`f2ev zTn*Fx>qH3v7e4t3>mx6J>$7jp7iV&Rq=AN&J@JLXSklC=YKvdYR7_@4?hd2wXxZ>t z818&a*f`pV*2hCcSO-inA0^5-);*y97y3hcNW1y4rlm}J=DO7IlJW#>No##cOCcfO zGx^9|Cjm~^EB=MbwTi0aEdXdc2oglVlXqah!mqu*QEg+L@q(7k5h*U(G&UtBtoT@8 zlvaGCAm6Ucx(Igx7>!Fy8K)d-S5b{VeKfCSaAa|eVmZ>7|)H zI*+-;tQ24Ka{PINNt7XTxv=0f^Jf=-Q=nM5^{9)!lA@>^V z`OhDhP_KDLedwoAN@d;xAg!OkMgpyIw;~i+=??;l5&)hwNm-IILGFeI_=jR|FKf8J zu2I<9Mf=HB$$6pi$dJS&&OjwYAj{^+4(ONg4Y{W9T<;&(8LfiXnDExCkz2q|BvBrS z3>g8LWW5yclhjR+&e2Y0yzrxFXjvfg(in~^t<%%xL4lsd&V~(1@!H=&*LI@05b1wX zTB1)hqL69UV0%HPO|a;t?n(TOC%6)BSY@#w>(S{W1+M|FDyS28NotG6q5gaGG8!ce z|Fl0xaU5yy%6PsN6u80ecc(KMc|=Kb1VrwuEZc5YzFeWQr#XelCL1r zGH^ru=3&`uF$fQ6G6xmb8~>}9W;~u}MWp=haC%PKnZ|DI#tcpjK$HApq0AK7%Qxxy18rMtM0tr=&(1W$h#;h<@|? zZtT-I=D-al^TOIgdT4}k{P6c=0V2T{+j&PjMn(z-CQ)Y1!87OChXOGNZaHEvUY?h^ zUvuks<+>w?@u)O^glw!AaUklmcNp$Ftf~Y`!u0I{ToAqW;0xr(H7e1$nZFRQk5YZ3`bfcGU3;X(#x zEiXh70fKuRR3MD3HX!p@UiXIak0bL32igVkH<{5>O(1LlH>9A?8A z#u<;(B-g)_y3r0?8)(zysUWdddkoB8_h23{C+KrxiLlOr%euD6UJ=9d$$VN&f3JIV z`Z;|-J#kqu|7SI}h*OHwFS9*)BIZ_$Hb5Uxs9}Aq1Q;^_=fbxfF3mREQaR?N~Q% z|6Tj*ZtI)W>j|JdAt6mUkL1cBaB{|*l<%%q0LMT$zug@joy1NgrER!{&Mi!7oE5#< z|1(-ojsS{FOnUDV3la78Yre(x(%PrYt!W9WXO!4S-WzvR+V!e5d9v13e8lVXy%M*T zGnp|zR*rj$&oQD2!{_le{);aD5G(VdBKw&~3v#)y&G zriPE5V^-2D#BJ2}H8cwfsXeD4z9Lc!+Kv`YWqC_lnluDVEhIcT%cO?x)+Nr}zf(M~4=alWWnZ zffgt&7}L`h)f7g9^#(1J;o)N(^m%g!vuBR*S(=LsUeZ(%b~X0tJ~f78zd}z_XgXQ% zdMW-nt?|Ja$?FgEd`^s?z(ls~MevGUFwFHNjwQwi5L87s$$t>#8}SCkUSJveZv=f= z9j&=>FeNF;$|esMb_`5{d^ZGJ)QaB?Ze25#ce8X@P+HKGMcCzgC6m z!^Kk6myu|Qi1V~Vy#PSN-~uTvn)W{EHd?=4_uBt30@sK?fZ4P92A9h@SYs z$we3;tuFNTh)kCxFk%AE$bQMPn-xvIWtrhBBGY7~>%}+=Dd+yN;hy&g6|P51EK7hU z3CLEh>;tS0`rSBwkMLENAcl5`FEmZ&fBGXb=SIueut43^fi;M6z&LznJgDQn%Jj^K z+$CLw>ch;41=952(a6EH1uGk-el{jLp_Mnks~rBsU7XPt%PhMD`+}qdw0^ElqV|vQ zw5>nevYxUWlGPEU{T+q?)u&!T^S{z)1n)Mo;fi?IQlIWSu-hbF0MK^9NL6=~9B4f| zom`^2Gq2`Oj<6nwSQiWXu=!+D8W-bbs+ zNJKM2OH-}^$J^%xlTAV1q4D7Orn7$|$Uh^kdR;K-U;5)Go=j_H|3V0FZbD z_oyI)c3WnEjLg_1^0=*F7A7RS_Yu*av;xCNGgI0EC7@9fA0;s&?|v>Q`U!bg`3<6& z2C~#up5zV~1c3t^?u+t-!6am-LH(c9f*?fm2Zi;Zai#9ehXY5wuHo8oD1&SV+WV}u zd!qYEI^^^9YpKmkO5y95%!Db^Qz+=>2~kS zuJu5w%H^eI9i=rD#RPx>brS=ZWp;H@?}wXtcYCFwDCp2fcPCWrJ+O#xNqDt;tWN60ZXE8G!cN?cgLk-nL(v$k)`D zxe&X65?RRsRccUug$bWkGQ?DTMn3+}o*;+6lnX8G<;?d4TrzbK7Ls0KoNzzhliQSy z!bq)-=^wXa{C!f}A&76+Yr)%S{u`Nm`O=L`CIAeiv!duq_6yMH4reto#IBNA;z0}V z!gtGhc+E}k%Z;Vlq)twvwkez%p(-t)2^3`Wk#dc#c=egI(wZ8=XSAmmw}5}h9)Du| z5-2n*1G=dq)eDuk-WcNpjU>8LgzCR=kkUWYNJrrhbT1re0 zeY}f=0L=c-OvX$q#tmCID0_2jFPpT$?(3Nv3jmVUbi@5)Mm}}%|1%mi-_VO!eJc|D z2LWJJ>sW89j`-z1cZxury!YJ^r5jDxWHx}>6-0G1A}%irAOO>!R!BJ-{c$Dl;UJ6OOWu{JbjnKWOUiM+ zN5P2a-;*08`~~!p)kbLh%SwWDd$pn*0MS<1d}n2_2&6^;AQXtXwu=vW_ZCfw z^O}>swWtI_>|Dm?A;%PPj6YyOOOK|Nt`^gsPbJ)Hw@LAb#NU2CkqE@B9L=7n{ux=} z=9RPKh&J%S{7)KRviN1h{Np5BX#@&J?}LkF1@LKqwFHnQz{xM{*_lz6l2Dl~g2kXF z^F<0vssw---(^=^IuizOX_@$ZkO{x5NSANwySe!Lq@59D{Xo~?{`g@FgNu=!29exnWr`&52ov6GC$-hEgqyqC!R(F>0tc7m`oN!O#SkzA zI!6F{5YvPcq7(o97v_8-F@$Zh`LSe|WIjH^A2VG_OH(QTpbsBED!WCkGGZSDl%#Z& zU{gf!gRmLABY$oQo|KMyKU>RG3IMVMRKNA*uU>c6T(&r!Thtuz8EBtvS~A}G;jBMQ zQv&5!;*!ZWd}c0$f~QkG8TCD=7SJ1F04W3{5r`z^UsjHsXZo8;AhevVd-GiiEs{~g z?5`n^1OznK$kTz}e?jp>BYWRS$#1sD7JD0YG4d7p`8~0JJ~dW2gf-fSgsTT^ThqRkBy)aaM& zAR(lrK!7MDA<0U#X=SO%DiEN@ToEvGT^|7rE5%(cI<*Mmg>-`0Hvxsbr4 zrfqodo|4&Il0C^hhzUq*VWXGhGkNd3t7{_^0+Wd3UEHW@XT<$o0JJ(jcp?3!D;P*% z#O`@kc1Ivo20l3}y~{@F%DNH)F!bcIhx%p!mO)9n_cj zGZV&Vxgy@{THnt57NqfcFNDEDZhwD3BJ_VOiZ)F3yZsUQ ze|M$s?#XRPx@!(A!$ZIXfZ<{7&0AZFc%^Ji*L~1SF|QCtZ}&>?)P~6Uz0XC6XB8Mo zuo6vTFZ50dS8)%{-STpXKffIt%Ge2DonE6pXo3%Cesd7F*?C{#N`H8KcYZg3_Y9Yu zxE%bP-RwAaEoz6h*6{72|2?yB`;4*RP2_w0eAj#K_hA?J_JV?h;PyJmPpnkh+UG#` z(e?mO>aMp377{3tikkyrcU%gA(NRFO&H`oY07YS3+MTp^q+S4!c5_qH;zLc26Jn*W zOWo0mfaDjV<(D_>B6gty0;Ao!sP#Ba@=kgI1~Wgcw56i{m^f#Z&%7J|Cd5C@QP7=W z)ax~vQ#TZo5$9K5W~pWu8Wstk&T2n-`UA~;(ENw7fY8Wu;w_N(YJ04a`xS7lip8Jt zc4QU+VIJ@3OC6{S@97KnM7UXM`b~lm2I!Yggn+#;NaN>`vcs-_iWSoGXex zZkvWV^;6|(*r>3^TDd6hDK&M>ojPdNyxdd&=CxVB1dSc#$XzkmBH<5ng*p+&xo>;h zc}9HUZlCq5!kylGkL$$$-MSKI)qjKOTib+->8&;6#2NjnWU}uAG$aedI=Ch*t}j*n zcE4mvO-%{ir*h}~NTlYjc+c(B)O(MLrd|Me_uAjxRNHQT`}MECk_&=TT10B3|HqPk zA#`375QUgD?Vm9Tub}vR0SCv~GqT4MzSO8vGDER&4=jGZVA(WNw5rFYgh>JOLtL&{^2n5Xiw9qTxfW!V# zn;C~#0oeFwc|(~*Q)dAPv|Ukf`Q57*ZypwoB>Y2nX$SxSKmbWZK~z5V0zeW#?4=G^ zy(;9r*4p`!G&DBaAvo~JROEN<<(^6S4HFzQv*x!CJnN)RXz;9QMi_u5cEiTG-pXJQ zz(DN-*ukK4 z=!8xTyRMV+JQDy$)YA}Gv}6+H!7O2#(@4W3s!u;2OWkSLlcU3%dVjB|q(AF7hh@23)r3pM?o3Pp0s$-Xvl19nk%2D z5zth|)h7d=KTm=W0NPd}=x@E@O0ocKX)yWi(OZJ$ArT$u&sFt1dcgm!{(4ZU`qu+} z-Y9Nv0>CIxZ+j;5pn)??#DO%ycaenxs^Y)pwUxso zK&}t=u9$?JAD*`94+)SSuGL&gc@VZm<9sqI#?;~A9Oxa?Rree9%)5SLlbOGL#(7hHs_=34nkst-}{N6KbZd4?UOCatG7O)-i!bv?3^)p zv>#tRwIdpXjKhdrn-I_wjO{-Xgnm9x0F?0x5>g4JN^*>Gnx*8V5WwO6`EW&}TAJIuSKiX`QXJ28`SFCEQkn_X?M ztHXqV5p>8HJQAT2|10Md5#BLCzCTT1VQ@oh zZ~4oTXDi7;U@QfI#>Yo)*IwIryV|TDD;9HqBDm8b04OTz3dMHjGXxB_KxxI;&ZpZx9gur083tcJpb+Zd;-mab|HMS1#(qF zjNkJl@lAvZ2b=06)tVIm<6Xv50EoM6Ba<)BhT(fMNBy(r-K&aY`i8zc7Jr}AGckdW zl`!MF%w{jtb8bPqvSsoRj>$QAKTVMX_d#;?z zPEBS1L>>BX)fI^s0I-tzodO?MyyX1FlLh6;&uHlou>fO8HIh3&Cc0+mLW-mD2d}Lh z4gth>pO{H=xsYX>ALjCy0jq&R&ImOAd)hbe)=JGSZ)!J6DK#`T1+86cNNKE88Xvl1 zy>apSrLoESx7+9)YXN`;U$}b7n{kvJNawVWu_$nBQwf5$6&P4mY(Q21zr3bT#)1KM z8^`upj|e=GCwC#v5Nm`{4qjV1JOZ$MFdtVj-j5gkA@ZLfz)c+lp|<${;Q<`@9h68u z3^p!&@(*Jh`QHxV*b4ydjZMpIEUxQ*&1}E?@pJ3gbMC?Py z<+BQnQDKjE@=>%idd4A}R28%jt$6`FO!j9V1U%i+Qa^V4TNVAfwOUp5f0Xu@gg{o3 zhkywH$?FrZZpnfs%}ePsWdSH_6OC!b0FXM{rcn|*ETwhYsfi*Hv<)z5Hl|w+qu(j* z;~`g@b^ZvAENLd{e>)~xZR2XlRkFwJgTTZH0L`FQ$p!g4B4l#CHRna>jz-3~eb5=v zSIm_-4?B^wYs#dB(torNxKQ~4AjVHjwfes%-9kC)+#rVnfzpkSuEr!pMUsU_Rt+_B$ z5?#I15U)sno%>V6i4g#*6?ZK^<-Xr=%@az+`AreOZwf+X+)s8{(;Axp@7|U>GL=y> zm!g}O0N{6b(@sI=2}pL|jl6cEy;_PbjVwAtq}vPoOq^tei6%p7+qqQB1Vxaxj+zw8BoHMs%OQG!j)jrYT5 z^F8g>_!wT1j(Q)ymJ=fYT=>MNbG)qF0Q1F}+#iYNVPQ{vA()2!v)M6lLsS4iZ8KCx zFe-0mfW-ITs4EC@hH>nsh%tPMibyA}bW6D>#A`IJp4J$C{-IGz+W3e4Oc*<9D4GXIDmTlD{QvQU1pmeTGBE-`+~BmGnL?h3 z>qVr^u~o1Fv)EBAZH=+|4iy2$4r}=F%laMIc03(^Pe3UafL(J;v?tN_){8&3{ z9V=$t*?DOZuHcSNXSG5=t=LJulinb!-@p+-2dimqU!_s^OR{$AWGMZo=JGy0?E{%^ zca9-~2T4dJi?xJ^r}2R{)CS&lsdxZ_B}@GtZPwkDyW3Ju8e0EPpz_X*@K?jg4*j z|4IFeP{3GANoj=5;LiVb?LXW~NqzfX%}Epho`6`(Y*h+D@v=;vw?zxjYPZ018og0& z)F)RZ=S56|nPjD{rdujVJJT|*BU7sMPW3H0)!w%zXp_V;!iYHY-b#NEfE444Ez3m9 zIVQ~>WBtJoO$-16B`yH(m(~gs3S$k%&+j<&-{*7ZyTncqDwptr)kRZQSa()YdV_x8 z;B`_8b=gwzA7+0JEJVJJe37Jz?gUIr{xrmItBQ9t-tWQ#c3v_`?vW<$YsOtiRyh%ov#dm$t&%1iXfX`KdQM0+GPt$a(q@U0EkNRs(oOMkX!r?X<|a0 zQ!2+C*v?^J&{WW0Lnfk<#>k_RUW+>GBQIdZyaz^fOsfERkpDods@`rC!WH-PQ%i}f;}9z}(E`8)ccI~a?(Qf# z(0X<{w<;#;Gw==(|E?be0T5mGD*_-OXm?lyw7aN;fae3bx_-b2en30Wh&myNBdA{b z9?2kS9d%^^#B~Jo=OYR@b9l>({-FKG78pKbd{H=Qc=8e6Ev~9uSDJ?`Eun zcYO5l$iDw1whpmR2^;@xDf%CQfX|a&guCLt!_{iFcI(2&J~>>{t31&9O|$?oP-F*q zb2PM))^=~#0%hIG>!^DTZb89;m~^d7HUj902!&|ya&kT;&MnRztsKGppK(P|01hkw za|$R$h-@f#KvTcva^1!Y4?f9@v{0-D+LFQ~fC(L9FnX3Fv*FJ}1ti~EskD6m6ADPm zn{5I>3Of*OuA~I-Xk(9MetRr4uW|*sIqervFx@PLJ=)(Y!?@AnrJzX=z(N$M|9lSb z&MM-skk2R~jm-ZtMN4jUGjanIWtVO!5N1n}-1_e|I&w}RaQa;Daw+6>lTd<#iJATF zv$Wg|U;m~ekP!kNZPk^Wtm=c25e%%P4*?SZ;yw$UfXt>`lReWxM)3Y+^=5yY^n}f1j znMed!9!h4m4ey(M76O?2w6y>EV#XcSfi-~W|7mglWAX)L74Ww#&91#b;42a|BVvye zRbvYo!OFl90Q;JxI9xFF>!?Z`9K}++=b8YZ-new*Qu%AIeeECTnzf%53Wf98Z1{)zI28f_E`XY(@dZV5zy3nO9h+sd zAZd1Ja!DF#R+ph6fSDHNzpjJV?S(NxE_(I=I3n9W+CLit;Q}}&e*ngP$p_DrNC41U zZwVFrp7&OUg8&ws8iAZT5CBF6eJlWMdjX*CR+J!$Bd$+;9u6wqKSksAcD)w7P4M4F zCSMlKHvvE}ngGxmOBb(SWI$Au9B55c?&-+xeyk1k7Z8Ow_ja5p$SeQMNRt4D+80TA zm=6L}|0_;Q1p0Z>jDSe~e~!3d8OUl00D_}8A;n+#bF_*8KW|e@0V6i_^taDkYH#|E z06iEVeZP5!H;i0KQE+KlQYEX8pf}Htt`2WF?8{i)8o0IAw(!G#dSahv0zhKBrn8E% z$qTbECo90B_TkQCvTjyk+Ed4r;1XhPW#SNkSeK=(bMU&oh|jwzNr-cD70pW3M;L&W zlY?PKL4XARu@tbqaF0#htE7#n7;HQ5*?z6P9zO(7GFG(QkAzSUlWJZ(Xy}OGa2{7QElV8lai;rtycZc5I`nmi&jkbylyWM zvzGu+hy?(Bs}C6~2n031)bCjd0EUlCp|nlLNfQ9t@tlO$(XwM=N)-9O(R2?tt3C!G zb_+}b2IJaB699JfY*5>3WU|FYg}2<4Zlol;%`>~$M0fmZbZ=vSEQFp3I+bhai2Q{R z?lvp$&?-rt;(B%RP}og;baK62vp)j#dqw714qoppxQNI^$3X#mVgV2WIIt457j8ii z8f(8f?{drb%kA|Z2>3CsKg>xm=6%u&^BCH)soW52O8L7X&A-98+HGJZt}&sk{+S0F zu@%L(EJE88SH%%m$OM2L9kA?*OJ~C1Eo~7ruUYzrX6s+-lDQq1CGKricDm~ix7=+d z0y?))aGyPuRRa|0C9RGuZ$l{}76hG%Mu_=DNn{RsE<`W~<59gir-3LQ)TU8=A2Vi_ zhThlieya-7!e857U_7{pPHM%-5#^0Q7y$E`ymKdJqx}GO1_9#xf4p6FRaaM{puGCH zAR)l|svXx(UW4;9X|Fr^(Fm{cf~3aJy3GH-SS-7{rJ4i*a*o6!ZG@HU=+VKZHv4%? zLd+GJu1e7Mj(#>l7Y&Owqy|z216DWP*7b6=c11bVu8Ux%LTY5PZYe>>ni2$&sDs@C zq5Z?2s?7S^1TFYRJMl8aF$fOSdq%Tl1%B>>#~?Gqp_o`w{)m z=OhKfQpDi{1Nl%t_1&m@5FE%oIs4TB`U34iKeN&$0%?H^;SrY){f_&GgY?pEL#oS` z5C8HY%xqlv1JT})73kh-#Vsphn*=@-7_~C}YiGYe zDWfA70Qvu8*^oUXjs(#v08#`Ov>uRq)ur(h1-`7v@2v8u6}4m5s!YP{889tLqG)?h zcznn44}qjNmm6+Xn*S<0{%Ksn{L>@=jAOs_vrQ%d?1N@&&FF{94e*KxXDP|HPZ$7& z$%)kdSpMIt-mq5J$^;;wZiZnKoF&V@cL!}sU60s*WNn7oqy1wMniAvYKq%zkb8i8V ztO!q6)->HL`jRnK(M=H|NdKyxU%e&3ZwjPKtc)iD$c`kFT4zbAxBeI{8MmG`0btyE zRPBKgux8gE5&#eaAl^fF1&krKNo^9O8Phy_M0gXu-nWx$-~ZzBX!;}o;=TU2{zPZq z!}l>5;XabSCt*?_sKod3n*+6;!#I0-172G>2mvsTkAtp_zefP$7*>g&MYKPuf64xr zj;Q|_A?5lHG6ejsG=rLD{EK1Pd_d+1RBn%VFkxn?VQ8BbCS;b=}!K#}TF9=zzs9|M|_vXYMI-sUS zeH^)xNHc!r)}~)x%u0Y1m1_#!ek;s`cX8vUsNO@bstEu?hkWo4RIs+&kngrGia`Pw zm;$<8l(+*jWM$kCfN*1`-ICuH-z|(I*5qg_>-=Y@lqGVSZGgq>^`1Qd;^Kn=^_FzN z2##~Qmi6E96`t?t4?%^obqS%vN_Plgc>#9LofP_z7z~b~!0r23&(gz)A0hT*Q|=#D z0;HzMU8@DFm*2g5@n&l3Jfe!40MG&Owaiwf;EGq8%7PYz!Mw=p>zcxe*L|3>E7I2R z^&V3|;0s5x-U8rhPp1^91AOd~(IWtTUXz+ge{TUc$tv3qoxy=(GzoqpnxKo{xxy(8b}~+OHe$aUX;Xlm;UI z@&8MfZ0#wr#}5+oaafT^do2vYl9K*hfo3PBY6-502>_jR&ec;(?#r8R)tiBm?1f(u z_4}cIcD9qBj`A8A0o=9wWq0#YM#+J4?iKb2Xl?%r0k@KV1R@5A)~`fRk1Fo=YLu*n zK+yJ5RD|Cv^LZ}_F0kw09^H>C;H4uu?G=!5_cAs2%N^Hk={Yx6s_w{k#?43se)WVh z2&wG2to_jgc^$WorLQba6$DA8{MFtf z8+vwiD5i(Q;uS1~P4`yaWiCs`tvG15l>-ql0pLK0oV(yIG~89U$fe);($`)Uu@P%c ze!&ZjIXKkJ@aHEU;pErdYS_w5CSPhcO~fBPf6uEE z^#!Y-7vKH(?`|flRtc|+2>=Q2pu<)#)&#dju-CMkTP73wwcFED-1=<<@Wev1@QKOt zu$52X?;*l5rr^gyI?gPi8%055BMmfuT9`lmjE^4o54;dxKlLl9f-_pTSJXie8b1Hs z3ZD`%%C^5n#R{+-P)q+km!<7cS@OaMVhv;(L>NH0@OudCcEbUD1~c4LTX9E}w(H&{ zL3553bG~f|I=ClxkzWQ-n=WptN>V5K#Mf<)KIr0(eY+IFPD4qIpCHe=0!bSlTgH)< z@kPJ{fbs2HuQ9AwYn}m2ZHDf*kF~+UmUAy1b?(KZQPg+8n_m|tsnle$lzO;R3+?x_ zMsYuw|MHghUle7`%CVl)Avnr6WS+Wz8JV|_2E=DCt-jfF#0pf2XMDk@qvOb9nI)@d2cN^ zuFV99GZ;oe=()8LG5(Jix7{a?%AToy?$osEomKs_?v1m>NPrneg*to&LKCT$rzF8} z=6w}5;sq=V^h`nz4XzF*0Cb6MkPREsp4Oxil{9gt_4C9A z143Sq>Y+0%-&&W}M3i@>t`z}@_AsnIGcAZoY_8v&^4)@TS|pvfO85;MPs(Cj$U)3%_*pF zqN->DK$qQ7ca4pL%YCHzQquf5rz!kOm$w+ntMRX8u{~ zrh^p#)C-~DpxQ;23}OJZB0$Rl`9-aj5;(p;kB4Ebi3!7YcRV`2Y#?Wz1%rb5@8Ob$ zdMBlhCIEEV6DyV6wc|7H`>vW<$YdMe6LI|aUEX3Om$NkNzuw*U3}H%~AM-8iy)h9V z#1KYO;RHPtbN`ZjqWIVEma6VQ{dz-Nq(;n!-1;vc%eyzfIOomJN5n8W^zpz7v^f$; z6tVrd>V9^&?6qO`Yi}=-N8IO6OuN&H>OU^i|E#POdG-B%RpYxt%a)H%{qrXa-pBs& zLxi5E3YO#{x45A(wOG~iQn?qj!eG#G0JNK`)lB`%`^J)yuVl z`)|%oOCXaAL}Prfc!zeZCiOLVvXX+Wey=?go?7DRmyxyr;rnEl@cl+3`*x!cuB>e3 zEb_n0&N2a@OTe_HBeCxAst`}!=iQXTey=iFtThi7r{j*;@OM7xUrZmF`$*kJz zkk;44GGJQA#J*iqkW?%13{x&lc2-XHE&1Z7vIJOS43Es>gK96@HySR-milsC!JV6O zOVG!ZxIfW`sT%?bRsia{AM=mv8~>iw0<#hj(FhTA;@|qG=ubtA7me|VG;Y$yVs^)R zFffLzXYy!t)c&(4JZ@8(&%S*FY5;wPO9xj@Nqxz7zw|fH zOheIFD^;u2TNgh5iS78;Lx-6F&;w{`{0RgS20=zIQpVm;K;WFX{})cP^IJ^X)7Y}X z*BN4nZ**HsmUB!T!Bn);r_BA$qpEga%(%}T&&%8{SAa0hz=8W%5JWi8{_m}6jkbvO z1Fh9wmopv{Zc7pN0svY(+VAn1z@0gQwMW8AWZUn!M#MH?-I&vw|6e~Kb|7N3+yL|X zrnJQe(j3VI@JMN6YfT9ndM56WScvHOVI5qErb;XemKfX*RdFhU)Z+d?@Q4KvhV(cL zYsQztc>NKrz32d_4lhAST9E~XcnShYA1Z$yu9euL(&26FvJ@j=0zis8ubZkvBpXC? zHv$O&4fpBenoQOi_n8v~@9X7WCI;+|D2yJ4;{^aTPQ3?D{)!j-;7SC5Ij?P#bcyAb z!c!OaQyYf)!+425xTl0a$lU|;@0KA>i;=&q z1X3&?j4TLR9;k;PNb0`?kcJN!lX1Zdl??^m1rj)l1VqM_>C>6#VTIXNJBe1W$luQa z6Z4(n8NLz$sV+IKTf`M0`K9mm=u>k#z75TLDgbo+vyLug$gIk3gdn-3ar9uV>MbjA zd`U+)v5QiTfC&JH=swX@8DA9)sCY0@p;?iee$%}})NaR5RZ)nEC#0JBNd0JdO!g4M zo!px5Xvk>Z!c@JHepJ_Y90FYAuSG*c3nnWc z)&T0jK^Jns=m|(9=RZ3@V#=rP-Eu>O7ySC<4Zxy+rHDFsv$20B0*UHGeboMG9SA24 zF5;Ow`dN4`f)1Jg(ENEe?WXV~@J7(_xbs1OAJ1ckUs`R9+u6?wAW$OMGIkk9b@f|1 zmi_tBzNz7<6oFPzfO ze@cdPOh-ZWr(ff1o^9RU?=&3T3`}3`KC7Js0|_;Lhrm+VGmQ9`tcdSK#?r0iBGal$4TIvVNesaYIA$GPF1)^-f$hO#tYzO9%%1 z?yFzDyq(GYY-%d|C!(?cR<}yN0H9gUa-R*q);B9A!qUL3JF1KZQ(2aLS=osIJZxb` z8{`PPty&aN2k{JbAON87vMQjg3-8M}Kb@=lcz_dPNSMz(3+z#{g3Y0s5rBAqt?qs$ zg3Z>fpD0la5%A&-PZ$Hw;V~@v`{L;-cTCTppH_A}?BP8WjcM=C7kgDKfS9_H9z2q- zcyXihtNxKUMtUId@=IKa-N&}2&^t4dV8A%&Q1T^PPaejJ- z;k=?=@zL=UX=r<6JO1JB7qWUpFnTv2LI5jUG;H;VK0ajkNB8Md?a3Mj8y7zL2aEha zB%&q&91=i&Uah!m`6>5(>0>8?Oz=(3@o(zmxjx*w@3&~5;-vq)qExC!#n4`t#&~3E z$`wSQvv9g0l|Z&UDUPx}aE3WhLm`-;LT#dz5`jA@^YXubsVFV9>3+RbcBQTDCpCp3 zK3K20RWa%rX~9*s%`9nZ;Cp;ZfdUesUhS0v!I zx+0g$I<^((Jc?nCD`}aX{xq&gAho1jbA2)=1OQ=gWwH=30iYlK{mH^c?X``ML1lo=88l8QY$621wFJNf;v=b``5-z~j$pWBGBNX>rFHGa zGeR_Tm?4B#LP9p*bSD)*Kq_67@ko_>_IK^~h+p%2+zj~FNkX)teu7ZrgJ*E*>t_o0 zX1)J$CGWxa&Qd%!^=W6Y&-|c^H)yOR5o(HBtQ91z$*obSE4WbB4+>R5ZsSaSv4Y?Z zIC%t1NR;tDn_(+zSpu|BpSS?D%_-6g>$AA9ihwofv<)Ut``P5^u`CK(Qdw6N?|?NV z4g!pS$C-A{>H$Z^{CRdi+TDKte!Nx{z4p)80aMz_#Tf_l$zebhwwa zs5M~{Fc?hq__@(GF?ZumfpK}sPs({s91(MMLD#OzNU)+EBZ$hMmFYb8i=zos5F987 zU{#-THK8<0xus$A1Nh$JLJ!-W;NE z4}|`zuw`s=Fn0aD4{_h&{8Y6J+?q6$l7`HkMo>#UogEG?hkJ?%0Efdb)xT!)Kag2< zO-$sp{KluWquZQ_=!;r6jt|+&^9Zof%sH)%M?)3c5yQ#3WhKG+#r-XBrjCEX(zv^i zx80I<&bzl1xSvT_AVU580&zN;U$$z}GPX+e&Bs^ZVOgu%w^T zQG!hkhxfx~^SzwQJjU&sj(R7%mL>osyMG2+JBUJA#3hs|mL2XkWfmlko6oK6jzAm` z$Cj@6+g}i7lvtaOe+7n{GTUzj3f$X9>y1p)MC5OaiQ=PXNg>RNKr9&CnqJ&oj{M#0 zO76o-jotb}KY7AL(fH3O&VV!d{*Q?fQn;p{C0Cu*$cjE5Yv>Cf98%x*uX4 zGtnncZZSQA5P9YwohwQcSDIgG|EIJYANPbA!XO7(Bxkszdd{4djsD&XQ|@K0a3Dd? z)%z9i7I>&t5Uhl#BLS!!M6KiRcb<2)fp!5GEhGlxcU~+iH$zUV6AJFU_FurF!nVwV zZvW1D4f;J}p)4$OI=~cI!K&MK5=ZjUV#O~bz3q2Vta&#U_e*%WCf)xnX;ybNh3-IO zo%^SRS2;b^GXbEFedU7zKXx~C%NyT*{p+u2?RYA73q-qsK-7KoI%~aUE<`;*)#`=X z^bymf-U?=f_Rm3q5zi>%e|f9s^98Ug9ng6K>t9H-*L#xp^ip^V-N7b9#1gOtFH7D? zdNWTfK~#-iZr#;yc#aQZRAmqrXB8__lskY;i3$YlNxfthX}l`nKnXxk2cAx@i!tE& zC**!P-4cFI%tkR4<0-0QRe7qhM?iJo*;tlVB&Z>*t;_9)fXZr$#23*}h51 zK5-2Z>^@#HfZ=fgROVxwA_!^^benpr0B6;BiHkl zY)}kYQTVD)Gt7I_JPd{aMq&XNTXEZ2+mA2Wk2jt9+^?KaBWQ_lk6y4kfY+>KAVqb2 zmhr<|I!?~ze7pm0h!G*s6YSWE$OC{I2mCk$!JYyT-+Ao7aPP~w!>$F_9=|8Rl$5`$ zDj}x=pII>kOaQPk;3*3c!ZmyWTCer12Nj=y2BLmaS{_=bmFE$NQNjQJ+R3~(+rK!U zm3eyF$5Z_Be${W2yQ~C0SO#K7cOdQ6jY(zem)uFif9*t3ns(N`tYktj9gY0`^V2eW zi{ZvhiGjyrxPX0Fw70NRqc7#zW2`;z|$-9<5h=;3=6DSM?oc z?HU#*ow*n7g!#`%(3z2t!;Xe$=JFDD$iI+r3p!{w$&RqT_&P`^NJC+~sCl0GONNVr zA6pgQb4=)qrc6iLX_=|nRe{N~?*~HYJ-PU1xpE#d_ewwPHCi=peW4|%M zck$tW)a4&mmvA72k*lJt!#{7ohj#f4yF9%Jm;f-KzN=-nDg{@(qUi=rJ1gFBPBRaB znCOBgh94Kr64S1a2N1C!$coRb?2iB%DW>_b83hNz%x0Ba0p?9$AIX*^BX}YV^P_m5 zxz6D0&E9%}X`6%1d`>CIkia*D0KeTW7bdhbJOgiya+W5-r13vv0igY|Cn?DfyDElm zL1zR`Tu*xI+g_AZfM75!yr$J&R#+^^3dQ)FQ95BXXfIGr(pZF9#|HzZ!@^vS_D|6M z6mmzgLIoj>TP3<^$)CI=w^h$OazI{zmL|HsDXyFe0E6nhtEZOSmp9+i7O|(aPyF{Z z^S-A?X_KI27tFLbw9_0!c)eV3zxPtXGoIs05(HzjvNrD6EbTlMod2xm0|TRPH!9)5`1Qb$aM&|H?JvD+P5T||8J!pgn~z_2_A z(`i=B8$7+qUOO!!T(NwxBB?+x{km*!hL#&?&IJ5L%LILzVF>^PzoGS~rTFHzs_SlMx=T;1Xb_7fSXh&lJuPUy{5?96<+W_MQRm<2cv z3Z~DR)_D6Pzo+|Cly1x1RsX``A9izciKLWsec;W9lH7Y3YB<9U7qXD6_UYf0aEBIq z?Yqtojs+%1E_LyPIt9lY?{KbaGx+?*L37>X?eF`RZJZop(L<71RjTfxhv)A-SaH&s zOGSsW7;)YCm1+7@(tnO^GvfU8^gVrQd8V&VnsfUynXUIV*kGG1-#nx)05j@Q+vL0 z_ZmgZ0nc{(V9EEa-`^fg!@eDP{x&Uf%=sA=74mFN&Bk~ar=xI?kZqa)9l8LPaEULPl_oC9<8J18EbTW5Z4T@Adfl;~bVkc&Th>zgDb5tEGZe2GZ4BP4H34o!+wmkW4KPV`QVSBG zLK>SJ*ObfU_S1hZ$=Izo=Y2S%lsfxHlRxnFtwFP|UDnUouvd?$_UM0B@|sqO_as>h z=ur?n$b1I>F?{XS+fkCzeg6%$s^%=UfSrLUeR8xGK-cWA(ie8UgdSGPR6V-C=fO-H zZCPKn)V1+AaYaVUDQe$&rBZs@7ghc7;?rlkwV#+1rXq&JCiI@@SA^@>&=Wpre_~JS z90@!xm$~q;wM_GDvpE*V6%~~>`u0?2TBi@J%=W5L|Cn2s@Y~jIX(s?!wSU>j41=Kw z7+@Wbqev^cpZf3P7b%_9gI~WgI!{B&bFX#w{e1k8d>mKd;kSvSPlzk9xA%ay8M5uD zTIt0!xr|ML@brkpfg{5+1*$0L+o0Kl7#fwZ=kG((xrv2|cnvOJfeMinl*>EPq%b-j zLgy!MU%F6>glvsQ+6%1`WlXyo_EO?L-JHEFdGGmA&Mc!<$F`%GQR%Y)(rb9$^v;k z=0vyH#e6JV`l}`wVHN?h1<=Di>M8P;%`&Jnqh$q}Cm~#~E1v`O*KF!Higc$U7E)&H zp4X(s#R*TohRR_NatdhZUqXw+`l@iyPU98v2;h|254 z7ctde{|SC+RBL{Kx^C>u$20Gp-6FQvSZPkm0J{dI)aj#anpcH0=Bgv|tEX@pWK|2Y z2W-U}2nD@vM_QJUlV;-^%gwJ@WV1k?QK@Z>CSbORV`IkHwpGpA%s6&!fd*QwMuniN zW%6!qBJ@0GkH^<5E~p%=jk0bE|7;t6e2XLxQ=u<=FQ4G0fl7Yt^=NxIYMPbc$Ly{z z(RCnbof`2>z;EXP%t^odHP_Ot@V3xJxjV({&$~Ae$MvbX?>Cb428XI>-MAkWKWxss zA}UAjOhwnPbzMN~zGUf`&b&YXyDQNSEXO+2jpJ#RIjJf+jA9=VEV&)wKP)zp;{WcX z(yBIA(CKJYy&Tw~?oMB?`1_u%(o+w|52EkR&r-R+&Pk>GuD}o0>CkSS&cslFeAU%c zxnl(qo=v~Lrf&J@%U$44?C!dd;!=C}o~OQO$qP!{&UIj}zjLSmOCb#~H-m+Cco0qC zT3>Xlx)?VxM4t9(g=%T#y+Cm(8E|ub7J9z=<;D^Du;vqJ&GF4Pvb{-e@7fs67sHn` zF;I9Xu|?6KB6;1{`fU0_p3p`i18go5%e-%E>AhanSg_qb-prfb+@QOC34lN23X&Ct z0;~Z_BiA)LIE(NqbxHQ@+na&m8q$LURP4E%_oQt?8~Ne#0kTn(%xxQPN3kEHe)fA6 zzogI zc!Cux?-)9gVBv2xzo-ZV{Pw>5_Of9ib$-OD1^QL$xKN8SPE!U;LBP{W z>z3R-c$ss3IX8 zD(~F*MZK26B0J5Vap+YNt$k_c^xF-PTEh8g*9*#911*`?zy7_qp{h=&z@!)l6q=MT zm}$7;NTtT#*YhmEP=qh@=qU!H!&%2_jMm!d3EX8m%kR23Vr&H53T!_e6zbo2?WL9= zGU-tBUF+k?zWo~?0ddH$GhEy2m-5JZS?}uI<};%!bS~L`&Wo3#Dd$C`w42#SX@iL; zbzDZ2(fSws*cj_%-*S#ib|`9ptn12)y#Pm@rLbQ;RsuFD?zjMO^+ou-_lU%-Py%6j0bI~;8oiZ-08YlNd#%0`^=MYe2bRhKnp)oyUE!=rgRvX#TyV2dy6p^Pq+1 z8xhW(4{O@wxqkB}LlekTML1Gzvw8f&Hn;9x?%$6JKI|`GC4)rkQ*(y37BD7r!RzM~cWnda^tSk>i$*Vk7; zO5Uj&9aqMM?$p%X=9fzwQc@pS7tdPl-R{8UgR!dKmw8ebe%ldHzbCWL&}r7S8vI^z zkU|KQ7fawxd?f11ncRjVDXM#xrRTOau{QQ1ga89Q{P$udb6VB8`_WY9vuYOyiq5b1 zs83)r2Sx@J<)T{DTZ6_o z;wDEMlkUe(T**(qK5!W}RRH_78u_+Kc_qOlo0a~v5e-}!gYXJxd^S{Bwzz>`$@H>_ z4RN@yU&E~UQctf@$mxo&Tn{ohRsG?@kq+6&L%MUH$DpTG$w)i+*2xR~N9P{FY!1I) zB;qg`84n^EDB83hJ*fH!#|G>_UK?Lco2`tf&hI@c{o&k4ba|hoj5s_30M@!g4;O{T zcNOEF+FP4_Y>(G-Sqt-Rz*-tMr47B}j2Zhof72JezDAfHg2u)|*RnDZlmo0yFVc%=Kd;bt=KBOXoSnFx zP3c!A7}!*2H1|mhz#)~wqY172!m*9frTru?u<%Qk2}U=aRaRU?3ng1TtmINTEPdo# z{F&5`hJ1fgvR+p)(kd%+61+KaZ84Oisd-p1JEs-^&RtV_cuxmStoEF-2zs(I;%#1= zPu`0-s@>0zYndH5W4~XVe3|>CKR|C9jwM?=cqA&p=4UK?i>kSH&9~OTQSLG6Q(N9e zBHZaRNqKtL&rk5|3iOf}+j}bsijOxgieQ_%oR+gk1 z2e6PjX#}Jjcza(;eAN7+o%Vjqy*carap#K>`$i8EN-i92+#zl)xm?ub)q4X}kXonO z_ND`<{1Gr;F0GpPwFl>tnKS};y-nXKOc3Sd$abv=2J6WQ2)#sza8lYlI*!#)1>?PM zYuowh+-Bf>$Ve&lx@$q`8o4ARf9t84=TFhO-$A`^&hzZNm)R0ODM}#O>D3ctlJ>8W|?!PlVtjBer&)WcV+&Z(=}gas#7*`;rO^rj!B z(#w#1Je)yjZLiPp4!2n?8gjH(e!8}=zP0kM`ttXR*iu&i#3;Y-CRXPS1mN;mNN#Te zjTuo5zdJSf!NjwZZild)d8QQ&rAu;`%NMAAg2R86@pZx-VL5_w+^*2#iP|zGFD|Df_kDI zx?qr&-gPdj{6M2f3e)2C9ea;CD>ErNZP&W#HFvb-(n+8<7=|?j2FIWEk4iK=-5n%m z_FGgzeiaLR{kgxo6R!)nn416(nMjsS%c{zledUmoRAd8M57&yf0N;O?BN_i#fOXQi zxSd79Py*2knz^4NRVScc8w_$?ZEqM0Y5yo51qDucVa2^z%f|ax*lt^(1}`n#+KK`a zLKKC#BZe;JUg67gTW+kS-BzQLH@ByT`s`L4|0s9bb((;qYRB)fe*C@7LM>X)bp5n) zv$AVrAAK3PxZZ8KpZ*b;zPQ~ibGfie?~koPkvjWB%$drJe`#koiryIFTwlZ6T%838 zj@&BUuXr0q@BkHpui-r`3Eq7V8QwXEBBaUJ?YM$gg>|ZxFMC(YODCiH2C>dU&j?s^MUPAp??a0(HK<3`jk!F^&Qv^h`WXUM6MuO`Jt$^V zwCi`#+*sb-P93kAD=eWs={FN{S2=6QqVh#2{%y@v0oYF%!<;%B!+@!azV(|y!q^Kf zFpsvH=0cl4RR4z#KkOLCJacL0c|}`)SDp9D&_-;Ok3Sz3@mF@tFdC-Bo!z9-gC*8x z_sHC;r7n&>EC%~aB-n`X!N(vSVUfaPiIWQc1>Yg(?|-(B?g3Lh=O?kucGaSggy>wS zDd;dgh%5_1IF4n`7p0G@>+TkonO@jj^6GX&LR-H?K*H>M2 z1x9l7X>8K#=Cgwze$oe8^*7c?4QNo~UIWVxc*^0e^$uRgq_Ak2e~t2BB=$4#u93v+ zee>Q9Z}96AFC+T*Q?RICmPMINPb)GQ@HwrVPA0ce$ zyu^31%>x0TB2Pi28CRhaG|2!5Y@F{Qm;a@UbUWv4V9dj&7H_|1jJqt!x7=xV-beCP zQa2s9iT+HjZ2N+@9Cg1PTw==`Cp&yCASf+xEk?gSR;Na+P887;aXZvzL0OPdUv%lO z@l!@KUhL`S<#vZww<7P7yr4q-wl#L0#mUEQrLl!-L9#id9p`To%4+86#@P1*hy7U!B_}ie);2T|EkNiBK{~EKA@s|4iSZJ@Jqe0P-1S-J(_|}xPcab$PFx% zQsMn)W{J$3GpjR<)xdnkgwd6oC;>p=Je~XWv}@Mb8?XAybwcN1O*csIX0y|f+m&7C z4<%(eXr>GJPAGh~CC}z?za=07HbH^@DBjRm%UtNKb-H!3D3dp|+}yJu$VUvtI!BPV za*bZCyw2Z5DrtX55VWh+I*tld-u>wXRWEOV?bXF@kCVxO`j3KQ%xC6@o(t6KugLu; zcj%w9Y#T1X^Eeb*;(DH=ucf>)L-C|Bz~f*&Ru`6-CF71aT+8cD=bBH~G!lynrr&E& zz()qyNf~1NV4vrk3|!l}*csO`_e8w)1HP;NdhS3Pq0dfFYgc=nzlan*xYf#Kbl@iT z^n<^xP_=XsPo?X%aLBq*wOOwg*ED%IZf=$K^AT4b#k0g7v&NEv62eC(-$W;`Sd*$U zz`}ONbbif{Oe`EBiF#LM&ej}2W;%e)o}xl@6hik7+kx82>K5qtS5lF%u z6O@^m`BYDjpqoMLgGFnKG!+yG(OX{naumP2(W?4vjT!uTbgK|35$q&QI){4c<(F8+ zSN~%$ncrRIkYMri{>;p7@v~>`^ zMv5;L!AEwcE8+=H$axO>5}Vl0iX-k@QkCGV931j2q{=SzCcgX2pHn%W1nM^St2yj9_RhrMj+XwTs~fZ|UCp*X&q7y^TkniN2dMq0EkTikb;g;bV!gcWSW5rO zX&MG8*ga``yj@7Be6n+~?8@JU?f?8&^i}woA2~y^*JNc`AOQVQl2dxk^um=DnB%v>C#At~IB0#FQz-2l2)F5KA zc@R8Z{tyXL9eo&`9sqw+1NEQ}KVl~eq#R--C;K@;0)&CDfk&rNAjN?!z)_Qrq&1EfI!pcMlGhQR;-IRml{IG&e$3%wql}Eq5r7E_g*4UGMOdPc zSOm3kybJDAhQEJ!#M@q<;1@>E5nL#QE`YE^7?~hU91nh{rmY+#zdQRDUU;%#jzl2P zx(I|4fIuSwgqcAV79?r#?^#@*p(3g!prrM<|a4*ZU#ZQg9YhUj=}CU@Fe0pxVLZ$g1gU5kSH9$ zh%nQy0>R_tlAqg=b0FCNYQ_8G^a%(y8ki%n00o4M;|{a}ulKPn{v9;RS#l_1w(u7S z9(X_-9YA=X3~R9<(S#9%_!5N7B*F-3zljCOx=hSW@y+HVSP*{l#uohs0y@FgxdjKF z8@2jF0&^3){9P<~?sxdF)fhiq$oP+_11xxJ{SC&%3<&@h2vgU*z-|z1dkYTe7#V5i zfN5ReX@h_1GiWj_zzFfLV?HYBN6Z>{5@2Lvy#Ox7d<{=o|7d8AgfKHkn7AH+E$)vK z;^KXcO`gQZ#@qT1f(Oo~NZaQC3Buf<4g|{zup?As?~^CKjg95jgJ7M38NweRF-4d< z?SN%nIFO(z2M3b0RY3$ZMd$_q#1O&C!Lm-Mr7jN#5_gn?)duFMK!9G?6!CbstP9+j z+o)@XUVjXBmmWGH5U3#FqA5zhys8`At14REE-(EJ_Les4n<0Y%B3%U1A-N9=0%!gp z(O`-j!W0z(z)ex6X5WG##X=)c$WVY3;<|BeTYE7c$GDU|$LIM~C8xk5L z{t|6Vta+drp}8dj6%N4ANK}X^0m=-o0{~YLH;#*dhX>LTu5ca#Zr%WZhLww(n}kn~RH+i$nRYjJ2k$th~IIz2yCex4EEJ z=5Z@q^ceIRNgSijhBmyPe_c5ixq0-0=T)^O~XP$%w+9+J&o@< zgawB_V&~=tpzZ*`$-~9L#VLXf4hjl$m(y0(dL9}S6ynLx#RYKiLG94wf4bv>h(ZGI zD1`^(f=I&j*tvPQczCe@G1SNbb%t6)?Nk*!LZH@QUm;FtXh;=Ep~0bESDcNaj4_Xk}5?l25I zw|RN_JVJtkLY=s73*7My4hpm8Tfzy}cr z;NsyF)W?Me2m8o*`Y1}vJaBSV@DB+J^U>mm(1O~zIIoE5-LnaOf!2z!yT!qAQ#V4x zIy}fs)<~3#O9)a6E*=hyr@X4WqW6m+Sq@GC_i#_er*dk3mbd?4FUrL!77$_~E~KKY zD#gipM_Ex>Q1VHb|6K_D=fGu79(H6{gt5M(gM+iKs>@SoNf8wuhGggA^an2U;wksS z*!o3CNVu6!cu2?#w8;ycC?qZc00l%h<49u+1bQe^9~(>(iZ+fkW#{3BR0HD6o#)2Z zVLom?NOLz=w-+`>0V14SERbqITsAhf34LyA;Egpl@(r^!La}pm1p{QdTwKCxcA>#R zfzN}9Lm#V&adEPQ07TGGJWnG6Ly2Cv@Ig`#2Ee(vIdrYD7@Re+jV%Id777Chp)Zc$ z+k_lkJlv4k=i-Gn*Wl%5ffPU}jGKT*;E!4;1}Wf_2$Yqe3zU?@RA8z~iU9znl7fME)!DJd!_Dr%`|nCNO~JbI*Ss;;RCU5iyz zQC55iK<#o$ii)aMmfAWNXl*?V@{vB;Oi5W$RZUkxQ3+5~R{)^F4KZkpk%p;-xq&7U zI@N_n2cU}pKoLrT6qQshv1k&E`6FFzT`LS0O>3?UwaKeP?MjN!=umeY8jeA$Xjwl; z(_r)zl$DiK6d?DY2wkUY`5aAzG1b;G$2><9Tc{{0DJwu)K?DsC_47x=ADNrzLQO;% z0|g}&CCEkK;e{roLz-hubTH^gD2%141sZLs3IPHs1k@h_7p?iG*!+*I~3Z)TdKS&}b_)h*AJVIDB^uNT2p{1^&i86Zx z{lr-4LL7nmL*!D^(=^3co9J4b$}1?y>sjlVSfhcE2t>r0BVTiYG{nHnu@lTwz{IC3Ra6$Ssj6~g5*IK@)J;RLyWb7 zJ{C!Z($hvEktjVQYmA|SvJ#{a@_5`a2By{+jJ2^I{?o(|V+r+yZup^q5I3+kFu`MD zq>tyPiGj5tM6*ChR}__1te_?fGYcaltQm>5iNT-wAccU0!@$r4V`ZpsVQFAsi7_=W zg4i1j5bHvHbWAaUXkrXP8zQYNqz#ZTC?Tx_F+|p85HS=&0U}5Q6!c63Oo&ZQ4IzPr z`~(aV*iZ!+^aGM~Mb%J9(iLPOAEBn89HIcpe->b4A;1q1cHIxvR9B!Rzd{bhq?AgE z548XQ4xPdQQex=K?;4js01(93Jb0j~^xy%DrjrBO#@-45ZpZjUcdNi2U1*bOe?$7< zO~@r4VnO{Y=+uN<94WCmu^~@N;@iwQ{z#nxLF=#gt-n}&wfmCo^4PS3^7$LmH>RL$ zv08_7k%hG!Q_U;p8L7p|XENA{^UhK;v;cXE4eMJoU+KmcH!1%I%#**(HKUb1Z^wlA z|BwK~qa^McG;Nj}`fW_e7Z=2_wtS$|P^vRK&qo(kkR(1w`Gdee;l{L8!MJyuK}D@m zvC^%ZpQ=AK#9g%fA!?oxCrl2s|MZ%Ft|~72xV-Idw}JhVu5nE?pGt((7`17bh-mln z62;9h26Qw)O=~qD9ZV);6%lv-r8Bcgyl$+pLfBDCjCMI2^NrvO(%DKD($Wm1k%T=P zGtwfJKd$p1Qr+cJQ=25<6~Vm@&i*L|OwJDUTxIe92H_=5vyg%38MQkRqC)tT?C+Hf*VXu@38LiT-rbME6~n%$Z=f zL%|AtdNm|WLEgQ_Z=Ke$&vAdw@Fxn!_WzInPl5k?6cD``J+pcz!|K1}|0yNzWP5e{ z#_^b_|N2uVC7*%1@Ypjvu!|cF^i9bu+{4 zKckGEKUiJDG@6Mm%Up4m>Fmmr$t|0%q(6kyxs%`;bk)KHnK8P%KlbNNWPRIL(ruzs>GGDtYBPHFNju8PGf|LCO3FvClv6R55(AMSusP|JT+z0n)t3yOWb;i=X z566?^QRKXk>Z~nndBd-usIEOvA$SaR!q;jazSXY@kkIi%!w3;D0_)*@^eO)Ul+0Gl=tP389m4}&nXWOR*oX2G^jM|KHJ{tE{ zbWyXEc{WXOmPd7aJDKQw-qclk95K(BDefb{-5%f6na(rIsVI$xvC+U!9G&F;pw%L( z(X_N0S?)WsArsU710xr|+uiUISpB1m)WNdpixP&0>j_TqB+NxB0%rA}XYx__`Pg&-|98otHhi$c!;~JPWKHC69Me+bBI&3 zJ?)44PD;nj<>T5Ke)i`H9y^@(tWMo5-hU-HGmU3bf`Y71k#Vij*>=I?S5qDf9_rJe z>D<9;VQrthbX{m)-(M3(%M+efqrN^+Jz2k&bM}|c%)Eok)KbuW$0}$5H0x~C*Kok6 z782gc^YKyJA7mecfkvlX9H5hT{GzMxLLH4d9`<1lDH9Jq+5TbmyQ2@r=2km^~CqwyB3$Ysl81bwcNrW z^q@l|Nmj}jdv(*c3Y)=iEZalD-`UzOGvqFN-gFAG_Q~oyC>hz8` zaO$-0_KHSc7)Sp4)x3tQm#hQ|!o2lkej^-5MpC#MbZ;@6^Z0SkGS!2IS*RA){T+j}Ddjn1_i46Yw=JJp2tba{d zH49BS(JwpID9Oq*1@}(3CS7+l*7)g8E?dba+#dLLaKn|DnE`iFo|EA+{JA+ct-^St z>)3K|_>NC(+MAAr+~%Q=o+C6*iZ6y5pJ8q-t>CRL-tfA@KmLxC$;cgpOre*dp7)%a zG`K@=Sp7?9u`yQgcvpcPX^D&zPnVvqu21V1A)qrbhBT|-@fWf0Y`=YAPFvcR3-`cB zRl>JP3fF61nG{BJTga#%-;vjx+I{AB?nGLdSpf1Ec$Xo}p1DyUL~y}SVAa)F;>!7G zyMvkeFP3&E$2sk9`!oEc6KGj>FBxn%h#x|Di{64HN=@ldlFonxmcc}EJk=*ux$JjC zrKO1&G+mlP3bqu*fu>?>vD?iI#3fX<&#wH9Qw#iws_e{um?1$PvT94Y?4GNU>ekIp z(qON&AI$O@Ojps(ofR99t@#}d@%neP@`T5fH_U%i*5X*^3(X1U%KpAim2bpmv*0Z2 zd%k{IujP}RCE(g$nt)e^@D|xN-(5KOyA%DC%Pii6)@Y!YN;ABvi+E0GVU!q4$>oe` zhZo(15O4`NsV6I-#rTLgebW|hR zyA4Uf4;O_96{(&=WOIKCKbe)Ox#1Qhnk+j!rp4Fnl+%;%!*L+<2vO|1p3?+Yj%MVW z!tDibF82OmX$zc*7aCz_nB?+mfc2WZ* z%c?C2&6K<*n8(k^Iwt&9`qR%0F~zYP)LVT~6KN+J6LKrf7d-Yt*8-Bo|))`pXk$lj166^y8`9GY@)iT+hp6t{}aVwna4ST$yu9ZB4UZC@6cgtk@v|P2_d~LD5ZK3&J z-MN?fa&E?3v|mN|(tPmNMd#a>@Am=S^l8vLKHXh?-+z5d^$QY)lGpG87JKUt;>M|J zsBaQWw}bUPPBwCn!6r{&b%}7shWN-qgkrJpZyKx4X2W?gP#spAl3p@aBhx~S`eagw zaPq?Iej(g0-tBuPW@v+&WUnfW{qH$%%tS4nm7O%(YR*J@A6vtL_6LXiG#SB0GOrx| zXa^K){V`vJ&Rp~Hi{{>WgBs4V&cpoJ?buaOuXe~B zd=r>W`*n1K&Oi!p3J}S6<^H8L{-lH(7Wj&wOAJ*y!#T{DnfH!4{M~Q*_PfNA?w;N$ zQvLRTfu7BQG+PM@>dwtj@>cX8nE#lGi);gr*s+ao26_Ow9*U>$YyvfsFUzhJd$H#z zDA!|%4Zg#i2UTZ9AhTCP$&+FCpU?`LoKY^*d=_jnI`PZ>4p@l6lR8*_Yqg(=6?gf0 z4(T2HB?w4mLIU1GOjAODA|(G}_!qJM{>0VO4*UWo?ebSZ{`#cLvUKE__*WE*r+USv zlRO9{6fu{CLDL$1)^bew*BszaP=@fl)vjcMA;wHm=%rrW${`yT(8bnx1XD`pL~&ze z^BI^7`mr36*7cI@8etpy*iK7!uDr*FKi#ZeZv>LejBU&@{Lh$puM5S0j3zaG<7gV$ zLm6Ms$>|kmH;K2)3RVKPc74-nZ@7b&GpqAGyccRk-S!)LZ$DZ5_>X-4;pioKh|7Vm zT|Tk#v%_SL(fmDSW?U}nXv`?~Dpr2dscUEe2TpeF_ZRt2AjXklI$rI2KSI8##6O;b;DDEUs z%rQ;S7OaNSC4H3$1xG9oJ|w=$2;AzY!RBc(V+)q^^Z_Y?g41GUoY=Mhm83U#YJYj| zSf+|fX{lNLxz@1S`F*Bp$d>~gi;M&7Mw{!;G~RAi{bRBqUq2*7xFgIwySJEecHb%Z zBT76XA2m$@cKFA|H4ZO$Qg+)_+}D zry}-GQy3e0Y7luOAQ98f#9KjewU!Jj{yy^+htiY+eq6?y&$vUS@#&4?^xU@~GM5B+ zKwxn7KWs$N!r!OsLWqXvFPa2FyJ99y%d?ZIeV$1U0$f~ z76!qfn5O>zt_xd<3CtB%V=~>8O7qe?ngrGr%+UC_in(rM@uDHOBATiJD292vv~qOWJa{j z9oD81+9~{-^*-f-y_DH9-Bt9yQ9HjV7oVZ5F4h&cS|OZ!=AcIHep~G49j|l`C{#1s z?Zl&{$C}VR;Uu?z_c*iFzQGJG)BWD5tU3*|hFIYYq@Y@;jd+1bV0p({~Y z__%xU^V}z?hp=H*hxppMBmLnj>7v`9#PU(UzTSsA@Ie7x-#>y{04KOiz#664S$ykc3U+V_&!OO+LI`Jj7_oteidl#u(mJZe};g%N9$< zb9St&yVlXEuEMNqck+UlsQtgPW{9=a0g#MQzspSPg>siDP)?6&r~IpJb?o9XWDUPg zA%k2Zb`xXMROilwYDXOZ+XUgyabkFjPepSrkZdyWul&>P*h_0WL$TLSPe*klM*uXe z<;xMzc&{-fd+bDx4+;MFD5rx4a1Dz50AGJN+rAcCgx;Cx&+9AC>p4q}JAEwojtNL6 z{t~ZsmQ0YTMGZv3YUof%%l{qaH!ge$A?I)j&EN^va3%6FwRf?Jq~#&EVzl7CzWH|9SS|zx5#e zNlOat`=n@_)+{H;W6%v$G)26<(|Dhpw2zLC~ zV&qNIm-o@kz|vU+okg$HP}lexU9V?FW~zh{BqS0v20;WYAXLVhGTV5E&!XHE=Ne50 zdBk-w-cD!BD^c=L{G+;li?(S=o_2ttMsKg&fq1I?o+BdifMH$7Lu@F{uhGPhS2~F? z-VW3DkIJFS$rRW6%bF+%PBp>G9N|pG zV4r;x;1%G_gdf{R zrr|N22Z>Miw5&$fj^Y=?wan}@3l6k9pHxrSm+vK_M{l+430rLGCzNxFd@qT68BQk% za8Kw@jq7KecX`phE2-lpE;1mX_O^U*?CFpH))2hkCAL_KJWLhb{;^(lUSPBib0%qL zW5<6sY9@j{IQ;00hoTPSetNZ{9C3?_sxaeF>Sr!}`b*X>?>4^$ zJ9SqXL&;rkV`7ok_W~;;_e)24Dpq=5w)hrgj)f+bI~X$K)TIb)(X~ zza|f^{Pc_EGJojy5PT_E@VboJI-!8!3O9)Q)z@{m4&#aT-QATDyvj08SuE?i^5B^e zpsDoxTT*5IX=Iydb(m{CcmB18SKR@xpY(6JG7)*K+~GZiRh|inFb3Nn%Z+%B$4-1~ z;2h$yaJgR;N**T2h*>*(vHI2Dzu#~Bur!CuByG!4`9Hj$y?(w%Ig&T*q{C_DXoz@6 ze2KobU?P#)Zt21D=emL+JL`g=i0TSerf~n&qvU&-zWrgBGd{1zky+1{HxMJJZ)SEk=i$WWoy zdT}xc^C=#VHd$9Bj8DbV1->a|PnnvmOU>FZmN%<6!OmYH56w+F)+s0ZIJ5pua>M*k6&SyM>q zs(&n;G`MM@o6mk(BB=VNUG^!()4MrLMC+FFvwZIzj)x`>+_{-@e?~Jz0GY zBSt_k_s)};ONK!^EquP&&yyartT4HWkKqs^e3o$374qPHn%S&Z@g%2EM#OHIvpgpj zpLknHS&}s>d*j}G#=knnM1yndEG(3iSg|zDDVCNxD8y9MHQj$_I69x=*PQxzE#BX$ zCM3N@=}BRkmIe4=271S)w8dk*yBCq$Z4CIqZJv2{J!3vaKPx#Gwcb^G^}6+*|5o4# za$j9NDlV*cX3k5@c$=_svomveV*@RB$ZfY!?_s!R%d2gu*?lV5y>F>BoWy3VHQJB_ z)x7^^3@(+f#^?#p)&mc38GDzBV7_$kX9+%yVZPN*(mUlc4trxA#Ovn@kUPgtI_Xkh zfWy;Y*)ofa?6ta&!3^t`(uEPR?Z%B?P|acEe9iVL*HzxCELG|5vL89)(xZgp`|F2W zt(#hT%QB0)hZ<4iDc1bmR2#46Nt|Bar7ZVu$k0tg<~@`i$qbFk`u%p(4v3OWDe8Al zqJH=3mD;~Bem)PuSlG4d=8*sv#YdejKw+jsd{BIft5>P&6Xtpbr88yR{_Xyhijv zm5ft!bfS=CWtQxpfut#UvWZ z)%$2kyY6?`k%K1TcUG=2ZN-2wLc%NKH?HmoJK1F!PBMkDpV@tL80?2?LM}F2f~Kpg zMBj1eyOHs`Z5NqtUn{za&FRz2vbj9rDKM%xrqZALMWL0U*y7jBzja%Yh!$O^e52xR z`|ESxL?aMg8bT{6Cn{F9nJ+j-`4)Pd2Tev3j2FY65#1;!KoOCf#m8pq*$JOv8YM>! z_8N{3HqL%H(7&8)&{h+V^*%W+Uv4R<`8u?OhPIya4301v5)A* zs;L*YutbAfPo}#E~ou*XQ@xE_xMJ5+1a~1&+R5V_eH*Qhbegs zh&-j>U9gJ*!|?EJ#NBg_BJUklQ@+T$ape2DIh))4BrLG)EBjBtT)>4} zPoAHTG+M|yQ9(aJ<5Jw7{O*glab3SEqy4hqk$deCu$|qov^fTpEZvfPc4mp8P`TL( zY~A;IQ6@BgZnej1f%IRa0G+J+w+xrd_DbAxUrfA{Ze#R0$*^l2Y#@^fIIsuYNwXyF zn1p@OD$WuRB0ge_hMKn*KHQn%=qA_5mdNlsdNH;k^p`m#lDOEjP@egZ5$x-x%^rK) zX8lD?rDx}5m6hw>M`wz^@($7KB~-{)fuZL`3Fe>ag=6PgJ5@T=lnpxkrrv0$?W=dK zjxwipu%laS`E__9mbsGw7MRM||`Iz`7g!c5BSc}Dv#&D9qr&|)(#$t{>x z?`}q1hbY>1VWAH5kcoKcYi387S@p#DmqirYH&CX6+}4L)?I-W(yT3~<4xW5o?%T|1v0RA+Vk9_QiujgGg zZugde5pwjtlp=2o$+uAA6%u2N({wDL-`r+s;Aa3=_!;SDPACEYQnXUXepy4r7g$t* zyZ0~&>qht6@9FOt#jO9y?{T+i2HN@J4l0^@c@ANGPlt@O_6$X@B{{^3XVllii(Z0v zgyPV_obpI3e}S@zNlM_$8Z-3~luATo`DKeA>QGa+(xAx_K=kWHyF`4oH0$+VMA7w@ zIJVo+yMwl$dE8Yh`zV~)IJYS2my5g;kE&!u7?ywT;eHz5uHUJ_)A@))#1UtHBm zD;)X7-E`|&u-s%}-g5lA`oTCp!#&IqHsHjCoy>ln!#UtD6`&3Tl)VQ}zaC|KXpBzp z)upy&^xgF30z-pF^Gau8L2JU7f>OMB>JEc34p~G2P(@;T$whd%Vv=Lq7P-5{UPl@E zeuIe{oJvb8HsOS75D8%uqQ;yAy@<@?-Oa9I7njszqTjL;mor{9A054oksQeWPCB!F z$u0_f!S^MV{Ot;hruyQos>9fn)(WxHlnx9_lp6J0^08ZG6&73q zLq}VQTzsY-AC?+Q)MDn3=vI~_)HCk-oUEh{v-;$!EiIWBml#Gn;U17DZLm?$F%z(Aeog9ng+5sPRSluV=FD2w zD!^;rc*A2X+%m*gNJth@S#_(qH#-SQX~jO@YSO3?L$CcE=~EPy(jt%dzHh_6CM&^r zA`-Tcl(*ikcQ+g-HvT`BzACJZu5CA1arffxQrz9$T}$y6D-?HkcXx;46fat&P=Xf- zZV40!uGzfbzfa~glbLI+SBf&SSYNc^hN>{hoUK zBS9t6{(Pw_rCva~lu7NRHA_EggpICAlqo}+cc$X%C}sS6p(s|?yx;O|-r^O5f1$%g z#`@?|&MDY_6UiHG1PT|lUYE45_%7ya=F+j9rVkDrx}Vgy=}$q}9qG16{4A2|joPT# z+bMNl;{tNYf7yS^B1^YyG||CZpLzxvuq~fj67HG=0Wj+CMMsJm--3>)M_s-qEQzMM zpJ76Lz%hBpO>cdoIQZWq(weKI@Rd>>3()%Rt%(?p*+g} zz+!L(8IRoHgV$C5kw|6!DJBqEy&~i@v$kF`Svp8aJ=md9x8H8B?C?Xe-OKrA zs+mRe*O=YSYS1Y$ur$S_KflmO9j1I&>YSuT?7rps`LWOYOF2%KNzxpI{|RNAnF!yJ zfF_2kg|@Ho+pQ_pJHbw9+>^M%2Ekvl&Ym&w|Pe4WsMQ&5D z#iON1_i{o70RIk0Fy1l?s)8RnbqQAedpt%-#(SAo1vrkoz8n^lz_eFkX)e1Zt z9|JIFL^qS>5D%r__HbVG!2=<@l;g; zI!+h&65qhgP}NGG;Fdi81$@%(K)g;TkM?QDylvZ+)|fi?SUrsn4>G4PMm0Ku$M zL^{BMF=$wiz!~6q}w%#?ki+yPM<6mO;W+g zGpr@ZhSBnP&MgR^&5N)41$sz#{Q!nc9Hh?H&dnh zR=-w#JvkA52}LY34amEqD-56~-DZnzeVhK}!z&Nt92drln?7pZl-@VTc__L=+EsXr zIQIa~(Y|;nrE0-4j((b>1e`~pvXm|S^{^ln)C9F9R0a3yd`HDJv2{VhjRoIUvQNM+ zt_&4DbONo6j2@n<{QlhPN58^_rpd~CkQju>-<>Q&iY4Mx1h9K|`G!lMukAw18nJt~ z-5W$=n{yz?5f_;u-82S}lDbI_VDH6CiFyAfH7Qp!wS82Q1$2ruex^nW(;WgX5ohxa zK;)(Q_L0ZnZL41!;uEByJr3Ty1+UtCz?{XNbPU$2jkS<-R=yuL;W<1gLr_i` z)P{eL6y|K=)Ea00LIYAo0Qegs&o*aQq?iSQ5|iPvq8{>l? zDQ6!=wh0C5y=x$)N-7QK8Ap-HzVt?ll54;57o%W#I2U=y+dF|K6}|W+Jf2}VuE+&L zipz_@nw;gBe(&d)&xye(US%BoO_g3WY*LSnO_S4HgY`VfbEEB`Cja(DFO!2RD1+sx zE;f3Dr-=5q#CUJbp4Ynjz*9~{z3_XrPr2_sz#?ARMoohX?q7jUdSj4u2(Oxz#@kst z2VY6C%@J|2w0WxXH2a!%)%qRW$@}%k<8{+h_&AXWrq>JH<-FfG)E#yS93T0g4Bwf{ zLeQ})k2w=Ul=;~Cs;h{0SC6_pse&v$V)*#(L^eaiQ5nv>)D^u6tpJ zm|Py31!=qs(c%;Gk&p55^k`pp7N+gB0i|QQAc1z zFd;K_dQ~Lh($DeJlGSEU8K=8-oDxj*9XR^Q`{tsGIW5OXJ!}tLqNwd%Mi>8-{XA#z^|Vj$=$G21WpZ*AMh@m&(% zG7@x*wK-STy&UcPPC2^@oUQF;CTI9Jn5%< zx0>pT^(DFTnJJj)M%4EQzu?6&_W4+iQZ0MC9SM=X(Go|-3pe;zi7Xeu&tBe@h|tO~ z3Ij^BCN)?2QD-2Acn{0=Cz*KUbff2}^MB}$5ygK>84Y701~wU~;dsyVi{rs$9uzbI04-Bt%_o$u z(P(P2Fis|OLf}IjUs;@_VWCf(4U71P*J2j{+R5tr2e;OlA1#q$p^Bun)jkcnyU_1^;Zbq;hawC+)#LVzG22jO~j5ZrQ~kpNwvx3b%8aM7pZk-q%Q}U)_erPsO3UMf0ulm zGS~^&IZ^3i1XiQ4ODSQNRG+aT;izY0c_CMzSAX}wI41Mw+gL$RGhNIzwXLEOWLilF zJ-3xWobaB3qE`&EX{yHZjg{r8G9#{Y`Fm;KWL*&2!Q-YO*J1WATV4CLacp?B%RP%C z@qb4wQKO4#!QB6RA>?~ob@=3;%eOV#^Zcg@}N{dkG-GV;Nun+F4& zBU&W-oNA;K2`EB=5t;Oj|+f1f$u03L6qp`IbL>S@ne}1&e;P zfSP^ojyg=bE)3VGEHvK^t$7iTlJKGFaAI!o% zfN)^uxdTv1r5{@}EQ4Xf6BG@#y)^nOAt5G&Y%`t(5^X++59>=kUzvhIuEd@WMPHg8 zC);XZxLGe|0$n%%W4T=9-VT~%)=*{ac&W%X$yoR$-dID)rCZ^&IEm9V1dl`**TDr> z!MGSZJNnEQ6#K^o*OkT>lun@~KX!(D`=@@RwqSj!6tUD>?}pmSfk1Qr(kelK$}Uy% z#3fl+^`{O6AzcW*AU(ZfBgk}ht2^;^^h>@cHTB19snrDG>PoW`sn65t_tB4_v z01a4acv~>*I^5&$c{R`<0@Vo=>(y?4s+Jpb|wAu6c}PwqwjA# zxC4yy%j5&$mXx)m9>O3xBQ_@x?le}1bW@NWOL!AhUC_PAv}lflR1&2lw1N|I!e^1O ze*KfQGr#vY&d5!+4pnV7i3aNF!fvumu$v6qU;%7WX-xDECQ9Nc-0W|2;@u>ge((-~ zCw_Ab}x zMfIa5u@w%p!D~{<(ZQn3yW$bB5)>1Tmmg+CN?Q|eOex)e)CBI>vD!>Fpe(FCmi1g! zuJmVgCsPDrk~hQVeE5&7{zLygWHPc*8(g@E=634i1$3)F?1pf0O+Q(aQY%y<+~M6z zV@eAa6`OiIkdj#3E4U7d1JdosZzt|`DXe~cO==b(+i=efPPu!O@VoTbDjCA0ZZ8VL?bT4?)JLKz4JGglB}2A%7RbW4BOgb7=SwBj*l~|>#9iFpCWd{PW!EC2uX^W6^YU!;itc` zLfG)DD^}hC%(a%jG< zB(>dMf91PZ8Z0V?$^HZP-1tL(kOF$fyx(v0=Mao-Yn_j6oqXf!I30cJoQlGm*X=qv z@!nsmg=)V&b!2(KJSt7xa!`g@XBS^o&sJtg)fvgtbCO_m6cQvL;)jXy@=42F&Mbc7 z)`oWQ=W$oslvc|6lS8%N`Y7Mo5ql~aHJ7LHHoCFz6r~uyc(FB)XVi(FT^grS6mGu> zo8_Hoy)OtiH!>xOUa%R```TbQ@N1t+8;sLzlsfU#Z9PhwmTov$P+9Q_;5mp77Bkcv z?iWtC1PT|kPYya)=57KQ=z+9}O$Mjn8!`UA2&bNg8wa@`)mw*o)ezbR^$+0D_HXJB zWqhrg6y450EWFokVHS5ab>}K;3h09Zz;17l;lz8*n@v*Hv#&^%#ic^_@9Xe3pY%1% zRFxopuF}C{l73sdSZJ4OPQMfA=VhADe6qp*P$k)hZ^MIVBXv$knK$0iUzztF%{`Vx z`F9V(m#}JX}{)gaYuz%u{^8<{!iWcranHj0|=dCe#cJc=Q=AL2ft|S zN~skywkX}z?8_z2*?1Oa)g1m$o|TP=(}~mPa8TVVIs889;vMh;r?reNSZOVLrJ~V< zxn2h6yu%$Q%Bt!^e%ff14XVG##pu2I_!k}xLYhoxoJAZvT%ba`fwB(zPBwxn8VLpM z7rYYL?ZtspsKzb+kAA-oxdE zrugm;x%sU$T!17S`JQ*u-g9%cy?|fh339(dnOb!mFYV!bFjW*hiC6okwjtrJcs@Z# zGXlkg(o>ihD zqF7Y#Wtl99t}(^oml7ZV;Mpu{EdvCx#_}BxgkZ-vMn}tG?C=*n@QxJ+ROg57C8?qj z%MP_VK}D)lasXQrK-fvLckf#AonMRjU0TYWlW>lI#RD^Uo*PH#AfzThZnybCmI!y% z9Ywi*UFsRwx7W-E_#y44&VKcj>jF-t%tUZb(C<}QPbz@j*baf~%$P-G%sFo!1z1W1 z#^3Dh5cjqup=*Q*qDnLKT#bLVJ*Ks$-n9lA`di!;nRN}!63+%q;Hi4ofQ!1;f4_!S zI2U;p)UwT&nFEs@#g~WNK>A|UvW>z~J84G*vO^+BV0|8ClxX_aEMCJX znr_gsISZBT&(vh7_@nxNa#s*_h+E`PWJr`-VpzP?@17!OvoQ0tQBZd)?jL#^LSRG= z9=dzH@F@CWtbIVpu7Z^;w`_MgnLA96gRi}4`U8N&ga#$aYnO+gWwYpC>$Q2~dzueI zDGDrP*DBQVB(gXT-usuYa??iZ7)Vkyq{>6=9jRhokn8Qy*Ipm@SrOzMIn@Sevo>za z_hQ65+rz|mEe=^@tsio(gv%R@sT@I!ymR}m1WGeNtR z=+V;#y~va>xNu64^Z=L##Xd++UluCqep4g&5to||3S8VLdrV)@2TAHg3(we8a^BZkRF&Ij)jF5Y^2b|C3B<$v8*X^#ty*FEJ#< z&i6(f35+d`rlChe9nU*VJ;hu7Ck*W{Hv{nhvjC>wmnN&`Pj_?_4D~DL zzt^SPHh!=D77AWTrG!05XH?|O>61WAt|#jdD6z7fkG<=fkq*@~ZlxY=s|XCT{I;F7 zAy^seb^5i>IIFPAb|cx3*vrdFeD1H0zA@AINtf;Z*GI|?!31%YU)O$*1zuZ+f$2nk z8k6Hj{&(59+j{8zVr7}8l10wF1_6*Robv{sI}k5okti4q&F^fxS%7hGsW=j3`)2Ml zIDVi!Zo^qI3qXw7Z^VhdW_lqwr!>!xs&bRuways|T$=AnJ2n3g2Ix*362k;+lsqo2 zz+zhG(gI&kxV}F_o5YqZ{m}li6o^^DE4%GB;xtDYsamDc%d1LqyrS68EH%KaQHE0S z8*48aeUU|X<+pXJAB5;4v?yxmD}YOsm+h4x00sg{4Lb8HlEWsD<4u99tva8>Qgl7+ zBGR z9c5T#JhC3g$u|6&nL}mI>*gCL>fddpvIhMU=86o<_U`%;U1yFP>M8!9=FGX(;?PO? zhpL`({`?GnaU37T50YZzlZ(2wE~Eq99#Oqq7r?Hei_-*oE>LCc7lpYu`KvqFj<%{HlSd?2X6MH#Qr)T_?wgy!Up2(foE6A z)HcwLwYjA0n+K!pXyXke*oXyq5VsOFyDl@k(xj+23LowEiEdU7#N4Aknv9COZA8%i zbSDV1!y1KK{E@e$N(!enkZL%xleuh-E#>2!)MoK*yU4S7%t6}9A-q6i`Z8nOpf(mu zUg;H!Y{yWhAF3Fn3kq+WwY-~Sc!-q`{-wH@T>Ax#GvTDdYFSv^*&%#aVk$4PUF_HV z-A*!R?j@-n-+JG?()~wM*J0-HXqKN^ZdL2p>JqTnp<^jIgKL*9?pSPW)iapqmq1j9 z=iL5__*wdZesE`1F+!)W0NFqnY^fQtA!Ugnbs; zSs3q1MoV-*tLbpYYqV;2pqMhI-m@S*pXxT8ZAP+A3|CKWE&7V`(iNdgeKdWmDVE^}H$TW0Qf>xD^LgQx%gqL@GvWH0DQ2Ef$$&Efx@F zg?XcT#>|uM-hm}*S}PiIz5D3~BvX#2n^kwjKG+)V8tbF|4JiaK*zeBJ!}IH`&O{Z8tnRL{P#DaRecwG) zJzFtH2mejBwl&eU_`*S8C??v{glXLM8#zwG7RJ;w@q_(4_k+d&u0w2hPE~EzE8um6+O2^T5M7rGEAhT91@Fy^d@`MbBuV`1Re9QCKI>JvniZ+= z*I#fz7^=%kSg?gBQvx8tg<{ftR{g8pzIqH8_UbRGQ7Os#80j|tgZ(iWf&&#=!3%}& zpV-gigE$pC7HfTXJdpXZ3VdZl9rWoM|H?ZN@5BxZ!1qeAX>v{g#K}G-%xR9G(e5HW zOWXG2;4$!}EuYWweGLV|5hI!qq^}%tp!VE+My zBa3R70E$HMeB2?Vj0~}6x8BwXI~vdN*Wh3goz@!yicjJT*B9FtId*y_Fck9biBOtm za?CLvC`+8&OunCRIf-JfwHJC?WT&jwdnHxvnawXEWr2CJD4x)BnAVy4WK9f35&I@& zEQ$v;Cn8-alF8gLp<@u85 zYC*bZKfa0=a@t0f;jDFHU|%NUe9x+K8zWgBk*XA#6t^Zo-%6I3^>}7{I}Dp?{H+VG z`y5z_{1Dl*JlK7krY}S@cYA|)el#Z6HgzD*F!?TobZYGFvi+d*HrcM@ONGdUCbDGE zH?(YK?#3}4E~<>=rC_}KW!!~nMkmZ3(b8>@mfe$V8%batfJGfsDkcXhYmZZkwtFX} zACJlLF}j)mP@f~IT7*a@|CL&dlc(Zo)cj*L?)MG~4i6Ig3;?hWR*I0wPx9J;>vzKO zy^#_<5Yv9BTZTYcS`8v!6-?@`i-_{FKWHH5(AY@Wk_E=>R}s<(nn&M3ivAHnKc&s)Zzb}8%C zSFsb$eH{84nM_h7JQencTlBK@cd!~{a~)O2&P{NkuG~K0@la-TGG zq_%}k!~lU(lfK}BWROdQP3pM9 z2GK~pFGGLhLCi`n-ohD%T2cP+Q&eKwqQZ%IRmwY{(*c0G_}uUt{B8$h2Y;pS%a=Nd z>mm^a0@ZNbjGf(b5fwO`I(`ChX3H`ZCeu4g*x;~tu4o5(8Nu&X)Tg7Be>EMYHwEVs zaq>ISWCSLqy7c@6ZWf=3EXHZ0z{pJ$-npbq^cKkY>O_@#vT%K8spV~1w3YXj&vi-+ zMsBjsRm@3ZtCVjIW)AOIXqkRg^>v%=gLhZw8qHVnvAFAY7gGoM2yhXbi%;|r`sxRK zX%NDk^wIo99j)WdnHX;@UQw@-+h`j0!;Uia^F@PhRFD2=MPW$b(4Y5H?<t z{V09AV-NyN^7KDn1k}t+MhRL2d>SLOUiaEqMMcJ#|BwJ;BAhcdi}+s#R{+i#-cg%F zRJO(T!8Onl@A0CjcUA?ZJys2sGu#^vnRS267Bu@Z#K%jv5p-E#OyxZiI?*+B#5)6U z7WAWJ*aaaKQKxENw@6bk*emS$EkK*X=Xulexv{7%ZsJkxgaI3i_dieEJD;yV^rCD| zC?1~=k0$Y}DOF-gbN{liC3*(#U46(l6~V(d5(uS7DD9&mpwX*tYHL~kxws@y*qgy8 zkZg#O<%gq8Hja7&O?JP%Ry+3cG6Q;uV@M4uBUOG8$3&<>8mx-^!a5(HaTAk|WaDGf z%^xP-4>)d}UiGlaek z=$OMJP{B+4xR7^t@={xz)gN2hT>I+o-04fno%6>Op0Rd)@cpw_p8)wsz)+6P;EHEa zlSTp1PumyO^Kk?x8@>J1xvSBd^{W+MD_$-HOiH6+$6sA8=sGyw;#%z~2=1-hUIy?9`Qqqtidm#%_}3X!hpK3cOc3ai~r zv<)W!TowAZ6Jon?6otRrm11lsxMD7!R=dEcYO?VUh9u_RW5Gzyi!t~r45IbhAKNZ2 z4xdoMnD>o!c`AY?RfZBak8+MP z!LJ`P5|cZav9YjI-xO$f#^6hiA$6-ja$4G3kD+%~=SN|F2E-E^o zm!+1GYa{$F!;NSP(^j68=O3x9B23>PyYlM~dG(si`=s{aDyN9u!F7ygrO&dLDR{%D zgp*H_H!LjdpG9XBLj3aKH8@CNFX0@*{(89!3z7CWa=%x%YM(@Sxk?;Lt>iUH^dk&) zJZss=Z72>JoQ?1W-$~lJmI#KHVMV4v*XN&56)Tx@7vZJ?4DUA6XhM!6zoJ~ud{62> zc%{L1G?Vm>8v6mQucJC=gsK)!Om4vfd@0ReYM;1w?*%9JjF{|Bgf8VaRHv{`hbEt> zWHHpr1(Uzs@^*4?lHjb?4$`Kc6Yp?ObP6A&ydtl!_md;< z__h*O-Z#tpUIuxrqQF;sltc@@6`?p{-~4Ha6b$UY&R7P2C-1^itS(nLUyXsfF}nOG zSu-6n8LEBHVnFdr=DCAB36}nsW06dXYPjKskL_M&Uvh~mBe>68BxlGEFNK~oENvNO zJuSA3!A}uhj6=AiB)Wti4hta!vB$AA7?i6aLv~5)KzmGU)VWk=1~e(}q7D06{by0b zgEeyZ?I&5c0OIPRot2J77L)Uojq#q9W5&zVD|uzfYw+FQQFZfA{+O+7I5zkL@roc` z(iRuU+^N#r2gV#2=_`v)Q9P>XjE$c3ycI#OXjX(NPG7B zbh|K;$B+!EA_DB)q`o6DZM^maMG3Qd%0WC|`OE$=$uH=+;$BV>XU`*qyeQRm2_b}k z!iVTsIJ|YrRK9swWGM0Mgp6PEw=&AKP9pK7Yx6EZmyHj;~eN zP$IM_{x_Q!vEt-Ut@LS`VVscVA5%sTnkrb3Kd{)&B*qM>aUv#$^5%PO0>?F*sg{Zm ztD)iVX&A;V<|KCB5^Q2~T=H*WH<*=b z?Q}4!1uy&{m{PM<7885d`+g{*RJCD^IO!h$JbiUX3Lga0oH%Mf)DGo3XDp)Y{P63~ z$RYB`-&&TD&tO+M&ONm+AvVQB5Gi>gg$SYV`;rUibOQ_|JR~y1ZU8WYmrl-zmNbZ5 zAX2LG-<$bVEn%*{?#A`#=m^${B@7`d1ou^ckRE(E-k&~22PI%(r~LN?Fh5Q+yg3lh z1h7_yzaH+e=uTuu4Jsn z1YHr^jU1Z~v*eTN(zFExAv`8?41)G*T$-wyifEB(l&VwqffB*Q_?#9Pe`C(#5m0V> zn9wxvuQ^ktOHOXRwSr8<9of6xc*ebV{4IQUN z0C+|D>&_CmSI9#2G2CJ)v8Q;QP42WGFa4#PW4Hmw1%E%37#$b**l*a_Nlm5|30h;> z=6Y1=OZq+^tsM$ineF->TAN35n-ga+Vrw^`}Js7K9qXaEb8DM=qb+Fk{FN zqdApK;VV6yNQA@L*8-b`>AE9P0(FLhmR}JUa*bS)HWH4?vbfT#3$SCFA(6AM+>7Uj z-c6PoB{#J$aPs#|1#H+T3-(U96V%}@au5TrTVvSaPM@F^7xDGDaQKG2Jb|<7H(EdX zDb=yIWr?ux?8G+pE>LqL1J_H$Yr>_z!B+zUU2xCLWQ_dN_UHOl%Z$AI2zyF$1YM?f zQF?cWH0I+$k;pmick%w<%aJg_Q6cN*Ee0NslKlz)>Q`Jk6cvdP^V!9l(5;K~6hHY3 z&K@5{{!8=GA2s{&`Z3%M`TnE4<&vju7wRG6jjtruKAqf$a;|_`o1I5HV?CWCT-SZ& z;Njc~aEb+KUR}EY%HmC9ZTv<8&vsZ94v_`X)vs-l3EZj2{5F^0Srq;Y#finu1DbCm z;|9}9{XcOM&2g~K;%^hKYx8^i*b3P|f+)kn9-WCC1@kR8RANb;9&!JlDulBy^o*!; zLf^nDA+qjeq9Q7Upkq>A3K>dxL3mxd{Y_1$!Qho+IO^D3ez?k#Y6IJGB%6%#%+zHT zu~b-WPc6?K@8-;Wi#BKS(wTtCZhJA}`4uWf5cnm=6Ezs;UK!$i_ypX{$YV> z05haiphy1B@`bc$mr!ZxGBH}dbvJ5Ga`bV&VECaAV74T-g>E+?56R_%IdLkT5{+t? zdq?#b6VI3RBS$!->JmG2OL*bmT8tPPU zEC}#@=kGppX_EVE@K(ua648ANccEp%ogv)!$udQyyk~O z|I5NZx2*4=Bcg51MRbc?Alz&2)ZWNn>q_S|AbHj%qkGzmprjHTEOc5a*Y%4wetw7n zdGFJUUi)oTuy?=!wCKMvXAbG&_AL3$^4zZxR3Z^?3F*Di3{bo-H9}?znK2lGs!b;W zXTL3I)Nw>3D{jlA%7mm7M6^=912noz+{HL{RwT&BC$su-I5ZP6GC*icj>XfY>XHZg zH0UCZ-aE(BNfCfG5_-p2s2jhux5IYyH#f5a5&z)~jMzw~ZOONzZ*7B^QMMd6hI8rF z5gjI%`MozOUQf%FXgrHuAtZ4rQy6erTex;yi08Egnpe3ZeT(XdE>no;^ezAg?echU zkoBwHc%-huJ<^bD0A*C3`0t<;3;=!BE;U!nvI6rnt4#*pRH6S6P2W&FE=T4DwuBpQ z6V79Y9}b*Gj)vB=bv!he;C+y(XU?J8!*KE4v<>NzR+Z9R)^c*iy~~M^ql)9F=s=+V zLsA`~8Ug=Qy2qH(T4qGVo2oF>Uwumyi}PO7ZAl#L^4#&06(fF%O5-`nb`{;$k_p;m z-7{#Ihf9-?DiUqOP6D{Z#8KcPaX@86*9hjuIvcdBpHO;7{JW~eTXI&^_xxgD=izT~ z0?-nB?`QYa<=MD4`_p9u1lwJUsGj6Lsq`BAQoQVu*FZDlu{T5#wZIVUA4+5N%rcQG z;6`0<9xacD<0RgX+%vWkWuoe;X~&1;pj< zEsSkMQW&!k@=#CF2jG}xN{^21I?<*NE|PjUsas~(4Cz&zn4U%uWk;A18y)$P8Hs?BQ2QF3!w~+oTLp7mtMlZA zOg*EP@p}h!TJ4Ln9JJHAyeV$kT>_;d1LkfOJM-D!nx_@ND}L*a*JcjR>vK)<>DR^> z9&rfRyJw%jz_F-2v$0(6Gj%+|p5~dK!pWcdGu)H!u%}D?0DZt2WczC=V5Av(JZ^t_ zjPcW>W%H!kshiboDIC%uiCqQ%ISz=IN&~stCoEE zCYpeFS}o<;ppUu8AX0StflYaYDmo#@h1>3;V9`t0mNs0i$rYY=%4~5#|M%)+HN*`2 z9G!5;kTnS=yT@Yz<{#r;7e)T9#B-t+clonMgyhvubN+3ZdHfH6x|ynbFF}>m#V<~omtOtld5px=F^R+6=Q_6;lzM%3-*w9P)x}uIyLyw znl1oTDbm@r`^+}sSSWKb(n1+V-jCpbBx89f#5C4%r%jPTNjdh(llr$)1R9NAkwSSs z)|;56oejx}{zDa|=%!iq-o7IRd=1@@DcLELc`8pi`!H$=S}J>`M}l*nBukRR!xu2q zL>XsW^I1*X$>?F%Ar!d{EvQVWzsY$9imN5C2=Q6&?!P^NaG{-@^-S+{$!2gMS5NVQ zu`dS#q2gz{h7j-gBK!f<1y|5%^xKzkE}kNCCuDzwA=rI~u&zmUmm($!haXlpeOo16B4 z4ljnrZ&^rXF;ER$ON&dluI;;94^6(^`HhR#9b66cy(iKu&3afQoYeXb04F?z$d9sL z(Cq5Uzqx-b`4Z)l+=E(<@1rf7#(Gn-tr|0HpY`yg&iGt=bsL*S&W@}4%9 zn9LRL(Cyi?cFbFu}L-vd0H_MJ3*sM=Z)lMDEDQ1!I0@9+vGVbN{E<6^sFA4fykvdgZ$> zz=?u0ZUgmia@|&1(1n6ls-?F=dJgEJZ%%EiJu4yEH_1-Dl+eXcAubBGKqH_ zn7bLer3@eis|E$P5$k1+#s<`|6yJV|K3A1>RAmKzh?d9kk#%tXhJ$8|bAM4|?tT8L zr}1%B0mWaP5;q{#%yAQ3=ldgLBA`HO$^69GIyqoa#8ufS8&^t4q}9T6M}Ehk84mGD zYrLbWqLNdVXunA18)5=3kf{|vk+-UuULl_Qb3_R1`39buzep^RJ2VVBhX~2AX-{*` z{@3%fEXe~2rlXWixxbY;WaaU2WM2Ky{dWH$NBzF5#-;aHgsUOSe=l6ZQ+yDpVSE~#D zVNtp$fRb>MYZ!>jdcYM=vcG@kdB&qzsSS;$vM_*UedpSsGoM*@Dp^Ow&@k!#%~&>M zH(ApB7Gq)L|DTg25VuL`UC_(?$~u9C=)S`@6RJCN-|V3+2CCXkPPO&KTW`|jA)<}lsaJ~!%L z?)$?nYH*%wmKkbB@kKiKYzpsBQ0Hm;B}EPWW?#8c8H!w>8JcQi3^wCMRmVLJKhtF= zuooG&69+70KS7s6TbF#lg{MAQC*c}1SGY=iYNRu9rBlhL4wx+p#_0+tbV)Q%@5Z3-pII|ELMO~ zqi&Am(%xjg*p^PmrK6Wi*}&LbiF6hr-{6Y7U~xirmEgwvtIy;7V&!c2|GGJ*J`o&< zVEmj3xY`eQA^H5~P;uT{U%?N==fg3NDy)GcDEnVPGJELTw5}TRjN=lDknvx5^=wfh z7Qg?`oHcaDhnL?LDTa+-IRtPwZle`UrO|rFdt0M#>gd#);MvLfW`~craKfGA? zqlN}lQ3Cd(18NbZvQ(8dEPmlaZBRlI0FEet#&15F)#zMdJX2Dofr!#KW!h@!S=)qx zVgwZ(EGT_F#Y%8Ank<%TruucxvQG{FnpCsRGxkCYDBs%jbS8i7=Zh^O=b;$7%Y!&Q zo{JE{Vs9uBJDfQ8v!eA%qR*?tSameclz8J<`$hS-4ZEE7Cs7Q5NbwQ&-W!%WY&qfn zV8$hImfdnybD?(GbS_}=Ogg6tkraxsDKGF~q5ne4mNSkcFDq$(^!7#m@N_S* zPhy?hm~=zo3KKkdxHWu4!_Sd~qbzKIzPPv)TIm;-|FeIIz5R8fxqF(;(*LAizh;o`B>dsp@M@fIQweg)>sUY55&u?; z36nXpR3|leoE@h36RV&#(XwFJq~2C4(w=_TNRHxzci&#J4}45Ce0C6bnay{hG1jWf zB0BR_r#;u>sD*u&dp`^)nd++icgeia{_44?9+K79xFEB6x@1!ISR0B6&f9r-g(=Zs!qA>Tt9ib4P@&6k|!Xt3+;14wPL(W2X$~j7U89 z5#(fo_K2m9aJwlHq7AaG{hpx5(9VBPuhY}D=j}3x4vm_jin)PBH(wp^pNF5e_147< zo9DoHve$q=ZJNm;_+HpUBtyQq?J9+ld2}p~KN|Kv%u^+V_e}w9MnqPwem@oc0lqsA zj6&ae&Nv$YIriQCKLE)#IKFpt?d%>=vD>Gd?+g_4P-in1Smd za&ewlhk$0PQ2ms&m7g=4y51H?dVRdq_G!B~uSRoBsSN;ggz2bOhXAt@Ov=Y{x7In8 z#KDv=`k;pJk!A)^-Ivz#AjFU5l?#euQu0}ckf&pz04f=XJ+%n5yfm%Cog&}*0|#V3 zBl&R zz)HTlGHNgb0^9Tq&`QdH{*bRaxT&_WqNC6zV=pb)7y(f5W#bGV8)5Y29M0Fm^RHDb zT0OWx-s8R4@PJ8Lct=WDiQZsQPgLIbQO=uODB_6#sOp&aHAZi{+m){hIfI$BWKt`3 zkLAOv%AUt3R`eZA;64bbmg?27HwAjjv2>PF?_C-&UrJy+ooM=(ZXpLTa|$*9%n78U zSuFzpJ|3L!x7#l|dG%FT+pDUtM{7r^+}C&D6SZ059I&tb-fE?MJyR2{#1d$>n zs~v(3EkP)^A83DP(8{*d0u)`~l|>8#2tx>!n;NEjIN&I-t8Vk#0}1mX*w?LpkJK&p z@t_bNwE#)VNAN$9R&cETzTe{UTd(@$7?3f*-|09CH;TbB0=HpI@)>f`>smm?$a5<1 zz_#UfEAbIvD zg*->J+8@i`M?C*}oLJMVhG+T@ujKGg=U@Ohk!9^z&le8jIEU+a=6U|Z%fGhYc>J}$ zu%O44Axb!OP2>0J-p`C6yx)1R^Ltl+>gT_m{Xp*kw$u6B@`b{bfAu?}HUK#UP>8bsO5J}CrCb-qI9dVm7)^^i<9=Vo6n)FXrXRol`6SvVmy74y@C5bI29pJH?BzE=Qosmeca*#a=13Zf2) zdRq44fl*N?(y@x-NQ{pK`YFsc!I{I}r&i+a7`(sH- zM^ahFdTG2kYY$F7dOpOd`CNhx0CU0UNLGyi4hcUiK0AHX?_JDFP*sy1&kV4bl0r~H z*u&FtPtsloVMhvJbOV)es7(nxb!!qSKp+a?hMk8Hl-Zh3t^AW^1i?bWUQ2Ixgz7K= z=+(~lM59x_J8y;XpY!&YBG5Ud2jh~1WKrE(iRC|iDC@GL6>F?XK2Y{K76t$b`~wv- z1`WCd^Od5^@n}At$h!fGOv(ToX<)I=b1;F8CLkq_F8s9Mk77~Fz8JASl z;HXCSD1qJ;X6ZgTpSB}yzf?!--Mp;&-O&l>IGDhF5uimyKGgCB@yQS5pCInH;|C^t+u8)EarD59>r9*y%=^ zRZIU`r3YCBfd%0ZXHT(~Ac*3RfD=NXewQrxU_wA9w`SQ1-WXh&;*ESf;5xCaa`b)fmjLx0#gA70Q1BZ?pO*etdR~rX1c>qfdQedG zM>3k7>o=~`?!5&bO*A6j+dR{}ul|4b-laLNBTdshagP9k0H{lf*`>~wG@6;JN*gmR zYuRPii^?{e-DuN`vHoeb&mWMrD6{Hi??#(yo3Y_!s>`}6sbWebs2~GCT+Tet=f{IV zilj&)A`k%FffMfM=Eu+Lm-F5Hb2t5b9ALt(d4C85ILOe?K2w_5Oy}u77T$brdvNI2 z3L*#FL-LSn|9vts&RcKPLhFvDY z6$1?V0tn||?rd!HW!G{z5AqmCo7j6m!!udNFnon%p6Zzy!w-Hl;>qa5#$<;gb+ zffim}yNBbGVYNITj~1i$VzIynKs|=s(hRH(p=J9k;XM&p2e0YKK?KUw$@sgO4#My~ zpK%**6T6HhaE10vqEJnn$2I>qW~yj7-|A{Y%mIl(neF5$oE?J4@)ien^phDEg%OrV zqBUa59*ho}P1i7&$hAGRxj;t4!uoE8;2Ye}2h!nN`6~n&hMcx-Jy%KvwzFzxtWdIl>{o%x|9huPW=?~ zbo!|lN;>{is2c(Sj)hY{F0>oFc|HHJT}k<`;X}nsW?w7~bK(oOW;c$NpMpR^pdhdf z1hy&w&=Wg$Hk0wN^$ir^V}>}q8dk@cu?`V)e+3EnImhtISsbx#qjGO^o_KTZ-|e;T zRr9mYdvyzs|Fa>YreoV1?d~(ktTCiY|2AXv-tP{GGHDVnZiXS;zC_{)-t2#Y6`g6< zgqD!eBs6)1PS?Kob8p~{lGy=7~?~kV{p}<0{lTR&}tA*4m#B@#>tDcbA@#b z_`@^Y!eV5lDRG`7tSRvfm7?dj#!XPJU{o4zC7hL!e!^Vly1oczKt{p#@eE%EM@$U} zr;ut9d0$rw7&Bj3Gh(l?85z5uraC;YY^Ockw%{v^iSS1Iu#JGHZK6k8aB79;taF*m zsD}920a8ubtS6m%TAza^ZGVsIBEAg)=wa~fe=_>i1N%vCvCepyX`Pb(;!ZCZ($s&en9?;y8hMwo z-Elmx0&s{#^28NisQ>7*E)zEh9rX=l5wBOX_N3IYXz8;-zM1ps< zf^qT0e#0Buu3v9FnBwuZcNaC|XFU6__+Y~Dy9^+fCwhW~u7OJFCd!$;65a^`okE3o zXsADl$L!4Lka_bQv;ZIb`Y9KfJ^Abdf*U$ zuA*=U0$avjQ(~tAqLEOs2@jV>93olzaMTUPG&}Y? zWXw6DpMHdQf9L+EaX)FT74pUy#!uuy1F)@{F5#yLtmC2}#qrLiU_UrW!d|Hsebqfh zXxhg&j8jM!;j4}|!5FKsuY`g?L7*V82?Vw(0Ki+}qPkx#U0V}|?E7jesaGBM`<>%X zE7oD3Fb0+}Z4)(F@^}#e+FEzik?BX{U;QN{36>}IF&|WER6etnYr?L7k6*mM?ENeP z5`r=%f++DArlBBvh+V~y*tOq1*JqTG!7E_D8d6{cn+6@CL|`KobB@Ml;(<6Sg! z9rV;*vm7Gc!+UVCF{hXto$j#Y>!6Q-5>j8uNXTfrIdCO`Pm0CccB7ccYu(>f@ToF- zmdScfTW-ge=9UCN&y0K))J-(0r-Z3ul2)zDr&@&U?R^3Bqj->lK*O#KMv{;kh!H zEK~FRT%H`KGq>wEnypQjO^Xbgcl1{UbLR-?6aW~zIYr0gZwxsB?9BCJ z-llgd<@2E>6a)$aHx_|i2mo#@cpG`Kv;+CHTm6LfoAW7W-wpwpGMW964vcUNC4rb7yd5x~Q36uJF>C%M zYRW+UcG$l5Dln8O)Kq^T1l`WL&teG;6&94y#U*j6nz**B3-qspS*^y}bQpz}A| zpw0;`b}rG2cV4eO#bvhN9Bj9G@Skx|X5MqQUrqWSPH^r=?7mZS+$QJA2Y_jX0BZg@ zz3O{8-)+a%hnoF99mqc9elYhWjxhJPk0pj5hpkM_(UGfTyLDpsr9R`O@hy7CN4%zW(%F%aSB8O`_a)dseynrwGu89kFxh zpaO~jU;`jtA`523w(jyx{S0~5U$vL5Gd|R(kjM}C4tzV9o}E7YY%-3;_U^&_p5lG$$hF}BGy>%81baB0DH<|w$VC6}=oy3HE1V^F2h3OD&Bh-vc z#t#SuCg*u&fXt+%>!+M|&artI7U5btr8oVXpE2hdK1Sw8=Y*R4fBy_kDZT=R=V};} zxv>n&tnR{az?9HvY-4Vp;Lsq_pdbzs%q}5#liQW=nS<}k>&9|yF)tfofL0XZISXG9 z;NcA!@O1waGHTHkkTf%C-Dn76JQ-V_1xTq@z`E;2?Ygv}2_d+s-rmyd;5>ZIAg?v_soo z2^&FxuP5|jtMz${?+l~Yy`aH6Oi^A|-*g}#S=)dy&ges$7+fdO!NHyJpY0SU?oL`~{l9#gK(2sHSG2YuE41bGN)Ks|KN@t&t< z$YU}i|2kP#PtX>*JfO#QLZ%1uL`W{G+1mZV;6#T0Y&bM!lo^^U!}L9xcXQAle zfBBgHMxD8Dz1i1JT$zI%>)7CNH}AXd^_t#je}2VPfw_1as0IYN_(oaw7|Rjx(f%B_ z9YYn|o=&l)vLThQAp~l*|BPTdC2#+)q%VN?l;QSC^0PBLgTDNM*B9`Vj&Tn z7BK7}f}c)VRK0{ZB5<+1(;BtAUDRUYsKY;&mkdIS>BDONgZVEwz7fTDJ1HY4L8ul` zqA@v)N{oR(=k48cEmyw@0Wodp#JEf{K&;BL002M$Nkl2pTs9il)zka|qoIjMjqp>V$JF(p;AX6_HoI z=a$8}&mj+pz9$w}QKz;d%VJy9w0yO(?{WTPy9w2^-rj7lwndsJmMRJWQVf3lzw40eBiEDGH8S^(9SUzW_0P-sg=GNeyvEHC_c!&Lj+Ea`BOt~G zW4)lH`cVi-J7OF+SReViI8c5zfPe@?AJ#2w=vW~t9FsRFLrtWTFSXB?17Lei##%0}KHGl_LY-{BbUM9@f*(k8UKj$~q^Ab5dq1 zBmm8m+;!wr%i3W?c)oCTiBLYxD^SG~{?K9ZPZ|1(+kb(gMKtsdo09&Zhal%X7wUwpp#^)rcp^m5k4;wr7 zg|N>9&9(3Nm~#1R5wPV~%>J(iD;k`^708Fum9M?6oY*P?^JTx)VcBko0r;U>C+X1j z&1f1?=?IstfFc06EQ00!g1`+%Ack0eAP*Ds$h&5SKeR<~09pW;4rV|GlWWS*d^L?i z+$Hj@lR2Y1mg0OnIf}6S2-9BJfM8g%X|wa0v#GP+RH>dkouK*D?&L*$mW;m4R5(kc zs5-vmXqa5ban6NKfcNuF5;K#E$Dz(uwtJXoiDp_!#lwdx24j^k$n0$d_y z9a8P9eUtASB#qtFN(Q#1Rm=>#nEsXB}7h5 z+uK<|%YzC61%YdiKoJ03`zkCiP!QN60uqJI)`9piEfL}pMPdFgQ4)kn(gJ$Egxw(^ zv$gIqUhPpA&G`VzUcyjDzai@p0LaEA(`1LR#!ayEw%YlntaZCD7Ncu(WdlJQa`SNE*9!DxR(Df86fx1Ro(1 zGy8!|yjp(#Ve}JopPcw9l^ZzjSZ=+gybAxn;VOFP?CeJh02HiQFSbSNMuC7% z9Boz}GhX=1zfCU=7?)5GC4m%X@D*&*IOe0MR(L2RDy<)hat&YJfR5AnE<06^kjrc-|j&O>jzOkjHv0umWn zozb0>(ChqVYf${e`3?>UKLX&;CNmz~#i86U?{uqQu!*aZL+$?`x=1(@iU|`?ys-NxSFc61uUqRz+>QDfH_rUBpBC#%0a928aIJMa#wx%#cNz0FZCt|c5wI?XP~~mv z)Yx`<#&j;*<&U#&_4r)pfN6yn+wD`9Q+52-YKrDpLP4M)P!M=)1d0IQt)VIx76fh| z0@8sJgL;zgLga_XVq-Qn6gZWR4Z1T3QZ>-mp5wYs)lB8A)9Nw-J;+9hd@rBaszseq4w+it38jCO#c~9b z!v2VZ3NvKZQfTHY^9Bl^Vu?fBAmL@}h;;ON{@Mq_0)T#J%{V1TMr~yQz>Z5S3S1k8 z!Rt2_qD_(ikj}2=ScX!efEEE^g~2&vgV6>by{T7teLgfi+nxfbIbA`k3i$n)Ug0Vl zr!ZKj2mz3{#5H>P*CIe+KrsB=8*R7ACn_gD&5GJ`u%3gVr;>6#>FNIv#$Jmb>qP;L zu~UE)poO0n?u+Z2`_|jbdV0&nFWJ9@fPF{&wE$p6)M!q*8AV^G?>X|#=nO)QJ<`O^ z*;YU!!-D&`ARra!IG0?lt%RoMz20rt!RIRk@Zml`2wW2S0*H5N(yk65qt<1!{NAVL z+v{&~5QCTBS4P1)Tkn^fWC$X!z&$aJ*(yV(6J8`370ZzOzt>-sDt`_?!A;eeiB{spwxesRakztgGnjO@9}HXagonC<=d_wfyaolT>Ec=>xG!PMUiD0=Me~|r{>*V-<(#?cc?d{W z1GL?^C;y~kN7*@a9YXF?3Te6wZ(`-T?rNZ&hkrnQxpkNdpk{gjw}8kbFtgqa8Q-)Q zhhF((m994*=Z8DJN7L~Gu#NfU9CP2XpaVyem=f=7u#MDy{N(q}r4bG{=lgpuG5b!N zNLD4rQ`G%oWkPJ)E%^aJ0Kj>t2Y8?TXj>tnnR3r*an36~sKvKmI~{98J*eTgy&s_c zj}7aL11&T>$9u;9{{gLUm;9lOSKA9gON zckf9fLn{0ho8}dxv#ZCmo(}Y6(3Q}aUw`o*uypLzz4`Rw>9-%xep~&P4^RmOf$NPx z5dd88`nz?dm-(N3Iq7~<9gJrOb+dG^t27DIGOXa!y%zz+OIW za%aIJ$@n@=Kql=1YXG9EM*(9QHl_%8jZAE2m`z)JH-iLpPNyT+=Qo$z*gDM4-mEi& z?|cCkOWx2UOZLShHY!pr_nU%wwh^RwubcUsw{0Y*{BTY`APNM1R>t;Cd0zE7-`!x_ zwXKA9-?q%LaE7U7pE=adt=0#IY1>wEl$kK|+Ps<9P6HKTU1WilX@#z~)D6Uef^hUh<8wwA{X%|k})dyLzk zF=&qqsk6DZA%QPZ>3q_EbEh9#K#7lI_D`H+3Z)sus1zn4{ci60C8x_HU>!ou1hdd> zNR}>ICFgU0_CaE#Wq3lPgl7FN-oh`~iRJj9hR&D^tw;37&laHxT`^Kv5O`l?xeZc? zJk7RQSv{;1ba2SeZ7ErkIYTyy}G>g2#xg^vy~4H^@$);l+4)G zh^kGCwt7t(ti=olP>wD_dZ&9w-KfW3jFZzpwbL$DrzTs02)nV#_)eKOq2NSJUlq*R zDF%mNCSE>DMp7#g`~Sz|7NcWy~g~K=KpmFUj7XOKk@Vt4VYnJMfw=8 za7UwV65Ij0$3MX7<(Nuv-oN!=+yCIOTiv0)vco@zX0Np_n=CJ1(2~Q}5Xe>@Is!Cw z(f%us{otTm{o7;4T-v6-C>(p2wBLCMt`^Z|qGj^qm~W$x{m7;KyaNJ#kdu!53d^$- zG)!M{?^%1%I<1yx-`wrB|L~hHfAipX_kYKNpd}Opt}g;b0C0Wl?sk@ZG#@cA->q6* z2jzQEH23(*sk36huJ3Y;^CfHw0f|S=jYm+A5}<1-2~??Ny*jCx)HG1)Hy|aAZQGeT-wjKR%g- z(?5kIGWS0q0C0&v&Ha;uKbS1exh$pknV)&s6#3t7gC~0xju}%I?{u6!WFhzuS&2bG zmUXU>#?@Gk7?W-LGWc3&jkB{x(%~e|{Kzb?z}p+o_UiH8Z~cZdoAHT5#TdHXBYm@={P8x>Ir)q!aX2=yyR zdqP}p+Ybba0AN4rp_Jk+5$K;YrMFiPq2ST@SD!Laf11dfs*kjG#C~bhZ;9pGUM2ye z)?Xoj^GYT9!y7p9$_ud+W(j+XfYl?B7-S6*mh&)3FcoOcjU^amgj_A)Y}QHU^-!9j z;pd#o2igW^n3qdv@?UXit08Bf30wDN!1NRF3`WfsXWLW&V7V5x^WJ5IlF7R15vv_s z$iR6W_j-ND9Wx`kP9a#rrp)#`-(lEewtu`YyS!IoRuRlm!8)+BPGx8{XScqz>r-CW580p9>GQ8qLRxz&~nts!I`In(rqdz%M zZCi_yBpg{H09b@(HMIO(FP1KaDdjKL=CO-*6L^Yi18fMns2k_=k1)}&eY@PBOrZkV zBZOQEWIb*jIyNh`bwB4}++^oll(6RrL|rwNXCJfeDah2ng8~50>kQR4VPA|(<0ejc zvFT{A#3j5N0yQ5JhBvkUd^iSxdBNc7@Y-!HTPLk$J?zh>UQ0lsp|n|!D{|t{;PlImoC0_680r*6af*Hz)*7kb)xZ z$N~WUC3@Vt)KQ@I_o^U{q3y@2AkX=?ulo$m{heZ=P#}jWL*CiPI6)Hc>nq!KY&J;% zkfyk(@%OmXCb9*9?fs46XHLKI-nTrtj;(E1*ku5pJ7HWC03Zao1Dlc=e)}|47J{)= zgL%v6mdo}J0R@@10sExRK5d?D#<|=ktapW=*$Lndn&%+I4Smw!GstB@_g%H3C;60O+={PprD<^q>i3%&8up5wEeU3I`MJFzLS5xL&UC z>HUYmazsY;x(Cv~{r10Tc2vgr%OPCuegy*YMdK%CM-#i>ftb|Ow|t@mhYSJ#4fE$k zZm-C!9J+o8GaLR^Kc9XqQ2PuNl;+Yg>w{_j7@F}F)#TAaDH<8l^)3NxLWmq_Y9#2b)Tm~7`ud@ zf)Is5wvpIYn26HuLR1$&vThmLs5^=fPFDfnYZSUzcM7%C_=Xc!W5#5y1MB%9FN_fOQ>JYs?nk<)(dfapYXiX52GeErj~`E28GzAt zH5y-iaZ3sI9|3g|kSfeLVKEQCV(`OEIYH2L-fdmfvn{{D&IJJd_K)@Ae6(y2I?PCH z<4&_W<+Yu0gRhNNm!+wCyz}~JZOahsSN34xSD(*&3p4lnsNe5H%yn1>2Fe8wq{qqjn9@jGWTMh^5EwH55}OhnY%8XK^IlWJ%|XEW%=K$s>qTBh!%Yq3f{{rk zTjEnA{0oRq_cgC43gR;uZ6Wc%j)IT!^J!V!WcVe7QSh!9zz{TZoiWp?Q+wvRXv4gp zhM!tRtcN(aeqayNmNA!;S4e%7*n0GY?|~5M^OIn|&sHAM@3}X(g(jP{~-hjiYul>L_6V}xbVO8Ag+P271yK_^Pl>g=VK{9 z`krkT1@LOKf4X`9iZSGRc1yT{2w2C_r+GK~YU-mB&Jv6Ygwj}?$KPnrSJ9|%Aol5% zUcEBF;o@Na-ItTu*Z04km*oL(Y~^p^#rWzVi>5c9F=$Uvf-mXD2mo3WChDCnaZuB5 zPq*5GcP;?<=lSRYuNsr_u=Nd-Ngp$&eZw0LdGTYu!YA;*f6cLbpp8-6tqgq{1@B*f z)umO`?InVehJMXcX8)pb7HFH4D4tP1y}D0u`X;d) zCe*ygnD+f4p3>ELJEvVH>X>=&2Z)}B>}dI5NY*ZVj?-=&+ovCkrRlFy3r&IzlBkCv z;Bd}=vUpG{ZhhQgbqB=Ad}d!=3u^fl037P*(`>=P?Ct9c4?LEh5|4TzosR70*V5ax z@>`g;T~0A9{E@o$9c_F1iEv*34|wy}Qbp~5UIpNXr>p|Nec8u_?IfjhuG@bXv6Zd8eMGj9d<4N`1sq8 zXRf1puxR}OJ^y1Q9pie@($OtUZ~I=Wy}-MB>qB1pb_xKXwIFTohSw@+0dSZ@+aVQV zx`ZA`NAv%C9J5`E2hweJjN6#E41h$x%YJ38lXLy=;CXlqQ;-~iCDKx2Q$ke2Bptbp zh+Y48Uavdr(mEZnRDd3jBs4YWhc|&KVOcTefhUJ@RrQQd*Au*^cUc`k=38Pj9L+(1 z%5cY2fHhv~H9Y6W0CZXcMW|{V0pvD0!#d%NN&in_1Z}S}S9(Z48geb=^j&qPrf>ij zD~M>BCs#_yxESa3#tOT^jC?Qn{Z%2Us;bR+j$xn*K*;$>d)lKw7$TUVV3Zkput@gM>^V+JtS= zd&MKB1iS&8ZMMyt{Om(e2~Wb z)+vL&LV#~ZRkuH#{=`ysdo85od}|Ppa8z?TfKiYrl^9h+ItnclS}HV6YNvA8Cx#%G zsl=BDwn$p#Gni}%&(q!<2DITvrBn@x zVoh;6ve{qq_Yry;NKk9nvvd;tQHVnKB@_Miy4_w{)rQ_?@m^Lq_;=Pz>KK}B>SFmqQ zzPwrIG{4YXBOKRjJl4}P2ngKzt!x}%+hvqbYsO(QamWZvJwyIDhy zw+dT_2&NQ7>E!PSCSPl-PmWmdhQ{a^Y@0kFKID9~&!&1qP(wa1w}BlLzdT}x2;{cn z*lKX!I@c+n)*=_M2{XoNSO0j@#o`HzmKWMY+*N)qi2#(5t54M*f~`Y|W6?kHJI0=j zt!9KRp&;;f2-t@B9y<;G0Rp`Lc5$&#cyx*&X)&3NW>|Tv{om>rY3Blf2AaAtVPCDI z`JevcKR#*KZM58VbgZ|7@P=N65P(`|sIP|-C^2_cr5uVlW`!^YZPhB|hJspN>unH_ zXpksXJDNB5LrcoBY#wc1Tw1~nL}0y+{ruP_v`C7GeKq}hmdBDF;Xu-^FVMCd)K0?3 z#Fmb1`3uIgC{q07*xl!!O^?PI9N;Bp>s(<4nlk6ccV+PTU%(uutK#nGKlxyOndfKr zBlU`MSyR@-Z3=!ILpVh}hLsAj48LaoOr=a|>)7k4X|FZp>P>sQ|Fv()#5>kH#v1pS z;4Rbdl8g%DOk16Om_3CBrlU{pzPROySBSuRotxGgz_B{o4ei9X+@)c2D9&ABQE%v@ zqXx80Ryo&%`iKr0R?Lf*w0Q{yfpQVW3sbS-|h!x&;Fti?TC80$Zd+W z0;yDgX=%Ug z9J8-^Pri?22?c>@4-8MyR{T!e-e$xxT?c%k-Q-5YIfv+-bbwMf1%Y=*U`6X^tMEeW zzdMNUdPfleyenem`kf)bG?9L_d^9|)4n9YHH+20~Bmnq|=&&uACebP;SC^@cOb|3g zf&_!w{=>$xR9xTd}&(~N@r96!qC#MCB;8^YqbOH#o4B_d# z>nTpk=JQ@KWin!FN}PkAwXhtDJk+>np}?+)Qp=M36)dZ#Oa%8m<{XgO`X1B&#iGM; zRtTW2OHYsSz!DDDV94^E_~Z<)(XcoD=K9s=^(e7z%{t5N$bIWihX0g$GLC)P(o=m> z;Ctef%_#uR?Vqp=z}W@u6Jd|OEX>Qc7L3rAA+Tx0JbeSsg+hhp^0TIaU!6|uupPll9`;%y6xa^5(O}05Vo4p}7 zpS+)rmfwQF)g!P@>n}&iFI=JZw_h(s0I+?G%M;!cfx&z_>I{b`tTFZ#oasqxS)ZVR zJBFG&VF|Lkup>BS$)C0Ne8?@mEl|sp0I~aSGC0R*aV1pUBwFu4q#j~^nhDWc3fZlF zZQBJr$f~Pp4O08%pxrp9TtKq^zsI#2I3u3Y$mXmLK~uvD zAmzUPjLVn!r#WQCtB3mDwCzm6s{P_4W(wQBw60PJpu<&H*Kkz|$6DK#Or6IMxF+9g z>oSkX`=^gAM{WrMd6bEE5$y+yB^tCoP3$ZEvt@NSnO8$%{Q=kya(iDitEAi#sLE?z zBLvoIeVX;XWvgaoxI*i1y9$c{VA}|n{NEb^Xrgh|n%}GH-rvA9e9D*XQ$A*pMJ-6A zs6-zj0+S3D11`L%$Z^`+{I9J-X1x!Gp9M!c zpgdws{o;duIQknQFmvtp5jJdU{67c_fTY3oUWEZJ^%gCxX=e7rCb{`SmmIGZ0@)(T z|*vDS@O2^&K|0+UU` z)JDJx- zQa=6@A>43*7GEJ)hy5ixp(*C8s8&r21({CY%Y8kw^1mXecUSP+`J}*_S5J(5s)IBI zAWaL1dH)D=Wb(Bv$xEOGGt|_RHP%%1&Ox+o<&AE^IGR?@P|NQMD+&jk|F6JlNIFjo z2x)U=(;mXWlxb@f3b6 zUvT=CvDuhrRCQU>Q0IkAFN)2T0%mZWbB*nq9Cwu2xQ_^GgZ?P3qw>1eKxfS3IHTnJ(uHsdxuh<{X;-{)3zk7DRHWH z%6awrlbE4mSK}ED+0fW}{+xNLGIjiiW_|`krxxA}f?hQ?)4cDwEd-zCChzF;;r%fw z*SXx1{=mjN$h_Gvmb}LWT&Kb^-&KOUkDZdS`#IQeN_aH_*0*@)5cSW23mS*k7M|3s zU&j6k^O=p^&r<-f^rt<&XrZAXzsf)4opH29W8y_m}JEl5aCXcbU zC8v;RWfkKqgu8j8 zLEx4kAi*HRqV`EbRA$T>7donP?qH8|Y7>Z7j836YZ%O00#B0)If}xQQlUX))j;c;1 z7iiL%`kXYnFxO5kIm52rIog_bD+qY%4eYNNMD{(t?cgJ_;#dKN+J5JhKZR&+w-;4! zo|w2!rxV2Vj0+^L?ISXHhfP0m-(bIKBe^gx;im{>ymQqClfsUxFt}`_$Fsvfz5C1j zyRXJyFaO!Ugl$L0Kj!&wxtT;&`r~tmO-4QQvR+L4UMId=b+VCF2+rZOG76pL~B2WYX8w9gt zRS>u?2uP61@c;6-A90wWLX>`h!!sS9#Zpvg5DXHkdAly}SIW6t1Ty~;T5@*?OPG6^ zvtg%SeM+0s1HXi*%-#=pYu8!e6lP9_ThIJz+<*W5c{Mplc2l;RDXP`GcAeiG$rPQWe&AwP*A41_7DzFCz)T+kBr?{nC7`L z44<+4B_sr7?8W<>dbN&i3m&uXW$yp`qgm8{nEAu7$oT)uql-oWkQf-5eV3NZf+O=Y z4}Ae}YOBGj%+p>>4^ZE$)&KN^LBx)!GWd?CpB{B1c3so5l4HOS6xmK z^w|;G|Nl&U#z=HdCw@Tupf_s$?YNqfuG~r}2<#mKMF6mORZyp!gWT#HkBO%N|?%zjfmlkLi^UyT4302LMh)jg1&lV13_ z6K9sK9`HVjapbs?zp7o zan}uj+*XVwKW$qytMIblZ3F-V=FHDAqnFW^Mx8^T&Vs3K7$WZT{+t)dtpv-L!|GWa z^teo@WwY#_li`#J?YJBO2cn-~e%ry1K?pWD?E5$;*SLh&BOpBMo7jrARsk{hpIEF- zugB)-ed29wh}fT11;*i>{Ma@!LvK4*NR&2&t1$c78o=wc29Yzc-H=>`_4MjsNfZR$ z5`nd*6v8CG;Z7|HKNI9ceT83CU9|ShFmyL4DJ9Y=NDL*2q$1tj(nxoRq;z*T(%mH~ zNOyO4%*-9%``zF74>0F!_CEVr>xtzcp5_eEu52@H*!k5cDYi$w(lYzW#TxxON=gY& zHA?7&ol#6S5$dX$P=*0&eV>g6a1Fm>$W&`QY~k~3`uoxcdP52ao@iE`junwAbwb@- zktibTIx0K9{?pEv7Wz~M#E2TnQ#KxwJS{d?_#UzYcWm2KuV=q+(`Y?tVRM?!rn`e{ z-fWZ}lm?aVJ?ID5Zy#zP9#l`7W>NMlYab(Y$oWG_yUSORN>P({UL5Xua^LW&5PLCaSPoD2zkBZxrb6bW zjuZS)L4sRLWo{DOTY~^uI!8jAyk4PUwD$DqF#HL)&#J4SwfR z1E!;jBww_vAiXkMTTd=^>B5RN<(Ab2L1n0-$rbD$I@i*b8l z+44fBpp;)EsuIJAg}~X$q^s?XoN7@`#Qk@FV;v5yjQ5y45d;_@&mRcsz_De2IVmP6 zOuR3L(wr{iUqd@D`0z_O&fiZ@CINXrU&zqKmc4xCKbzH&RyPVc`vc8?N5&zO0rM(J zs(37Jw~5}+;i~-|(%;HFq2T+YuRN6r9fTg}G6AQ$^FmaG3Umgwjn%tt>_oRI4cevevOO8;d`rJBEC1$K3USC_Li$AIJsG*s7YpqUZVh>gq zxS*=-yLRdK&l3v8`{&E(wVnHRL;mY0XG6!Yp2OSAa$Pp1rSMtJdc?TguT{{{HLr&> z)XrA85Jim6^m%{Y#flyr9D7#f-sp*^jP#+Lu=tn|?bLJZNq;)2bl zN+@M~Gu)&?iT$odDnUMpYAKFbZ22?7u9qZDBb**5r?<3x`;F7@(?C27aH874}^CcTRO||BHoy4vy*U^YYk^NOK05Dy?~`U zS~wmhQgm;CPfhT=hrO$AmRFEQ9TtARemf*pO77Z<4ph-N;CMAVi!hc%rQVs` z|INM&#Q$rfBYjSBWoPmCyDs2qR8h@lPZ$pQ( zByl%iyBDpfb${$Y_7CV*UFSW1slVYA?o9yc3fWSMOaHp_Ck)aZIOyR zY!5%MZsE%C^pYNtB7x!)JLnG%(kpmi=D41koVNFxRs6n*b*_O9JrO}|Y#j_0uaTh0 zs$5tdZRQTSb@`4EY$4y!j@H0N;A&+)cBC{h*$p&MDzr=H?YtL~z3L%98PF*1^YTO^pjH_B5@G0MQ60^dzz+@=9;7J{nO zC+piPfY?V>6%%Bn=YJ3I0+hq1Kcb6YG^+RVp%ANY1~(D=OwT$3?(GL}H~OjK?us-x`p{4Uv&h~fqb*OIbmY#- zD8i&(QEHp|j1fN6|J`S@gIQK~S|&XTWw4jYsDJ^SPzN>$CO(}jyfm;ZPhd^I?Df*o z_ZmlGi_lv*m<13m6!qIa5+8@GRUaV)D8)NpI@O5FsMLybLX70Ty$ZCOBHA~xl(M~* z`-0E`FTeSxt5Q?FTWzz!%tD4<7%X}g48kR(Z0mA^c*s1_0}aw?2zaf7n{M2?+M}8e zZlIBgg(hlvZpa{x#<#alJarWz#v*9yd!i-XFMiwZRH^rJGR9ri5n|=b@`PomUjGQy zhbdouANugPs4#YmW!s5WCEt0q&expq;qD~jB)Z-kk5;_RE`5vDfxzDF#3#hyuVk$F zLtm;zREtkHiRG%cf9SNp^``Hidmvu4&1_bRch$EgGc*h6g*f@%u0QuF?H40RkP0t{ zLJ4uqZo(%Cyll=c>xIvhe42a@d+xHWd_YoAGj@3Y(QAG$tN80$+_-=Q9$e9U^$g1H)82y} zik-HlmSV1$CHEovzvI+8RBqPzA4Z^T)Y!OXfq%+`VbgpVg7w|Iw8>vn;j+sg+frQv zslKxzWo}`Mb|C5+g!b3Qw;6j;u|RdkLbiz=Pa_y5Lt}T8#khD~1w)CvaLdaGk@Np| zMpaqMXHk-@yLZNi1?_x3;sKP}75XK>oBYFC#jbAhN=)*{wI5qt)7|RJ(|Bhe7Km)f zmfw;~;I1WIrwfe29~?dgu`mohdt$@=f8L$GEAM?FFN|yt^I5w1=}ZE!c%%ja%Jb?_ z?AKf^(s-k9Z0F9DGu83jw86O={oe#H~PetDRjqt{?G985dd*5a_r(Hy^+DhGS z5^~TWMApp4-tpwf{qzW%yLwq|bGHQeRH;cTX1C=uq%IxeJH8swaR>eb4+;w~mlJln zT~cY?QItfW8luxnPChvCD7<>@34`GLO5O9h%05B0rx4HnG;8;7r|@CF)(ipY8?ULU zkO})y*F%}r7xXY1i*gXZs$Z;<__{e>%!h;mjIwy;r6XWn2y-vKk=|sp7^z3fgeKnEwWx+$wu}r;Hox2V>H47y z=X~1yvvqhkb5NM6cG--Bmc3a7`=t4d1ij=sP+1&Iu>~;S)DB(%#8B}8cY-82tv90EYvhZ^^08Tk z2}yM%EyHDg#6on|(?(yEjOf6T?rn)7E3FT|-0D`~pGKuuenLVYdVuwldRuRRyVG=( zqYRe4s;(n0$LkErhC}P>2@ydWEGD1O&a2`C18@^9;!DJ?l!!U!eDURkc6XpeH1fS~ zx~@v11nS}pM4Fh-iIh^jk2X!_-=op=B53p7D<^KYha|6jlQKV~BowI_W8DQk?13Lj zX)6M12%x7P+%(jjQrmToU^xZNBmPus+=sY@q{7KNeD^*X(V7FNy} zL3XEEbChPqt4@Zvx6`D&;8#W)>yMw>kG*# zmuwLbc4bpHlmEn3I&boPE%5q@;Maj~o-u_aD!0KQMmgW6c0Pj{`b&wfGta5VZ#1A5 zajfF*j|h`;yH;*fP(YU|UG*xCZ6iii9mrl45GdaXX3e+h(?pnKhvOHXsc8K~K~3}l z5vAYwTMyO?eVuj{j%#(bnoh9@BwoSGmpF&>T-B$G>FQ=cANZzw~ ztGleQy(grF%YMXk>h9h-UEAVu&udA3^L*yQzHlGm+<F)rbU6KAAdfNJQGdcIbU> zZ4vjl9B=Ky(Yr6D?nc!E%|v^btFrx(Tl6+7BfgnYol|m$If3)5plFDX;Ti_&qUx`3ZQn`*c|`z`+6g-?xd$e5&a%U ze1rH8AIoc;E4K0#^}+^ja%VK_bnz{!~Omh-SpY+p#Djz$ZzX^ z%4;TY?#iO;ip2=`3_<-n+cdTmFko+(mJ!LIaz!q}BF)$pv-vZ*iX`FtR}olhs8bIL z&{SKSXYkf&hv^3&j2C7p#y)pgx1Yuiy-|^oq=sMQ$_~DV-NVW<#^_nsJnYvLoV){H zNR5Q7e^}A{F2Pxa+s;6MiQE}zo*c-@(JB;&=?P+ z&C1ko_+iG5PoTVCk^1N*d-Maesx=R>K*!OAd72@#3x%za;ScS`98+> z{1P)B`fs7$gEu87l&)6ncr{a>)kmL__FYb`e=vYCGM--fnA%0-TP&7EP+YUTy6FQ@ z8#Q?I>ZnmiI_$b`5c&0efwWqvdsku?5`E>}E$np>LKWd@c(5IQBZ1fRupVNZVt%DQ zzaG?SuQ%kie0BlK2lPJdDNdvqA>=!Zdrnk$x-t$p0}0NjwT4k04N8}vX}KQbtAO2P zs~xGRy;-ABZ`2VeonoJJOEI3Lq2Iiy;hO>SU(Z22XTRI^Cg84t341)LABm`u(5)qcx=-y7G>bn;>4(;->TNZH#u4~!bdU*2~pa#9AS`v1Y zr_Bi0%AG&7(;$kMCq-0^RfqE`z|ujvc`VR+nRyad9A`%#q>GGcp1N9@hHo#OushD5xTD=pzK0s29RLkm6xw|IH z{Ihb-9sQ~ZL0e~Am9+1IKi%sZO?M}M{+&tQ+?hjw^fDl{uR1(Jp!6i`^EmazE~NJ& zD_Ml@BB&XPn>A(x55R2ar+_((*GT}bIHOt?mC5zTL*|M=X?cC8JeU3Vg_GMjQ6&BH zY@Jt68JJL@9=u=KW+z;mr(evr|yG;HlP@s1X>huzO zJ~8lceLSx{(7dU@Tv%N`Kd%jKN8H>t2#%3Q%6EY2Z1tBa?t>5Y^lPO=11XjFs;WKg zc=Oc2uCp^mDsMr-CzDB-klDA}J^4ic*N;SLDN33|4-=nKG}M9t|HYG3iHp5cjhqD( zLh!4Sr@8>m+gXYgqS7c8iJ#u7^^oY%eczfhQO6+#-(molDgxLjHFjylg+kg|gsjxF zCOa5xOh4^@N`-KgvpNbWnz2ARmlkj)bFq)=CfCGXw8 z-s>63kv@26Hkb*ZW}eRSi8ob{XvX)x8PC@Cb_?a8C5nZr?%qsq>0KX~RIYA!i?EG5 zEI2VHTt5XFV7)eRJ!XpHJ|j$I5;gTaCp!^sazADs$JE(bZyk0&-XkUz-rH?W&7!@G zlsf*pCnN8j$@Q~`hWwr`{n6VIwN>J7<~gw9WP2iN`aU_B1>-&O8$q+h0W1tOet+xV z70)E#N)%wKgM8fQ^MogpUm=#NcN#`FIEli9qO167X0R0#EN|m0 z9DDNDZ7vE4+pK>TOuI}EroN4sslY;C?p4Tt)8{?9H_@VEqb#$frt6DL;@cksP2z2! zGwLvELMco9#@wh#bXRl#wo)E(uDY&GA8w#kdrH1g`v5+%*SwEV*q>kZ$#q90v#~C@ z>0fn-<4ND3+28=f=H`c{aDBa=(==WsUa zM|kt8u*Oy&RrNC-J$UTYzXbsd%8X$nkV7!1>j#zo|H}dhM$9RIU&y|!3Ueluiltqf zDEg>_D5l#`fSs?nKe)#gM@Ay~?5|*@Dx`h%bSOsL;+2SU8LybvAZ*DV<-28BP99^b zu{W8jI#!Ioi@R8gX#WT;=Q&lgw#4=ztUzc3LgkEC*(LS@ARAVY@a@1kPtls& znNT`WNE^eh{xrOA|25jwUDzTF7}>SefLeuXZb9E>ZmiKE**sE!>~Bk#o%fewF?q;< z9*r&LF%;Uh-r+2M2o^f5yV%w~ze-I2lG&poJpc9#O=;A>-xxfp3_j5V=Bd9_ip$cB zQxph&Ao`iTp`5n{PXNXJBS#kl(F;?i%e1r|u{%fsm5;jK@>H{Yp5MFoLRe7wivSkD_4G&dH+R#P z+$wh&UY)_FjJ8?d*6tLej;5Zf)RXoc&jq>fJM1_gkDqjcag@em1?@FHRYL^LVGy&P ziE8xOCExWzPQRD0XEA#s(%}u8Zm2#KBVL+!3cox1(tX4}+>F1d9X){b96!Wda8R&v zqu|IQe54-P3U_ZWtjx88IUY#Bw_xB?1hmh=#J?ckd#&a&pjr9P+Yg=!;BjW>WlI@5 zD&1TJyfE-YLsVh6)hn*!QyhP@a0s;Tw767rMGi(wuS!KdIrYY^lh0Ug-n_ZW685>h zS?OBNBxe!&J9uL18KH_gXr<=*Jz>8wa;9Q`9?+#kvqumJ$vna$1OnvCv8wtwI*zA2ZjOh% zoL<+hI1X~JwbJ_#zmr;6?>d=C6i0D&T>S_FOu1_X?r8P=WtXIhNnWGUM@}*9{qg-< zsF&k;#>y`M{Yg;*;dNY0;&t2t>Ps*P1L$1p7Bv6~BJ|@nC%oEg-d%1aeG>iZai`>x zPUe0=UQNk)ir@dhwUvnjh@?bCO`pz^7=8LifyF5ElX>G|;ub^Sksa+A9mM}F^t-53 zcfdstS5aX0MZ^x^ChO;u=IPQOktU|IvtjFaqkq2OBryb!#SUCeSjC7&y$Nm{oz-t# zyiu3WlvQCUokjYd(%GATl(tYsy;?O}_~p2*4MAlY#o}T*VZ&27eye*)b@XA42|~uN znq>rC?z=sY+f3fx`iqeHMOb9F*z|PqIN%7k^vSWd~yoxoD;f;`wV;Vs+tJA1c z2nRWl1qKxN?DaZbyn1T%xo;01*0{#+AX4WC^%#);PvU^dpuny!`fjAcx(vqlw71Mk z%xK3X{x`cB^01|u_PDQf$0~r9a_Km=n@|~%M-d6J@?46y$B4R%JDN@i@Ikl$dX%rN*2yhnq&*`Sse!pWr+)D-|I11Wul+^f%}QO29mphbLDEOLxEI$gKN03tb<7S>B>v@z&p)( zoz^W%S>Pv|^@(a4i}YL5L!3>Z%+Xv07b6D$tscEk7QAV1X1o-#IYYdyQ7|3ClCZS zqE^QFM?zETe2XcQ@K0U8yot zLG;5{pR%gCKG2NdZI3}&Y_ZPZ?{>xmx;$yuUlS5c-v4}Hc-g?9-%i^DAU6mzxYt2j z+s0zy3Jn{-@p+0^Vh`-Ev#G=}F!=FR1ts}5MES05RG{plg4gY$gCa0mN&ekksyaK| zW6^8nEiNDY>t;+a5Cg1(7_Mz{ExDyMV6^WIqoLxgzru6riFQePd!#UisOU!E!^h^` zMNY7-cjJ5fe|WrdA87}5(~d_+y+(rajtQS~hKp&4lm4{FYzm(1YFSWC03d8L z7uxxiPxtpz2im+0L5?*>=FCM15?_4fZu#k2C&a4b}yZ{E)dozvGRc zBAs5sg_>t(Xyte41%BUyfae*pqt8VUW9^s6>t%PzlnG`P>S4dAhvi3$aS z;UWq<#GUoZ2)Ex0zN=UyAeeYC&l{9>JhG3lrO(X`4@I^R6(#^^3c>jelO`&;0bLc+ zT|NwKlcvu+t~vOFf=?0N@|bjQpIUn$6BW^(jW601I)n#EP2TpQZGCToJ%+>m{v3O8 z2~v;Uc7@$BuGZZkY_`+XxP|Rz?sr>m>f3c*Ki+Y5*=;g2MBhmuK9Q@K4UPzG2f-c6&#!dkoF+~L7FA?1SqcbI8no&rWYw5Mp8K=UP_)IJO9Bd>YjS{G3HsqlvQ@$NsT`MWV%3iwDU+1tn_I>$-?c4if zO49^2ptk4>DmB!+onlSoh;KI;nhWhO$&QOTceo(`v<^N$WCcIt&HIAszcLr>!oJd? zt@}Co#|d6Jfyj|HEI^oCZSWbbk4?JZfXN-15d9EMka~j~n0(jKp+)VFiSTaQ1btYF z{QE(4HnRMAA{_TZu-{8!wB%4ot3APftyf{UV-Z4wyrd7_I^v~fBSE=PRVt&%p8i zp3G*7-9L4Kc9dlvEi+1&1*oi`2IMJeQy{QYG>AH8I!>LG`$|sZ}-wS zVg|HZPU1)7XUx5sI-u7T85(Ajn?QG_08e(g98qX?FI2gP?+m-&6q#G52t}Biqq#(aX zQI+Wqa@=uP<>QUBV*!i1?yl;{8-pZqJEl7lV3(o+J1#St4Jr_QdGg0kWx%@Dd}?o{ z5b%099fPp*BtCar3$KX^ZQE!4#W{Yqd0>99dwjEVWtr61sY;>uKVjf4;n0tXHC|w@ zWhX_!o`9iO)4h?=ezw^$)P~{=i`WhkS#@4(j;27OMZ0m9K7qXqlPD**%#RTreb8u& z=SXjJa$qnm=It1S2jvtnq8K3Iw_OGv$Z(J^uLpb(eNi`V?^RYRBsp!F*;z#<_x$sN z-dGl?s&vtR$FT@)yM`0@&F#0O8DV>xh@VQ`j1qwIKo0?N&6gj2QZ)rHPto!d+BDG1 zTwY!G;dG(_-mn4><6zB$m97j`-+W8#Zxn1v_-^!mX!POLsMFOaG?#pWjESW_Y-Pd6`-r&S!b7H_c0KT4)Uu;seXl4vrOlZ%=@3F=QY|h1?aY( z;#f#)3ajSD7v1$sKaqi?D=J0l1M@O2o;Br7xmS4Kaqt>(@P7UJ;u@q9vPrGwa@2H6 z>1_7Vp~}*pn6r@ES?lJ~ZK3q+;#KFSYLsL)#hJ6M{z>ZWkW#oV7y#+tPNnXymtJ}C zw_+!mRB*P;qI0E;#15=RgJ-J^GsVwa;G+d9sHYJ677Dtpo88f<!>zT%1Z)>-$d)@?9 zt(bI}NQ{lvyWY~ByR&VW(JJRX*m(#4Jpg0RsJAU45;BbR{#+vDN>9`paaX{j@JzeM z4BewzqvmL zBRhQJh>Eq`4`p=<)~rniQdg{27l(=bRA%C zqSgCtH?u)pRI(@RdaM8Uf*#)3^H9@AzCl=8__dz7>G931E(b|pShtU=v|iD4Pqo4P zXG67Q{J^Yz|BDaFX=?ABv zxa|8~T(V~BFDK-g@6Fe0I~{;sZk#7S&x);FAapR67vA#X>klgiFzp-N-;*rV7?TTP zF+XEIi2BJQIK~Y_^Y`@B)%0q3v*{dC7bsoqn#&kqM}02Wms8#8Dpkf}gB5~u#x6@M zhitJ$~)~R*`h3 z`j12xO!bZ|6A~o-;{k>>4)4Q@%xvPxRM5S*!ZchwiI7HQb1cm0PoYQs^q`c~|5%&z zfV=A}G_l%qbnKZZjwT5_s@}u1O_lRaxkVDSXJ(DLP5tlt^i9go$9V43%Ynzyvb3ap zVzkjv*(VMpZxjcw5WMw;QRl@hJyt_2QZBo8Y{E=wL!%OtTAo!knlq`kIE&RS)#x9u z&IzkXg{oStBnK~rw5iUg(=`wd$*sh3X!;r=0tdJ?H>jab!Q(&XATO0rve?<#Vay*PsT`$izQR!58|{*S|68SD3GmWenn8cC6~+xd-A$8TUBhp`i6p36 zV?9N#*1KFHZup>&bS_!huWjyxdv3IB%y0<0VZG@Bo$KgIoP8nandM)}=LP}uhN)_2 z1TI?RR_i$lz($bLwvMyI3B@)iqZ|~X89@7qkUyX8Jjo)SABG*+pRBiRNx!3F8isHc zOnbq3zjFEbt7}auQN%H*xPp^MW66gpCL9^7yi*)=5r4i33v|ns;qXS^$)cLt4AqZn zWGlz=Ell0}iF<(j5f1q={i6_D>$dbl+DQP;OY(K)Etn|e@(bEZ zWU-1|MFx$tzLRLugfGfskUbZEeaztx@uSuZWYd+Hr`b9q8L!(%zTk?wDv$w-?G({e zTY`^0zNT0z>T*36SkKmSirLgWGwS=WIgornl`tLF{i{jek^?(xe9QuC#QN>D;xzY0t!~er7ifwf z1g@wFo~0G-<32H;so~8Ff_@cyJWf!QXdU|u3jV5;)30@=q^5tDBR7wL$R&m4A*B#N zitpZ-;y4#pkRRj1X!qP8VDCkDS3Sc?D%Ts<7&qU+*MqMHbC3q#xSGJOtQe@bYPT(t z3jwc%{|v7IIBy=akkfX54sfab_vTB|G>9@!x>u+`pCMh78DT2KwIJ9L!YgC3NtD{*8bGEMy5PhMAI3KbpYie75R+3{K+u znHkF2hwucFRBOJsIEIW*?$6xRi0oWNxACPOTSPH5{^4K7IzXFQC@FjdcmcfIDnnT`B;=-Jn>#tWmVefUMnI&|F= z7nbAX6dQ5;p(KE3*y;8kgh@pS<)kysIHg2?PW<3ozRzv z4uZv+Zg9x#{W?I}cOiU6iJ}a~n-U6G2bQG?jgHC7-cen76 zr6|ABCrLKjXpJMw?1KnBqOD%NlV4&|eK#L{l~|4?qTOSLZveI}nJ;%J)ZLJ89r)QW zUUz{ySs6+>D$J4i4dXlw89KXLh~k|kN$I2u!)NkSxK~1C`DN8zRBF^8B9j2#Unl!B zUqs4?1!#q8`a~cs$oX+DVw}*OTKlafG52m3X~Znl(T+Jy!sg&XkG~I9CuG~LS!^kjoa_ad?~12Pk$Sl?LQ_sqO7okQosc>xZe z|4#H%sGsh1NBK^iw`M?E4r}4Xhfuc;=QZrPmp*_X4O+heVl-eKJCC4qK}D%iLNnsy z^is@V4H@v#tv?nSt=Q%TXUN*o-Bai1?^A~};j6iP@Y6P*YbwR}S^{(jzT8w1W+PV) z=6`HCNWzWj<7HfQsU@de$SQt++aOS|qZCFrf%K#f9xBV(=G8p0#4X&sWWQK;wElOd z$-(q7w2QBrB>W=IX|G>OpVRhDJ2y-?`|*>qYfMU~h`Hif9A5*^k}f?3+cqahl5D1C!-<1a=D)3Cz zp~N54!9Ps;gAifT)?9xAauKey9Lb4+kp<^~tl&e~k`QrCp8AMP?_s&&MW@kaTw|ef)2XK5uDNDm0PP@J(w#>q1ONbOUkOoKz8?)ug40yHl&VV4zFeaJ z9j^NpydNRX`?TfDLtq88A6mY{d8Aaoju}1^S8aS3o-9*l#`cVC{|xE|fc5W@E%p!a zz{jaQ+L0`bQ%Ag5{yj33PYj{HR;UkEr|W6k;{YHkVjz*nGS`SIK1HL1vY6U%Qjbg7 z{O46fJtj%BkLz>XS(r5X9=D9@v0hF12Q%^Kcl={9M&t8g4pwu4Gh-DmI%y{VRuHoO z7S)yLRlzK3*RM`!%7kT(}(|WHCttWe%vTlzYz`XhyVBqv%6=EaM*Sp9n2Y- zhs#~=xQ`mYq4sO+y&_isq%^gJ|5!Y2LM2LmR|0qU#;!!<(K+4+t>c8%(3`i zC!7}KJTn;;ZFtKNfz5q2$g1ZVA@e+W*}GkKk+rFOB$J5pypbXS5EiQW+!ZZ=>2>Y= z33|1qi3VnWAqUuPQ*~NiJ@(LWp|Fx{H8uC&sQ{WhV`H%Omn=jYBftyd0XV?NoeRJ1{RUMAGq2nU9%=K+sWRAl{SRs} zZ2$u9?=sYjCU$?aj|1O+Nt!(M67z^i^d0*R@LjFcb{bGnNAXLnf<-yp{vX-&(Qbd( zE(=?@n@lv@3KuRCxs9CZ2lb4TVcUz8zGBkUoOH>~}>fTw7l95-tC_I0n9W)f2x=#;nksyIk0dbX6P22}THpM6lj!JDVio zexaE(?>1MkDcX|~}p6Vq;bI zb%pT2L|3#5E(BBr%#5>Sf-d4_f4Rh3xLI7XV09B39Q8|WWnF?*t+610PGz?MmhxP6 z(H1m-9{Nb6Q1KASiI+&oc@MH`5EGeIB*Mv-al0~lb0hKYQnW3W8_1gMTLci>=Du#! zz!!S=c*QXg8R*o=K_ByqMKYU2Az*0?U!RdqdCm6c@9BT+LzcKfq*)=-6~`y}&Y3q| z`QWQqj&rPCeQb|oYrPS^TbHNCB~hGwt4n6H2+2fsOooy2I0$llVJ$$6s@obV`k7mB z7m5G`2MkM^>HssuFYHw z*N;IlR7WXpSverC!om~o{ys6o;vUe#0q>Tn;td~p8xpzu&l(%-q|ajv5YLf17{H-q z{ww1==+JI}VkI|`dq&b`x9}ODT>Axm(YIWiP}5rUEPbU_1OcHky$VNvYm~U~=zDk?)wzD6@hi ze_+f|dkfy&Lfv!aAbetXH8=Ih{xxxeYfxbT4md@q)Q0zu2x$_9BCOj~c4;D2Ikxbb zMwI?t9Cv8>u(fOR?{Vzcp`v1S&z5ux&+rA$BqrIY?a}F#tqk&qp4wC!{y!Q+mG*Ak z4wZCMp{nr{5o8Y)`&_?s;ihO&{|-c zkd40+)5dk9U%l->n!`9az)Y)+fl#A%uHj?`T*At2QdgwL{AOpge$g_!p5n4l*vN)x z8+&b(81NG1uiYM6c{sqEq0rg6&?b%`Pr}cbtMZApa~0nnoQVQ_3b~Q|VSrs^0BUG> zSjDdvBqIj!H0^%`p3gJP#_jQj+WxjQI z@I8%d`L0IULbYOjS#v`~De`+jBG7*$Dng(HRi+Dno(tT^45roU0Pu$NZcZ>v1FtKt z#_sB7Nmx+w?~x}3o5ES=xOFTrs9Xftl*tw{)e-?!^3S${oDTSu@2`m`8DCz^@jn=A zEJqCryMQVDS0w$a7ExYHwe>htQ{pQaEqqpcTXU9GbD*9D_nQ7NcdEJ|cZ}AC>7Sx^ z`g0ZT)P73G3>^44k?^3yk{el=VRBoE*|^cf0%*t~Zo`Ta3kGB`MZh&1Q-p=SE)~Q5 z+n#$Z;03ozeU>k$u^TJ8JeD5c4lLu`N7JpN*|34gscndph0=asch0a9b~q#(C+~0R zC0_3sQcpbaxZ5-*w4RjY!_GLEhWEK`_Ws1lwJjhO4qXsw>*>oIiZ^>WUVqDV^zr!1 z2}jF|Wv&*YL4JC4t6(G*p zAB;SIO2S{-8p9{r0Lba7BudpDvac)P??AX=Vji@ZeooA%@J}4i&Cl5xao-vM==e+1qyvnKvM5`Zwtsz;3z zVkY*pT9hB%c?{Zq?|WsvzpFKn5>^0C{(*4`n^ze>xh|oCTj`ld1dKxq09JzAI^adX z2y-OOfY}ni6f{KyKv7~)JUx#K8GAgq!V$%eN*^$Nm;EQZ$=+1jv-3zsY*p~Jv< zW!hl*@y%Nn04Hg(*Q-xg&x<$fh?VA(^gE*nEyVhm4G=2~joL_#I*>bmJ;@b?&+5_* z@!RNhkpyVB)tI#zAfH3wH*4@Sc(WYwvLL3P89zR=r4^$IH=b$t(k8KA8E{>;kKb}Q zOz>c#pwR93msP{Z6#n0J`$eW(_hl2lbQ~}Y5%LV!#@5EwwqSYD&y%Xysu!VI(8*MO z&l6d$;PNc{jLIb1*O?^(+lHE8EIyxdRBZKW%_;Y(u*F#4MgKoIbk-xH-B&`Os4hhs zJaQB1XJW3j{$pxViTu6*_xkN{cj_FY3x`{a6OAIV(&QtW9jko<73O@b?yf+@*8NKX z^xpM4xNwU03RfVG<*sH(<&1+6=27PoDx>ZPhbp{Mwi55EGj2o7pSU>JVk=c`FzX#2 z`$nzQ-wWB4s!^C7IlIw7%N|`TiHSCUuQ$5w!bMrUK9|d!^oUkz17SQmDfTk3Nz^}FLs~<;vr`q~dA`{cC?)8t1 z87JsNVOsA7IqE5;Ywy}!*0kZV_|N9tOOLiZvK$5P+y2^A*02jgtWDv<(C1C=2ma>A z71B=YtokYQVVmnCwGjg%@;T8t)Fuqcq8Ktuhmdg+7W zj=@n}-a3BU+?0EUXly(3Lut7jWSryQUO$oW6X%k?CBI;j-~Mo9md*rO9gl^d8O260 z+^_xaCIQ{{+nXsil$J-U{rtQ$ax)o5L4X*&bm+iMW9Oo&m#jyK^S$gezC_K;Y?y5@P3tQ=|8^4LrKSKo& zk=AZ#&7H7f`1JBq+*e-z;n)GvutYh6r3>HvL1Mr+{yS?_PuU+2KDPzn$Df-TL0&Yv zC*kyIK6xboyr?oC^Y2-Qwe+@Lg;030*M!tX|7HQf^9T>+w0G=o9JWFUpp}CArm(Lt z z0b+9~!2(~8SJ4Q)AeMl$>>wiZ(EbcyHgUX_0^*HrkJcoDp;0j+Uoew-IBg{lE}CpH z{Jen!M+azsseqq!f@}z@KY@u1ujMJOXFf#KIJS2KRloE%@D`eUkQq8So-IlaHeMSiHC8U`#CzR^aTGAVH_20Vx-; zLC5ufPM9;gkrTB#5;ayTjG~cfG_=+Z+>-vOKy~ zeW-^DPzo`<4=kffj>7N#$XeGdibxN6zkW9GFRW^N6j)O{e7gOOJHpkUXYjwngGV(D z#?F1vgA%ZHAK~S1OL=&%VtXuK4elx((hAL~>2b2hS=y{=l^G$w=dayG@faR--R4c zIkWTse$QfZpv})^r&k!aHFasO^Rs7esYFTKvwK}DWBM0;LMrDfzhXd(&pg-QrA;3#=_ z?JanGtugZ!zcimiLp-wcclL+f(+a0fWNnXpc#sf*SP4&Q>Eba4JuEMq>FDJNOlA>o?eS zs5;hjnp?VNgS7c?!pLo{=-^Q?>=s*NZ+yl@*v&YAZvhD#D$eTpoF^_bugu0PO1t>^ zqvo2JP9@luhnNzMpFK&E^2E^*+7bG0~<1{2cS}yXDs^ z{$WF2!tPUg#{mA8`{6EvYz#ytQg7sLH=6_u{gFL8zzSY_dxqQy@oJI;ImlxHroFK- zr0O!_DrwqDR#qM1;7TTYJWi4SLz(c)D19lC%K6MOSsxdX4A!)rFIOWPr!^^qmDNsg z9ee~lWqWJip4y3O^TB40Zb z{i}TLF3ypIt__Flm9mx2OzN}t49BK8Xa5Q0xyD}IPwYBizVG75nq3`E|V! zMsg0BKH9JWm9&ftL7oCaix^ZmAuO_0W`fReDKBHUv>|+FiOL`iw9xKbA+tzT9&CVV zk!I2Cs|mV3EJ4(!81BTCG4V=DYg3=YS37abLX1(!m>sqm0M`M z#|1>zqKWuhE!qb{{-lJk_3JLA{RmE7qmY6ky%>T@SbsPhO0bHobw!TFaawZC1PW$r zcqQDj8~=8G-@dy5m5CO*n@1_2F5q?oZwOfz9ls} z?%K(c#Gj}Z%O_7wlKh9!1DJt4TW->xX^sYLxaE;(1!R0}Z~AR5mIUvA$OA%f9Cw)N zLDu2-J?oV5xfa7mjAl({Wh=?7@SwBX#g$UX{O&3?BLBGz|7{(EHH)t7GxYcAZ?-O? zOcL&mu>nafsqNILogP%W+zWw|WLM*P-@|TmFa+KXZ&+uKnGjU9n`Ou$fL z?PDvYnWkF&W-!DRDt5&uV&C(cN%E%V=X2!VU*y)`x+$_}d%FkPzmC`U$*q1>@umLO zl$rb65(ceON7XbGp-Tkbl{KHrFTxl*yZ|Tp)|uuJ+=xT zLc9<)|DJmpTC^ctiBJ>1q&||A<|RM`GslVJ&IsPsYXt2jteUkDi{%t>$*51=8c$*b z&gQapP16>h#W@?(-CAs!<17bQ9FPHLF52~DnM^=oLifrGv9Oq~K@Vf%J6S0=7J6Dy z)B)U(Gc-G{lN^YY)T#jnySRco%LV*C(1dg-03b@nOQT*HHDKLjF}O2_6&WC{c|Xua z;J!hx4m4pWX9Dbq;ntS|R3+5B6(=`F)F*eZrN)lEghjc)tdv&F0S?D~0@`M>nu`DR zrdG6`W*ysKyE?YwBcvIzv08u0qK=Oa4&>m!I+tEJMk!q(O^ZD`$t8`ESdSXCQE)1K zFC{&{Vz)-|_s3$Q%^WyV+u10ndULPYz=9K@&ueI0@G)RlRttF^MLtANl+*Cq1HzIf z4t*%=;?OF7+}^H)`~6>O-ASy#$epT$Od%1&rl&6{e`FZ@nnkoyG{0B`9YG_X(}Ffd zSw*ZyNbs-ax^Hy-r*j`EzS4d)Blqoc zGA6n%@FXImY^vm-wNkz%8dI5pVHFz)CZ-8dmP33??3{qyKGxG%kIsy0Pcq;Ojz`fS zDSYr?_eoU(!$B$Tcsr5{!!=|TZPin)C8tt{pM-{oK!uj@c1=`bs~7o9;QT*v@PCm& zBFMc+-UsNR-*i0nYRd5D4CgJu{HP%7Z^bnunqa|_z4**XGGX83FAd=C63U-b z>9d7e; z?@XTISg>bUP#ihpd0Pn}r-TaF(=v1(&8VLCwWjk;W?nO%BtH@geK&zDJBh6zKOQeE zAW^Iqk+|ABSs7e!Hq?^rW4h`a=ySRH{7R0m3zX3JJ;+Hjuj9Nd`9kSusRm9a_TREu zeF?naL9#c4ujhJc%JMr<9&1tcSRa;#y0dkw*rT>dT$inGkI?}Jx?|qs*DE+66>lGa zLb6-n`%25&Md){73Gm}MV2Kphlx9w`8*?~af+>9uk2%~nQ!tk zyQ-Nb<0;C2LA40T6LXG1QlbV{T?r3uDR%I9iAs{f^R3t?I}>>+o5@4_Hd9ciC(b5g zEc>nwh%}?hH@P${15rayf)D$Pu7ndo`o}Dll@@htW`BA*6ng%1`Jf@PlIK39d9HHt zzMll4T4~%&goQNqug;+Ank(wlSjzlrG}TE0a-7uj?cWvY_uR?GR9-!TA><}O>l-h= zA>t?p9`g+Y)G%T(I^{36*O#nk?2@PoCeMJ-iT@=_d|#eQ=h`U?V+BH+RKMtQCGKLQ zKdBlx$eiB6A8mb!zv%?oE}=0NbW;f8%KMZP`3E*UhquHum*8QUy-&&XVzO%l3^zl3 zU^+{5aM{n{G{ct52&Y9pv;-v?5k=p-l^OrMj^qan>v%oW=X=bnz^4Nh*h4O~5mm03 zc$u1aD(6p3zMD{5Cp**Ld^Onr;W(tijfTs*nv_a@vJ0-4o=QjSS2S*&spRG5(u8y)vy*G^I-B|&; z!Q7!t2^swsPUQS{V}$Vk#)!lV-;8Tr=Z}`uVi)~Rv)qC1;@B)H@)dtX%M3_*LOSPz z%6)G43CxFzckl*kak&P9ky8Ra!WSBz`Q4`&=@k3}X2Uwzz<*ekCTJw@d6~ab=@U zMH(G8Rt-$yWY)jIY9Dnx%wvERi38l&0WDz;%7LcQ^blFPkM^u(&VX%z4u0;V^hwzj z6;qCdXkr6`JUKWwnp2Ic-5JtjZWy?;uQCJ^tuL;<2;ON)ovl|a>oVv%HOH%vNz*fy zH*&DzEhEKj`-oT9`KFfsM*zg77_i14`P;aXfd60qh%=*?&(Z@`5IDC?ISKcx{ z5wF-BC0G4px#&v+Iq(ij5=g8&)^BDW(3wAE_1UUxkwoAhCT|=vKo5vzdlGFT7%prB zgu4?^D**eBrT|O)!paAs?*EL#T$5T{;P0zlr}TGSgQ{Gy&=h2YP&?LUCZBk%(|`kQBR2q z{>Lu`YaXY-NNL@rM7F1NxNd8gmw1oC@|AHRyg5&oFx?l3Gk@t}I$SR$aH})TJS5^_(w80L{ZTGWp zw5iSok*E^j3_QasIvHoG!_u(mUv}O$+%@J)53^RCD%u}=#qi18GV8kxrkwo0tOUX9Q={uRk?sF!fBShv&9yA0W zLw)c!Z%dY&EMd`w(FL!KEN@fsc^NeMjau&0rpc$48jh&hYM4Ar;3E9m9p8B_h{n9} zz)KbaW4!YC&b8m?eSJz^FXuH99b#WExo3L;yy6%py08XA(xcu7dNB!Qm}gqWx8I^i zw`fONEo}`Yg_(Lu^Nn87UlGO*Ok$6|0eJ43=lHFgks3Z*XrG=-)}d_h(wB5s3UOj< ze&3alo9XYdG$oVcDsj62uCU>-BAIdfH^c7BZB|`(dav0o)oVSf1DdG1;b+hG(l}WR zzEz}y+@+~EAs`1%BfYYJ&@~9!y$ui6dR;~D30V+ut)iZY455j>zwu?Jv|(j2D+|66 zv|_YLV@Bx)m`aEgq~89ovXw!Z&7^?&PnI(uc=Y*ZRD;s8#!gO53HA~B1So=b#deDA zMh(tBwbOfD^bK!|OjFGJF6pzTUAHFyW!8$OWzwiW=~$diwWL$F_J#WFfDI~Ft*Vt(b&sOQ$i0tn(Zh>0G!LKLxn6aZD`iD4nV@Q&uNdP9% zOZQ2;@-l;WUOOw{L74~C-^AM5qSG^V7Td~hZOj4CDJmCR~{%XNUURi z_PEBEn&Qw>nHzALuARev5Bg%8Ci^n^D7*3@=jB1WYf2#9SNUVFvsgwajpR$htuufA zmm$=hZFaY+hZXXg7-ZJV#jH$JrwcCfi)a2L@N@D>u9sKt2pWlxivMQRI8*6;y3%#1 z_^xLy)0z`DYj-r}jz95FzjvI3>70_XqYi93DwLo*9E|Stp6WHT?wkI^?o6icEWCtX zrWjRKcT*kZTC2>WmnLMDmmo3*M3St}G_^6FD9=cC@j0_NMKfvaFc%$GCY z%p6=rb6JS)TaKUd-U4ml)hCvWUAvvkh!Sd6M9$*70cM9nPS6&RY}tTa-?|RdMkP9H zM;Iba@zT&%SJnhF@6-iZeoAeEA*F!GoNo`B$PypOOy~GnI#IlHn#B6?kQC8ElrlcP zW4_uZW+3yEO$&V$OT&LvmxDU~XuJ3*#WPQYB#QSRm}iDuHly(yu<=eX>>wr>O+g+{ zk@ebW3xu5SVBwY-n9A_9S+_4%%t)g9!r+^m_n5&O#8Ta)fHv@1wlN6GbBvme{e|O} z8m$iPpljyS3;}k13d#}*?XG89Vcy6+d8~>oJMa!9zC9S1WeWfQvjE;MJM}4j<0bu> za*C{*Q6h^W@Zp%`kn*%+Wf%!g7%#2D-A*6vd*`qv8FyX$$0)d*Lw;QiTJ}MTmEhtZ zG**HaN?1v*NQ2Bs(!o6v)_jGGO8lq9tsl~kQ8r45$NyKavK^*YJpU3ObJQOgnz zG`zAu@B1$T!4158-_hQ@7!8P|ELWE#Y@xxW7a7@*LQVZF=)s94gb6k&h%(2&TSXR)xY7Ba=x0v`*L zZjDc490w+X1A%NN_-V{8_A_aL@s~Y#??aUqT5UhM@5;ke0`WBrt_;4W^_iAb6UGQU z*+p!Eu=TTpGc-Mmt6b$b#r`LqbVGvN?iBSI@;Mla4F|k11pW*O?(yJY%T1JVf3hRK zBKi3=1#mZ>`B*06Y(98V&UY{8)4AgQQKs#^H59p_TYsiuU^?elMGUS3s`On0Ea+%K zb_RA`W$AA)K3Lmpvs}{0_OJ5y}H;{=fVR+jBVrdS}`g<_|ZB!PpPzqA>6gI6XLUhG?`Si=zcRoSFZ?4<|6wXLq^>Y8aC2G!_=yySH&}~9uCk@ z2oYp|_J3G>03%_S>(KRi%6^kESATiuT~0JXqIxkkhlb}{$%#GAkCTKY)-x4R35+^w zW2prWrbp$H5$d48%9#If*B-?iN=?lp>oY1H6Iwn9T_zMzfs6quU!2R6SE!HXS$}0n zdnYO|Vd3jUPyZ;DnD-*;qwCgBA+9Lp=XWT2O%syK0C-a|8-_af5B==2(0s>=>_v4e zun~{C*(iQkJFt8;1)tcw=X4CRp_6Uis+ntcXf2YCe+L^(A|>KvbjuoQ`ej7nOzbUf zl=JxWcjP4q9j-%S@{9!Zl#Dgtv0oB>oxW%dotOV{q`1q69{YEB7Ag{xsng#-8I?91 zyKc&5m=|=jl0rTrF8m@LA1Hjj9H3__lk$bV1N%@#^*D?DciBotc{)i&g;d7Ow02+h zZ8U`Zp#Jz~HWzy8Fu6ine#iTn6y{>k^a`>sP^QJla?5`ASm?5tRta^4A20b?p8i$< zQdR9g@iKY4jcph$(y#~od+$t$aqbE(=&Y?h<(*hCK=e}L+m$A|pOezPF4tEmbHWn+ zh5u}x2>WDl5p-PIN0YHf>Q7fraQ-nB-3%VZm#`@kmJE7?PU|XzXVZL*{bgT>rXfq5D67=s!x6orC%LLAc@orE62r zcM`z1ettl&yBVP7iZBI7*C}Y<3vOPTzfa&%IZM%cL@mBb+@be@!XeDEC%SJ}F_$dv z8XsSh2-fi8|x1U(=wiHOcC#cL)pwuEW zg@K`ZhdK$7BzxCq6n<4&DwGN}0hk^F3|t=0DRw+=mZt#QAOy-t@eILn*hPk(xJ>nPIE zK%sn$1wDKhykO~vFEaD5$wv;7@$5WV#nlN+gKlbYr#5h5VS_3Ri4u5rFeQh-fpe<( z#Vv)ES3?L<%e9`|C9P$ljY(@uXO8>Z|C!;?wK}fZDswkyEI0)#o&#zoK#kcU#Opa-<4xT%@0qYzxIJpj6>GJDVtGmN04@jkw?ePX{!iweq)m9z zpwuiy7?=S%&MjO^n8zhxtE4bAKY^=hMBOvVQ875p!LmXwo-j^8oixHc`v_V*cwpI%pIsHA`B zgA$=m#;B#!3_=@8uI;qa6w};Vh#e3?@E7V6NoUm)Q!{gg2T(UOaEl>;`?X`&l)FP^ za{aMxERtzql5{N@TolNbxGo$LT=L5+bPXHf?gx9-f(yM9SbpPQ$HYoEvX4x8>q)z3 zqP-YD+|$3%43r-(JpfAU3m6nEH1Y!-M7c)#0_oS*m2rn8WHg3V%<;Nhigm*jH4o?Tl@L}=<_E-9iJg!y^p#}jVn!SEv1 z6V^a-0li@1{G1H=&I%&L=O*l49jY`9f_@vODKfT_th9Eb4kYcX5Pq@Jc+Y+sU+#d_dcbQ6eeN;!p3riF*d$}-@s+~wZ#C})df2)k;F`2fm+YIfI z47dwEabCh?g+FCQoR|O_tl_ecpR+6TSaBrXX75=g9dA^cb4sK)wy#MtYNY{r=4;@) zGMY-Rlc6X2*)fS~#kzwlB)ziRW1(sUtc4FYu_}775~jlUZwlWJ3Ba{(BZ4u5CSGMB zB)4fQNoEoR{dy`QzS4RIvVlzIPOCWrwnps0rY{lK71qZh>op&-#) zw^*{#T`j_7&+Lf9q=tCIRIHH;zgb(d)?X@5+pL`UNzj(O>)y=6g+p=lvA@ijo9c?p zcmBaEi1S2CIx&ABQ=-f|DdhSP9j)KYx75F@ogggG&&6MyA}7S55f5uL!3vS&22!_& zPNc5BVOX#nvJas=Yf9d4A{KyWTEZ5gRPidrNC+xR+Vl`spuUe)WRR zi<*(8=NLt(puN9d`p;}w#gc~|qHVpAq17x)W|HfitPyTg@pSs*v^OV?&-B04MjUeK zOa0vdmc}NNuCQniFeq1+d?yOLSejI*Vtq5!a%5b8fDLI^WS>U{57{&Bf<-sY`H7kY1c6na5d2rLlbmoukT^ zIN3T71L~_ej+Zn;&*)DY``fu?iK$BCN*hytS478|QpId#XQb#s{Ft_l@t%&{l`w&3 z2hTr!{KmWdn%Va~_ON%fz@>Gz>Jgy1N$IsfJ^F^6-qK<_2GvBJMx|(f8o>6aLOf(p z^~syXTTfS|dS;8%@Hcq)1TA8^a{=Xcb3)qGoBcjtW4)5hD+XTYaszY(uECRu%mROX z=Buum!ULt8UwvV1q9IR75m(xob@=*$+Ss+O?B*uj<9+wnr0_)_c&Ady)_nL1zJ(ZKc(xxB%s$t zU^nY3fc4Yzpy_hP+h2FzWs`V2=!WA_6K^PH%<~t&O!|u+^G9B2XdTPQq(=qW#g;pf zxL?Jky*xhB<-UTy2LygcTgMBZI;&3hc?-0>VXF+~cvQs=_OGwKy3XDAxNqdNk*ael zIb(dhtlJK7( znac>^#TGLw?WsI2d^6q3X~^u|V3w^v^M1%+x^%p`m=#OA{ZnNk09CQ7FCZ#L7H*RM zp|0gUl2iM`U5)-s@|8?}I{DB?dTwp$Fm7P)N1f&SD0q?WyWJ9=JxnDM%C51&E=eAx zozoYko_f~iolzXID^m~9jXuN)MA$JvcC0iu=?5K_AF~iEwvwtI^UNZoJ*hR<+rdk` zqPU9cT-_|3@Z7;4puA%z(vYAObq~%dgpVk6vD}t!tXf`@ubO(}0JKrRzh2w?5a^Pl zHc}|3`DNL|UYR@2V+l-s5PA!{v6ihCUq_8|A!2C!5hU4wJ0wH1l@ie_P z=lr-0{T3_`+8G)@ns0>@vaHgbMfmNM3G#;KZ0+{@)>ki$#qknf@U6%xwvCn1#mp(X zwEB&ddW_;;rSsJjEHmlxLzCmRxH$^DdNhsFO2&;wl` zT+Qa_l7X)1t{jI}Ft!K0Ae-YmrhlkEm*T=t zzDCk6e_K$IL-Rno#J}7lJ~IG)s+bUF8qIMgeV%*2KdgCy3ja zRDR&Sr1UjIIWL6sr_M}5!45kIpd)FdtAL4FCNPj!CKh*L`&i(op48wZAd5Dg0q>Bo z`n~YMFqr9Ct}f#ftBctLH*@O7@99hoQb{*?a-le6IKx=~{tq?evOR|F))h_i%gZ8V z4*8Z^r)GKCaaO0shXU6|k0Quz^jZZYu!f0yeTj9+%&D$|w|at9bARp0v^Q>(+$#W{ zmu3R*{|}_WcY-DOh-r{`tSL-ImPxT2`-P=Ymf?eQ( zZjGAiBx^u34+FRhfywMf8Y+t_Iu==)h|ImFX)V>jx6Tf>yVnnJZmODc9JB@=fR9fHg&SLV}Ln2faB;DnP4wu%Z~EU+_Pa&b^j z-{t}GQO|6thmwCEze_4xy4Csql^Lnf8zelU`2(a`GYc|l8#&GsIPy6Dm?q`yG+5}> zuv#eT)-49m7|w-Ud!eiegMx z3=!w6z+oHpj%Kq+@72!dZ^&i0d_@QF#q<4bIQYW3o}`wz$$gf<;n?eV`0r5Og~?0$ znuZ2=jNDzd_vx$dJ#2Sp)&(L+nWLsX)VeeX`fzTPiwd9$pB=SS-*aj8yN=o8P49DL zRO(e=Y)qK_O2;hlEwv()%%p02Ye%!R&?GV9%`sVRG^`3-asz zAE%bw$^c~azpgSvZyolKLXu+oO#r-G5w5qRQpO5Jei68)-N6%4j&j?uvS;7JZEgwa zPOw}35de=|0Z9vEuUx_fF~j5he?8(Zi_8n6n2KZAnuKoH?&;zR^mWsKut^$PhdRsh zYOk>E{FV)5Zg(GVxy7U!G$U#KmUP3m6e1SXYo;&kHe zC4G4PL}v8Z@>qrWMp|p)&1_}9?D@?*LbjxMsReS>ew2kdJB>)vNirx3YZ?ywm;Bop zEb#TY=GU5cVXMx}TB%AonBTn&$`p=yw=6J2tnc|l%;Wh)BCe2{iuyv!zQ2?iWYUMi zgA6B0U|e|;a-_N<=pZvF`m8}Lye1BFj(Y1F)Xe`z6ahP z39QHnVk2>T;rH`Ni8()#Y%Hiq{{C{-ButhuWh!(Vo{X`V?V#JuSgv$+Pr@FGd{n*J z@&1DqHe#8lkaf2xU*wx2%YEK_Yo}!haR-pAlP*Op=E=4E&T_{rF~=~Ex`${+@1eQ~5b1FiGV0H#b} z!%D3CTkojQ40PLgoMG#y-F@)5%MQ&qB*ky+_!mVI1j>Fo37GFI^&Gmvv5r|vXgQG{ zMP8!QHqRUmAqaVRg8;|u7c{9)bLuvD@4r5^x}|U6yNvQu%ErP;hY2DS;jDB=)2R2b z7Y_IkTt^e@RM9PRSTmxfQ4XtIwiurd?AR$r&9W%_hI-2GGp$((EyFv$L@dX7pK%Iz za2p5M`S3^-vR*Ny#J6d$9%O*61^#G$H``TAC0mJ+zv5?Nupj|kJgy|PI&0cReVc!y zT)o%W%Qf!>Q+@bs1r#|;s$I4JeDJ(XY|H%A@7=wh@$2xvD^&cm#V+NWAsZMbv zjnTeuZPF(p$@@%?e_22CUeg5&NJ?jHP|sCcRsLDG;@&PXJq|IVd2%U=j&Q>D86ZiN zWRsW&$N!brqy1B>oR^f=NmF2(m?YnuLUAb5tfLl_N%5SgwA+p29_vXyVUW9bd0~SP zyxI`(>I>k&w6gYOkcE6tET`$=WQfp5Obig1wEQ=Da?A99CS)MJA<4w&*~5s$DG=xGU0yVUo@OWs5by0CM_{;dVb~FnEQ-g*I+KeYIkHIZZ5EIX`m6V!b zhk`fT^KkG^;fso)E(`5cSqn`~MnsMJTNRQ*7LxG@s(oUGdVPL85V9^uzeWSS zkxFIWCj3yPB!`d)J!`jba#QwII1I8%p=6T-T=lY!uCVMXZ}BX!^6-y!* zeq2fat(awA0UjOE%5d5{PxL`#J^P+Me^y}uBcE9ZKW;(!J~72x3Fm#FH~w28^;(49{j4(_4*Mq z?E1Bz7`UCpM<8y31SnzIiZJ4&#Xqqh`qx2l2}eM*6ttV5*5)u%pU5B8q9tI2#-6+= zaeM*X8?29Gr>+3%=4f{YO318*wQqm?2VbGM8K4%k;k$J$tk!L<)U!^F zz;NZ4wvW{&BF~%~sluwQ#9{Q#Ib`QzhJ;=JZk?fn1NSNzW*UnGtTb+H$_X(XLa&hA zUhoh`EYxBpz=M85cgDd9#Fy_M5H4%?a%6`-aNw@~y8mwiur5W5$Lv9y^J^V}BMa0> zyV8A1V5|!d-W0x1qhD;hu#+l%un;kV1rneITN~QFGMqQ^Ik}OXpao;PW0)~s%5#d{ zDyOI`k`zRe^JDzK6n9bg9&erejo++B)|KKy@5cuUwwOg$dr1kWALr5`Qb`=JDEb?) zwRDecF@Pnwn7w2q&zsFdZjDD2`z(@}1w#b0P`7_w@?54O0vcK;gU_}JG1??=MdXvd zx>sW;G8a)#F^#*q*D2SyVwW4ccELQ*yMsP<&E4ZetT`wTLa)A3i!Ik3p~1I%rRK%i zMH6q+R(^bYj5w7~>`r6?`l54?CVIbhdY>5ps~#?V$yCOsh8~`;V!3|nslDNY{ddua4>fTilN4a~5_qe| zlUz9F^6JNcf6zlN1Vfm@P#;i++MFwC{o#v>c#HgC#U}QP>C?oaun{m1V>(@UbbEL- zK)v*l?=!|4{nSjRxY_fGjL;*vhMSc-P;d&aeFBQ{i1UUY%fFzLK3v)>7Fcn2X2yRs zdEWDcO)rmXI+)Bp;}0VT=Ft2kFrpg|!5tdc|l$5I{T|^L^#biSv9R|ykz-fhc&BDEvq^8M?EqwUPbPu9%EihQ2iH-hZx4_IW?cb0V#d9 zX0vIx`$7)fmisE4y{nu1ih>7Zgx#U(y6)jf3PEKny+n()UyFGHO(~gh<06SN=#Pov z=pOfDk0sIN&aq!~#Zs~2tl=_3lCt`_??)?&<~<~I*$CTs7gMjcITs;`^(En0#y9n- zlYKx09uK9M$Imnv*YIoc!01OvXjzQX@E<=w5+Y+@ht=s>*gZbvrmmuF7}dv!7? z$TQs&o;H6uGGysr$^OhJ@Rz#TMyXD1SA=TaKIiWWjQqJbvOi+c>#3T}WCC@*)l)Hp zc9d@=h_U+`t~<`da=0BkdO@$=OEVlUBAIVDO<7EAqmOh&-$@XxD`6i!A^-8g00Zcm zi-u~NfEWmKt+3ImGUsajI7t_lh@)Z+#rs)VS~-Wg!Xe?rdNGa+tti?B}ii zNK;5J4_->0i*GGMd~BePKKYSH7+0clY8D^0Ez{^Al4L=+m~OUcf1V95dU=Rwf@6uI zIP zNB)t(CGJKoyL8qc@E1cF;la`29I6XhzX>Ab!#CHU!o$Kx=PL=%c9Rw^*3 zV%bHyngc@#-Pqo6a}vRR7tjhuBq_G9VPqSh$5I$&$0&C6EM@be1~w2VW~Py?#r4)9XNf=^pgp$fJ)XZZ$r{F0k5j=6 zv`*las${$9mlgi@uj$S$K?1v{@xeQ6yXDIf-YHR?lf)eyy%m8KS<3eEi_g(@zH3L! z8jfWv4@2_=*Qol1>Ko&)_j#c(@44~n0%x<8J2Cp6NfKSNmpEWYTYvc9R|5gwDRh(G+B;gaCz~Nsyu$DVC*Zv zElEmM_O-X(fIlvy|MMV7IyRe;3fQ9CNfebFa!Ezz&;DCW{A07Il0+rs zzYfO$tRW>>FwF0k-iYo#Dx*6%2xW<%n-RIPA(T?h^Mc1Bo=);C?qev85h%Wrk#J!{ zE-L4LVnP@yYqeZMr;>Ceb9+;wo*-HobZ{Z}8u-|-POLPtDONR!wMjKBVywG`PSOe! zty_Nq&AQE&!aLs#_Dfq1pTfUEZz=4O2*6Hf1IPCXsaqzA<|iWKF0o1ykGTZOmxwNL z`H>HD_!tB5Mx@6|B<%`0P9uft!~0?!sq?qRO|o-htHr<-~UALL^2}k04u_7S)l*J5S7(^L58-v|2-?KpG zelv@bV6i%J1*mW|qw(<&3`#w1KNB8i+lj9;`oZ&8Qq}}EZgdF@+Iu0AWvBXXnp2p_ zsNPlvDww~q&d0oNc?haAC>*mn;2kHpNPe7}M{Moq>OSf({lV7LaJs!Dcrm_pndPe9 zlx$~~>3qvFfO-LGyM96k!LLQ-&)>`9{;gkI=0@F#uO?^I?(CQPVtYKZ-6i4dyHJ@c z8Va`NQ37}N$@9W=w2Q5`YWCpc48#r*u5z->D&j4ns_c-jLT-zoe_QiY5gtyv@NG~; zqhupcBJI6i0>QipVwlN#DkD4=ZbYG|e3*odK;gI@yzP%Uli4~}q=Z}MG+8qd0Y%Ml z!WYrhA2Yz|lnbApkMq_FKTCK~LFu8b4;@V6H^LOSfC8xDJXp&w>f_q1P$HukIBQqT4uLCEnuCy8U#m-|8octZf#V4(E41LZ1TffB#K6 zlHz&j1EC89ddtF!gavSbGhBgX1qiZjVGD+ZG1ylNc-6)hyU_8EsstM?(>LS~owdN_3cQP~cdNg3zUvNxT67{06fW~p8} zG_9v-wNBA|;j%*V(Wu|t#Gs5z=S@F5W=X-Pv}D0<&lSGMFPSU zFz79n08NXkg=jWLjpr~dDd!T0-wS+~byH@Am#O~;{+wYf%OPAv&}Kslry`}1bZZu! zb=IHvDy)K7=P2DmD85N9r&9vky}dF=Lj@>s2>(fFQkLT;|6(1Ro5O2uSB2UM?ctH( z>!_<7Sm#tA3Pq)=ZyQqB%6cfrw(vlI=|RrYAf1y89OXaFDnTgyyF+=v1KvuYtJYym zq_A50mP-FTcEgL}?TjEP;&+OuHPswmGTzQ;v725oJM@}Xb$C3Vcp@i_&wIO1a(2V+ z&%4w)$d>QOVe*{5EJ?-mFa8Z_WudmWT#+-x7->se+ezY8Fn{nYS^UDHrMOSmIw${+ zFy%;(|Eg((m-b9$e{KzRS!$(?XgxQ%SPZHArO(J=YM}D3W%?L`GPs>q>V9&JY&?DK zaJ*KNbGsL2O&U%-xu3EVzSQ?q%RBE6s(_%_${R%#0KQo`wtOMaAXMjfFZ)|0H7P(- zALuUXtNAtUVxF~r%;tm2O`9+{B3_8SSNh~DrY%0~5*a9*_qsmH<_lHPBq9T*84^Xp>SN=XKp6fr`1L5Rv490c>1|Mm0m^6 z9yk2ds>bs#?TK^-S)10Z$~izrST$eZ0`VflvXj@rV!wE9+`OI&X^e6nbKAr z`H42)IAyBt65aweGeJQg9vI*wu#zx$Mzu`v1Yl&4Vo0PO7OMmg%VzRU;8flQQQQ!4^rRU>z7}(qiu&Bn$q>1R|jf(purcM=Wq#R$O6bck89iH_;4c}=Y z5E6V7Quq7W*HKqH)cL3jR;Zpe0eCRfbGp!7DV&fbCZ32hNBP@88#Xv@1I-J~Jtk4f zy&N`?->{&9M={wy%okeztKw;S9aV^{q1M$i$xhYl*kt;2@xAPF!aTI>Z@*oc~4~Umxj6WTL z7;oJzWW;38u4JZF6>P1Z+k+95e4`S$kJUhRNB<*L2&@<)@FP+Y$`KLT3PNIE)B6#d z{6tr(ZWGTE|NRXI7XUjX1MCy#%6$!@6=X+9OWY9W_Z-lNT#mm*&J3_*1lXd(XKb3y zsRAAox;q4r-UI2`CFqDNc{qXmshTma6pl^syPKrxHvhU^^jP^dsRJ^X#qmTb?6ty| z))AraoQ(qCS#6;R-eNrb7Uilh`bE{sh zmWQH@zW5B7zm}V=G@?`=-<6W#Md*r?yYD#tGWgNe{3t>sUw;v$KcRKGONVpHj?&8^ zQ?{AWAI=FRO(z?wfnj53 znxQV&5TQI9L&HnC$K?To%rj?bV||;4wPe4(*7s`kN-p2%V%Ja~ZX=j%N~j5@6W4#I z#RMs=r-dALr07|*5hL2aRN`P;I{#t3$0l9cC+n$yg-U_Ea)|moSnvm*_1#c8qW+Cy zCwD7Y&IaP-{L&e!d4b~u|xWL5F&XL&t8s+orrdAs}@+K{@<&of1+ARK-tp7=lzsH0ZXDvFr~LJWYTr* zhJgkbv!Qq#V{+Fz@yBK*OCHprXyXS%V{2f+r9UiWmu=7aziKbi{MVL&_GxXD2BcI> z@>R#wh#J=TTy8Fv%VwD~ey5UM)BK#Er_djX8rDn*mjL>xNCByrS%D#oupFf>+yM?T_^ignC~wjPgIvi}bGN|LWlNa`68#b=E;qcyYVmU1|a85TrY$JER*V z1*Jo}1?gCn?(S}BB&EAMq`SL2_O8G8y>l;rG7K{d=X~>dK4&W5^z#Km2ozttoiL2r zGa1SU5`;jS%rRWaRX!+ZzJ(!%n&j+|x;M_P1TuApzF*WO55(;ED8l#Q1-JqlMuYZo zgDrlrpZPK!T0@Ma_UU5h{?oZ20fTvVv%%0XRgPaNg-$He@WT-Sp_v`=*jV=|&~X>c z=bQX@6GSEFW?v;UA9FuP#9-YkO=+cux^!c&k6QHe7Y)?EwqD^Q!m^+?6}h-S1kM}w zGs;frjSkFH2K$LHQAQS~11iL|!G%EqQvDI<{qxJ(^GY3$^rpk*VKkj!Ab`qdQ@A%8 z5v`hibFom}@EWDiIWGJlvU98VJG@mG1lcD6bKuzrzV{EDD7;KYnD{0!Jc~BFWMC|A z6cs)3PquJNt8DJ$G`6xUc4%nSQs%I+RQ#sREN*#jxlup@8`<2x?MQ>!V0p;3Nni6P zPwv^qKZOLz9HRZk&8Z;3=*O8;zG?#;@AhS^`HwIY>wXZB48%n@i*e1)?51_xV@0bL)(hqUmXwH$?aT4*-fXP8vT;xPv=j|oXa0jNQk3{Kt_?}g* z34AgBHyqc1zkkh0k&`c5+0AHn-i=BFYQEI7QARcvF>?Ym!U@bCx#kjcOwQosbJbVjm$ zr8M6~ZqvN|h%qtyY?<D-kKw_-B=I~|ZKZ6sWH3QEVau0{3znPDo%E4^`TyPd@O zn=&CqV0Qi37bF#mI5}~d@paXrlSZ@_zzf+=?|JGX)%$B`iOGE$kE&-@`GOu)wEFb0zwl>7;I@y4 zH@qi^>Xqtbc|iSnXH-!?i-}G};6wU0Ewg6R4~Ndsc9@bg#hV|OhQUbdE2O(^9i4+7bZup>IAMgoy zH&=N!Leg9$?_t6uGJB`)d&AU_g(9UN-vBg9H&!#~PvxG3EuxVfI24!%jG?m9&@Tw< zKf4Pfo=r&n)u))s5j;7^Vs!>%9er49JLllta;0^4tEa9??ph{j)_!%iskY>&sUBqv z+b@yTQIDv+F30zGr~S)DN|IjfJYdm#DR37!t$y;nD}3_+x|O$2C{Wh9(l84cAZrd= z>{!Qx)r{D{WL_mW-m`NyCuSU1!J#-*Wayf~$GGahLFOos-~OiA@q|o%-+#I(ns9yj zlFxLIc_2+sB*JHrj17P}I^n`CNd`aWx0*eW=jdE|b0p?|FS|&tkyx;jzvrbBteGie z|NPE@SqWfUSg6BvUKVre%HQ+O*;tbx`30R$jqhvu_FEZd1%hXe^XzcsE|iZ@G(=dN zY30{J$s6?hCR?iRT_37Al?s3$qcOt@Az8?s|E~s3xxP5$4c1?3sSVoKr)aREW_N&tuvo+r7Kz#2}y1c=!$zGm3_u3y<$bJKwJ?vpMqSdhbpCoC>7{H83e_T4R zC*C*}<*1+Cq$jDZ)ZkwMvJ?s!Su*xNn1(2GOk+^>NRBz#!2NFTgsdz0Z%`vDLygk- zizO|T-k_#ksMx9bbq%UMYhk+FFu?)IpqTtMR6fBPbjTlJ`8wF%ip`k z`pVQa!Ac92)$heAmXrJW3?w^GBHboSh=un7VoyyUMvP`3XE4=DZl7Q6##?~7w3{{}sh zrW#08uo2l`l7fYEangb}Z!P(0i3>1EOrSJ&rp{{Q{8_|sDY~$3!Emg6cM}z((1e|} zodTeWo6QF7)9QODMLu%yucbq8)G55v{-_kmk} z{4IRkxv+o#P$F?aA(|f!1zY5OEXQIuyR>ic;xeB-9VsU`J=UU{OZ)oSz7{FBrifxn z2i{u~5kfot5r3B4uz|s?%+}JH2RB*UP+h#1l|+S7*uOxN)^H{FY3;f^7S&;^GV(fj?f-w)KhnE#Q z9q+H>gu0;Fe9>lrv+*q}Hv%K94f})+(+EX9Kc4vym^kjtQBc@cTmED1;=nIT7-vVB z>iel`c75_HwoNabtuG}~h%oy~7O_9ggMK86Q`gPbEHmLoa?EWN)3v(o$lcb(UH*2j zf5S{P6n7dUU=r7jTDWs{nk?m3m@*{NI73 z&RCeviO}dY7n3|(wE2`Ojp4K^U7)9!wxeSNimMZc0JbX^RU=3u$0LIZbjavb&$ zPw)Gq#;GTNlirkEifHTx{BQVxh?;*~|B4KuG%R*zhE+7^DVXk2jUfONLK9rZyg0!^ z0C|si)NMx2*LQ$7jY>e3N@UV+`8p}PK@2+UxMmxHiC+pLj^Dj}4$@sASI@I{7%zkv zl5{=ASZa&5!LUKU?KTt6Jz=Fu2RkNxG#z~bb?BKK9oQdF7=RZodoXvVwb#I z46z^DzwsisSeYi`fRqekF5{fB`8F}8=7bjFXPiY(19Y*hC~V6hZ*71yvDjL=k|iBW zKZ5H27#zhr{2SzdNHo^aOGmn|(Sw+0>tA_kpC#lJtKMc9Eyf^2!Sx(W8JtmJ+kq#& zFCN^TYIS~ZWYmO;=8T&h`)0!<8!ec)zs{B^tU$|0mK9kf)A>~!qw7POI_}AeZ-bJIjp(0%icJY&t0hL{;Yg+j^09g z>G1WhY)I~!bO*&Yrpsn}vB+1k=hc4}8)|URY8I}TGF&U|wh}ik@n1iTuGPZE23c#H zTG{2?uUbU>zr7QH?<7gZ6}wk`pwfj+n{_{>@UIDJmkBP`gTq`#Y$K%Dm#ZD;An0S0 zg+$p1YKHkyPr2mABn-rm-hMaFa$v2&A@~>!^4gook@}$eD4Mr({{|9>lY`1`HQhZ; zPL8c_l^dvO9Ji5d(XeYoH}{*6XuxTtSd{e2!T#SzvoAg!bK_J%Trs<9Nl1iJU-hX+ zkf$F82u&(@Fq&C7s~F%R)x+2cKEf^Xl_`Vo&;Upk z1f4`<(~-#v@(>oI<+ii0NW^S^Mz)=%ZO6`>yNB-Sd z!^p0{`)LQWgQmzvHyH~k>I!RfHb!)}bRC9ijPmD`2E~VozdW{I6Tw4UFaE7<|2B{4 zeip5>XC1sl1`0arz1z&VWE4paUpQc~7P++FVuJsgo$qmqM|NV;&CLd$Ya&%&zT(lm zZmuA8U0xGFg}15O-!4Jwc?>$ayn=z}%#I@du`?+vawlJ=cA`1)syBpm~RRk-f<2Z(wZCk|EK;-OStrY%)$ibsvr2PYBiR}~*^8^+}$+mp(R-wU+KwtE4&A72dDHY+kPZ_Z-qjQ!*Ln#)WC zG1>G!OFmn4*hj$6f{s{>^W%TpuN81Gzr<&g{66CXa7Q)K?km`3_grbu){>9Io>HS0 zQEY8LoIqObmOGoeLJezkd*eHNhwVmiIXd|W<}G_`k$b9DO`sRA1IxC4`()L#XiGl@ zHIN4GjmQ}?KJ~@RM*7aWd3ARBe>#vlanXF4TA6sUHP7swdjJftI))`h8Q$(3`O1){ zzhBfCFd5vw`zivxy}QC&$o#F*{AlyPy@i1UIm#=hy$L0E?hGbXi1}#)0UwFS(Y&?m z6$z~UoTq`uJXD%bG7Pc_`sK>9fyFAuW;fu-UZKr85IBiurxBAdKrHTR%w<4LDLX;k zY&MH@I{AHF^36y?y+t?5bn`VmS7*rvH-!Oj*E@d(L_Q?RFOu6SlQVNVj-T>h1Y`_{ zv4Y`KonwkE$UemWlDEBxodD@b*XADS=rgfqQaP~x3wx}|Leo4{9bl|*6B!A9zV#n4 z4HYysiHWnqEHTA}H}aJewEe$@rx#J-N}g3)BXdh!wU7SCZ=5eORT2Xd=RXH=SR1~v z`5B0)HU{zg|8?d*YcFymH&?s&W7DVpzbpXw^+xRBhRg2TeQ^go=&T<`jpx3;@yEudb2XcV`e^C|5e01Ttlo7PQg5k|WP}U;y2^Mgni8%zIdk3H_Cms2*H8DGomS?8#l_6k;ezMv)W$%{kWBYwd{?m4vNx zu#lBq@qrgsW;~&A6`cGrKPZBLBBuHkk?cw89ss94ge~-%LGJ%H;=EYFAmRevJzwK7 zyIS~j<%R%UAjh3n3vP`I6-|dmArNL7)2(wm;iwC8$yWTDBH*uj>xsrdNb`p@>@V@) z!hW?7lxiT?zX!Wx54wh7)M=GW61HK7EGoUdgx!~x3I}LQK30b3vz19dbWL#CPF)9Z z=-TGJTnak~CU(h=TU#e%V64{~$=J@Tb#5oZUj8oWcdPr}-sH~m@8Iae-)C+hBR?L; z#gchn^}kbmN9O-Yu8N*$C~D-hGcr;Q{hNZkYgnVn1-obS2fJ5YKLIU-S9V{f4vw>p zG8>ILzJRJ&PtjN_l(3u$E|b@Ip8NkB_4`R}L``~2n>%dZ=KTdW;pGK(xRsj%kOT@x z4Pnt4}Ic~|KIH@JRq88?}#f=TjUZI_9pKyZMY}hP}P#`0XG}n26My&2YZqXd4 z$!D_k-17!zvp#BhHvi0QL&*uc-bMOUC?^7dv72AIE@OKc!_-0Ca_a4{Lw#akNo@x2rnLEs0ue#1zO z(0>9abdXA-Y^l`LB;q^-JMUR((v>;(oS|2P)dK4aY{}Xj`43jniqQI8SjWAqD}rh6 z?`uL(hlXS4qL=@%?#0#-=H>u&72f=wm3p7U@ZfT?S$=k){Q=J zWkgOh#4mIUz~*L#B-hW2bYdOBoIhH5=B4M&XULS^%R?ept!f(z|9q1F&xZs1upJKU z80nMR{vx+ntC%pZXMa96y3v3GJON>i(hTtiMnUfAQ_@7#w-^TLW2$1xDrq&GX(b_7 z-&3HHdCDF{pH{AA68r&>943-LX;|VCaMB#J^d_aJkyak$itK$9JYV%$OQ2!Sn{3J-CxFU>n>z5LI zD!ZN;b{m-wPrxKRH#$DdOrj*HhT`@a71n{3A!Hdm&x6TF@=|QW_^AUJ&Vtp*lBp`B zzfCEO#>BH9QOcE^q)CR7br4Pq>1{2Ew+5S@7 zNxS{ATj>~JQ;^75*R9pIyH2Q!%4771V9@nNN0k?B;SCqjW4RT5X5W-vgdf5z6PVhL`%o%4(btLM zhb1On@+w^_@?SgrjuLNK8^7L*M!FgmD(8E6q?uoAc}a>X#&+J^W_>-s@rF%Ei$*!@cNUI~we_#tg`H4g1-lEZW(7R%M1?`H_CiG^ z@}+`o$7p}0yoAY__y-#94M@Ur3v68#1!y6_`Vdy!nKYLhF~CM3p>$)yutbD1Py~Ak zog_5I+ae+^^+rsedN%rk4I@dFZ=>7=4P8H=PTLv60m5CgfL|o;ih1OiC2$ zRqk{g%>^x=bc21^?{D@4v=oLnae+yOnDiit$aqSzmr*_dpSXD0M8+jM4Ldq{h6wqT z5(QR7xz)zZkF-huA3HdcZd@uw7&ZuD0{j(Y)lKtB@JgWZeUhjYJ~fRq9^&fT;&}h4 zT>{>sm_(pMJL=Z;d(R95%}4igmC}r*@k2QP6VJLMoZY{Dw<8sE^V%7%t-Z343{Q>7 zg>3MFBM&K{j{WwCHa6EKX`?RL5k)nkRp=b@mDzM)NJ?VpMjR$e#DPU^ma1_Y&Jep} zKp`O6pmA*a?s*a6je4^6@qwTdsI2iVLVKT=p?-M`4~57+vP!0vm3 zo5-xcZ&^RBM-uIC0Rko~Y>2QEtu;hk4LD~ji=YZcEV5lKrUCcG%({hoTkPuen*`n! zc;)4&?*7><#hrHhVcs(9Vs93jBD|slzfN|Odu|VWjFJ-7@)lKg&dzy@e%S44J_HG9 z(LLW4Bs<+~hY{WWn`112HvYqJT7H9_-uThK8g~1?FZq~$x6wxRq|z{t^nAH?ae zA>?6LU#TO#|3P$o@co(vcAFy|^LSGxT~~$0XxMpNI&R*oL~q>b#x z*1rMU&|Qo`Qs;zd{n&;-{W!uk2f7oew%H+!5aLo$ZRy@owFbq06^D*@gS{xr}?(O3HLWE^Irc zDcxM|%f8zoL1oCr=8p3+8^jSl(!C+PgGBaBe5g3u(ylZNzv1oK*3wf%N170gqHsSx z!fUhiuhoEenhar`F5hc-(c5qBSa$DDT&X&Wecymh!!M(_4n?MxebtzO%-l4hBY&hz z3iG+U!JK2#b(QmpgIFKewR89RUVg5UCmV<05=VSX2vVW|>>7?+v0UIKhpwd2_Z_^= zPnVE|_g;-kcAfnuDbT?I*>x2~Z!=5vw|WWRr5Mr36wio`;M2mm@Y!9l(0SUgtqbcw zSj(1;nTi}PG0A_-H#k|MJsIR<(rOiih@kUTFOx@CNo|u*_S_9x)Frx>q6ax0zbCEA zgUuCM#36Tx9&qEx&_QfJhC&Gskpm_Usj~QErP-*~Q290JZd+5keQN*&QAJ@;WuYc; z^5*wKGOdZ_vP|rsz}QylLDcu{>qe&Vx_o+sO%6uwoI=(#<>tBY(CnNX^TQI{X>`_# z{jSPx)MMe{RJL)}et6W%C73myC#uZNw3j&kT4VJO`?wx%AMpdfa~lN=Du1z$1_yiY z!W5_GLN|l|hoq3E>;%`e;g;_ECmoR%gFdgrR%%m&$~>vo*sBvaqpvDkZACD<;^sh` z`*RXp$C*zmd=wOHZdI#O4ssCAO!?F?zIO4#pG{V4WN;PwS&y0#KWa9+ozIS@ozIq* z>~05MC7fK5)SVEE@&?8x8)^8#7q1-kj_2g3{gsCXz1d*l2@E%28`7HT3rK6WLX}Z<(NcQ53o8QvdqecTteGJhh}iD1uaW zrI>2>MGP^2Ze#bipl5nJi?tlhm%gZ=@@1ntL;i6q8VoxRnQTJLj&-zTmM&NDd!At| z+q|+ccekSXGX{famlI-np&gGaqTTeM7fDFHfsOC}3tjG0*)zvXv-Pzsy165{hYv%j z>SV2y)^07M1;_4DG_0LWb5{v1vbB~ znOxeET!z$YV87mMK-F3oSXIHH4zivJvYrJq-F=wZKMQNlHr#dm2|7hiw_?(41$<$E zpk~f}qNG_Bs+&{`KV~-q5D@*J~DO>BM1o)`h#f zD-y$1M}ihGd%=5)yd3ye9kjQbSfm?9`R{Rj=5Z|LJ!-g}t#;o#3dYCYvI?l-mCGrZ@~A(1PKW8-bRxd8p|k0zZ=@{ZMQE z5~`1+^Nj;sTDi&%tyr557LaO|S{%Al&ilM9XQA@E-qGY?lbs?ROQK?9D%3 z!@GaQJl3)DEkm#<&SNG=N2 zmzzhzNH~@p$#$`IIFcpnLu}r%<;kz%Dc(=Z33+#jjID|AG}`&B>$#q94U^Z--c=dFT#F!GBDKqmuvnJnDg;DGyKla#9kFeENUmAv(#@Dzpb_tg%YR=xD%8qA-Anl4Q0 z@I$inOgb4Y0ao+>uhK`7>UcDIF-+BZ8OGtIdhN-EZOnt%aBRQU9Zrzd8v0z7OOl`O z^bLJ^$eq9>>>mg=PDP@UA9Fw(X^X`A-XTDkVM4Y$QxV1MnYZirCtmB@O1M^(Hv!O- z&J;&>=))Q78T(n~$z``uK_Or9ZTIA1WBV|oj17(p8x8Fgt>hxRUEHDX4rUsl6XH_y z@Um!FyC6+a7oi4mVm}+gxI)zuhbaGGcvlY!UqoPf<1h2DnxeB&=DZaU<`xtjc7|8v zJ{R2Lt1h)$a7n6CIP>B1#e=BSlpZZqVNc#2}bYx-NTc5K->>@bJ!E)2@^Z1>7mwN{ew1# zegAi>ZX?)-Uu5kfqE#Z>3VI(|1y6pa`Z<&98gD}n?Tq`L*^AN>X>ON^IU-Y23$?V2 zJF@-pnV0T1e3h9j+;bZC{E{&6a#W-t)72RP24t+`smrYi7!Y?@2xQ1Yg$jrUp#dWm z`Q3$%$v}NJC5thgd8nAm<|75)2Nr2Dme^9+60WhgH~6j;jfC>tE79B45ci)c^M?Nr z9Jg)@&qVAkU3kw#L=4V+*b7&kXg$`O{ovP=9Bj1|8Kd>kXqNSpJQ>xv+PnP4*_ho_ zFmnx8Uo;9}_@h1e4TGY7zEqk=#tm#E|cKFVhXFJHtUS;+~v&HK~PJBv$ALG8QS7tgG}&R7Q?=Ccy~G#x|9w3jd>55 z6q@@^1r_+o%qUpCYu+>4B>dG0%~ldW zGU!q~=_e)Yt`x&^*OZCh9DV6YuaWd=_lwXm9q8|CiH8@0>`m(DHOwNK-aDTX*XL7S z7m?~E#qu?M!t508>tDNZ`N?nU7}F}>RKZWcVl;?({IL-{L70~yGSxE<&1&VfjAiD> z=FB=ft(MWmi?+z_1m)9rXW~cBSmsya9&|&@{|5MCi9lM$17rqNWGVPDxeDKAt9<#> zbGY&3m9@yJJU&uf26i0>yGEH~?V8#1s>8lMsjG8-x)(X*LCJ8;IOV0fnFoGB)ccuj zh)7zWJUd4U1gZbI;23y2q-%^uYTao{oAF6)z*Tzi`zCr1yVIU`y3>mrX|XA`sfwxS zw2hNT^&aQ6-0k1!l+LDDicXUh1;+-Y@7$v!g>SkX8+!lv4{M`ZG?k?M`S89+@4fbv zP0dlpT85weU&P0ot9;Rr}H&`4yzAnw%QE`NHwhMagPH>qJ z$q-Qoo)Q%0igW^3EGw}z0eXx3`A4J^JBk}CBI+xDsNq3-wu!&<>TX_sn2wHr)b52D zTTIEd&suG!u?fY4LJ3`%ZMeb)Y=mwL%NICGbcWzJzv5mv0@UC%&do3l_6T=S#H#ftVK zHNde4s=eb2buIcT`iRHp`&cTTsR?rz)RRU zN*00IT%116I3_m9&rjHm`pOtHd&_Upf2|96C#@E@%FuG5z5u2EsE9vC91Ds#IWu96 zZ?!O++KUxyiDd5`jNeX`6*QI^i{h9CK-mS3{k!#xh*91;Vw~yRZ$5jX?zwQ6o@Zm` zL?_Lm8f=~z1dMUNOZ#vUc>6}o4^d6=KF8%zmEgj#BT>T*?y>7uFtATZ0%$1s=haCW zm}8SvG=ndK(yr{*O_^>PXb}{rzZl9?-dQ*62p;tCUa}?WgicdI%xVQ<(7DhX@t7JlfK8|=ZA^W~E7u)}-p{5ILGr%gtwl~kdr=x!2{t)F$Ln0(31 zKDS;#^BodE>yhs@7yzf>;=}r&Ac+58zlLt|vRyq}uP9*C9L3J(PB|qXboY+&{!dM5 zm;)e>kKLR?TQ=C|!#B_2C>*f2kr}+l%k)MtyYmmX+u;YgV*UbL#eF8@k7pq!s8ir;+0BYdQNpSg;1NE zJL`|dj>;kHZD$fA3`$5;1F`B~)=AVZovSy$G$fQe;b zRh*`ipuB&4s=;y1+3=(OxZbpmFMW@&so?w7Eq4=m4rgm)?0w-%P=`->o=iE$1l~`r zDQind>{~D3v2?n%dWRWi>h;j!ouwE+L++mX5>w-ERA-#`zG{SD-q>QRnckX6TnE!x zWtTR^L?J@6QhP`XHGAJYUlMx`@TWOmnnM!fc>y>4TJ z=f=?&(80V zTX3}O(w$~vbrBAsfGMaa3^`uyT|v?Kll8SSsUj^fwzS|J9B{)9*5IxpmgWgL1M#J%H4Lc!Vr*JhzKZQ0m44I3Ihr#4i zaS5?3XKoJjIXs2FQP37!M#yy5b^o}WyaX~i7H#9HK_+FIZG-A%qfN)9oNFwZ-|v>h zn;aCPf>IwIbkKT~Tl~*j`bnyD*`0hYKe4WT@36aF^Acw~@Wju9+?EZ|AtPUB1!?R= zMBI#vGA?BO9USHT1$m%Ddtb%NZs}SrhQN#FBnG~u*(<$!zVk53r|wTl?PRiRt)5sO zPMh+5V%ia2n(%zlM5R?vm=1sGgA9ou($XgEuv$)_{_ zKKKqyJgl0Euak>>FN%;8MpxHBPa~OoY8SR6m-}1&w!-mu-|@E*8-br-{iu|0A7|hY z-Xe4k37fU#VW{w_w#qTSA}yLP(~T~e4z)XuGuOgBCMS!O)orle!vJu>Zs8|%V1 zG(%*;4ez>ZZFFSw{DGit6F1{hkel4L&4*>tqddX|^HL$q$kUxZ4Wq_Jr#Vde^cU^N zwJg~aycO9*TOzF7aV{dHT+b)iSCQ7#2nej<}rnkp`bY*RBZnZ87t^9^`kwv`u}!VYvA?Sj!RbZE?>``+YZv^Hp^4ENfS?hE6{` zbi8(kmEbf3fJkx(P6!_AQKlavl4J+?D^o1Vq-Sn61PozOi_CL>f{uJA3 z%g4NrHi*KwkMZ>Xj1eljc2La~!DaR=$Gx$(W-W90*OvpR*Z32EjLDSRR6v-w_p8e8 zWokfQr>sga58jm1Edc3jF~X0H>s8YFap^0Ldc-xHH7q8m7_67$T41<+XDRXnFdbNWGDV~*|4iwqffq;YC;nCA@? z-#(A*ISuENOGfzUB#q^MMN3tDn!(y90^g3BS^Ka&3f>z(5xRwcQ1EP8``CC&}x(M!IS#7Ptq?3uww zj}e68WW?&A*$34S%>Wco8pscJ>7o+%!=G9`8lIRu?vt*s_NEuhP-8>_;rRT#MGDgH z)b85*6-ssBKgUMiNLTl@WZ*0G9J1HGMGQ(DNV#$;sfbxn$ zj6y2#8HFjz%cRNsm%)-0?Z?zy%wJWNc_PyeJVT3Pq-b+~1Pns{mw_6O8Ue7>?9kAv zF;8E-=!;i7StsS0kqhYPqb0C4|MiJDLILRfhxD~RlH-gKH?1*^_~N7a13F=xG#{V^ zsGNpdSWbu`rYlz2@ZEcgrI-4(1w)?mQ^ld~4c^#u?%FR0so@gJ)7e>zk6J)6KHXLb zV%pu8HD2mjMu^SjGzCNmQ=_mH(TK#k@e^0`Nj}&?2C)APm8q;%IKHO#)9)#e!F}P0 zNi-snt@=HGOQ@<$uCpr}aaB3_HGx#wopT5O`%(0q&ZqS>E?sTX#1KB$n}UbdgOyiJ zqa+IQ6D%Zj74m~O8UcoI@K;_)q@ruwEEF|l6KGYqk1-_gT?jvaT7d;~SfxU$G_}au z6pm#ee$Z}a@f0d!vVz=C4=SV!$6E1hsc5>v2gh%HJ4a*$q$f-xCoNq!!iiqi9^P7p zIJWQL&Hf_sgTNJr=Yz61-8o_gyHCsRr+bsVuo3a#bqx;RT=46sw|z6C==Px- zH=p<@(j(YKGv0&}W}aGURc*4pQ6kh3_-T4F;2fKD;C}{|mN4sX`#sue#@09X#x#w- zk$>wE8!dW_7>`*T*z=%r3Bf2u>@;iU%PZ=reCv=0f*RSNCE~)3?Gk)_Y!v}(r2q0E z#Rg5f0@_CEIib1o8Dj2a6w9{)Qhe6cd~2`eRd$VCEW(P{3N4{=3eUwB8v6wbea&@< zgyE_HMIvz6bS~OXJ!r(~qTz-_S(^ed5T_(;}@&w}+E5`p<2)lz+Un*k3cbbtI&5t0)y@^{gGQ z{*ZcXy%Mf+QiayhI8!-k)0p2qX?&UC6HvuGupKUM;q*JDz}1XRL+DTQq8JYw%<_y7 z#>upARK?3xS`Ou3baNV$yPN|j+y5ei@o^4|-6g^Z8SwO<6lv0L&G;(hzhgmfJI%j?B_I9r0UGKv8rH{ zsr6u8vK!T&Z+Nj13tzr*_4ua=L{*EX8Y5sP22FiSw3aKHL~>veOhLdX_9C^eWyTIH z{4}ozmW9&*1S4&>o!I+25ax0*T$IQGF;ym73Q_RjdEozx18D4-^%pDLCv1IW@mvgT)C^AS+mXa)h%Ix2F#a=P4b!QGOwx< zz$ZEH>M~E?osyyEo(bI%zutHfW<(s?w!CEBI zgs+k#!E!KS!TM567fz3thSeQI*iw2EXn@AK2tZ@yUI{T2#y?Uc)e+gq7ESW^aRBm3 zw!S?xKn8=I9KMB&QKO{9*o>Y|3RP2|i%@Z)fs-R^A-zNROt{ z&Tx9h-h0sd-z(pr!ZY#v{pz8cI;GF*KOifD%-tQp3sKXTr;h+rIgpG}7Pdo4lO5N; zx=fmPe40V`T}R5KKdNi?%{;ej#ZCOLIEo~(@rwh|UCSAb*TK%|l-d!6AsY{`kcDzf z{b|qgb&Toqe#-#wv#1m!wY3^6%)2)m<-VQG?IWh(Yqqd4`jCA0ztH`KL7~bASh#KN ze#r-~-`B5?s2{%}zmnTAp$(-0;v`}#es=xH;swtIqgAG;tUKoK#|cOOHCift>Yv@(g%_w zML4@xc(jiW;dRf>^DYe%?WUms0_(-K!|&ZF)=>*ScoitmB&Ym*^GvsTpbp^$kYRN= zBj7iJXwB05rfwtlKt$zV>70)wtK2P3$O7Sxa+AwHHJfoEXNc$)7h=1hdoacvWa*VL z--GS_6n>P`fQiC~imCTOq6_s`wDl(;jAYH)aUnSCF)z&bgny|Q#}kQK>WMY8itjtr zGi-uFl5r9Zo;O8aN86EJ(MGE2x=UZH44yZxFB^L8PbyGnPI2(ehV?xK&6`XUibI=U zFry)~{S0G01!5YY#Q+9-_Rx9)eohJQyB^`9rX>R?ofcg2EhV=LBz_~l^G#o@$<+dC z%dh_GJ}v@Pv&YuA(p?9E~MSW7aPt z{7+Yaj>Z>jJn%Vjr&r^L#Wv%+dHle*&7?$Px$3Qp2zgTDk>{l`8Yu1@a#NVwJeZJi4F@vqEI5>(<6u37&b#7mwYVl^jM?C6 z1^|vH%TQ&N-_y3fab?S^j-K)y2cJHy9fo4QyT5>^Q9Re+~+~8T>zhJ*u`_y)Eu^;3X0GBI1}L>xuI@|Y0ugw|&iqR_Bj zR=V5m`dFb?{v{nTd25W;$lj2jB3cM=BYPZf3&p`eY{H*!%4S=3+S>mAPQV=L%f9_OXk3E zJwti$AAdY*eh()Y^&1YbdUl~qF;)^i5@~fqhb4Z5Xa!Jvk3Cv<=5Dia;_KVBVBk_; zV%d(bwSW|423@U$9VnD3=BKS94lSoa0(w_nU(BMhEz#&;jiJAVgti~M~eGY%=B z`nyi2GF7h%I?k&trwH&oeZh40Mq6zux8_ztea%i|dtEOO#ROKZgFp## z0+}HTt7s$FzPsit^BM3U3OF{~reg~c2%jzGAD}-J?ag>hn1!SlE z@xN1$O2L6s(u*erb7bbdW)tY%g&obA6Pr8$oCNx~ZnB`~{hJ1_08Sv4fKwRb=g47W z72=wRb<(>2Fj2Oww*L2(=~yfHcIt()#BT28-{`)$=u+rONp>C9tajft{3*}q&2ZAy zyl0_U?A>+LM)_M1K?4WT<({ zy~Q;olGWh_*J^*KFX*#jojboEt)+4}$fQ9}rP{n;MdZISFrV{l*R4`OaBFtW%N1at z`mofGEi3uxu=$W_pa(sP)jP`lhPFU`$*69yd4B5a#rvi=O$Ta;m!11*7-IdH{o_po z>c>p1!Xs84pFGaJvDYI78ir4PM;k!F3S!*$N0Ha>`JO?8*x6xs(3hXxn*=#}P3ilr zW>9(zZEeHJwV(KVNlBKZzO)l7a=#{$vsDt%4ch)d(5a5Mia!Z>MX23oz0RG5CHX$q z^LaAp1~xhvx#N%NusY6LElM(A>fqNfL3AWf8|r4{t>%*(!I3&B8UcEK5Q&hQSAJ@y ze2XZLd>tz{iDlP_1aA%h(^Newq>#_3QoJ z6T+$x#T=8bRlmLp1Hh$Btf5W-dy+i=c_5w62^z08&pV{>$%vPNT*C5edN}v#7@~>o z5b2a0fA+zzpBBqV;FhtShiW7W+zL3M%`fXz;qTx){|^AiKsdkNLdqoo&MtK@^Y7~Q zzuiu2{4qK0V1laYX;uJOjM#VD%*XK7a{E_SOh!{i(HYAzCaF^x%BYtNMziGt0A?Mv zc%zT-YGE`YA1eVkZU7ev!u`SE)}ulS~hrM+N#C%*thnai^6PIz<($I5^@=L-6X^p~PQg7#6#@?GXNR!@XB#X>Wr@bQhMe6hWw$4v(=2fD*@h zR0vEa<8bskpWr^@>Vg9Y+kW+KI5i6V z(gxez=&^LF3hRb|1j5~tT?T-ykewC^0QJIId#&5RnOMJnSV8_4uvZFzTYyrYs{Q7x z-?R@p{Z`SJEewo00?}M+!ie(24^~(100H2qIb4QgXvgWfZDvp0Z&zZeQ!e-4PCfxD z;&}(CZpAG&y91-ll1!%|%=uwD-O8_z1%SuS_-lUZEZYsorg1kvArza&)8)@Q2a6Ye zI6v>kF*1vkCF2Ow*v7oS3sc#_3P67VJyOj~UEA;WQeMxV|2sXF1h@kLag{J*N=;@p z6acP8g%M>;Ln_5r0NM=_OhRj&8nZR5L;7p$IlX1@=v7YuL(~Su4 zwOzso1&=gq676r>6RAqzi}%dKVt#(|$1{n}z>GmxgoX6Qn9Oz4a)4mexCRMIg2Ru$ zwz}gHAz;^~hlJux#j&U{p@THh=;#l__7!)=oYQ}X#A962oie#s=@(H?o0Qlp58C89 zU|qwh_;YK(Oy3r_=?pS+udl7|=Eb@?bDxBL1bcv^V6T4_m)~e@BOR9Z`i%FvC8vGz zoj$m}qNO+4-vq85lb`^v(zV@#&O(2UU9GZ+L4&2k`dQ(gE=#g{`}=JDw-*->$>pFo`cJ?A zPjH#337#_aR^wK87jm_4if`buUw0W|dedH`J)Y{`b#|mi39s8tkZYmK?=B!7tY$#d zK~b8S22E1(9>-u=LHog);*)-;9Oz^?;V(nDlMLf{I?POwWn+D*PN*_tn$~H4(o%tB z;+a4E!!*h`I-~Czf4xc95$tvF*W#Rx{gQ*gWF4TDz&dd>iouTzu)%LNCd_2r! zM)5c2{ibp8bD-Lzg~k3xv4ZwGv#(%pxfNJF4uu=ny+6mtD0I*)`zkb8<*HlyIpePn zfKawVhl3U);JZXidbtkzmhnhMYwQH$yLoY)?`Z*+EDun``aP5cHTQwMWlx=17L_Lb_c zk3j%ZtcG7Te;3ujk=B|j0^J#)m=62McRB@>3<~}y8!83BElscRfBDsA^R(4I;qoaW z$pPLa8~E9Jj*6Odn3{y1@0N$FQy+x@bnk8OfEWos$~zq>q-!>1*0M7liKq8eVf!Hv zgB{gSz?b1Pm4WWF-rH@ouHcxiNo7jYY_RK4>2dFq>UELr60t()57j-|`CvAi%qp?2 zl=Y_jc#&?ntsgT#I{%XaRS}@Me+cI=hAI#ITRrd=nN0>O%TcHD**}0g5FE0lRXD41 zoh!=&>L>uljA_m?x*Zj#n8l+X0b7-4_#X=9BNz}YYAoR!wxM1_;<#3Rr0 zf?={A60g`)Bb1Ga>0XVH__Xg4PYtm&$9_i)Px@R`5Hwh{y8ZMKkSh8c#V|EUUxmed zi`DYj?f%^NevIZqllFM7Ow*H}=64PfGenb^y@v)Ku~UEYrcT7+hX} z2f&ME0xldF(#}^dq2P6%mzH$kw!6A3uv@B7{?Y-$|t;Kn&FMDTafN5=VNl!eZ=T$ocEMNp^ScMND@V75| z2l#-(uDXD7|# z3!HIjF2#Q+0%YLy#$AQ&gg^{#j&=msk;*Io<$PScaeFBoO?fU%|6!PZ|NS7gor>9i z9+7#{`IM<$5(IR>*5kiSqyoKyzccM${>@j#Uw!%4#bJ!e)C!5HUMBb-zH#qmIx!P_jc|Jop1s5w*We8% z?c!f)B@+)V@gwbW?YKLY=w;qv>WJ?iv!lc>j@glb{Rqcc?}$U<+X2odL&@}j9gk{K zM(9GROtTMO0*g!N#sBj!gGfDA;8zLz;&K$q0X_Y5YjjwbI{4hD41z?ndSPY$U?1h{ z$x-od{?D%?Js>Y#R_OovzxwM)JvPMb9<*EE0E={ZF&{Vnz>oL&?Fb-`V^$qSLvN3< zM-4RL7~1cS0@q)CSq$760OYwmJ1?$q7WiNP@BdQVkX{16YjC11uwJB{0Xit3oX|J{ zFaGAIt)M~DogCza9uOV!XRGuRK|mUSH~QkUJ&cIK15sgs0kKNKpW$S7AAWWpClQWm zT8CU?s^@Mu_ED{es;~wKu=xUS`4#a5G2`E$cpcNtPyoz+->A=ichI@{&&A*NFN^<% zALS<;Dh0qTjo%*h>8d@XP-pvVc$cjGl3sj-&_Um>9gxQY08d;M*}esOKXR%Z>FA6B zJrB)097;G{H>JRj-;Okc3DcCAyB~RPb$M$b;1DPC?V5gP`;!77DFI~CU4!a%2Smri zJcz!da9ig%DK6Txkr3-hgKqn$5bnCt{Uf%PV#cS-Y?J1uJ%sRfw;L7!nt(SL`65^iG&*4FD6`V_U&l%*?SC6=rv+1p7Qr@fpP$?96K*k*(T#sydQ!E9M6$>vkZ8B$t_h@IC@=yS9F6a$Rn@pOD z0)x%Do)%5-tA0N*1fsoCi#cYvtfNie>9z3APTM;7>QbXt1NUul@9O2TOG%%!W=d;) zlfupIzhd1yOoPfyzh{}EFZvMqhYV59#r_5ya34}!Kx;1mO7{34{zIsX!bw;FSV$fY zh3ls*u<#(3NrD$0QB(hY1o6Y;qvG)NBxd1bSr&BI)LD5$&`jDoFPn;!_x)LDlXxFU zqe3?PMwH*s4~$A;mxU#NEx^Y1g%(fwzEVcK2AF99FhcmpP9SD{TX<-7NrLQQ$E9ATTwd! z_Q<)~N2&l&;Gdne2cA>Td)lE=0Nm34!|7p-!G&FfrpMFT>^YtDm)wR!iOh&r-Srl* z-TPE9VG>KCOm{GBV5IwRyN{|2ljohWG+%}7hkyei{7=lt>1FyF(|^rToq2K@z!hfS zM$bJ&kX`dn#!-{cIuc-30%!?vfXV&w$uTCU-MFV+diz%y@X44q@g3$NOQJCw1?}m< zC^&oQaT_k;#&vCYzaBH)(8oN&Sq51R9eJ@ES>fE+#D)I^`eczof3Q_eyc9s8KO6zd?31r(&7GjRaW{!3 znUi)kPlG<>vW}is9kPD}6-(g&(wpOfIKA|bcr2c3Ht+jYrNavU6Wd+|0E&T$h!;x+5HGks zA~rbg(D+?=8*dfPRoGn!NCV^zbOf3x0w_UN3!qqT25lHXo1_Q!w+6>mST_WuB23{H zGp)8=%56BK`@O2oYR9wIUSl*UXIRp;C;3|`0G?#CKW?S%nKI&8xVNh&@2H%Ih4OeF zEvgSxVQV1}=KBnQGEB~f85Qu~jfpw(o(B!$7BM;fOEdp%7)ztGPtE`-5r(iYQpWA7 z3?44zZ!MM~q*Tgfx%Ee2c{NFGG2>L!*{>UC07LRJ@>apV=KjO_2v88nL50FY2U|EC zr8x^p`D}6p63wx7Zs_$Kmn~b=${$};R?(^ghJZzrwloGE@;uSoj8)iv2t-@*lW;$I z+;)tK813#Dl&(i@!IQkKNR+v6SbRp+Rdxud5vQ*p`s;YL(!OZexZ*WA_UqAvCI351}8*C-HNQ zzsKff((`?$RoGq#IFQL;$^e<>SuvR*x8h2Sa!Sny1Ny+DUoe}@?lhZt{g;U&hJ!y% zN|Btsz8D{~H7tk0RED^TB|EaV|hlMXs{&Zl#lN=X!D(2NomP&*j_06+jqL_t)`Dr^@7 zWHgOe1I^O)F3nn5xF4C%KcA0^zg&!qSD2paY)WR?SrOOL%CJ6Eh?#$wx0BDG701N= zlf&3s+L@fso_|psV*c;G({B9~xU1>EQp7_dWce*;$6WCmRuc3F_ox2s`A>=wi+S}| zFdX2K8pnX+7k?>Uo}U*luKLA{X+Qgw&hL&`5jUQ)uQnafK4rf<;Jm5F4-uRJRr#aS zzt^<=CCfgv2+HWj~;^d3d;+VKcUMqk@7*4m|Q}}nisX+#GabSO_a{A5KbztFx zo;yTgF&I3HS+ntQRNP!%6)zC}FUJ?f)fGzt*sAJqDAQ5L13F6u=vyOZ*qjYD_NZ2@ z8u-K=EVromU|Z{T{7s(-9@zfj{1G9i7Az*up-};lG=LAl<4ch>Z{O90Z4*}DK0dCtDf-h@h zCgf$p#lO3TxR;NGRSl`fE@KgoU*lk}55ixe@v`4xixbMvHdDU1eVc#cZEk*Q<;-Fo z^1A+4?0ygmSIVaU6}Q=R+fDr!u$LrLKd0e#BcM+tCE{I>B=v_In>N}x6l06{s% zJ1w(RLM4AL`zQj$jKgkPs#S41580xh_FIeisj{a9K~f1Qe>-G5Nco^Ceen_+#i?ZZ zTJ}3>QWYu)d};_(3V=@y;p+YGK)@ke*

PdrZ+BVhNC(a_Ny9Cn-=uc=BN0@`DdN z&gaio=^7y5Kn5Qi4*s0&)zP080B(EZ9@K6{w;_;Uk1>gkOam`xwRmX~yp%3SWN1VrvDEltMTkvy7!cGC&{+1&}wD^^3`K5xGdoVNEh;UdcK)A+)VlZLeSB33|fMbSCx0C)fZJ-l}Y{8Y3 z0&SRX6-y!XgI@||n&>YTGJcjRh6I7f7bv#|2>X2&`)bjqLPGpb7B1;#X^D!`b<&hP zgP&8ssepe5EpWzBivX>zQomCmEz4x(E!HD+0xhweU02Xo@!IM@f0Od;a6t^n zLm}X+RKlusap0l&@>okpCxk}0I_$=R&(X|XAn2=h|j2D*4>-?`2qFH%^bR=S# zm%eCz-72Gw0KHd5foTNsd91O<-kakx5Lm=laoBz)PRHg`&^pj7 zjx&BQ^NnSniT+lWM`7QX{cl)K6&99j9v1WSlb<>xjZI2`@b0f47U-}_fd&G#+oC>} z(EonTGY$5Jrw+2k*Zk1ynxBzg)w;2C~Juy20bjgXdunM~k0Y?|D7-pQ+Lc0_%l9r2t6qto`Pz-?R@p{Z`S}P^&n`r0fiuYDwYatMWRylHo|05V21y32CEbN;)_{o?q zjCYS$XK@nz3nmkRUvpIRGNc>b_unhGGk&*~TQJLGhQJN%Wkd}2Lz(~BF}G~9bRmA% z_G7s+k88UdWqh*($SnM4Cy?vcmz;03=X& zbE$_GFgRn6RNRc^A0mnEAB1U@8m7)9j%!s=hnbi#0~`uCI-KX75jKdQS^1RCtn%IT zti?}#DVM`28(6+RKA84 zgXK4R?0K&*6x?wzkAutz&FJ=jmxht zo2RYX^LDX!I_*slU{H@S^g5ugK1WPBV4a{yQ-viVAi;AmbR?9}Y4Y#d%sousbcW-k zrRy`@Czr8vV+Z>stN)}PlZjENG-&=94zYALg{k)$Cb62KJNu+lKm|W;f7}3G%E0MO z-(k%Dul+bw!~_+4R*j?1z<_Iz%`$1djw-v_C5M%cnnj0}zz0qyGn z4h?Y{YvW3I_;%cOYQ9s*JX6S#bny za2#eSEWGf(zOdMm$4M|PMcl5_C2`S(H=%>>Kr=uak>ix>YR?`-1y!i4(>jWp-^=|i95b7`KoX;V_$I#_2i)FureK@t> z?g0Kjd=&s3|3TDrw&sA=c7SBylNmq48|pD0nmZ1lbj=tL9Z=(`hc9mR#nm9-I3?4m zuxUP&-MI#k7W`Nc4^LO55FU%(i>-i(St;gDO*F8 zS1EG!>?eT0Vw=YJQ;t2)_M2gmblp-RiPwJUl56#ABL9j(yqCs_>vcGN<-ie~Ru5!5 zLkSmK!!y$t<>8!O;IVX%A9X>91u;zE`OU!uX;66djLu=P}M zR_WSB_}@EZ-9OItI?!XI=e@p(Av=+6_j*_^Kxd%6gZ{9%xw;7&4A&F%)HWZ36z7bC zH_b$Y^$P(7VrLS(!qX;GD3FQ*NwKlDkt5G0w!-F@$(FWF+YCf1fb4TxiR_~{Iz;$C zMIoub5OE?DF>6fcj6gKO~SXW_DJ>s@>2x+SW2MdL6e6wtrFpOp1$80we{1 z_h~s23V`_fYCnn#Diq!3@Nwo2#0irOm)mlw0Ep!P-0KYECbsixP^g}^Q8OMZ zL&|__b@tg_Ba}`m0L07JSL~88SPu?J(iE=6c@+xwsj*5ga8Sp9N_p-NFYaN0!}VFQ z&qGnp6_HSSsx?FGxWcTz<+vK8tyopMUaN4UzV^3VPSeK@(l3>kU7VWk;rOq+*JW0{ z9Zpz1@AXX#v2={`btrFhocsXXT;FhfofH7HakMKy*4+OLC9Lf(zoR2$al?ra$4YV4 zddBJyE|es>qD!0D@b@hf?d)DOTDv;)Z?NC*wcP=tQJkQpwqNQfQf1_fKC6S|P3W8V zt?I_!ufa`$JU6R-3A3t4(J}BnqZuw~nm6^v9Y>3`(572t%^}xi?!8iNK~9A~*PN;Z$V&j6adKIJ z%Bzj!z}_o2rG#04fq-d#zudxEo%Xp)Fh*(SQGU~gP#7N}1Wik(B=tU9$TzMYQd=hl zgr5HuieiQXoXC5TXH!SZW3d83R{XRyM_S_&fD>l@v(=IN!mE4?pCizk`ErN#RCmQv zSC6k70@17@1HrD`Lh^D42$ulBZ^u|9KDs1LoLC1M*xkf?m$djo-|0DV|C%)hnWQKL zdd;P8USG$@eU0>2x;sDtU_ZZc`_*y@fPHXr*A?$yM}PA*tAw-!06`kl`5qqsHUHO{ z-dknnI$CCx68^xm(qYC5P5v7Idx{p*-3nS*Foe}YG&Ufn_<0F{H4!D9_|W{nKXQ9W z@B)0mI3Vx<7O4t51_9gLB>*{?OH1+ChL|0(S`Z|a1R{%8I^G(}p!`6Ut_A_+xLWOi zmvo@F&u9IWL`m-k`>s1N>uwguH*P69yhknQUkP!Zl9Thiaa9j=R zAMa@i+&q$z0Olz36UqRnTrgg7Z9NRwSG4; zhJ2ik?*CgNg5|QkjS5{6DM&abgK3sA{uIG_22)K&rK2$vkq{x8JJoO1I(~|RSMU5J z5U{BOCuo0{yfE_~sl;bV=Sg zCLRJ)DnwWf;_p3;5&(z-?OE5priB)%+3sxe^nATNH3dL;`~T@dr#G4o4jPkYeKMIG z7xmc@?7#_lpk^r3=l3wDI$Q<<5;;x&HFry<|2X>LI6QJg=mfU+MCu3TnPt9KJ@Z%) zaF~LR2{T7$)XeAE-sI_+bKK*7hMcCO2mBS+{J3~#;xRul6U<aYG2uV5r=oaizfX3g9~8YvOeZve*-cvazfv7O>BCf(#tB6XFbM%#;V3$`fLH<;pR~N?%ggdPPQ+M2 z+2?s1*$7R_VG)nSGHs9@AEk*=O9-^|s_uGlkU?_p`6}!l1Z-E)!*NMV0LLW>zzDw~ z9hLxS4J*FrFfamn{&Tj>RY?VbQpz-)WtslS{ zv(|scx{#O~S{|a+lXD0-5ITT4plR}-JpU&j06P9t+vz|b113*7mR7^v9DG}sad2{m zBb-cepkt#D_Ba;|t^?S@2$q@MhpK2EM-!)2e$|sidwpJ}qV`=uIvBc#D%_IavCmlE znGx7phr~3O^(yOU9cQ$IFgNOOPeEabX|U$Ot{L`S3PhSDFDgmyQ?zK_vXK>dG(pZv z2A6^uv-N?(Nk)hJeB*pnbMkwf9ipvkr={1^>OW`bwkDr7)9H1UlpDf1fsw2 zBhwA~fuE`0sG#n!G^C5qi0CVTW#}3v{@1ZoNjy(|0n=Xko8@TnZFj#J;e61UdCh(2 z^pQLi=*Js}2NwH~eJc8!%MB>MGt4RgRK`X<7H|IXw?FtDZxygAVN-nYLfrF%wo+JP z*?#=#7%&~DG31?b1boU(U|TybYMJGj87;05NV`kN@_nAE!fryqu}WVwJrt0v#ADpm z{9hh9?NXzXTt%fJS%_3vk)Toit^)!LKTjv<@&9?ddy0F%FVV_=O~+E;uRQ*LA`}Q_ z3*x4?@e|fu9^imWM_e-Hhve0IO8iJ7cz>DwL?x7xhX5I{7dcJnMIr$l9d$Z#= zJ^oJ^!IB4nPgRP$y%+Wj1}$Nl+b}g@G#hoz{)r7dP8+!{6Bp^-Ui+YTMd|G?!6aL5 znFlQZ^jqMTLPW~nAlj+CU*RC&&%qETLq@~x8D%MSOvI}~QD+~OG2xX9C4kNDlR}r86S{SjA<7 ze%oY=jutVN0dVYjuWxLiaCI7M2K|X;SkfTdHZLV}Y8SvqTTK(a^}zAaaZwy|%#<%u zAv=a%)o0QzlnFe#*_GeeR@wN1iPMfpu{#Y;0O_7Zzs(5E`D-jfR04=K;*_`}=7bX8 znt7rM6$Dm`t{{Jhz38Gd?fnl>)+6Aj!$0`l8teb3J&tw!w<1`p$Ay3l zjON6rc=`L;VXJrsk(1a-*W8-xfGIf{b7w7c*yw>i7qUt}fItX)3`%Y__2Oa_LS6`a zjB0wmc2>$6Fd5+-7W4VsCrv>!@r%A50;At*z+pwy380K8IBBZj-+*xollo0W%Cb!Q zEof5q+=Zh2bTG@F>PCfuxnLh7-30jg>2q54;`^&4Am|}ni=0E8a-Q^9D93e~llz9G z2nvZQ1P&dJy=MEzKc9_?7gtFEAfux)z@-P`&8ornbjD2K{}>@onxjegDW2Yq&xq0) z&nZj@D;ewi*KXC4R?j~s1ne?lAp!zq+#X_Tt&;K!#wXX{c3KHXRbV2Hh1tGB^r{TE z)-YN`eRqi$c>0xNU>AHHK@_#P8Q=&`&OdFJD}wqjW`8_n;lR|O)q?%)+w&3M!s9

?wO?Hr> zk?W2nq|rRI!;Jewh@-h24;>dBAHQSF%)+2MBSF23XZz4~?psz78wIO=HWmVoMOpwk z(V%sjJ-CmPUTK44;~u-!xJB=*mZnYY+u}?WDhR9#0#8Z-Fq|INdT!QQ$JBq8g#YC6 zUn)LBIS||Z{n(hhF4#Yr_ox7OaC64PD1l3+%83cD0KOxrP%<@)Q!t!mB>%qPznQfK z3nc^Z#LX<}yQNwvvlpWL@7|G*WH5{d`3R$JV=OIDQ(JL9-$!<(;Cs^hlMtdHqR_8E zRLU$Xy+0n#Am9Mf63Ppi=7?OA{zNSAQvtw#6`mjjqI-aUS-0g_BtCkqazINGqqA^% z=GkY~5yW?`07wG&!wDS6@cPjgrRh{A^?CdrgwVq z5pN3gu*_u{LMae+;DjgV7d|YcI3ay>%rrP|N{em7t!S_)b@j6>1f(sAKTgeU+XsKl z`HVUrL7!zmTRm4nARzFh6ady(Pmj}@3IH7c)nfL)^7y}o&M+rZQnd2GwaLzQ#Clw2 zJDj0&)+3lL6amS+jgRMt*HDh?6%P!7%vF*)10tj8tdp~9E(dTxlj+UtX!Gq4T*@c? zx!@&jAD|$^X_UdFiDK<(DB}=>T1Y)Z`lZ+SUn-<^|LN&5oUQnvnQLrIrn4)e?@vsw zN`U3Qv`8|t(65qF`qGi+P>(LNZ<^IA*oAPX!|;NoUQ&BRBV@`^#!`P36FSJBU~wcu#*YRp{eOu_d2Y01D~~0!=PDJ}ORnY$%JG z4sQ9)ecDU*za;p(%*Xx>P3BvEFO+WgE%+_p`;@1w_xpdJ@_XxfcQhEUN;s8Fj6{*N z%tU;#{Ue$;a^SJ!V)C2A7zr}U6AkhD7K);{seIUilC8q-K)`-&7gy2f_#{4Py_P1+ zT*j8pO+Xk?!R1Lo3rgJC9Y{QOO=hf~tl@KW>K1{=-~9UTSx(rRwZ{G7)gQkY{kHfm zkl)Slh{It6wQnG@0o(j&x;4+Skru?F2Blq!NrobEXGM znr5F^@6XJC@}({d?~U2W@|p4zcTX&g&z5asu`GX!HU2%Exw+$rG)Ir_DgwN%y!GzisP23c z5XiWp;wUWD7^7S#-(VYz*bb9%Di2c`T{AA_xrCaYwMMmZF}wNpU^Y#+7Yg>%!$$G^s8M`zpyz*G8=(l`yfZH4*tR=>cp^2C zpNtuP1v=`igDag(y~NSmKfW18UMGNY4M|WzQ=#v%gC)@WvNL6d;S{G_lW{Q`4T+;- zJQ(7IxfRa;>S%(EGqhDnj7-mL!mm)5;hc`g#h7wWMvPY+kB8KQbEEMn>NZ;NI=xtK znNHj6&*#H1byiTeuMWn|;;YXwoo8g|ITcxkSPz{KZb@Sn3*cT3@N|wxYZ;cb|M(0? zgJbYw6wb+XZe$St;nnz-cYW-Xv_qs?Or0@tr;(rFH0xh~(TW{LT-LCMH|;|lDRr3E z4TYmbsR~aX0*hD>4rn=X3hs2kp%d^^FZ!C!pRr!$3Yw;4F8AOPb3?&a=I;ea9##k+ zDj4!Q%it237G~cJlSL0CcDYKAGPpxda$K1l=DHN&{U9lo2D{v9ztAI33sgzV~cmuv|gE#_U`G{c1wb8k|gJ-`sKsKq==GS{e!f@)O!4qT>v4 z9DAw?b?RYL8mX8xQyM(OBkPbcNFGWj$y$TRAN^v~>StvLKym?ei&pq|wfgL9dfFvj z?2LxEq*?wme9`YlfBWM1#ed^?2ScR**uh49j4C-m8D00^Wbl%(HPv8}PZj_O7$uYA zeKKASc<~TP6}Alma%>J#iAj^0F}j{LEC6IoOXf3K0HpgKvz9WGhB-gxfhVnp0x1l# zQ6^v142I5oHaGzCB$NbmUzXpKxn8CQPg$ulST`Pl@l4Ew${a1>+>biilBp@T1~fQ3 z;r)4*qI|`z3C$l2tO@{l_<2;}kfRUgB1SoHebYxyM&oe3tdDd=0be?!0JVnzB^^?s z;W@9Dw|?ZM)upEj0dXT)0GKOvOXhXcjSrUc{kXY>j*IpT=e>U99kscx4f@9wWScC&9hoZ3lO&@?mz>a zK$9lOuMANXsvwlF4qCLORo%E^&tjGXz1r*q;>#XLwIO9Z1LWtmY3-yoopsxz{@a<5 zjjO>I}zt#ZGZWuftId6XKKpCPKWm$q><42*_g3c6t<)F1q;oO}0dhBxF`u)ss zmgTm*0qj&Ct7M#qg}mpjW-I}Kv1EB+1kF!9|ITys7yNL>R~uC%I2)-TqH@6)+mP{2 zXEREa_~94b@k&|T5TvpbrI8Le;q{ZXJG?mK6JHETR?dd>3Jc_Z3pD9}csyI^p zCoZ!CT_^^;o`oVO^BJ1PKXWkZQ; zL)HRFGkHba>LZ3o^MQz4g`I#vG-cVI(iiED%h|LBu^-1d=tEX!??k0HrV>HXG4sSB zd@;ize1tQ)3a_P>158Z~?E;Z!?58d*<|XnHiP zqHC=goKO38b|tq^QrLG!PUbuEoO58S!tOvoK17B`fzNy_2k_YQ3z2MMT+D26a1G}a zj(Li!T_bK`lQ6yErC%(!?P1wN3GgH3p0^Ph9!tz-&U;+ubCbm|nGY6I?u+34aICC2 z2!>XERz-l!tqKL-x2_;52|pub%!jvjXpJ<-axTVoeY1McjzgftrHng{S6T&VnyV#% z{xMRYkluN`9)ylQ%d435jgF|bR1)anx&OcR_(U!0jmLRNk$fQ1&`kMF0vvG%a zZ$l{SOGPpr*5asMFHP$=wJtV7z&|ODlOGxC5Dmh$MJe8UZwaZr(gpLGR+g{c)SBN? zlj_vEAfQq^mM4J((xC}so5w0BwGuE!;CfJkNZ0lC>OD^d0=ujLsNun}xqoodEvAEm z-lSKbOeV){y>q}Y*@aE4Lu?sRtFR6T2uNd?;+D*~j8m9VX2yISAFAH*=nz0}(kgKc1_99| zNBh2!;VZ^3$1vAoI~zCEBR3iEUBrJ5?;V2f_)an^6r3HND^lq{p$OnZj;V$ypFa!1 zci2~>Y&qpR+3gAzn93+P&y}!rBkm47Glg33<5l|jSp_eOoRGCL}YGazO z!frz#_i~xD||(CQLdwI_2D6IO39NFDX|}x2l#^ zVKW3|PH#rZ>6`pcf2hen5t~-8l{`?~9)ncb?D_dkNn@rArgLy}Q(RqK78h^MaNsl! z=YREP6F(M4oB{^>y(y)3;FsZiTkb)>A2VkcXXnMBe^X3wq~$#y3tk`5W~_fp3k#W>rMcTR zXF(Lmp7ok@;Y>@T&kq{KX9um~sM{n0eTsKw+|VJdV-wc*un9Wf zn-yOiwgP8+3}Py?bPV|Na#WlTq<09aKhnQ`44!re`^g24`)~sneam&fBi4Ugo-YPC zCS>_s2c?7Lv&~8U=$QA@bblNa7R3dQH?N_;D&Nk@OM|s2Sh09NR{XVISMVQ0v#!sf z!LQGX(O^)tI_;v}=@f^j$3c%V#@M(JN>_m0zq$^ZJ0*Y3IMTno!ihLeP;ew!b3KQR z9nqgMkWHsm0Ht$d8cnMy@j?fWpaFOR9VV4dOZyDzH?q~w4nZJj5RKkzp$zLKK8hht zG%8N_5McJ2vBOduoU(tr+)}5J6Yyo5`uh$w*;ZF&XB7Ykoj#r&Tc<^>@g*(vck~qm z{(?0gHPYEB`5siX85Ed_HhIt^P>6ETmi7!=c=b4 z5dsR1GMk!shrmzg>cgzaAiJh~j4ik`SqxE+SjxM7+SwboY#I$mAvpIhuLDOL2>*MB zLk8AmeAVCW+pFAlTe&Uk%~hYe{CK%#7Hl0Y9^@uvD4l5>A*iPv9csl*=|E4W6T?H8 zPi`tXPoL$c!*N5qd-I%^)YhN zYasYHV4Tc1Od@Q@9KeD;AsO zwidx8$2gIGqV*-n1nK1%n(QtD?h-Mg%~b}8n^W4}+c;H)9fg2RYkSWa(`fZTm+%Py zUD#)*dX?KP@_`TFLMWg>3ejwVz%W~O8PdC!`yH?3G^mOl+toHT z+NW_y9Ky!CAmF6`G%QJc(`Vh1Ov?cU8B^coY?jx&chz8dWPTL>y9oN-QUPGz*-?Ft zs36E;A)8NCX$64~AfQmCU@gLnrebQ0VQ9TZ2;r{H_o9w$U4Q!)!+dDD;2X6fP>7%L z<2#L7;S7=HM_gkGe9@%uh{s`ZkcyECNM6QY#hbKOc{$VI=xW}(B_Uu=J+%T*8Cw_5 zIACa?4Uv{beKxQzQa|aMw0A;7jM@6c)MJJJa41XpBjmO#>yewsr62P^-n!D1DAH3D z_4vPq2G5_WGP?o+`nfbfepx+sw7V%$huZF}g`Zw5m6A`AG^w^8dbBGbtm<)h2<)r^ zV2J0&E*>1GvAsj_3kLWTz=Eeo0-dJ{>xFo#oeDQ3k~{=hXoiqpy)}b|@J*m^GQV5qoDFWx~?TleO5yntWJp&Hk;!UCJFb z;wPuJk$kG{rXb;LqQZrOkqn3Mmj*(-AS%)n z1vCYJ6^tHh2@nneVV0^;LEt_R5YO}Iq=2uniwdPKW7wE==wq~C_JO3#Uby)_A9?>? z-eu3#p)&6fpbe^xMFZHOK!*NKQ~;1_y%0*+XyAqNzTYlB@8$A*RMkVC zvkh~MTgrRPS7CP{Kvz%va!HY#r&RgZLMf&aK%CGT&s3|uv?xe;R0y)WSn+9UwzCR= zPXn&%twKNo<&T6%hQ*+OpJJM!Q}7R_s!Y>e)$guAAch)#GH*6dx2=L!gu=_fjhVS| z_Q_~{CNSb5Of%9h&R##}GL~G9m=)7hG=%M74EbD@HV z)$}2x$bk4)_WsCuPT|9|X{)~^=nK1fubFD)I?!flH1KZY=UcD*%gjr+r^KYlU#ZSh;E^7e;H0kHjT`0zC-iIK!7 z_9mqSuDk6x^Iw|cIveM#Trlh0CjqO%?m$3ZOomkRRcDJeJvHy3w7{ore1$XPd2`(_ zF3-=42J6U2%s{o;?c(UOlVa~+uNd#Ku(rhCdbtPBV=$mH!73ySt~Zt0)Y;U<*;#S^ z>P<0*alSggDsFHHs3pLCnNK-PnNDXO6+X}R!KD|Z8xiU5 zUO+-X>5xXGr9m2&?(XjHP(osnl$Mn466wyp_y64Y^J?Gib^XqqGiPSbcRmwk-0k-? zM$^@{jK+YJEw)PfiQO?4vyZpBtt)zgm2uUPkQ=FmX?zl8MS+PcBQn<$HYU;aS?!P=)vF3N~FugIFtyi^pcxJOA(6fITHfEcS+e1nCiJy(dYeF^8AC91Q$`8B27)c6#1ws4kLMWf z3a;T!AwH_|2|gZT(<%srvq0({qU12|?~PR~T4GI`9QoSjGeckH8)^JO@4owOo#o-@ z!1kh|$o4fMo-GBL&y!_GS7u*g5Vhx}e!({RO`^O}}w6)M+dmXu5lxdZDR3XrJxP@P_J2#5? zD8Fna&qSnlTKqjpoBt>BJDqqKai2KUn((B!y+wsKT~?WDW1QVL@2Ay^fu*@C45#7( zbOb;VxLTjjj?y^M`)PW$2FqA_gF0H&0b<~E_d|NzcAk~-^TQNP6gtu8=`H`XikCzH zIgEv6G53VZBVvVIZ97VgICt+~ zw)d;}pEYgnF7PX_(g_`p>JvPgLs1)OF!Pi=KA0b8+@;-(MwcxScZJ?PK2NOmYljx~ zgxi(ZZhBQqby`wP26OjvBh<_zj;od1KwX$Qbax6_fprKmg(Z`3!Qcq+?G z8I*_Z>uu$7Y1o;9s!W$X-s>J+vxCm3qL}AtOjBLmxuNo>;`bSI?1SFFL_0(YU*`cX z#W78~DWJg`nfJTu$Z#@TUV$i?DBai}(caq8g^PkfSGkv(sI8U0O?zXxVT{Gz(jO(~ zQGPxp)Sw?=S92Y*?i`Z!40G;gpq4HH zu$lz~Gu%vr5)Ga;DXsR26mROo9!po#JMKPp-}?P(ahNgt-|i<-O!|UfyNL02=GT4I zlFE_F9$>__xgIu;A~Q)CGAn>WujO|IEuP<*VZZ~?;NLa5E#^#j4S@Lz|Hj~Um*}$Y zb%U$pI^bFR6F!d2F(0PIZo}LG2jbTI$VW< z8v$Mi3;@3nm4(tw1YU+$y@M#KjsD)cIQ0*~2BeTGVL!{cS{;fa7xuXT`@Db%N(4#u zBlS$#angVN{+Vb1R}Jd*z69{72DWd@*3y$QAp?i2pr--C9WXzmn+vA_nyI$J$NQ@u ztDTI{eF~zAUmTz7cZuGEZu*eB9cT=M|D9tbMxIqiPVLUDC(wI zRNp($ee0v^2Sf({{(a6&<`^lrfe zN(CxP7XzVUe}d3kK~VC^0{`;-Gz77v0Wl$JvDS{)_WLSFe{dfGjP_vS;3)4mw(2e5 zP;!c++m<*cNX>s>zy1l?4*JEPLP8Oz`2F~I^Ff6Lvb5;jl&h;COu9u8?D(NDPHN$b6>@|UB-TYON8bT5^>_pM`Jdx6Hnu(@v)Do zm-3H!8(T{xVC3JI49vjl*R2lv+b5rse946h(2{hdnsp(% z%rhkAL*Hb`-%-AOhKx3|B_1o-63%?G9@q@h>I#G>LLhrnx}ijX+b=_A*N~G`ZJ+vF9{7TN1Wiw9%ue@_FjLe?3kHaLAisoC`b@5oaK2d0_#8v_(Nk&wo z${?*Jtjy^Ai}8w~zzpXQ72@GaqM$p-^WILUx2W5Bcy9)!j ze7CV%bk+O>Vb-TIB&~}sM7Eb+t~jN-ArCX{r)i4}oJGin+3SWb;3Fka>Pr|KYl=sa z!2Ij1I=|`X|I_PU;+C!7RdXA3Pd@Q+v!C##QZtMp0K92WQ1dM)Y zU4FydKE8>1bNj*Va90NEWtGgG!0h(@RY`>DuX(}lH*>vH>LeM;Eb20uIXo%iQe=)n z%zF&uIpbVSCp;t5Q0Dpx-y`cfs}?$@g3LWr$SQv&#@Uuz)zi=|%KR6LOv~*1^c15e z&7Q{Vx>~cNii$x0WU>u^#XGYwz-q$b*R`Lg`v$OyJ2r##VGpKTyo>);u+gg$Mb zxv~|6#cMYpUUHL|%l0IGuYGKGUG@j*c-&IHR=}mp?vrxEIFk&YRhESs}qIu6VXVw_**C0Y2ik5;s_l?xfVDEpy3K; z*3C$+wj+DF(@jMqD|{d=^y1qhrH-i`1RDD`w|X|WWeEvd80@~g+50y4Wh!%Kz@Kc? zi01Lxq{A*2#a$ZZ^!*tQg?ZO8v0zgYaSZBt+L9n&E-z2^@QNt^yP=DwXi%V3XKWBl*7A76r}&2K57WwB(A2YRrpqRQIk(aT z`BR$4Yt1tMd(?Qf@Bdi1Kj)`?L^w1Y?S8iPve%9`ulAP%D@ zCR4McHM=gY%*!rhZFA~NH#Xk0tkBn+gfE5cL!It8`6M8%a#{$a-gt@RJr3p__gur9 z0k^yN^A_~+cgHv*Z%M^% zRb)u@(I(H%h-Ov-4vdx?8ykfI^FOS)A9XvOSmklP*1ctBN8dgr|1geG%@10Vuwt%| zisBM~sv=F@tqkuJ6Sa&s@o~%m9_A0EKeIk6#gXK7syIDn=3y`Xh)?Zrov5nxc1Hsi z3O2kOoM&PBj`|u$ewk}k99`zle$bkW@r-Blq;|%i_iq4Yh1f@LqpJ5DhsW4lpDbZs zPA*(jSDZ)&A+uk%;s4robA+U3@93EM({mc%!2Gv^f5ijTv92k5Oo{$z{QWWR?1zzp zs43s82_wZOv%)he-+5dwecNn$lG6q5qe7KnsJ=kQqQ2pWs`@NQyl}*~O0F+-fLGXW zTpZDJ?8x1is(L?z7woOW*2O`dMEPs$26ex_YNGl8|0~Na9TJ);YvRcd_FtX?x&f_Q z(scRqHZ2uAV?TdR)!?2^IfF*uWvC@$q@3|}-H+V#->f||oa>>s;;d(-)kWG`6m+;_ zYuL1v=>l(;JLUbCV)g@D!ncol|DBRZ`-5pNopLm;@E<3ffTw>m?dl=QwGDvyyZs<+ z)6VrD+SmcXsP4IIjh5K*6 zCJLZMWC(O#**=}GFgybYZaQyoJ1?eQnXl6+hS*wFuIUsA1SYh-{q?n{B~$@0MV4YG z=reuNzJ=riUOqq!JSWK8*E>hLFHwn8??*2)X`&DnG@?k^H-FR#;*yH`x~{j!h(sPH zg6e9Ss^Gnh$2KJp@NW`GSKc3pL{AgoVag1GtgVd(Ib+1le$2Y~)ap{tBM!yp40Sk+ zx+0qELWqxuC(b&$Fpr6XQGb{1x~BUK1X<10*!=OH?P|Xx2LHLn{_$>K{!h!@5dw!W zUfz3blMMCpi*}ro@PO68HkqsSCY+r{HH2Sk{l!=G^!ICgi(`U0SLi_+U`8ib~SfdL_sV_O(|a3FCIPnMha7`;FPIu}?{Z)1atS1dJrWV`EN zGx?QD@^wQO)1nE_jQOI1UfMGI`Et;o<#_|~B(v?H3TT}}Ty^{C7<6_q-FrN@D5HEO zvnHEhSfnLYfy1kMoAY!+%;Rvw?%&q^*!e?-J52bDxI||Kh0q4SluT=()8Cw?fhvm> z-62TAjvRR^Gq$J+4fp&-WO!0=cTA3t)Fkvvh(M`9Fy-q)X_YuO!WL%P_4TfS+iY(e|g zGQnf@!y5*TKqli>h6q;`H0VEHc}C($8E@Xm0R&#=yxg75vZM$MmxeHGFf8GN@b{Qz z%yg$t=J!V5#<|}xK`T7i#qV6kYd&>9>`9VvK0l^yV)s}1>HK^JV69F=Jlg4+c_Il| zy^HooAzzRgbhL-^4c8dhLZ^_Dm1K%+J*a9xm zy9>9SiUWEaOBc%a-Ijh8s}`6@OD*eS0s?=eYV_!DbdP^g!6GQ?6-T>F-!Pq%yp9Or zb(u8O>G#!vr`*`!thX$`89BGQt}9|r0p`y0&}Kh78CO4%SQc`P(5uuAe; zrv*qj82W8{erk)=95ULODw`>WoC6vj9FvZ1!#4@nanj)KyY1;3Hq#iiOcNQjk&|mFT!)^tqxiwBy1}9u7^(Lm2E( z@`ox=Hm_P=c<(m9;Bf&1-->F@v)&zP4ye^ZhLfHK3YX|+d?y8o%z`bkiq_o^DZpe% z{lv=n&^wRC$5v4OSaopIEBs$r=JS8GC%Y)DTJCB`^M{f-kj8=G0ca2L=}1*#Z}ZxR zaL+k{bS&-4e%2nUxZTfME_K>k1d5E_vc`wWuvp}XJd6&hI4a7HQbEf!@g?TyHj^@b zZ7*@f_j37mnrb658#m$w;TEN&&3E)5>819hlrrp`=S|Q#oe~P zys`wuh$LWai6DgO!2a0#-SOqdIhyaoYwRgFe~N%4ad5_jJdmsRTrtd_iRR}9&E92C;j|W2gV7d+oa`5-&uYUK zNbh8>gWI-H$dy_Z&D!1S)uSx~KdY^VndNbNIPn2iwBLEdiJrEE?03C^Ige+iQ$njY zP%-U|pm?8pUVx*x>T~uCm&$TuJufDVJ?KH3NT>RPemt`G2|K=tx(DVmb{OSU#iB#> z)e|g~*g=!#zqBj%agN!-KRe{r-`J&(9^%J%ihJHX-eYs5jD}nlTv0~uZ-&P6>s9N_ z=+@Is_gdLcbNG5S{erx+WfLnXFWm%Y@j~D+)TO2ZF@7>7Fms-PYJdBnrLKm)HVAFu zml6}xE8&AoGa`EKEfu34`eVaQ3dzfGK8AJq!uXd7Er@p2S&81X zT3?qo62p0X9dm4jXz;IYaJ&{o7DT720pVAOHX1MM92x_&g)}d%k9qZ=_|*ysn4Gn< zpA$)?YG{L`Co%#Z`| z)$F7q$zXrdcT~rQ&7xmM_*s{U@+)*~DAC%L;WK9M)rlVU+0TnN3huwR19-`l-ha$F z7+#}XH@`<`4Z29P-#pe8g#TQ=8vMDH$1azxfrW{`=8 z1UFnagkOcQSJR_c^SZuKmxZ2=8MHj%m?z>M-?1JjeCQz9B6z5!w-!EJlWkk2Zzo=4 zO43Nk^ai%J`uUc* zLi|)yj;)~7$AqegV(rL;Vt*&+Rv8Fyr;8_AzX!n??Qc=jLGNL2ekIwW!T&J79}=o1 z=tfqmi0P0NTH*q=9Z47r+Y>L+dC`t|BJMOemY$>2DewAkze;RoKR0#6YoT1;^r38> z4Gbs@g~1}=N-TvDl(q~Y|MT6*C zIv9#5@M`iMqf~XwD9GCm80R*>690Knekr>lzvo~2f9Gb+Rct|Oa$=gjhn{@F&e@bk9UeK!m6Ksugf;Is@7&kqZH$@_t37@V{yv&w-p|6W>HV#vk1e{T zvr8ufo^dfJVX8cU?(`$hryrjsIXMxJhE5TMJv->!hZ9#W`_6eQUCp=7yOLkU=&o)N zmM4apTZ*-yF$xu0Xgn>iQVH98t#7YK-p2{(U4I?5QL}0KxLZnMcQx(BPjMvQY1OoN zt)5j$MlqsY@kMibhV@-MElQtD*U9ab2?Ue@f}p$w3f`~r#W(ziZNC8dHEHCY<_?T6kd!6+YnYR~m+bLN?xi zvXV|>v(o%;Ij{k|K$VbBA0z~(B%yVf-O^=OZq9DUUFSN%FDwyQn7PF0039?i9sm?M zJGhjX^*RP%{}@J>Kbh%H4LGjTpUTXFiqv(ofcjeCO1DPqVs3j&xMW__FHPbomuNrt z7;)_x91OhoeyjFRW}ytV{hvv{|H;3h?x(db>p;r4OZLw*&fTIz` zv?7Npne)ls9)?2qYcd?a!z)F*M!p?O#pU>Y6P!{Sw$}5olB!#A=O!D_*Zb?rx>ZrW zZ=1}&3Nu2l`*?jaRyE(SriBvS>-;p4s68)4d?6~Wcgv^apO$)WQX5SULeZ4qtlxj!5neu1F6a-1>#DMKv% z>jlYPtTzTqh*HKFPk*dBRzUdu-;EtlQT6hZ(#AU4?o3x+!}pk3y+KdA>rsOVC)NNs z+Ux;}Oz>}>_xYd_z0UDQdWH5I(ME(GD!{&coLig$K4ZI#Fn-2I1rT&eAJ~=LCi_W# zCuO76X-lu~{Vk6gL*_GTj-Yfa-*X&}N&Fd|)!fG{TVRzS+OLp?NJj^ZZ8&~046OM$ zo(ySy4-R&+*$Fs@|M>~Y$Imy}355D?p0+#AJns+rm1*p@d!LM`AH|8jhb@s@AFD{1m!eu6}c(r18y59UHv=o%rR$OveWq3 z0jJmNwokDSJ0h_KHBni?g z0@{wVPZ6|XitCawx+@)||7~nAd|XN3ab&T3EkFHW@vCvdIBwA5Zsqtv1CzG8BkN>o z>W?t`60^%IFY#1sbJdmKDiGs@OFAgCZ0b`?|b$NSt1=25o(6Np$l>n7f;BL(8;DF6Dbi@-Anmmy0OX z4kyLfUX~4@Uf90MJnrt-%3Q+R9EbtLhz}stSSh{Zr!i*h`Hh-!TSa;umzI_~vTT-u z_8!xDM$Vg&et(t+-=G zc!7>jmc{-*i3bdhcf@=9yGt$(i@uWRpZgguw|((OR97>ARyQK*A-z;jX>lU#zZhiP zKEI{JAd}emW)G(Q7t)L5gw5%TnPN^&ThynEXHqHqBd&gJXE}6dd;<0 zaau3K#j!VqAB?qm9$voe)^+?8qS&=CNSiPF(Ev^%>TKQ^W?^2d_;^NR6wGQvljpXV z$?~PkVr_6Y7jruO$B2PO(1*6_lp-~9$R(JINek4ocxg2~Jb0yse(U|7mbWM>FNTQL z@y@dQ;z@;hCc!@_E5{5DCef38#7Qc8YQICAp5=RX=xj!lIv&9qT|;$?l-vgt%j|E5 zI>hK4&A>wE4kpVq)C3UsJPDa@51IA$q>GOiVLF|p6?x@v?_c}Gd~ zzD17mr9c<;D`vmpzq7|$?=abx7h(ekD!xu3-JQXL1FF#nXY~~kP;RPKjkR@?cxRppCwuG(7Qy%K?y8LkC9^C^V z6(ORZ_u5Hp+Ko;%98-=RGxfo-kZyPsj9Yx}FryYHyx3q6JMxlOTnr3KYz|k)-wlwW z1NB;vUtEh`zKpSK_(FT?adGa(Wp$WVa!LuKqZo3CLc5~)T-T2w8A9_D5|0i+PWPB z=B<2KmHhk+@S`m!(Vs_TcW;=#!Ce>#HRv!%<&eZ#YR%Ror7$zagv&o|2o5aLFE&>r zTKR-t#Le1^6qh)e*wtE(Ihgrs0@1jBFTi;_F(~tI8h>#^t8TupzdCZlHCF`pmHS-= z5P!SLmyhGlR#`9LKrNR-jCsVW5r|wx9sn?!+++ZUg>z}g69+A9WFhQ{Xi58h6IsXG z^J7*vE3(Pod5*GCOgvUi<_sY_{rHI8x{YXmoO)RVopw6@RgQOAO;2=cTl0b_JwP;? ztzCt!N$ssLhXx=TN*=$KNir0=Ob&uaz+YaZuzy|Eqd$I@qQibA)K2UQD#6qReNGS3 zb54VFh%x)H%+P;u#@iWIpGWHk=cF086m*_F@+{K4;*cZ=gkisCe1FKU|BCXX2guPl26%kx=soqV%lM)EWbD!PyG|xDU0M_MIf=Ol z_I?qNZuR$9y`9zGeQdi)a1QTMcJBj>-ll_Xx+_g2$e#~nEH>@7Y!c7Hv@OGg9 zg8P$B%4!N*v)jmpe?Q*WWL*37XS9+^{> z2P`VtT;>l^OCx<}Ri!%&gsD3xKYm%}-TDeH{@fzMJrX58m{7z3wuZ2GX(y$MX$jjB zh1I&XbnF{RLzi_uxpQy~bC;~&U1NC)Z!=-vOcg30hC&%B-C*DDt%^zedhQB;kp3G( zzYnu>Sp13#e}Rv1a`#k!#ewO;|EzVnW&}M^9MSy8K-dtYUHI}03}ih_FZ7DMeo`~CDAW7ZSj2S_}z-}u)D_7TQo zOi-~*hSxhm#y(F`ro8?Qh;rfE2z!K{ zrsD+y^U@-V>|b0wNyZ!+3Yv37|5m3F1bZa*i`W}rnu}A+3tD_`r*iP=p_j+1r916< zOWLo~>A`;I*zZV*ti&0;YOq)7N)Rk^_`&|UBsT0zE5Yh*Oy5g;0!4{%0@q-w7qO2r zX4gv7`?RR-!1pRm$ACm!| z#b(zzqyDdECCF)1NMYc28dlNM0wMxeI==Na5#-gqMST3E(6$-nx(9iWfS|~VTnH6D zrzJ#^{#tLHN2Uv3C_sUDQ{^aV4`D{C^d9?!&v$46t=nbZPXwY{$tDI42?K8UD>Isz zc0nrN_x?g4Wzekrn9aB>H^DfhyY#^Z7KS|=Kkmuvackg&TQfzvr25;T`0 z$2dseH&Wstfb?BgLK}|jWh^QK?;>3OPlA3Vx+ws_$R;ZN&)^KHACgI+^33{T0R;%d znV(-ul=?1BkSfMJ;1kTzFyLGuDU&hnX zqPI?}p8>i4M0?0M6uZb3fBYN4@FRlGPYDfGGJlP$59mL4CDqB9RrC_S8hl@<<`hgv zD?s>$p41k=bzoS?wZ)%709*p2I3-E7nA13c#$hlOu1wqgMS6mk0F;ylEWDM0mLUN< zRM2ae%_Kg~Pn-ct2`vwsw*q{8OZ6!bi2L8>w#sQ0E}?cU>Z++3Gevl?nCGFk-+;dD zj5Y z1|`yj#+kg{eY9Lu1GA<0E~zm5y)s;_n#q};kZHhcSH}iy{KZQM*bbIj_f;wCt2~qu zsq4T2D`45aVpGiM#qqP<&pb#}LL|fuc(U%z+487>HNoN3_FCJOo`vs2RCn_Np{Lqy z?n=plErDX{;VchNPH+%@#lLdn)^cU=Ewvoxf&&h z#PKh`UyPI*kO7GX`3ooXchBez)p_ zJNJ_c;#pGq#Jja>VxU{nxI^gi+VOs5H|^GB*;f%2zTzMB!#cp{?N4}ITUKD}@H2$O zKyZ%}SwP_^hHnZZ_klB(6CH@~^k=b26{KpNz$39$9gx(0A_Z-BOd9`J^ca_ZQdjud z-q4gmZBIPj+@OXnX7R4tTXIqK(idyX3^`4ztlKFq z!x~jR3O>#w$h4gW^p?Vj@s>T(KX1d|placdzxt_Ngg*fTC)b3lA^4K+-vji971fNF zpICh98)-XZ*u}2{Waef$*7BYlP>Z$Fal8v*dW#}U(~{qaF6iR?`&?duox}K&G+x z9_<8Kn-sKZI&)>|bosb~qJ4?tI*F1V-#L`h%SZW+D2ZnkynOS#36CRst~f+Mh&25U z?adxL%w-b=w?2i_yrjuT#i=Z26jO((*Q|NNYM#(Y=to&6+G~yGk2-=UX{P-rxUM8l za2_~s#y=RJ4_$ndye1b^#)H`cjwB3T$*3uiakKq)(F{D7*xfA(^1J9SGI9w%_wPIR ze}fywuz11C0Q$_e8HK-0gHJn^?!;oZrx7EYEZpP5!vNwyh7b_<*zox0UdE?gPMVuu zeziD&U<`m8$3zy^<`GxT2>MP2+-!|R;Mzd(dp-hD z?I+37Hv;|({4MV#4hHga(@P~hJ>+}j`DsH;2~lD4SG4a|$xVjyRHOiuEPpgVBoYg^ z==gcN0>lG%J4CVs2dfp&C7+k+pw>OTLKMJl_0o6YnEz_;yuHE&-Z;1{`$BEIpFy5<+35_zrAy+48@gd*8Hk}Iw8iWum|KvYtyMZ$rrdq zNLDd^7N&E=2ePv#l=dalL*xVIoju|dEEDGpg7xOy!9b#r9NZGxqj0^^1ML7aoiFUM zgKdurO8CUce*pqFWp^re=T?xbRf~yY9i_j~LQ1{lywgHG(yR^MxA{GImA8JCBYc|D z*o#ga)@k28A${Elt5I0`4A}RX_L;WxyB<_?iOi$K2p3233(E;<(rQ*>D6K{MDRrjw z3ayV=1=2!6FS!FLIIOG?1Yiw(U3HO&tI68%4yOM-0J)ZzSj4Jqjalr1e@@VBYj zJHW`WOZ^JBx3XzpFHRb*rpU{v4^qnq6)*(!92IU3ZU#B23Hr_rPtZV^l`q(n-4!3u zHx@gYZYAJLvDScggiH9Ze6&6G7Y)&3{?tIw<9WZ3X1URthtgypwdhU>K7`Av@UdC8 zIt-BRGx$x4)=`K=-}VB58vHvOZO*fIiJ8kzWS2qWi<*c2$1}~qzx{v ze9`aUCJwjF2kg0A^p_BFjCK||3q2ALp};mH%iTD;22)&&*AB1KhX?rVdHZ7g&4L;j zhFvA}Y0wpm(mGDoT;7;7tMd)G`7QnK+aRv?mbv#QuLMJobT&8r>7)w*i~!LvXw-8E z#}v8sws!|{E(HpTIQMi=Fw95up42(Lku)n(vb>=$OH0fv9y*+6gau-W(^1g({f)E#ByxzH0H9wEO62+W$%d8;h; zkNfWg3sJJDF@KcsgQ))W+!5^0m>%6$!Y`qdZuC2s4=rwukReu_^1HFzbe%OuaQI6e z0{Q2h5u4^u;sDM4)g*((v|6F$)SZW5v^+nO?zXJZe#jT8^nAfl>y$ZcDPkP}lXTr> zIhZD|%a-~*_#Y6|XI^1g;4>xrM2^#_BnI8xNKFvvEH4ECl2g&`l!0wnH16znguIW( z<|QQr`F8n_XoZ*gaSTFD9(}tnw=CJ?#XzHO-{Sh82QAm3s|JK zJ7+5aKQOWx|E+f`b(7A=TIVz*1DL_1NUE_<$6D5w@6TcSMVd6nIT93nL3O8$&o54| z4okafabvfZ)e=+U|!N zd=kw^bLwzry4Fz9h0njmq~R-vWc!grjEaJBM6S0dWMI(f#~?=I=zIJ}Wv{JW zuYE}oasf#XF{@mBY*jN@dSU!49G;0}$R91RromjRR?@(A8EE$D055cGp^pfKcr5+s zM@!T0-C?Px+ju2FPNSbI{=gIp?KdL%Cl504`zjQz_VHlL7!rKP%;&Pbjvc;bp+^p+ zM)x>ppGUUT$(v2ld1znVcWIoWU+g;M9q}{nUF`B_p}aGsT3?GDY_R!-eV=Uo9zvVT z=zjmM?8ohkU@C2pZuZ4T-C(CPySg8@R~xGf{&s#}y`~m8@u z6M|UT{MjE{y{Yt5&`zH<;J@s~{3ZLu19*)mQ=ThVua4e1y+1&^9g$ruzTiiIbX%oJpqA;i}>7bqNrXF(3f}s zT3=W4HS|Znjem&7rSKJ0Q^5P7_<6h2-fR0HK~n(TPLGU3&2B8utU>j)cH9#tzK}M0 zzFBu+E&jUoe)m|#-m=Xid5IUd(Eb&1RHtXtE#~%ve!gRITjf4hMC{=JCS9d@J?5@n z?l%GvDi{KfsMSr7_M~MR0`dR5Ec;Ndm~DhI&WKl(b06?TYEzsh*L7FOr? z&7H|ZO$=qbz?3wER_|JOr$pz6ZEg8Zihnkz@)vMM{c!*h8l zS1P)=A9i`utq|6!$R_qS;wA;#a6va*b{EMUIVr@|x3@@dn^=i|~ zji%z!c5$}h>Yqd{){ydkJloD!+Riw}0UiGYsHQ$-b^l=mZdytF#VK>)3*o>=?8!YE z*ZY0SU+P3T7w8Con#WxkrsE!_2^4-lnix+$~NqGuQ=iQQ&kCXy8N%p)5%)u~m&EPU7go>%(+h>Q+8>~_H2LWyttTf@zXDWvN zpM~T>AkW4%yI9IW_xKEO=LuSE1JOhK+AZex>K?XG(?9S zUyf$V>5JF8y)^d=F3%kQSW`DygH_)k!)WBFp6#*rXT**vUJIfT6UK`2uiF|7RH%oZ zj+4S7ZS9BcrfN0`>W}BPpN=tsE%pF#i@$bn0M#?-@3jl%N>?a3H4r8~wBk3NAxTg3 zy08Y1fih(l?FN1q>gqMZaPH5UK1Qd!hmSq@dYah z2x>!OzvhjD;kK$7ys0U|d9VIily&)sJ8s1AVG}97VrO>wC{vDOW`+S|ig7x2X{88+ zUOi&94BJSruR>|}8GPJ9hHTRXd|+34aK2o?AnAuNh2`^I7oD{Xs33Q~+!`{3c!Hze zl{if8`dJv(-Cdf3ROu+_!eT}jpdrOEH(MhPEBd4m)wmAEYUNLti1_JnQ27+61NB6{ zSvg6?h#!DMiATkg$J`W&VP0ld@1rK!{22i}*LB^JlADdI3##ugfGL7MFw=*- zn}-6V56^A4goGia)BryH9Z94;;58J0fs4VM9_sPDsJq`CNl3SYRiZi5e0%WqkSR!* z7g!HXOK3&DL5Z{bx7U>JxO^o91~9TWG7a@T`kyKW%rQ0cq*6!IV*_Sh)_uK{F)mo} z+q%z@*(l&iUWM9TJ9&P+@L1bNM7-32eaUS~HHjro(BB)rL1pUOd=hx>Wy6XAL%#`(x^9p0k^p!Z4D&!c!1&0X{78ff zyWQs&eA}9@gdB8v;3ox0OgfN|Hu=_6nZ1yq)u(w$UuHnaD{}q!*9|`D-G>$+%J1?I zf<4pxr%FE>Qa`6ioqdV0rBq#ipT}e3NRzmhdJ{jVpu;vFJ~t5Q*lqMG0G0B>Ly-_j zHysoL^9`&y-#rEA7ya?)n_9BW*6T;`yi?&ldINfljjh%u*Ok}HiKBXWoICoS<&a*p zN(QbbAfi~A>z6!J7w>tWxlLEbE=z|oIqtJLVQobYka(f^<7=$=T&&Fpb%6WZ4g3p7 z8s4?F*ry>+#OUnT?m#)@nSW19i}_28@=3513DcFXceDGrGVB#?`UFi>C1QXIHXNyBV4|z^8S;y{0 z2_kO|JTzd{KF4#_DH-f$L>_(1CR1a-O#1E)H^(lX7N z9n-A=4hAY{_>m`rPI8ONH}`uN%i_F5mPVx(Ji>KM2)tiB)&g+thj`5TBzN8?X-)Z z(c`=1D@JR8({GA*&*J2x1xnHPGRf#GWUKG0Ri{ip!jk_0wQqJ9aR4_vO0`{KRC@0U zj{aaSvD|$?@%Z^YEWQ9%OSJeD`xaT@T?PRQ<2{N2MbN6uItnwIdG52LDg2m7cjnnu z(51eU-P1E>OW51y#P3#s(;qq;bB&j82uNlox`{aBV-JfJ@R2bm0@D$rgSZT-t= zb43r|ei@$@*$Uxj9&2-yf!L=|^@$dV$rT#u@)xCi=lI0=sKc=xBBWaq8V|qR|FJi< z{Xj=C6PPXT*c~=0pohiLvjK6}TY>OHr?Wp*dlr6@wDIV$hcl`*Z>%4#M{~O$P#l}3 zOt)4P0kXI80K&568~cQmV0a4h4PrnARKNL`V6>7H9XKJRefNHwdDMt(V96_BO`=PZ zJW47!m4dHK$xzlpD`R2qX6P#e$o?hpe3wI`=pScdFLT?Ox+|jW@NHhBafXeN&D{@M z$fOcS^rqihs+?Y$v6pi0ekwDaqb<^!g2TAPk#T=9T~23cVRL4gr+?d8=HNb)Z|Lf+ zQK5TTyIJ-Z(5?T|59j(W*tIqzaITWc>V+BshXw$?O0u{~Zn3i+Jp`CoU_xMwZgjvd@K z-;FgTA+;EUKT*zER~~s)R?AVKJBaPWr01rdQ|2SjIld$rqswy96Y5vujSdZX!*taD zRRR9>@emtgn7vM=dT8Ao_CQx4akJ+5;AuH~QHYH1`JqrLfn=X!qG{#*mG)K<_Iz?c zGmAU~9?o(oBPh2vcPi%@H9|}KYqClvq+e6MIT9tG92j{{1LW9aHb8Fo*PvCY{L3P( z%m_fOIygwcv9ek1C5+Fd%aOyLEJZ2u4mWg38?)0`Vo0!=O-1wg{72<2jR|%gIv}Rl z&-RQK>78qlefEl9S$x$VC<__dRj4^}Z3*M4g#lbe1bYJ$hAn=3xoc?Y4}eo^hu?RH zo|a#MpW_Nh8G|}EqDYMMvVAX(1T~@o6j+_DYN&7;|Hr|eY3ru7l{Ewg;ql-Q%s4X-{H_Snvch5GQwny?P{;6bZ7qPN1~@_WIpb6lril|L`kTaz<6{%%z^5pFZc>lOu|bk(i> zc%e40wV=|C{Mj!r*t7c2&rN9E7oMq^sS3NiSmn%0hqR)BL3q2uReUZ1vhP=qhRKX6 zug!>vtV=&PUZmz%zD2`ctR&2}W~GCy@o#hC6c3w#+aS_=+LFFAapuk?Ny8Qm6V6nc#2J+WuU+C52hT2<$*UId*;t7>K#z_qQ%`HR8zt z(Xlwu&Rt2@y&T?s06Of>MWNT>=EFoR;&yAgX}2Xy-`lhZ9fm0J7XjtqejK^pD+eM_ zUf&s`OyhS%-5gSWnx|IV%P#hrm(Ek-P+u4Ldp(UG9U5D;Zg*Vz>N-8Q#@qBgPZ@Go zxj~`T&Y2NHmW^T|5k6Oa<&A7^Zg8(gaMNtc>?2=j^XBCvQv84GrlqI(d_zWFU;A54 z7D1*~GiGQvwDR+An&cZ7GxUWDKjY9UBZ^-g=xRF33Q@#$oE9N@} z0sDX)Xiiep{8GFQCO`kS{-B$Bxev*fHFGsjdD8j@>wjh?n z?sgF$L^7g7A80%2H)5b~0gQ7$zO>XO!`*Cxx5TZKP@yX>=P$xbHuC_kOo{Ud89Xkx zTSsEyw-P_Gy1&tvvj0-)77vvco@=F}`)&rpI-<{4FELB-5sWrR3<9x`NIF0b7aXd^ zE+`+hcyN(!?>~qYWVV$w?)wO$1G_S|mZ@5DY%XoFGh!F}g`eb--XlJS0MbFAv)6}8 z6*DqbYt5fmEyK!*^D-sYuz?cl2?I|NH6!SOWd9V$9J?$>P1hc&X?4f?^)73oj>5Cb z^r%dYu3#?4XevSTNtOWW;Z8e)L2Yd=q0JR2Vf)$jbxqcHQ_DOwfSyivBH59#m=9GHkiLuoK0NGh+z1GDoDHf z`3+u^e8>G*;-Nk{i)=D5LaAN2u`$UWC=fsAdT~ap6i0$`eSPs-H%j$|{EYQ_$kZ-3 zfbX+@G2h7!h-Hh9{^FUS+Iqgz#Aw#xaJYtsPi)XHKg359%i*PBXd_p`gL-IpH9tja zZ&Cf(^($IkWqN^|hGPK-tT3S~{Z3+gFOj_xXO;YyTGw$!lQ?bie{95V72nXjH2Cx` zTa3Isos^Dd>H@@bzm*J27a-s(eu@vjx`WYITYxJ~nZkWcq6k59G*@+kxaj_ukgZcn zbGvnbL!`g~qaYak%>CXKi!l4EBWFEBQ$Qc?c}CJZlu9HZDy1N*=5*CQasQ_dwO%&R z59G{~O;+IiLPEJ2D=CldYZQ%BvnNWoe+~p)RUS4P-z<5ja&}zS8r_C(-=Z{r)dTS{FZ=btop{I^}7!lB#p!Oj``gMKWKsn zTHFETwgSoXq#e=%2bt0UC3>a7Pz={e`adV@6pH(v04ifO-?kw~qzUr5+}tQSx|Htp2=1R3A3s6jY#RScDo|RF9P{brlT_&JWam!-gcR&Gfl4!np?YOz zE;{4>AH<&=MtW>BW6rF6+%;d~9_zwWhb_j1ffW6pr3SO(+%XPXo<2AHR&9o7iQK<; zY?;aF3Srh*{_SUp5ZEguT2OYx5ih4+|JxQBy&SP^Zc4aDD#~RF!&HU~#J}rzCoR~~ z+%+F)Y%acVfV;r6RFFg}lS{X0{c`0n{WMW03F{kr-j$JKTBN?qjuiEJ$oOe5W!ym! z8jE*7Y>K23MevJQcSxN9-uE$?=-S;6tuiqiY%bSoj)L;E1f5JpXxLIXsy7e!Hx^O+ zRqh`OCFs6PD0i-hs{$>tuWVsj%4jvOz-GY)alIeel*>y{J<@`@pM$_p`U73OvtOwW zXi_}}f0jU|Czgty52J?y52*xa#5IAo0MmIm<^e^+Nu!qpeO-oq!OwgtKRNJgC)rO2 zZfFKCEBLapdUtIn;?HK@OFUwwow4;bDxwspfOojs;%9lg&B<|`&i6LyWdh;1p}Oi=|3UCf3hb*Ri&Ka zKXDo5;YE}7V#dJfOB@r7F;IE z`5y&;x-k9!>&vq@#*e)yku>J0^tX8Y~E0r;oU zedoN0qt3_t1;9luTZC9kaB;2+q3otz;krC5ZCjkX-Ms$y>9q0k*FN$qq&p0x8Ty<=Z?FKTPj8UY4ffk?a7{f|J8 z8@Vv|xAD{{a-y5TocEUkg~08->Xm;CuAR&Gv6sW&OSiMfx-^eN`>7x0%?pwd)w+y8 zr9|sN@VKy4<d&r0>KDWW5pCD$qJB@^-*c&Z! zX6H`@TdF_LxGzbn1UR>p++oQh@qS!#=JNhu%2_JX(F7vYg*=Rg02{Am*{<| zCyLDngT(~F&u#+G;!7=ETnM=W=;SQ{(2spGOD|@C}|9dXd5WTlp<-9?k0(5dWl(mKl;-D6z@(> zOQ_J>!8a_9>3lfE9lN_?0-HzP|M?%$zzzOauJ&)$9p`X|2$7tEFU8*-q9XMW_jE^5 zI=L=aVdvnCI<<2kkSPaUHh_^se8tU>0>&V#8=rRQENZXBv~lR&xzsLxiryD{9I=u~ z8_m!}GyaR#j%Xc1&y{87XF#gix&F(9KHbZksVjl7;@LIIN=%H7YdX&mHbDOX1d`f@(Pe*t$~kh*%LY4&JmfwEWT*MTY{=^`*ss-qPkx=0g5l~x zjus4XYh}rwPnVnf4Hg}<#evR-QyUpzHv{@z`9B9?ZFtR&s$Ji*(FQ1lmxN(+KL zWyB~v?0!E%!!bC!D!9fG!U1axIn5vM8Pw1<adTNh+7MLMeQC&Cuxvlkk{>P zavyc4&Xaey6WcHW%HB=6XP!)CWc|$*j$MXxL_BbbCN|cV_1g0ni4P~Tc4DHy>_Bu?j!e6T;4yn$x9^z*&Bz$j z3DRWK78VkXm1$iJPgUAsI;*>fpxw)r_6Jy+4(xG>Xuswgm4m}2u@v7Nc6T`M*;|xZ zKy_T!nL%D(E{+!F5+7pHoIZD=dx?A*M?A=e zN5j)wOSg1I)vQ3F!z$CCxLUQuOx3jH`5Y+M2^*+4(Y}}K(iwlOLaGo>v#W}FX)C$+ zsvHbBaBE#&M92#eg93Em2lZps;9JZ6rY;IWv+dB=&%O}}AT!=Up2`tk2W|K+$g>J^YKpXaJc^UaHf(aIuan`rNt; z2R+5Kkic*DgjUvcJg{WgDpm`>UEtYI%$#sreOvoPA|yvB1(4+86QxMDk=1|KVXuBSa?z-6v{?DWpHNO@4OvyUJ~G(vWX_O0VDf)pO_Z0q z#gPte_!p)7S1a$fv8a+iPXFHE4Ppq}ZM#lr@ntrwA>f03BEdZ~N`Uw(#qks6r-S1t zu8=$07H8fgBne4s2Kdogr7P&o{^E~eDO!riA6vuF4P%0U_y2zu>aTi+q$L3`Q)Of-<^65hM?K=I({>c360mY{gYj^^}3~z9_XaGml5hn>|$#E_tg*fk%$wgdK zg)h%<_uSn+Fb)(yQWM|)W+54I+wT${DmL#TB~5d;?~-?M5#s@nUiT6D85qj)4K}{3 zVBUI*0MBV)upz(eBS!!4{&yL5Rcp|1D4xsKcn&jXE}Jco!K3uZSh;fg&moDggQc=s znY$}hRsKY$f|@tpuPG5__Qv1`oVYJR|<_^Esfk z`Y*LBmuxo_?Hb<0a%;kADNJgsY@e>~@)XG&<^9!?MuyN57_v-}#K;NW#{jhKGw}kr zGltgdYk5HU?+&nlb{ADY7J1%Rten2`gd)xMelnSlEhGfgD2F(V`tIZbyPdoho3!G| zx(BY_y$5`~%(b_n?bMRMiUFh5#rb8W@=vA)Jup4Y+?hvCa0Mu@2D@xe2!F zp={Jw-5uL6(!yZwLiUq%saGDjMMZHG4DJyOl0=Nyf{K^knMIInGpFM@ag<~3sRp=L zw`)f3d-Ck*pX7|fd?wCF(cj+cxK`##uAhDw`N5I3)yR-K8O;-O;iO^Yc`72-vt1Ia zE)5Rr7>we?e=OJU^F7LjJmCJ?rv#3Vy!njB-ndZ9i+EX@06~6@D_e73lx>i4+qJr@S!@Lb*A%)DbM0DcEEQ%+8zj4sy*AI z3pKgxhe)6y74iz?9`#9{_dF}p8U3{j5&~W|z8Sh}VEbTvyzD&JYqKmEIQQ~k+vlB; zyFC{AU7tsg3i(ZFOaxnExn!~1%c^)w#;Q{aJ%J`v7vA|}|1zlCg&t7K9*2}93A~Yu zCA4hF+}^VSrH-T*RE-Ned^w*_bRv&yagj~_Zj3F{J@_S6B%SJvEua9)mBPdgWwZ16 z$}5Ub2M>zAsh2!FuY+U5V1HJZPfZEvq`EPEkU`l04E0O_UswB;3lW&b_>7?1d}Ziw zNF=vf@!jPeEeM8pmy=SC;xIa#@umD4w7MO}LWyi=Lwvns_tZ~oHuMxCcGxam_BAw6 z-0|0x4<>c;lq#_ea^R~*{z6n0h@Z5^=&g@$*pj7kk`1a&PTA~|!!df)Y;%|K%BM)c z><(A6LzH}$c<)gTBp7o4d7M6S%+vPspMI?0;47Y5`}mbQ=}@X2C0X23kR+7{;R z8W}U(JYaw;bEGU33H9ok=_NAMldIf(*R&LS31E$^iEEHAsHEcBf-uLnNUO&JB zP=O8kOxVx>oKBxmFe)UF*Mz-!)vjOk4>=$MG+HT3YuliO9e5lY!U2 zklF*pkY2hqOpqTfsftHmUdyP?vy|$ST#+v=C*{4jVQ=y|0tWt*slw~%_HT1SV$PZ( z&G4g%HjjAO19GKKD!t1asp@=A2yf$%^vw(L+27>@c%yKwb$b0KX#mlD@=f;_)aiM> zQF06)C*wep;u1j#{%nmqFJ%D{{8ZGKo*}XwE761wM0B;K?*`ZoKknFtfyV5Dxl{y* zee#U%9JP*3SV>^G;IA?{@*SI|g0Ng|Z0s!&6vgIX8DL^g)Sbq%vj=`p!^n(Vm;aZL zGZmmC@vOPLbQs_-1p>(+YPxi`J9W?xXY?s`Oio0agS{j~_??YSf8XJ70ykG!2>6q0s|I%v zZBWH85I;(?NOEpd48tSD#@tg)ibJ$eu%Hr7_2T@mb1$p&Lo^m}4`7@m3H>Pmx#aYN zZSf(k>muaH75@LuX}(J|ILU?-IlUHZ4!_DS*L9$%|CT-46dafO$~sN($a?vR<+NA2e`7@ekdes3fV2`iZeHOw;oG`4$ZE6oZ!;oJDQEr}cXMU!!abdWgzW z{oqP*3FT34Wxl$-=79NN6bhX6WmW5v5l7I!f%`RA2Biy<&hL5Ue)-K;SIxW5NkpGIw&>fIE zW+-CHMCDBWr~Dgw>3>6)64ad{fHRna?lIKaT;9|PEDTvcUDus zz&_LLIJwLfN*QX$(LSw3Ug`h-xcZ>6w_WcTn(z9m?iZc^Y>IZk{_u&u(7^;S&0G?z zw#xT5x$JxhaW8JrY!ei`L|?UhG9{BMQv9)LF3x#p?KULhx$YL({=A^4!1>$NZJP6# zWjhoR3#}N3rqh^e|Gt(N(Veoiq7@&INze51V-|5vMF1fiWjAPk?`@`D-voe;u2H`r{QzC-VAi-dFF zgP}rvxwDm11!kAv1llUjYckZdpMn-lACp96MV2vj=p@II1>pz&<|GH3r|I_PX*Ji{ zsf-(6=~K95vu7)_2WqzD_VmX!#ZV&H2!>WD+-_xs;{ABw7Gy}DnI9Kd=zS=SN6P)x z`K>fR8D{SCQyLf4M1Q-^iiKx;jY=sxz1`@mR?`Ar7_5*?Z9C{I|2Gj9A;>0L=2y5y zAG`7+@s6}TuQTfK9&7j2+;jNcMGpAwq`et^^s;lJymTWf=n^nXHg+>JrI|^DKbGZ% z?av2TUsHZO9~fn(C+&WyznU;!=jntRDhB{Z*Qr!}6tb(D_5um6O)K>9L@8U}DmjK@ zpAS`Z2&|ueZ-mk$AlyobYZz8y@u5^BF05VZfQZ-f%Jz$`fjHJ#|9g*^@OSLH>MkIq*w-`ok*0bNy^JnJjIf{ zy@4z^CH1F{La-_;fgqh^gPDdF@d7F-v;nbtoy;Z_}gC$@sR z59#UtX^Gn#If(RaLy9|j{9SOta92@(G&(k}-KxI?bEqTKpwNfsTeB*NFA=)zZs$OD z_SYS!@0k-_w&FVi|Iv7B;44zmKws)giJt-<6s>p>IPVlD^w9*tl4B^;{QSu(w4-#~ z;Krb@hDdup3jCZ6Pl^~ect}{^`F|IAW-?!n^uI41)>F~eFu8dOC}X~Ej{YDh5MlgR zOQk%Gy61F*xsrWlaC!A{l_uEKDR{nh1+N1IN1(tlJ3^@>sOfRd?b5I_cZqGEjc#a| z0(Ysprn3JG8E@xB*(-0t)^6o?^;fE9n2Xloe=Mgh2s~*HuT%b_Oy!?cq^J2g;YWEw zvCn?SiK)9c^WbdVEBdbaf@yIZRS6lU+a#rjxEZ)LKqiY*f)XvyQ{h>CD52AH%WsG_q-(us z!Y6U$nFb;0MsR=Aj8d+@U!)5TDHpDBFvt)+y&cuO+;+{$A@oLLkES#HMgboi>9ru> zx-Ho=?M0XmhBE8%p27GS+2ZJ1=uct5Dcwj4!V++_{6ub7dwVrH{6hhw6vzGza{n9c zL=+Y*O|Fj|>Ys%fW0b3HI1(KvjsJR2+$HBK@V6riJV<#!(NX=s`pdVIm<+VPi5j_I z#x_Wy51^rA3>(6V3w2LdPtJjD;<&CbpbZ9??k$Q{V&zls8Y}cblA9>C4O+u`OO^j= z*NWNE%PsPpsbfeO$B&50q*#2xT>QGOP*zp~X{NYWX4K2!kGH)l;G%@~dd>b(dC@Dh zYxCyB^Q(ou_~R_$C7iA2$aejg6E-Z*^45@Qo(o8O(2z{1?J

QVdPOP4Qw$!LiJ`c1}GdW_#rsAylGs|l;>u*4gFFc&+*Uvdc~C-Ee9*aqDfgkdDuo%Rs$rT8QadKmr3maqK+8(22f=~lIOC7v-9 z4RfQxM2t9~TKjiR?pUltWcUCayX&ue-X!>7s7IRn{at!o2*Sl8UF6^}iN8+CIgoCSyS@p~_Tb!Zt>aWpsR@heN6wVr{OvJcL?r&&g(4rv@BsAA z3c;PHHV61_;gd79A&3hBG~lJYK2DC(lPQsG?CF~@r#4+<=5ZO%P1$43;`a=y=~KaG z92+8?Y=H-U3HU~*uHAx9RTHe+gG}>qUeiuY%vsIZEP-Bab>dIl*kfG$0&5{dR`a6I8q(zx8C7QSQ zWoTU^d#0%?E8Ay~?N@rYSOTHKv!0wm*?gv%#YWr4Ek(|c%BqtXroaT2 zkD{wnksV1){dCA41xftxFg>4N(ah=`mra%26={zi9%!;J6#ikMuVy45fNvpAX#QP2N|Wj`-jK0*l7gac}6@F*8e?aXyiWX+;bu9&y#Smx14WK z_P7(7k)#LXjt>{5TE=lBJ;8Nw*06_r#JgUwa?w95p`zh}U{Q`GcBg&)07W=%Lf}j` zIYvOs)t(M3b^;4}-H)vQyjTL5Ku5)#>RsAQxc?GQ_h*q@zE7XEm`RIVSl9eyLJHFIbAWTKGoq@JZ8&SU^*Dt;hWLA3E zW@d_M`8#B$5iE0|9920lWmEJN9giVfVo_H4xa|)Mx(w?w2B64Npt|y81IM@ySTYvqSYD$hvmOMOw;7WWChY#1Xe)2%j^F^hYxdax0__8u6rEJN`Rv#0qG}`2Ux`A%>L*n znj*lYjr1)lE0Q5tq|#o$v~Y&t=0W;`_@UCmPE0-+|TTMx*4`FOs&V`Y{qq z0o`B4=OF2Notq7*wY+3vL2m6QLovU)6QyUgdJ+s%&@7t$#0EhUM?~#))3^y}OuayEoNd|z?etU$On<#V z(3;Dh$8LLV=;$L0%=9m<=L&h={T)-?;hlYDd1>(g1%DCeyUF3L_1 z0t!Yi)5_ayuIe2GBIdQP`2kP04+l^GG{&nDigpe z(W3WM*S{carIAP9J z$zc0td1c5-;?0lfymK`wOT!#7t(M23sRfl1dbO_aw6+E*##CXUe04T7aX*)pY>*_a zZDPY(WH$vXza*_tyS?2bo(lLTYv?O#c&`3MbCu1#aH=dxuM$XT+7+_uRM1jZa&EMd zbP&{S00IdQyyW-V!Bt}2-fkuMDNTN9VSXAf8AJw|rt`F2g)jXoiXH^KR?dLm{VYSU zDN@E?4L4Cfl2QVFXP!|v%gf~*OOZ*D9vp`CFI#9T)iLb(@9lOUJf_M(*^H#GVnzuj z4q#?%r!S@PI6C1Q;5U=hNA$T84`X&6DCLZpkg^N2InypS_j*#ovnikmBOl72RcbesHs=uiq zYeauDe0r6yrrlxV5t>oA89J?>#~jgo5PizkNrXTNTTe7c{Qu8Vx=&tPV)#=vuI}H<@4AJT;m2qHYkVS3js;**AezRt z3KBkCaWvY(# z=mML;Zb#drP|}R^11rn|**tU$`B(e6mfm-X=H$fZDCER5`PPf$(ILNU7FnP(_I2`_ z*V$z*TcHILuG3_-*<--79yG%0{tPb{x)s#!ee1~7f^{G3(l>E5 zYuQoA4&2;%6);ulH_H#WC%NGo-kQD#JoX={a+9IWa-RWd+_2ZLLTVF=yV@p-* zEb8z7hIS63lMriZvpjX~&ypaEp|W3lcKI%%aEE-L={;i!4XgBgj^JHOqg1O!T&g-s0a+C3(G1iR_^|QrU}b`~q$-8nMzgdtflD$bmH!6ecf+SO8=BL1hZ)(^3>ys0=wS ze7RTN!Ldh3E<=ZD>B@2#-mj2ycUeEM_J?hhJ#uVj8#?LuUal)I$aAtz)!Fw6F z@^ha0*7M31h$LLEbNs*Tf*D~J&d3X z`e*2qChzQw%5jYK!mNK@HtYJ_DrP1vy+d&&M1tNl9dSbcMGR@|X6WwKOW*>oC~%Qj zpO4`5WX!Lbv6~UYl&Sg~f^GRriX9c!r^)l=o+U$*Q1KK&Se-CwmdV}qqJ$YGppnM$ zyxE`KpU{g@NJg1VzTqQ>5}njvjbt?shxiM86nv3yb>-MKNZn*8sG{qfGXrJ4gw_!Z zvF-*W7v6IV+jeeGhY$CUSU>{1uLKS40M&`MoNN**w=e!q=BCbf4AmEuRnFIA?VDQZ zu3{&;@Zp1=?Y@aD-|thb1zUThTW$5PBy{8Fyf^5b>IoM90ov)A>&^sYFA1UpHNHC(ef#?hk!2n`4p}!9#?gPZ+@Z*p4_LCT869;C*Tb|w)ZaCyN zy*d1GL0?rzW8E_sgHY`5);kSo6`kterW-6XZHcWT>$WfV5Qz4QKo=eYKqe?xL7Z7q zMjza-P?ZAAiDDGi-%Dh{f8C^hdEF;QvG42n!TROQ{S_Cjq;z`N#e!+Sf<>4N#@g4p zHnoj752@`JDY|%%e@540hU#Wpv5!p`c#n}oH`y~VDErk7_oJw+_**u|&aO1G$Fw_P zGrXvwr1sO70Q;e>2zp=5-aedv(bL^YzDn(YbTl|s*pps=M_kf`h4&g~3W7NJbsr^O z)!Z3ST5r^IO{|wFbxK^Km07u+1HSoJzSjQsPxAr&`c_MSoJW^%+Kskww?+3sJKh+Q ziz-*lRG4vdp2NrwC=E*g4rn!@@Gb^yR_woh=3A4ooM;9gZ6OQa?V>SYtF=x4MtiSu z<-z0gIFmdvlr`6)RD;p}rh3Zlio(Y?S}TIOO=&V<^SoN` zWPiZjD%nK`ppVtU?%#byduPCc{=g5MX=)G0kLt;65gEHhZj+LM{J)BRnca4MA0E^< zjba*lh}i9Ks@>dF_tY69-KC+z_OH~ZTzcP}6p#r?Hc&tf(d)z6VsW_G{p@DYWHgPI zV26rJ@;9137J{0I`^E62^qKs3&CmG{3;2I5DACv_dG8ITNX(m4MvFKu-fI&*$q|E0-xwwJ~+ze%OB>K zEnrpqzBVovdRUnbqN z&A_3P%hfc*3dBN+zEgdtBB1}c6t_YWgxv;!NKeP>iuQlf<0RD(G00 zonEhQmR%g-ZM=yEr@Q6Pzd1k>tUmJe-7PP=H&Zh_F39nhUYP-4h;FB)+GPzb-tV#2 zka)X!zsFLuU{^uezgGWn2I( z62H)nWnCl6u*Dg`?IDr9`+$dBEZ||U&a9;IjX1>9r*mikQ{7gtBw;Je-j`vd=(_P- zPYhZ)wj1eQ5Wvyt^FHA=(5ahrga4m_2WR~4?MC_!SDh=}NR+GBvh983xDmL*f9rh8 zAwr)NNr+3rVWU?r33vVT5zSrZP&?LYfcbXC?k{(?&P-J@(w$>EV>afVy|Th)KI4&u zGav6LSkg_*%vg6P-L|>tT|Rv#1!d59<>*D` z+RbCaH1Qrr6*+ClW?#dp)7X{LZbZE1xsukUhXIuRLUCy%8UlZ^yroZ;8y+&HSH@#D1&cxX~yu3Y9Q z#+!eML{-TAzSq+Qv}!SJyoyOVr$X9(3x^hj-`wFqar;9;7~sZ>jSAVr+4k` z!kk1caX)|RA=Ht@HXV`Ja3Nf*2&t7p*Hm^N{`{c`aQH(<8jKYs1Lppcz`%)Ri)F@y zVSAV}>|__QEuz~eUUegDaGKs~#7pdR)hsd8UIp<`ke0*CS$U7y zKI+fN_+G!C0hLSrn-Tb_pH4;4Jh@JLsZ6yHKK^8Sf8>j+sUGs=pOJEBJS}XjQ$%+( z(dWn9+q<%#rvTi;I%=(>S}ie3JAcFmw+KV)@$p?PL3eQ-y!|S--c6_ZSR7Tj>t-SD zGCw9S{qKcM2%ov2qXnej_+Q4oOzN65jQ@<~`{Nev>n+qh4CzHh4h!oydrMFHug z1ZdzR(Yx(u+6y*N6k{ypNmT0rF7Z+YhZ!{isocH+j@+oxkF)w{lRqiF{rDcr9H?P3 zGn~i8J`~1#Mvih{?0-OyqmpRHGR#UWzwT_ViHJ2N=MCO2*9sCtvy(E$&S4J{<3M|D z_0tb=?|p&*@l_;fVc&`qx>$p;uxE-^$(p<~h&P64?Ll@k;sv|;v$PELtQa;?nVWYn z?|gKlO7+laqEt$!$qw4QNEMk+Jx2Wt^pT@^QWPm zEn9I0m!e9XQ;c_=s`ht|R`aLA`nQt1@mJI=LJqbYwrOhH6u4K$fdmzgwJkN?*G~09&7l(CBc~F73)7TNeg@nP$+NqgyW0 z=~0%+t_f6VxCWZ`lcNz)%*!qq#JaF^_JCh=n5Vu@9-rQC$$m;@?x+VJ=TWLF zV0vU(1Z#4iNt@3r2sIHvi44jyZl#+hFGD4_7$+>>ee(s7=OgR1iRp#KyeO@*V%!E> z{2$rK;~Bx)!~kFoX@v_16y~;+lFO{MZ=-!Sll`3rSNV4qjxHS#NXR$dZ9>yj=|8Cs zHT@$#(EbI?F7#EKc#_IPb*1V{JEX(k?tyyUsFL^gGxzroKlfa6?wwSUCUkEHY0@qo zC-Nqv@3ya&A^A~4`{H=yq(YMn;R2TlyUnycy#w>US?*ELZ$4w4I($R4Mv;o%FWN)} zP&>3uob|eMHj|Cy9bTQonl?z^zTr|!*7hFsu8iSjx5j&b9I&Z}3=ZouK_GAZv(oNrb1v3N47*+SK!T>^0AH_`+RMf?g=0r=SR#c6Xp8igu%)j<0#%=HAwlYOS{sg%F5`HM&&=g^j-Y&4-XOFw8m z=tP!ffBW~UM?r9$r_~5W5Mo9d&w>L;&!Zy?BT`Q}N&kT=YDcz|8givX^QEdUBi_fZ z&DtZwB~FMdu+gKdHqfgdD5EGwb0BopqwGyjJ34gcd6?+SV=!Y5cu?oYVzq@@-IB@5 zle)hi{oV#ergNtuy8|cA(d#=IX71dWXcuPJ9^*mtxK~=|3@tRtQTuP0J`VeRj7g<` zyb0aOTCm|$VTdTd4oSA_nXvL|8PH@*-RO-FXk=cH#oXFs@D@Hnr)9r4`q$0f!-jm9 zp7k@3IFm!)p4bWZrFYer|8RwJ_OBi~a$xf4h$17tsr(9|7mSI%TArXfyhS*`=(s(s z9jlY@QF^9`BP9_c>L>Qw=`8TJZTCf)V8xXLeDQ6TOBV85eBpM-AmDSOiyPg4r%L;D ziyjwOrA&%mH`&#VZ20DoisMHHjs6Z>a<5j#`CmwNH79Uq^Bl~rhX^~;6#hfE!(+MQ z{1L)9rq@Z2R>;88O3zA_t`Lc%FJr2q@aC0pZ!a)x1c^S!9fi2S48+HT%5%`1s`jHe zX4pH4PQ_EkvgSh;j!^-O1PPosDvu2Cg;1jRVi1U({}-^>o)K{X3(aXF1Y{lBg*I-XSI1D_bpKU$X z8c21i)ClXz7hJanAoCl;NEHFvlc9IWq*-dsV-35mJupvB$K}&~>ca-AG1Q+i24j|- zevZpS*n`^q1xD_i%_K|2rrMZv%ZB%74LC_7caL~vnY(ZUwt=9KYM)-gMz@Lfmp<6&f64*S zKMqdq$aClk_f?EU4dpKn=11GC$BoNoMCLe}p+~f+96kkfz|ur0jobLp=i$FlBytqH z4FD*=PX}hp3m;hOa`$a^`v*Ck*Y1b_;D`9F9gizjV`p_cC3x|k(V;v!V0jm6zJLLf zsfe-asqT~6)2nghMAUieIiF~Zm;8&H!x_;a9xa>;Q9?ckN$+C8e1uV?`jmj6;Eb*H z`4fRKi8`v2K=3b0N#Czn&6qkG4+!V1tgu!QKy;HcL*8t}wIaSE>~>Z_WOA#cIWKXIMqjt6rl^kXjVhZbus|u1-gow z6_3l-qX0DSlPZlX_pI&)@=}+qCJFb>!9<>VJ6n-B`^JBp%MTZoY?

ezG)WNrBMw zwhbZwklhlWkry+bG(wbW6dv|pN<}~AOmBDY23>~#R{=xMt{fkJ3)zF0d67)Iez^&q zR3W_PIcad1)?6(ES7 zkGJ4MS{tA4P!3DmV=NH~i9KXRYmW9WytB?_ ztZXpgwDTkCXLoo5Ns-TZbBiQEi{xz@S(flWh6ESB?N;2W(^p7>!GL>?QS=Y$7@bl+ zSYp4A0n@SDob&t~ZSzJM0f)h>J*+D*aRmR`gzDeM16W^tJaB5nLkjLMxu(+#G5)5L zv*uA=bvadm6b9p`&N0!I+vg|G9tx4;BMT-&4UcDRJP$Mv)WryWIIF){W_V$HTkIVQ zXOzV(IG0;(Jp`;W@zn}X466D)PB66FrlX%B9)X)xZM$yOqZ_*x0dfSrH>MXZTa?Vn zAv{d6;G?8MUbs4vK4*p%`8TQ1y*H)uKK4tS5^G+JO6z&=u%E-5t;iCc~2F z?q6>9{J!{la_*ukaw22NBGp*KGlsQsw_FJr1H3QiI7ae>pTNR!rzIReF%7(B`U;<;DNF>S>ng^D8`2OVmKfZmvLw$>fE_RxsRUHk9o7`{bRlQn^LRcWq z?(y&oNnW){$%PwN@_dUartw0wDqZg!!Zr$`6j;-RK4oLEVBr()TPimx$Z*Fq%V8O> zzrzIpBcE1eFf0zVngyEhnYqZ{4yyu#O>qH-2$wCn`{W0;47h~UgWG5%gCF|-qUqb1 z$vM|Nv(6~U=7AW}B8F+N7TGvhkcW{4b<>P?;PMBx07AjQ+O;)R%uZ;_OGa z4Y5JL-z#|P%$6hH8C9qpvi_CE z5yzr*j#B$C!1wf;cGL#>5FICZcU#k>#=>$+(a}g7d(CRw*)wj01(WEJ+S^xYKuD6` zmY2%;H#RwALH+B?uI9J<_}a{mcJ>YrRR>%EFnp8Yyd?nElTL#s zM?0$bjo)tazH*Ab&72%~;cV2r{|x)odeG6_c4`_@_r?ym$ z`lFIbZ~Uz1_A2c6m##cwEQ5Zv-|dSVMZ`a7%+4VkO+fTv#89>o3yH^vqeMXCmBM@2c|U=jEBQf^A!89k2hB0W>Ta+9t~m6qA2QQZtgzIOED82k09 zNw<#(dPq8(kgCB?JWjPDNa@u5x;b;(WM32xsT#_;FS}+NZ&5}qm|VIxq(tk*P_a*a z=m3K-ZqL$$h)uV=!v#J_a1IJRhM|EuQcIp^W~@BI2kTb6r{Cj+sZu`byp5cqBpGkw zUU|P(H8Vw*)yY}L?!RVm1cvTgc;|dp@^as+;U}QY+VlCV#W*VL=&N0&djBOvM)Yyl zw2?Sy?Ovx&@%1loGYc(Yv|@GGA&PG&JsfJFxnoKL^oOW(7Zn(?;a=vhD>;01TfAhH67q0TrccKyZ-fg z7_k*ucHNn!GJPL=4a^yYt-jZCr%&mQf!D|Id zDn}^abi9jyW=(GpycA&u6go8WX2IidBd1n<*ZwJpH-#3IyBJM*nv2|MGD`)n+psf&whO9QqJ>i z5%*}Me9R2HU3+cNzI?&wgRR)Pq#}13pt9u@Q6fOxxep^-F!;03YU?!;hQt08wG>FS zzG|7tDr=?!KA)?bS<|x=>k7qA&@dO6!{TkHDgIl9;1;-3>Tbn8C zHM@Z?ZF$-0r+xo^RgNA_qpCUe7poyO2R|+cSn)hioga_w4B^cXJ?1CPtoLd%9=~K!2u3WYv%_zH zFl8k?FlIZ_)_4H)yJNaWAA?EbT!~{NQRAjY6~hSwSX6RscRO(AMy&`!=p7`6<>$To z)g`Um=`ukQTwMqywk|1Dh$o|apbHL!OV>+BIVG-)XGCB#uNQk9sB!e>gLKKR56;OQ z34t|1KwMWCC_-=IVt4>ZU_O<;=kTX4&12gMfz?mC zEQd%|YP##H7&|4+o+OGmMaHQ!dX+eB(uF>em4Vzw@TQ4*Z4*;wyg3YFNfU)IF z34x75-~vKxa94hGcg=O@7kBL!?vrOIcfLX6f7^Nh$V>F68-1sh zn_t$4e78*Nh5%pNz%ZZ&)N7933YfdcV9_ztn90Y-zx>UABj3)nH3op0Ty@Y)2fe`w z5IhEqYIwcQqSIe2n0SYEfKB}_(*uEk1k26H>275tLBXMn;3;@1q?pga47;Af3-OF1 zx!)+~40^sNR8Y;75V&0kfRo~9%nwAMQ2BI(z`6>C#{${c&?*IK4PPR?xUL>!KPbkq zQnznd524$0cRUF{K?pb_7pCD+0o^EbGNl{s2refDym$Pav-z8afcPYyh*PG(DSlL# z^&k<`YTxo4`_ir?n`(PCoQG!+FQQFj3Q<@$O$bQwBaGhT<j zsmS5Vr>`($o=oRVd@|bU7N@N-0E~%@NC3ApVD+4?c!=bB2oKn!zE2qs0CM=l7Q-@| zwq^)ObYs*sgf07-qXSavE^{^wn%0FV6Syyx1O`e(mbdt&XY<~7-%y1(wr zd>s(@AmTq@Mu#N^z?Mb)zwBkV#sF|~K6-IDpPUXyo&Q32J6y~iig~`3Y8l9~=_o46R?m%z#5jqfYae z>9@~iw0!w7KGr{3-Ln%IS(D=HYxU%_n5 z&!1R6ufMZ{AJ0obVit(RL zC$WNtJVw6biW>bN?w6G{DA!bj+tvDA=Us_In^gQeoo@5vqha(7`;RWqeD7;(80c3%pWOKqpv^L_^FKPzGD=7V#7vX&_FM5eh#lbCP42A0Yc>|p8JMl=j5=OFba6BeEtna zW^?P__g%-iW<#%eS@aqju$o7ER@$_zU-KDx*(wA&FF*tX{{0s|`f~;;p9B1JgsZd3 z>Ed5bx+4d*w>E8!0pRf4!|BP_CufhsdNLT!I(Sla7t@|Ss)J_$eR6A|m<`(q1QZ?| zJ(xJxpIoA*0_VXKBT$Fo%jLf)D`4-n zg0|;5LK$+lm0eCDcmZE&MeCl4ttAwiGj_a1%TT#-G)H#gcUTj+cNuEg_K!iJ?T7N7 z=#=yiyU3Uym*?zJAiz_2+SD$qau5dV=*XpSNdvak3&8h*6hsUF1M3c;K@k%G{^|br z=z=fl1@&bqepb2!j_CSLx9y|4Sf8VGwIGXU+CMWT1U3f&5X}F|1IX>?dH`rNJ_8>$ z6w8}WFbcSejH`p_u_A7s8motEJT9ubFY>ltaXFr0pSg=v`|9fRZDiel7j3$~&%^^j z&-KOgS(k|a^Um+)gT-us&FG{%I8(3P=2zbu1Avhczxno?>6@SZ@uWG`ITg$BaQI~2 znVocrs0e`RrmGme&0x*8tqTH$xNy)V{QucHMi_$vhP@ItFdnve=2Ov z?d!;DA=H_NL4ch#-h$siXfFk|U3}qe^Q}MtR0(|YVfGR6$&o>o0hLz=)c2UQK4BB@ zJbzbG^nG5+>2fyQ_fx{E)91nfu%?$0F2cuT?xwNMP5#_v(=gCyYfmdD=Wjw_0}v3$ zE^hYiijU%?d_$x0IS0;g_pb&3y<&oAbBDr9+Iw-m_ssOrAn;+t|KVh?c(eC*zHJfz zFZ(iEV*sFW3yc7>=AT>zprrr&v%e#HVxJWO8uAiZF;@nt&{==35TGRBvPY4(k0|?# z`@Fk6Tm`3R{q|0cZZ7Op!d$>66BUPMY z-W@RfKVx%#w5tiI1bDA1=#;t8am$-FJFia&1O!$`1ZUu+QLl9`jA+7)GAM9Vcf;y2 za8uAZ;;2WAuKoK^ck$i$7`olM--z4pt7%(DC~NyZHwLjC`By*B+7ReRMU&1zdqnUz&rx3x@)E>mVw3MojCATa~| z{~f>n_((Ar1n}@MT?g)SzkVn8y7oA4%G9x>KqNX(Zc0q6~ z&0T;1WmBiE`Z1c!Q=8d-eG<*kC#~tPU1A0c0JMUo-rM;55SIcV_c!5RpB-?+r-0xC za|jtOEhN$DbomMErt!Vhq=q8F^mwH3k39~Mr%SFX2xETVzZzA-u z>f4=`o4Ar+VHI%N^Ytvo7SGb( zpQe{T3z7KWLnfZ3#!oL?>+yk)Lm)^4A>PUg49q}7I6UdMHN;z)E+9}?e}51Jxu1o$ zyz!&qkj^v=$(fw#rMV>X;(GV+0W$_>RhQxb?EL(w%BcKXS^NCQl=%;@RvLqmSx`F# zJdTsJueOty&Q$w1r{0e;^|4{C^q?G2PW2atsGW?zBLsc~0{SDt^L4CIx)4ec_2TV$ zoC+B;XOmYMJBG|@1`mcq8u7krFn&4x`^nB)9AGY2e}0rc&mktqfV`CVS%j;8>$W@( zc0980##z(9zCJGh3KRg=Zi2J*n(i6H2!X3WKp#5DjNn-iV{MGMr z{x)gOc>fgPLRT)@n~Se`_CKN=XtVvU?WT{u&AoVCV>?-;xS}juWT?KMZ{oil&TFTi z6k=<=`S!8f6}qm@FW#O^-ej!q2QL}hV;CXuLlC&t3V_4at^Kv5jM$a+U#BN;#{7RH zR3JM6EPu1KeERq4^0o;a6#zffx);oc*P3VWW+sgL>LCpP06+jqL_t(}aZYgHT<^)& zN|XW1#T4wt>uG#5Ox^@>l3Y5PVOZ7E7ySHZ-CplbLCwD42t=Ivne~60pOd)bV0j4{ z23N-8dAzLepA@r3xPKg`=GnGxAI76hquc$dOg&RV{~-B;$5MSLi7|{2_)G}YOmrBA zW_i*~&zZa$D;SNvFcs2hGsdK584~NuR}GX}n1shsXg$js0L-&vGsk*5JjUTZC;(<* z1%?2#GA(Vmupr+yk~{0h83bYUzG(P%wk70`KpDZ;@;xzlk70zs4?#fJatP@@HG@xig&yd4mWELXzsN+< zzw92DIX{8lHGSN@EJZ23jCET~*V3Mxc<=1t8;c6)wXwj7uU$u^+eFn;cK2DStvoj< z_mgkg+p&7&IEphBKu@8p9etLW`$8CpWRb>3RQ3$+S=ihpZU3co5*&xHDIg8|M@@v9~rRcAr;wck=!?i zxj_KJplLVr9~=^9prr{AW)KM!%mg9Mlh`@iQVh~T$`_4nW^phC4k=RtVEBdNYf`9{ z2>&aY85C$xGbfXnkkA|+nc>eZd9Dc&>XOrGeLT77y5*B&xWf?8XY@&Z(OAJ)^uu{S zuA_?jD$5rFza=onZJUv7WA$G4)fT%!{+ z5O?cENo@1YCfU14|IHXFfaJgGs_sHJ$7*Ne?+Aeb0y7=E`uot&n9z11p)(h~KVAiP zLwBq(S-mwG@>bHBo2UQQZ>{;SG3%mE+&l7BK7Pxd1^OBA|3&)4>$j}^xmoKJzTYZvv{!a8xho4ONH^kPxvP9gxOa+-=6() zFTof;R0vS`PD7HG(Gm9TwxeH3o9*vHK7$h$Vfa4%i=Q6%)sYuX2->K`}cz zi~0OXx;-JMKeHd?>M&FYJVpfVDwD!GU(^uk-)Z=UbH)(&g~ z%pN}C2r&8ZYB^3@OMSzM~^!UCz1p(bh-$75S59Pdk z?&Mq^dRvP2j6Y8B_A=YgFO2?WjsIR|{`E0RfHLt9q43UG`n@oKq)+2BjVY5*f_`pd z4bQom0pPqT;Fcy|JX)Q6xgA=c`{iK(j8`r^4x<5B%2+*&2B7g6sa0Um25Y_g{q&GE zMi+uWdeh{2l#wrDRk#=Up9cKT(-6;(CntM3CJEQ~z}i19MMq znTAmTFa^bfOJGEd0kJ^%=RoLt-gu*I+RK62z@zixdIQAIB*W zkJ>zVa~RZ}16BXcbvSt1U?l5f`W<>_d>e-tdik#3 zl|UFl51(w>F(m$T;tZbP82?^g?Bihyr6zXW~f=|?7S(At-wxyGQGa5VUBM7Ebv zyt~KvbRT-mS)O~0TYa#`YVKKdfGY^|cx&T88jo%A>8XAnJkt2lrSPMxH5Mr6j8(-| z7k&)XOC2acO1Y7`8J}vj_*yL*iVOSRK^p1X0RDIRVg&yzLtoC;^ylX<&rVie9v>XX z1^L|yqXOV=wb!+m7O^M5ECqW&GPM8OHWdvTfrt=f;XLL5=Nph`Joe_?tQ&;!0LN#m*#agW z)gw@~J=MV^TG=4z7)F#;09cIS;X**qs{WrJ1X&7yP9i^2abj;94M%&Sl)Q z00=K0+8MeKragXUEB`KazMAz9VYAy0dX09MSMj7@@5kp)Dt2JjKY+}o$w^YVbg3#5 z>bmj(N&>&k9mF}CiGtIjx2*9fF$r`O6TaKSJw;=^dSg5TfmsDW@ssIvtm^bplR90w z{xTit`KyyA+^2uxKi$_Sq|o5<%ai=WJgDzFglJ zKH>kHCwAjt#b?$(+nr@w-)Euk(l!24_|w-B{wV-bCv~F;IFyg>*8W?)+@|*Wu`Xvvj`93Lr{=<%83sqZiq=Zd3q#E-1(QZXpC9i4?0G=>=K9 z7Y>M1$j)LxMudq78@WK>F!NaV8e`O8`aP{Z>)brJ!8XTQhkhp>Fg(i1CS{l!3_Ma8bPqJM# z`relUJc~6d1%Ue~Le%g3@E7N?{kH_bIpM#Jm(=G&1T?K|tMd4JQ3#yJ#DD2I;s5aP z^uMGPcGs8J&W|?Uo=txH_5QCWzXb6+6-EnyJJnp*TiC)6`RTE*L7e@Zd<*%A8`5es zkcE(P69#BKQy=ygUdld6C$Svh<+nYPhZY@X%I?!u4~Zbk6t%*s@+GwrX68II92WI- zVWJ33Fm0lhiS(e(mB_My1=g=={FQ<^A<>rt-dA@gI-K} z`mt?1YZ;Fl9#GH}A07{=`eRjZ0s`r9)#t?hP!5@QD8Lj&3xhMV%B|tBq|thMMAtet z>G7qj=lvcYV8k37jx3xBPv%ln);FA4c=OU98y~zk5EurSzEVei2>Ug=pV_8z<{Y6v zBzJHC7{NH_?v?k+@MG6M`S8Ur8bkd+rX3;Mq2bN!pS5kDh?36|fPz5U#F~etP3_nC zF@|y!h-42(qA|<`0!!zSeGVfi>=FKd`P+Zmoy%))@FjOn0kCxZ_VjFR^>A&n_B{Q5 zwV4Oi-0S-J(%Dve{Z2*(3+*w?0RoT+#7T&;y_T5`W=pkZDe-2UA!EDMH?l%OgtK^6F9V4PYGsm+o50Tba9Eh@O`LE+r zP)XnF-770ms=w)zX8pG_FwL6PR-K8fANHFs^|8D6(7+qM6T$#{3?l?a2z(v{27EC^ z8%e+;1QM2hn=ws(1kYt$m6c$mjgxp~zZs|1hNd-HyN2Pon%1t~j^CLt1n!&y;AmrVvNl<=*5Y}_0p(YgqMc`KE$y72FMW|gvYG5r0Whb2 z0*RD%H3)|}6`{770Y(@u376@6Lui1`JRU-We9ao1gn&b6#Qv4^wo(A(r3XRS5EzZW zZxq5x;wTB{?MGrUp$JSqT0|pARo}AyaWBq~oS0dq)L4n2udF9A2Uw1#pqWMHmd(J1 zMhd1*C@aMQ}`Ki0B3*Vj*TPz<t`A)a-|2W0=ea%I1pKV)k7s{MDCZ`VIM_8oMOlESOjbQpGvYS@9R1S7 zkiIG{#vAf!LGZ^qi_Pd8S0>4Fx%^M;q0gn6d=gvFcb9`s&|7c$RXUg`0tBQ;f-rsF|#-dUd3J%>sZggY6(_>$23J zhSn_XHokQecuoNo+`xiU!VJ6wL#`q3HKtDIr-#z@Bm3S3qR4}9X?pSpzR zQ=Yz!M0cDi&W5|<7%lm4;-6<_PzuN|i@Ye`OUd~V5ZtE#cZaXd(m`|-ZLAsW#Q7ft z-1EsFvttv%z69PdiYAHViRIzlIKR^;@n?8U&+AVo(Ev#2e_;WMMyJ`3}Up+rwUWv^1BtQR4T-X0^VR?U^ ziaz^TWyidWLjYoC(#( ze8~A(>b)IP$c-RvN&(gmTRA8}#LgjY0@`X!9++xB2q`Spe276G$F%(UEen~(h+tUj zNZ;NMLu(D9KISJzAs6r3o=ND8g!Edt2eV_(wpQaUcy02t$E#&NnBiBuR$$t@vcUcK zIsk6Cqhj>Z9_JXqmR0!;`1cwDW93E&%mo5ujTMr~G@NJFax(d1duj4lUo;#?z`_qK z?PZVkeFm6?-KI}mCV}P?0C&$$rn9DIH;+$NCNJKc#FDC6-obArB&~+FrmS${816m< zbRm6Ue@1{e*|Z;8`rGf0Dt4~#*)sBZ`b2FVnRED^&X8CBJx-`_i}Zjp(**w%2laSE^Bb^y^G4 zycrxCnoPr&8ykPl@uw(6_&=z7hq1KidVe;6#v5-91n!&y;Fs~V_>15C;^fW$`rY38 z+pXo}vrNxsjBISI>}Ae1bGG!+vc@nk2tXu=Gt(lFh1^PDhFufn^#kt!2?^FJ!Ox`} zU=H813(;9B5E-TX0uYKIaBK&nUN`oa49{G53F{zE`BJ1{Sf>IXwQQDZ=cdlJnp)G)~prh!Ecrnr1m$jcxy zj@&bb5dtFwMhIL40<(BSnm`itQkHycL2B?FLA*?yFY@9axz~G{Y(reWJH@5BfS#hS z{PRfXe@d^D9K$_JNEvIBk6+nWb=FFl>tU z4t-cZ#xEN`4!*N&*^4Jer|qgGO<|9gQyBE|=KlN_51|T`2AAOgr_OiuI9cx$ARZdS z2!T&P;La%kQkU~60Zu3XbrMgEE&Sr|{vkrw@?Jd7$3tTm-}==tz7hl?_)`EtFi|ij zL?{lDLY&RBnfHg7`WASQpX+sJVo1u2fBZ8{g_7f3rrli2n!eQt)AW{PYKW~pS&uK| zC!cJgCldFhvR4I$Tb$Fnb_GBazOqMg3JyNC-pupEF+4U#_Z$K>KJr5;=3vbt&VTg< z@ife)rvz&s!@ zON4_<%He2m3;cpcLt$n#(Z5Fgul~_Oi}}fdg7$&D@-#&CG_wdL+$6i4tOoHFKAjHk z*}`7E9OD~q76R3*x`6i1QG91Q-PFmQbPjyjtODQ%OPzPzR47`E9uG&iUZJQj1wby? zG0SpNN`YjT0st?qS9d-dAX+sVPf>6In*lzjVLXi$JjWPD2wVgLcToXwk@}7IUJ(LN z3b`L{A@mC?(a2is%<0HM4qDdpsOBN9TAQ4NbPt4;(r)G*kf6Zv3Dc*uEHb1ZfXtb6 zy^X2gS~Lya8H`V!=ELGC&Ch#S49xHf&K3|L;0Sm5I3Y_KA+lTX>!_ufgR6@fSq#$B z@@bnjK>@%ZM_U(`)1-TloIUUPV|@D{Fztgw(J&|ouYRzfe##TE*t!1U{;P)SA9)D| zoR?tdmYY~Bh?!VR=h3%1qv85{1JLEr&+Le+VkEeZQl+rvWy9b%s5AAe~`F||~ z2tR0PXz_@7ZdSRzNI+Hp%1=!(iu3V4*4GBQ?7WtLX3#vzQqi8)cXx8WmHW_e{g{9+ zAHAw~{Xk^~5EajDnIV{oSwu8AzCR9r;5B3RXG5U4wzD+KUM80J1MZ_hI9mWN{XT8~ z@_6a^aDDl;X1ViWQ~=!hCcOR%7!C+KkLHD}GYTvR;OYD@2jqoGC?u91m8DPbLNZ-r z?u3c3Uqhl2V7Beokf$K@rp;ZB0w5lTL*NP1KIgmpxvReK^TSb$hR1WhsNhp2@TFkt z#6c2o2g_M?@_Ik(6=TuB#{s6uQ3!rW7h%ZH(uQZ7z^2^t!9zTT+Yf;n3n`{pU=`yy zj7H2Fydunr%MnlR8F=A#^QC{A^^A4@d~E1@6abiTj{n+hT$_<|&GGQMQ-hH)S2HJp zy|u~Nxi?TB8tc;VA0Hec@WT+$!wQclF&%$N%fAmnozQN}_5^ihy1Z7$I|YDIhCY_^ z-i=orc3}aE51>q44NfH}od(}gx#JmDGqykc#C?G|@ZFa%OEY^~n-cU-k|EE;iGy=! zJd{2pF2#D86=nIYg@qF3(*%cOS}`W!X4?$>+8J!_0?LC zTnIph*Fi8HEhmS8V&}zRq&%;qedtPpA1?iTuRy|Ym4qju0RKfBnT;nS5FE<#{keFg9|M_*G^wj2C^AqaN{91Exa%`Eq-8@^7B3SK2*^SmBj6#OoyJm~2dZU+R4-%C+uX{I(M!2aRY@#JM_rQLIC0w|d#f&Q?2T(kb@ zramO@+WvqZJX`%X&+D&sEx4n)Zv6~=@=!qh2H5=R&C%rB@EOv^1vq{mC!BUZNY)tU1%aA# zT(ZRcKb@Z~{W_TKH1BGwOX&58@d+%Dqq*nGPH_;*vKA2 z6h_t!w(W!x=c2J^hD^{0J@b%4)j2=2767%I$3p|eLu&L}QdeC}|AYVgabLz@uN(Yw zJ}B`amu3ObERcc@vu$o$o4<10aF6G%8j9WqmmWm9aGLj@#2)}0XErtS0ZkaZ>PN=s zuN?xjG!uV_UYg{wQx4De6aeMmFZ7c#o#MPEozljhJsgC6>HJq-rJXD|)Hu%|M`xt< zMS}fSG=C;_SmqJ_DFtkOEp}e_<{5MwKRipTk?pct8|qR#L+&x@CU8cLpP&d}0buFv z+r(grpGH%mD%bA8F+LWsc|P=PUm$sOQz$+WG0)^5)*&$>G7T;*5R{-?{Nu zj0%7oZ-To~fR20-b_j=%;|LxqrbjFkGHS``uV*ku6%xZ&!Wl$3oVe_OFzfw!-&u%0 z!Vg&nhfbL*y9>DYCIUG0k>AVE5ODK>pkS&*VFFQ?Ps-%Er_UUFeyDcEJ$)1P8NYck z2xzO>LtAQF3=77l6XaT=l6?2hC8Q6vnS#b})G`yya1QzUqT@k_03e`fM*vaj2Mv(1 z@{58%AAhs)qP~5t-!Y!@0YXYhbNTxrcu1qmWo=YAsFlLK^9qG$2i&?81-o~{ujpv! zM>>9Wt?o}e&oa!bE)V%Vy}^%Ia*XR|A}kNc)<{+ym8O?AXaAweL$aZ5QAZ#Kr6U> z@Z5NMIS8b!qJydF&e*7&?HwJR?&eLq>&G#3K6!bx^X=EewCCmCf194VHwu8Im2;D- z2NAZCL?Xj)lASis%V6y`0sgL(S&qz>gTyND|U{3MPykoh}!m%H{Wj z{uzANkY^gJZP?JRG{*gW@+g>-X(0ko-cWPlQJe*~bk_dmo7oyT-;E|UGwI7YS1b&H zA4qa2W`TG%T^U#Ssdwe4zWBk5vm5gC%}sHU-x?^3>{B^5m=a&fj^pnGT;t?(I{gWAuvY>7;<&w z$N19+I}We>IezP=UZoyo#iw!!r;%XRqPgG5INOSFBb#L(zJRkn_Zn*Xt^57FOHH$W zG+`z*!#H@g366xFjimVd==xrL-T6Q$FTL4%}!s3HMebHg)LY?fEpsp9!curx^6OJ$Yl1}+%{I|0FZhMH! z0w7}|#-BCYFQxzxN`YU>fV{JEe)y++UJWqd0{c{P7s{{JC}BSUSYPKrS)}8ZuN_tP_{|8zBI3|-re_r z5sZ(?Dgh)J@QM5}j1ahP2vlF|eQt=a`fncZ8+KmFbF=U4?_Ow} zzN8O#T2eGN8D{Wp*JpO0qifeKg(2i~=4ULR%TwB%cX*FO=y5yy;opKme!x!}&(UEy zo387_JwN=m@yZB+ABMoaPyqb%$=1;~tE#j;nQ#5u$O7Jlw~D83>ra*W-3JDI8+a85R3emmY74tFSfF+Q2TdF%fU=;9Bo8rOM+pr^e!s*6k z-Re6-{ziD9ON2D(=~~6y~Urt9qp))MA%QHqtV{PPviBH zAE(V4%Z)eV#BV1SsT-N`$D85T%UMK**{`TwR)4zL>?HW=Y z%K*-^+1k?CPKM~uC(BgCOaEhjaE7A-;1iwUb1y+kkUKN3rKKnVg3;a{H6i6^k5?!A zxmK2Wxeu~~SeGpmknY$oiJtV`V%zhg?e(^#097e#f_!XaWvzsK=gj{gz?J2bik%}u zZ$e;l*W} zVO$zvp-g$Y6#|mDW_oX6lX?$(X013Sq-s!k{AdNLU2BZUqT24bTV#y7L^32kk z;D_0J9|(BHYAz50)&GLaG5`Ib^uBNXuGa@Wqu|(y5cFk)q@cla)@ye6y9_vEF)~RL zsD^g9jDP}#Qs6Ot?hZZIA`GoAuLS>6GBmnQ`L~s^4qIA&H;(uL=+_i!OFRyK>?ntJ zu$^C;sXY6Czh{Rg4x*bW|93gpUg#vg(esv$);557QEsjGp|4W~`ZbmPhhD4KG-`_% z4NVO}-Cob`I+>W+$&|l7X14z)+biMMnEf}pD<9wXq!dr#>yplY9hU6%bpP&;(P_$$ zW+Bm>B-YqUp9d@OqezjaI01KCa~mVlV;CWDQ3%`%1;8)kk?|jw|M6gZbK~XlS;)}w z(#q2L%9Bu_osbC20m23rp1CM|#``}Cfrjj9bC;rlgghHT$f6lYKly|bjCfs3`o|Cy z#7MZsICaN>h&hCq-y#~$F&VU@uflk&C2cKYBPzLed|b%*I4|=(Xz?cY0Y~5vv*UER zVq8*|s$c5JI-sW!j+0ZWkE|t&u&?eqD&GcMadhBbFe#qKP1A&vF)SJa`cxy%1pda% znEAIYmiE@}jtbH`|0P42v!{P}MOl8NZ(zD@#kHB)Hx?qUF-1)FgAT@1jj0S4V~P+a zNS8_SaQ#68X{`LUKtNCH_?)u0mgOW)*Q@;26R+jd3w>$TLq!wR4SiqB&!hG50>O2z ztb}gCQGGmCrU&3eH+0-6?{fkin)?Wvp7#p0-f=`I%J&X)l#)CbPojC4;@{7C|9H=( zA)sA~=R^0BK8?mFq_Bij>-wcTFG%?3-JkGUyjt!TQyO@OHuDZKI(p%Ll1Hu@^sVY+ zNz|lG_3tQ_LSVPD-45Q2vYnv+B(}t?0MM58nT9k&9rq5p%Qz*QZP-2}{hb_{nL0{21zkR~|))o*@vnAj14e=_;S-~GeW&Goe} z!$|LDaGYj9F4D3{5$LXd>Ji3n{5e0kUW~uw?WW&|KQ;I=4X6z=1cQS`fd@k0ZDsv)ha)S7QZES zUIKr>N7&ohXi5`8e}M{!%}@m?42@ZR>5*bBz6PzY$##@H~yYmBJ=c$l=d>PmoV zp{l{AXP=eOtPV_e2@y%_(++D7F(D3C0PZU$Bnjz<97;Bmg(7TM)K2F7b?0)u3?$C|K8lju8|0u+Y5XZt#z2y;nTK(IYyX?rT=kh}- z-iL2w%Ph5Og63hA|0Vz^BjCoHnmxWlO*g-yrd_m83Gj*0zxQclGhL@Jw`A^R6abdW zO$Ara8#u#z=2x)Gze#Z%crJa}a#l-vSl;0^(cQ}i#XU!y=a?SkNxHG^| z5d%kG(*^lMrZtma6hG4JvQNVd%A^K!)hJS~dxSPxfXy)>PK^3X7^x5Gm1vXFbD6G&)=YMs!~ zI!j+2vi0)uXBGl!m-pIBTct12&LhC>(BE6zy?^)Yi!PSdo)~C5L`ZiBLuMO3+Rz%` z@qmNs8w<@OQ+otQ1b*Q7z(MnXd&PXKUj4`qq%ia=K8LzM91>ItCw!=JyW5_PNZkg>xK*bbija>@lqBQ4SxDK*#5-?!v$VH-ufvB z=+R3@xr6br8M@uXuOcrVoMm^pPd$0puiak-z}Cs>$z*A7eX{aA}oI@g}@jt0)dXK0>qv~dm(zWp>Jw^blVmPQ&h+^BZ{z|(+dU66^c`F zo`ir`f(#!;6T4bV01*BlW+ll+569Ui;PBY2L>ea9Y4ox4H{voDooSou96wT)D_z#?zBe>~@3ZA_I)hX;x?!Adr4QNS zFJ!m+6B_$fiCy8m8nayY8f%1&bH2Ro8mU0o!-lRM)V}hofkSM~n zk7y;sJst6S_fQqdYYVgAP$uSGU}i}JLWGfg69on(3dD~BWA(IK0L5-I0$0n zV7Mo(eS_6VAF5fxbU1ykk5%J!Gm?XY;x4Dv9}Q9W+{d79@DOk4a3vbeca0rw>;iNc z+H5?caj$ayJY!M%s?rNwjAp*fP6NE8@2xgo<2?K665@{HQ)lC>Q9`gM=H1sexQ>2> zN2}R-ZI#e@l!N0*dH2U};hn7W=kfph2&rhFWti^W4IlF=dD7^o7~Rxn()nLV_}H-1 zNA+1_)NV1o4;!z~;_Qz@=B@Nu;~719wzZmxrtAV>Oy|64ag+hJ0`wlw3;LJja9Vkv z`_ti#hZR^>@GE2|13D}sCPLRa!&XjJxp}wQ+5{c`3IZ@8H`8hdg@IYuX(Hn(^b#qNA0G0^O(TPO> zoCQXyV&)%U!gLk$RcSx%J(jzS;c^fFIYhS190`)?Pr5n9(d*1^nBlOEQO*1(AMvIH z=tTT-Uu{nrAC|LVM!t%&N88sL#9dvdxuD{Rh=xS}T+tz8Lg^#l~+oVH=eg(Kx&<6Lh z9jbD3{sFp>vZ8mDPP@USdohsy{-Dk3<5}=eCN2K(>bUr=qJMdSu+PJP9;vwOX00`_U09>~Vz_<+H^Wb`c zyQ11!+7D{_-TB$_uLJ4dNy|)U0>uJ!}*(_pQ8-wn%uinEp`hEJK)p5*ye|c7h>@M-9RG0I*7{p zWk`!KYc>wy?$2Gc3XpFP5HG?GLHGA%+sDai6b#!TGEFf+A;DBT2!B7G^byl zV@S+Wn3ceMPyk@IEdg*1<5FhGzBM*-!2n(;Wl6Ln*+8R|W3Q8Du36|~gwqv!=J*rT5c;hX~>LM>=NsC{$1YkFo zw6*Lo{4`int`Jx{T(AD^@eOImfb=|)!qRiX|KZ{3e+g7}*O%AM*G>+OCK3M0lJ^x- z_f`R*!K&fDtu1LYzgjy3L}*>M#^dmPb)>6fxDW(_kP3g-Ptu7)7itz1(SSe+;Rtj` zZWnt0CvNs`OxQH!yTQ{_hCo?-8>3*`wDrssxSg+nV%FT7haLSb;yiZkVfGu9?mq*H zL0BjSR${Vp8YO@b)+tzuc_ZR##$R39n?&l{&aS=q5AFj6+YaNzlmd2~=uQ9&?ymQa zGzSRjtF!(e^x5j)gxTI{=lH!j6!}J9LF6(fF#8xhqnb0#f>|!KKl#;K5@Q%4aA61- za&_dV^|uz6V37-y72O;TB=x{(C0|LCoI?Iy$0u9RH;CG)Hh_d7nL6 zX{Lz-a#hA+B2tG!v?(S+#zRP}+hU>CGWWiv`jiDq1BI}ELGmG*0I7=IrM z1mI5nX(OGBUlufsrH-9vDC}IH9h=g#=l2;->Ioj;W4p9Y`!kVQuxf%-gHgdD1Fw{( zmmGk`aS!MR?C))s02RGUo1CXSto?+C(iW|@fB8EDvch{{SW1(`t~qa(lN|ds z9NEq}`)elXXrVlo0Nt_^{Ab(aFTZ@fIoefw6Vz)QPR|?7tle~yr*HC6@g&Pcx3grF zf(mblR-h5pM?E`pOiz+G?*Qhl?>1OE2VXn}tBXW+6nuw!-QyU;Wgt+!^eDY#Ke%Tv zfW!qfkNXYjn8}(0!9uEG1F)a0Hkczou=5#HNs(18-m3x zD5Et8Vx|7X~2x|M9hq9Yn@g)pA228`bqhFGDRptKp=e^(?^=?lPq~C!WHJ`EDn%3;VIM9JjY~w)AbOz z4rB0WGgWz6(mix|_46@qa0h$v)|UzX7^U*Y8#pJsF+9D20Btt?xj@t=Ihe}66I?MQ z5&DCfr$^d2O*y^ib6@|E?&}QTY7+XnzK`K^tVc*Qhms~NJa8I~8n4#XWS42ThXmNM zu2TrWmHLx)!4CMA)391coNL`Y*aI^tcKZB%PpC5m=9XF15UtFsO^$LNa#Vs@0c8d- zJ$a!aINV&y0|3prEI7GL)q|_~0D%u``A@D{BIo>}hXHg22Ds2+Ib=^$e4}|UV|tD^ zLh~qA_BmV3#Dk@8hjE7{{~($jywmjA0k|b%&3trq$%AM zs(r?A1qduHr8l1+1duP&!p{TMKspKl9{-omUj}O&9B*u4w;xOx6#x&Wsjs)9P!nQc zof0qUknrK)gzNP|9IOQCK8n^ggRGFNJyr>@&gMa!;c7^~UM`&ioa@~w6DtH|GaIoT zg#Rd#70quc00sj1DvsT(bEDPngza@6SD!XpsD_IPzEm^J9pOibqWrH8FrCe`88K7G zH%DmT$Z+Er z-h%-65c-V+V+Bsx3ir1i<=;*u06PP$WGN6uS(-G=Rr6GRi6(pRyW`2jfq*_{d_Ilx zxVE892B))jp5Ppe!nv(EpOT?_4V+mt1U3XI%I7cQ1TZLavL7t~db+)LGTF?UaquBL z3T0(=8HDNC9B0hp5|qK@!eLxIy^XfP?kohSQqBZ#o~3OzQzk|L1VmxB+?DYFe!vVL z)S%xiCbY$?tn2TBz8JsrJdXV+1}r($T()-RsCYMRNLWQ5%O6hY;c+&s*VZOUgm+Z5 zP8af{J+-%O{I)VS@s;h|*0~&i#X=`Hp`Tl&09cKKT}}X-LND31_P>^kChcAUKRG1p zeBM7mhR-nT|0=MpF%$UXMe!OY&a{K&7DBf=oOd}M%=-e$wCIb}_uF*TU*`x%9gqKM zg33Evd$D)CdT$;6eXN~F1;EFkGUiB3D`j7UA7i+GX#KlVB`rgBfMcsXte7*X@u{Od(FE$#B0_UV$S?U zAzccAdcgvAL+b`B9Uxo>L z5+`!q3_7=!mU}rcp*RpI1AI^X&;vMh4ZHrJ&0oX|@AEi0>fU(u8{YrR%W>Lf;CTvk zv+2);WwqnbF18!eEEwp=X}70aQRL(>ekiNlt3P)W$It!7L%2`(H6FlkRR>Stnf)L_ z!L#l2$xn9LmW{@?T}t+1&T4P+8ef-bxQ77Lv5tQM0W*da*+1Q0t0iP5P-JX2r5ynz z3ReSl2s6o^b#LRYnB+vwtSfxFpY3d!8Zk(uj%eswb0K4{L{o~A5rd-kZ|%}>7g0bHDAWAH(Q%kQ&8lDJQN z5I+B(KOU3-ijM>DrXX;C^WEbOnlifI2+>c31 zwUdSuZUVUN%lGB?C^+MJW^R6+ssH~lIa_`e0c7`d<>YK_d2{c@!QuX|e)Fq11bjGQ zQ~*4jw!Yq)206h`2yWK~p9H~0<_`gBhnaNOg&}GubNZdd-Ch}dUNeE^I4x;J$~|I2 zm9jwkgndlQda!@ChA3ge>~3dj2pBgSQqKGrAx3xwj3#2m>~Sd-iXy%dGg$noOe+(aiK*1$H#=OwS zN5K?hmjF!E1n9hn*Q|2jvae5mdVp0Gz#}-jjUS zZ#o!rb>s(rFF`)BaoLstPzW?%FqC$b0P;I+Q2t5}?{U7U0DQ%#&9p)9x5;`3>qQml z9)Bk|$Q)0^k}Ly=@PBl4WcI&2{!dO`9_@VlwIu{T;$ZzCzCo>d$1rTfTyZ^X-7s}0F}cr9 z5k(YkWOjUjQAb6}DS%=733H=-IbC7a7pfN@fGKbCk|wA!8*BB5<3ogT1}@$;hEGF4 z0*XUF002M$Nkll zX$W)>i5oN6;Hb@NmM0emaDMtl7G(5S`#@hy~ec zC*)!)=j%6*7CzQgcPFpYh|(PAgaE~x`kQE!3E!9j%K_FiHGLkzb)iBwq#sa1{>rH= z%21l(n9tj8k6DLeg-FQ_O#eF=o~MFrm~#wUq@wG;xe+AGaH zhdmfmOe!L%ep{)Z{gS)6w0dr44F*98JbIu(e@FowF+tk2%<>}nml7oztSKmnno^}H z1)A!}Q&$Y(TC@RX#X5g|@lj%4wB^;f=p6GEKlR`q4=nX*eg9GHL9IVFMwtB{#x&VA zJ8R^B8s^ZV(|3)Dy9) z6tN4Al%Hr@Ep1FJA6yP2;J-d-anZTlWY!Pz{VT4JUU+Rm{NrlqF| z{MPpGgkR&6!wkPXc%P*wi3R07vogxE4>3JU+%l?e8_Aj;zKG9grS$zf9D>7U@fDQDl)q2D6wXT? zrND01mV*(HgA2awb==RFg&R7T^Zj^8O)?6Uw2CA5*3lCS1XT^aG*WHjr~QV*#Kv|6 zeX%?-KOEcP*PfKGspjFIKcubnJ-#Q<*2eR+XKteL!BamK3)+bu>^Ox7y_%lLa-Vg{0dqCm8e!YMEcQ1aozCL*w1n_@n=65yb=})6e+zGPz_kqOU z=lFRJih(g)00QqJsvu>-f*Kg_k8e&Uhe2iEh1d{q2s~#?3@tEEEAKH*W~Z+!cs0$= zQohggy3mQ-BMN{ZU;<#7UPbleO1PsF#mvr2zOzoZ--jBbb}C(c8DVaoZkfrz$Y6X1$oKfFAHJv0&-PEuo}DmY^3|2~Vrb2- zXv=i_$)DrgvH8x>0~({%)RXrVIAgUw9|Gwg`Vz-t=n-6K=xxVQ=NH>6lb=0ao%|Fo z&ESM1U58)A4>_nky4oH4X7rWt_E%5V$`b9%IA}8oK)Ctm7e{eGcM=+!-8h=={L-K! z?6w)yIQNZ2rA^?@*V!S*$o}e4%)nVzg~oMLa7tF?yXzWg?E76HAjwRer#1rJqCgf+CAf;MsBDgGSz|8sU-_>@$&fW@MPt5ZTbBC^y%r!`IFVj z5~V=)h%Ck1b;0{#ytgB*5E#j30YFF%iyg#kZ90##h|GB~920zjR-TuR>$Czu0E!q4 z-}K!oE@F{9ri0xaT-of!z8SEMiqa@xuEa^ug3u`~3w+e(VZFhT$XidGndIi!u%5=4SnSpYbU&wdPD>0tveKnPXE{-bfq#PZ<2 z(6_#GO^?enf9i=|p=HhwL(2}bxtZ;mce9HP-UYpLyvY`~Jq?6t8zncGILyv;MySYpcDXs;`z>Rc1|XL^WDK@b?a>ZY`;*!dQ1Y&m(T14 zc@kXGap|=I{h_=V@^T=~wEn9hNRS=boAw!jxhVuUO9P~fScI`FkNL@fglUfLo$GpO z!;@!H(X4DP6;bpYCwuO>i2!~bhE;}~P+WjN?cB;3S?o^izF7UjFAEz?^yFl*vhlB4kWw{pbG0hw(yiEuk$x7(i`%61Yl( zw`=Yh!w7*-L7@6kZVzhhfxB=2(x%FbPY^Q z05qjRqi>$it=B^9E?T?7(di=kkVDrose$*xn-Taa%PHt6%s0}hHj+k51H=TfkKTTZFWvaXhX#R%rU0143gC3|EnRVD`xAcgcmF_;gE0>B zD}M}EfB?kS13fZi)-;?+1j@YYBQUH2c&fKn&rGyX2>U~9-Cs7xSYv<4dtg#}+0R2Y zdb!<**`o-U#Dc*L?RJPL!m3`Y(crFTdcn-%a;_sdf_HUIdOw$0nsl+uD}?FuJu&l6 zWv>bh?`I)k*5hr=+?AW)Uu8+eO?%6!?3SL*XxVq; zH}$Om6sa>J%qoQz9!#o#rfuc-S(U3)tiS|@pw|Aws8|k@8H>OfL*p=$;T-I1ooYRJ zl`lWsXDLzibu*Bfn{^M19x$$DNNIQY$2IM2Y)H2si7`Ax2pDqkon<8YNmIx*p`+IP zC!aGw*W10us)rsTv_4Yj0S>m`l`Q-gJa-(&zi=((bK$sLPxfRbd41enhCcRZx%ciI z+)8NjQHa?Qi@hA^o9~Ka_nABWDc{{`G|COUQ-#EmHG5z5y=92Lv`^=bsEecKwqNzq>evb`shG_^V62>uLO#bIx)(&UJ3= zx!#llZM}d0Vt-dZzsXqp)r8AHYufC8<0Frf*AH!6{c}z*zsWk^=(oeprjHr@+1(pd zmv%$#_kv>%SC@{?Gdv!AcyJ1U2jAj1004N*@Iz!A80`-ah}w}$Qv#6WjRu5;@as^n z(3oQSy1*viJp?txr~M6e&Hc5;IB`ShttvOAVN(9y1_*D#Ka3Y=cNinvV5pya4w45k zWW#`J4EY4tV(60Yq3YjH?obRCJrrDWf-qC)lyjsqpNv45&I%s$3P8t1st@F+GhK6R@&^g;`%r0phHy&wr!X=m zDUys|FUZuHT3&VkOcKAbKA$e4c*uWQ6R5_)|U zS33V?&R@QVk|B?9Z!LK{)W6X*v<)m${f6&`IMIy+VAt*jQAPQIpN+S^bdgX`Nxm6o z1itP{07|$Thk3v|(9<5@NM%0bquQ!?-=aB<->~Em4bm>XJwM}B<1L>B0Rw^&?6Qg7 zy-eVprJqCru(TJOmxHVTKU-gptvi;U^^l{9! zc6UxEJ8_@97Qu5PGXpgf5QLsB8G`H^Qoqi!{n)z^6$EExX(bF`Bd#0^@ofBT+e|Tp z-psblG<+53P~s9IgVDu*io>yf4i=F8ez1ODPVi%8nAdfL2eEl`(Aog8d>fAFb?SGX zau%UYrZ(Ux=_fmDjo@N<0|;wft+#vn-uk)_=iI*zCJI_V^D6Q6y{GzQe$09Z7$(P` z-yBc=5Ql#+vPP1lDhh$`leQOfE?+rN5)mZ6*jO(nhu}|X^po_PpKPy9zIe1+=gb;i z!{lhEI|9}#!)M+vFn-fLs~>x}CX9_voGY%x>Eik1ufN==cGuR%<)7oI^mUv+m^qtu zs^!@+JS+&*TK{yOuQDdT%)u!q0fbXY4m)?^By}T1im)Q+YhC&Kupss6+QNBg%68Ui zOFxQ6c|U1u(!YDOoEhoW%z8FwXwWgbh&~#RHDEve`cGcRwH(tgGAZx#a z&@M3TJjibD?e~FCy?6MA?<^$LSZP*R-8(<9H(@{YFypHn6(_R^{mYY|hDLTvIeGZQ z#-lX+?seE zVwRq2mpoIJ4v8^OG9ZwE=*|eLz@F+gt$X6|Q)#zJgugJ05%~WU%1}aKZp2t=(|Pbn zU~ZYoPWBGN%;;NSi6MO)#&jnzrXY}4pv{kG@;`=4LO{O+z$E}pSPtX39q#ez=6Nxi z+P!-vH)anr*bkPHIfe%f0pku%B=~>1wHoCdC2(7B{3JU8*j|qEGto47W(@B`07wkI zwoZF_a=J#@-vrs6!12+VrE_IlR{Q~b-CC0#;&@f^h~9F7w-+aO-@Q34p5jqx7(N7! z&2VotZ>Gq%stH4M2#tceRDDwDx1r%J-Ik~QG4q)(v#?)1yLbOrQS`st1)$ZLB|yh@ zUR!S--g{+W&%p4-&dvW8jE7=qGrYuJn5$Zfk#?M(r=zwdXldI;Hpb0^=sHV~>BjRHVYa_L zKbidJz;!RZWH*xldr^wLI9%P@|C&Fz2OdTRzyt39*INA`JxH)j?!*Iv#uS*5=576$ zkRHXgMiv97S@d|4dl3NlBUHod z+DjMk+X+6O1l?hwaxnWpPdliWb42$jOwv}EGs6EyxFvb#*`{GV76Zq27Kp!w_AI6( z4{6{;cPD5FZPA#_vrq&5lQ!OL|C_?<^6wO4kn zFAf(|@Xh^q3anQF&NV$dh2wx=jDvu=VE-sEKaN5Gl#O5QQsdy7$}+z8?m(c%YKqHJ zjhAtX077F^(D5>^uiUN~r+(Sw%oNAZuAc4TMF&Lbd8h1WB- z3BH6wdu6K>=A$fuc2YCJ_CLUS|e*Bm60dl;2wf)Qq-alUYcX#+?hEOTlx&pxR8Z?m?e@eDCA=DHAthV|&9PMah zu%-K(<=@r-)UDm3XbzvjnxpZJtPNWmRbQ-zdDky*YklrOx#HYvDSqCTUWVuW{p57z zReD@308WpN50CbDe*49VUE%IPJ+8HgqXOVs!TYdZKH#!){L?j?k&O$1BVhgLL|+UE z_UCRIddh=w=z+X|V1-TSSyQ2(Sr>%vFgxTqiUKnYlpV<~h9j4X7*AiYfJDJFKRhFU z7!JDEq+B5EABGq`3InkbbIqe*PJ|a>-3oCwW2$c8LXq&1*M0c<+jXw`IfiH&Bh1?G zAdKRhfq8O|w#0PJBLrOO6BGcA(E-oK^yFM#j;;XkJ@=2{>JR`&{Z+tp>dyo@=D^0L zOYo3=)4By z#_P=Z*Yk zWA2Hihr@aQrhanQ!j#WQpJbt-4U2C~o4KYb7Tg?n4TPUv-q3tQ<}l;@WhStGe|&WG z@|VB;CrbdvFav>60T5uUE?X|LdKf%ZW+Kf~ed&bPqnuvLs0?}48%P&J`KuoJG1E(!SP%0UT;GKM)YbL1ndF(pzzoDZP8(HAGgGd9q0i_SiA$MOIKMyHo*#cd z69NOhG|oz)68y))3_S$Vnt_*qe6J8Z-4Eki5a%;B<3xQDquyt~} zANgi;vidw^=r#gWO?*dh-+DTo|Hua*e;fLosv65gOC@2>mqlabZxiHsOiedd>_E#Ctnx`(mO7 z;Yr=hl=0dtjcyA_e^0lZoB35g%#x)iZ{q$1v$P*)j+|2Q*FeFcJv!s1%`OwF+I~Eq zSdxH2nAq9i467ZK0A{56y8qh%qs!FyEFXRJ=jAsL1llew1mH%=d=@9twrldtKj zFC>vJv$#Qcmw%4*CRpWWXH0fZw|W0LQaM@geitO4Lc3L|}H44)5yS@U|Z14p#} zN&KInA7-l2#>1TK9yAmE&wu+5-wD6hBtzbZoOUmJ6Hik&oqWg4X*pBZo3=&l4UsU5Y4*ttHAzu3!q|Gl(V z`E1B_OEowI)L!l#!w7*zLg2wG0KR)OIe8X^>c-CGTT)IC&GQVzr%7yPeu2_k$Ef^Zx`6S9Z+d09TlT6%<`6JhaE1nPc1haz0*6(15J0)}#NXM0md!dJ! zMJi{lnH+?F1RHa45HoX*|nQ)Lj;xr zK$$U4m?~|8I2RKYLD1g37_MrA{FJHag`oKE=2rXa9$|#qY{%oRrIyFg<+KXa{`DO#kiC(&UeO zQNqmuxW=pT?cImbV0)#*H+&2J&t~8m-%UtGoR)KrR?Xq_`O;^El&_ohAB8X}GakYY zLvK?s0zZ0;UvM8MwJ)-(fpq67nEs<(v)=vHS>3&FVPOCo_~8cx$};W!G)^>47U1g| zpTPNTc4Ejm=YO6tu3USTdnp1em-8C$O0(RlF^A8nHVQ8x))v}JJMHdgx0&?4*WvlJ zHKjo9*0AXIT3E*zKXjWR@Zc2yUw`}c@!rq(&L5pF{Vr0<%jjHpBA7j0K3m!e4tkd5 z0MQRDeU(O}Bp$;RAyCLML=6!>j|V!A*C6@*5QM{140tom>x39734N&0$jWeXgVAg` z5Gh{9w`!}vW>$=7JJy!h3duwC&N-M{SCtOLrjggQHqu$L1w z=T#P9zRv7F>zsMPmhhFKJvnD{oe+we8CN?4EXUHI?MMLkr>P@+^4*gI5u8^6>Z|Yn zczqAj%2!H?C>x;})`4%r3+#q3_`_bdL`>1m_M0?$k!}3G&yEJt=olPDN$Gjdc_01? zzG~`({-wpl+L@wzHN;HX7&5u@EE5H`x$Vw0!Q0LncD^y&-<1Z&l|0XsB|+YyDexC! z@4O2^J7^n9o#OqH)`oA=rgmu9jRN3R6gTd1{V0dYAkU3qguo&p@Zc2y@dSA~`OW0) zAAkOj&v!QV|2)~}mL|KNt*<={4#Y{?K1o=&vWqtu_y=>06!l)QIh&+Zw@J#+& zU1D8x(`~37s(vY}wg~SO1pub2wFyfAWk-}NT%pM7T`=r0NDMwq{w)J|ve}p#W|8_y zhgJLV_z$A{!bm49H6AYFByba!`7l7)G|Hn!9`>y=$3%sCcQhrJZOj*hrErf}J`ulA0 zXE^{NAKm&sv;W^mgCX5%lro>^9(5HemIsDCv#u=WM?02+ET7YsX7yQCJ&PxQ2SpoY z_m_{_^0%$vT|AIBH@5l?8`BPx_n?h;Du90uZHMnGo+JIvOuvZ+ih$QSG>`kbmB5{< z<5<{x0f7gv02o^F{8zvE)nU?($uIuyA0SX@=ODi!4P%%q1cLMsuoLUt^I`~|FiiM@ z5G#l~;_rF=e7NN{Mcv%Jmq|UA!tHbua?XZA1Hx!F-F&Ye0*L`>1h7^4rm9(B&QFo1 zI%(yJoL@cwILB1BPS|O+SpY;=lUbcSkpEQk<9x^PzG<6wKrJ|K1h*+AQ}Vj8%+l zKeRi%B|NMZINs6=AuKe3W3ON%0}RHpTr(FFK$%{ZNiB;ImG^7uBRl^2N>6zm@xw<;?wy z3}9GCeu|fIo-~Dz@nnL8wZ}#jYDt#Dt@+w$eL-29axMMCgZCixHtlh>ZQ2u`TfDA1 zYWw?9-Fwht53dTN0^s4b`Hj}N(Scw&5kgQ^4Fe?tQLsWX0N%pL3iTig_gephPPNcn7 zc}+`Qjx%%0#O+1AYHG7Gn@#>^m^=HdPZb?09R_el4x5%Yx6d_HnTLW;%bhY?dSd3E z4*>PRXwk$Ew_d3Xyu%+KtYGZAe&x}e`$8e&~?&G=V-6IOcD z-xoOChI$xd@L(+Oi~yZS~k^`uW#r+xY?ebk_fRi+>|;$4B7PinmC^ zbtR7TvIxlaH0PApkRHQTAYe32;@x9;KVx?&`^Am9a@eeRc9EgAhwg0bm{O%1X|$6El6I_1gLLmCr8tc;nR}kcO)< zm-aROgu+u9tHvk6pFa*UxcCj!1$ z5TE4dtsQm}tDEc1!S1joJUEB2&rAH9C;;q=P?G|ov*>N-1KJxG_-oumvNCpnLtNt~ zw2ZRGIXb6JYv+u#pL7ZQNl>b8hKI{MM`&Ve`>C}@u6w?Rj+guSU5Pu7p}hL+N%b+- z8_mfuMGt>N)rJ&9L%*o{&8913{J9VWmh4k?evm=-GQH_}x@s)B&u!`cJjctUjY$^W zZh`t3K7hce0QdkP1W=Pili9TUUJQbTC-?|;$_N&*dyqeYD^AIw=@L+P5+nM=e4%h>_ArvSZB)_ z8+*XqS|MkdeQ8ReA?r$!GQ;`>uf!=<&9GpuUdFtFNhQy`uGfrE5>?MDeHDY#W=5WF zt(L&AAM+~C5t22Kn;2a{ODNbSseiYeD}H=@{6`@$3uk?AHNdy_`#7A#YWk$UfDwNc z$v_{sX1KQH%)J=Bn{cr8qm>$O_$&z2*i3s9PV@ug67GE*6M$zKpI^o~89bFCJM{%% zjL!C%&-&1K(+@%bED4YZ{Yu6YoJ+6Gg6vt^;W)g&QIy}chC1W0Or~=Wf%z(-eJ40S z`2R&5?4eQnp+jb>ze>A5=F~Pcg7QJo2g~~`ES-Zi>%BweQUN!(tK&F=Cc8R!QYj3-Hmmm{B(n| zGO1_$1P<5D!d+9fZ^vr`-T?7M3iEB?`0E@mgW@Rxo=-S;KU&@2TVBgOWB3pPqXOVV zh>Yjg3xNSsB6f%w1u2q zRT~(@H6v~JgK!?s5sEj_6z?8PvL%ukfOoUh|GG<1&5UGV=U#z;D-5UYWRAb}-u!4I zA8}5@eV78U03b{Yh`Qqst8r);=2{v9fYJMRU#wRf^0dx8Fqfkg@UEu)ueQxQe8Y3| z3V~9}8pbhL@U83INxR0)NXcW;OYjEr&Xr8TmTe9y=m%c%?r- zs~rGriPUBr`}fLW$K(^O1oI>47ZX|s*<$yXVF6j+$>SLrJlxV@US zYvVJMhG{E3E1`aV;3H%9t%HCuM(AUi^C;7ia1CLfF!J@z>g3-%S($vj-Efq9;VL+c z(qIhpfdEi~X3Y*~{Ja}N4bDG%vYuh%KWl22(6Sx=gsW@?)&gJRkt>~PuaK^!SMUN z=G>36Be#|!-810heF9qM+M)w!5dOWahLOR9lakc|>SEb85;x z0mFJL2&VQ(d0^BoFo#cSX41vLH0J6w*hL%Cdbe2*j8J=4TYE=8S`FKIPv`QLLw~bL^%HJMea3KhQz4qu9E5_zY ze)LaF>W6K0p_h+0-#Q5N_J&Jmk7|G8HTcTmK6%=I4D*LTfi*uJykK1l@&KH$30I<( zHS6rYe&ZF3g68&_cCVyx_w6*a?a z-$tuEBit6=2fX@GZIqKv+iJEyaJ0mcbpV)B0Jxup4MnT=?CmJOr<1%_L9=Q1Hn}tS za7c!8`9rhbt2*CgWzvtZp)o$qGJvz?SI0+3QN1K=ot*A1ZygW9{|NSvG}5R5_y|zO ztm}iobQJbPVo8V*(L<1#mlq?RjHwp<`iiu$HzRs|w!1!>IwOkAsRl_v`}xIb);jX8Ht-12)V*h+NhgWD@g%dc@%1@5%Xiu= zt}oAo{>fIPnF~Ljtnc}zBlrZ}DVl@F@!iy~d;o;tn;Gtw0zdxd#owX&(<2Gv2R}ma zN8c=?fG=6oPy&M>Q*33&@Hr3|6#yTB()#l2jNYx&^No$&7z#cQ{{C-4;ZH(|R$`{UlVQA- z$6_q@AsPF~%f_tFf&hfcw7j(AA&8jp)l3Pjg(+uhev-Hg!d{5L-1#sNLrWhzUDTu0 z%`B8P@|Xf-^f-A>Vs`f|YwkJ|k;e*Nr(s@d20Q1N4MH|-DD7BenEhrkJ*f{ShxyxD z6dIHpn7qw61ys0`IA1EoM()E@SoVSc>my5Sjdnl(+`6cC=xxD@slRh&u?)&?FDPA?6Hq?mDl^?@{88REvbLim}yx`e@pXQ!8s) zIj=GnnGjp6T&B7+#n=2}xMdJXJN9v@WJ_sW!o97?m@pfsOu)@h%4;UdO=!ub8q4+L zWDM5|ff|2-Go0C-FxnoFu&$k02TW-|23xZ8bMYd1#F3#~O&d}WS{XWOY=<_W5ju_6 zZ|2T$eDz)DLeaODZ0AEBn%u8-%Jasdnnz zw$0mGZ?gh?38?$y!#Lf^A>A2{{V*6TCJ-A+6hg1{>?loF3M>RNt!awt_atIh<;l-} z)&UqSCu?>KVBL@+o^klZ1hX-DVt*Kn+%jcCtiuGC+4k=6`c&O?7ogbBH>7#tr3?T& z=gR4rhQG*|Uis04*{i||xpO7uUnNwejx0GidDN$!zsJb_=v&7d76pOADQP1{0A;n2 z*!pk|NcSTw5=?5$rN{6}u3uLtQb6f6|NEWEg)!U|1T;qliQz;W8=E}-?*swCLwt19 z3}*JBE4-7N;Ffyi-Ouo{F}f}Y05ufKKf^|FrkNHq=7gq>GqsHnLmB# zdIvh-8-zB3xh;q}{8X0s=dC_6`&vSNZZBb9zUR&AQXGh#!xN6(a`^X7$bS=JL;=vv z{weCwKe6{NG8Fv$_-s zh2Hfb#!=v>?eJgPUYatgmOQ2poThzCS)Bad&Pk48gutzUz^DNDSTp?BulM(UzIVK~ zw)CHZ?0y~eymNlKv=S`&O%U6^&m(`I9e zWd7~b)d;l_g5xYNw5G1_L<8Lg$BTdM_B^4U*iFoaaGneV_tE-!IgLXsYDPbafBS4X z+w85E@PWaQ|L1r!=Bx~3fsrw@@TY7c@Op4yhACNOIL)-batJA6F#`X*dph}UkMlpw zLt_*$;9FrDx|1!81dE5i{%XBuTrm|Gbj=RdJ(4Vk8!y4Hm;^IeSHbQr8vnHKrt{p7*}!rGrPD_4yFFPXR8%IO<#4Nx|TJ7 zo5pY}AfR2ehFuw$e;Y2F5ddrr_P1ZIXAG{?%pLrsKmG1chm-GKA6LI}@3hYiiRt;R zsNKUZgm&f7t(0@xIQYg+9Dns>b@D&`Y_rY@J7~%manMQG11G^zLJ2S}S?A-=d?7Fk zT6jLKHTfnu^I2#ZVd_5=3(@_a--E32y?q|_2;M!Q%M0(Jyl z^c4osVWc}RI_ziL(_DJbLmdR@I!6Kv?aTs=CP$!FB~oNPI;-*-8T=Hq~Ba11bd|{SnVJ&<}kJeH+g~A2O8m_m^`V7lzeCZ~})i;p_83812_S z<=VC&?@(4a}LVnOz!ajdTaWLll>tO8xUxwue?e;tIVWjL^U!nHGDoJd6a0Ipi z0apOrQwwk8G4ivwpN&VKo}ITkEiEU}Y-MSdrnOuvxXCIYO|<5u(e!)n_Ct#x08K<# zMS!i@h$8*0Ji4s*7`~CooQ8Fkt-f zGCY*E18Xt&HA-*R0d$lwg-UH?g}N^|3f>iK2hzs^IGF_iqyA#F7~R1dfKiue`-#u1 z6b5k7V8XvRA($aX8F?=)D?haW;87Vl;gz0v>_s2J?^^-^>V)<=r6<~l@g2!;1?`M) zjc+f^Z`Iez#c=fm+K{eaNFw2W57i?;eGqRFt3Rd_;j!G-CYJz%;*oFm2nztdUq#x2 zg_st*o~#Z5pr#&Br1Bv_y9crsYl&7vGdXJY66tlUo47ygqv?8XG5W8Ra?}+C3jp;G z*A4m^^`R<|K7;e+ze1Q(ZA86gTw7nzEu4e^!QI`ZP~6?!-HH~c6fI70hvHh?t$2ar+5*Mh z-6<6Jy!@Z%z4v}QUy|S1XV1)DGi&ym0$YqGqf9(edMAJ=wXN&W;bsu8TXQBtF|0snF2bbw@&4^Pj@4! zl@?EWdWjOgKi2f9!5v~7VCR&4=}R$>S25arxe?Rb^tlW&fMXD}wIFmcV3`;u2b5bD zzj{%ur}|iMMZ~OGo9MAx{3$p;;YPV`G6b>U zeO*#Pz-DYO^E`&fAfG!U)jXXC$x84)vgL%1p(ZpJ4m0mJ=K{ZdW?^HjhXZG^g~LF} zrVFuvfA7509gQOwnN(5>+rqBxj15DOma)!airLrqj;bNz6hwpJ@{$aHd^<8eooJV6 z$oy1|NU54J*UmWUPi?7R5i5xg!XJ1a7mW#T=KXEfHt^5-PpTYD+XNn0ayjNn-m=S{ z1SHEqT2r84f*9}<;2oD(BWiyDe}G%A)+oYAecvbS6T<$4T+@x%PLgO;JO0m~A_wKX zjK-?DESWJohNAW1uICV{appdT$8k<>p1NmKvl1HL;$#S}S-A-fh9+*?5k-V%92q=q z&7B3sJdLgl7L;MzKP79v};9C+MmUK(1CR7*Hd^baK{C6c+a zCWUFF=j@QwxNrzbV>rn4nCbZ&1eT(Ij7+$2vmlD6}p|N)Kw3UoujOD)w62 z^IUOSoL(kjlPya&ILP7sVw$%c8^hEH!xn_{k?}x*ptYp*yvNr=eKLVU?1W=u)oXnKWkxW-3eCu8F@Lv-IpP`E}@w^aEoRANaF_?*ia-lq<4g6fC3Im#|BeU!*|xY zyn6&CihAZ`^d4i_3b)3>+0-F+6767256on^D}Eeub|wF~X@5L)aI6P<&; zP&KH%_?D9boBk4GC!x~g>k&G9@^ocF-bgKrdj+#?lUvL7-~qU#Qfc*eir<}=nf~&8 zFb1oS1NYdNRX$WobR`)c=M{(fl$?GTPb@Jtlof{(|6zeJd=qwS>24cKs*=dfxtml( zWaUw{I~;PPto_w5NT>6wE9_pe<6V({Pf^A1`G2$wXaa{wG^??(JvnvSW)>4=R+UIisJnk{P8w+mO1#(wKpUI+bp{CVz1QW9biRU{U8 zGS|=el@QkwVKBcVU_JxcQkajBh(=3WjOLYqe>>5QfmCT|icCqTTvd;{8osn5?YaM| z^iPCUD3*=IM5OEMg6@dmuYW{DY)jqv_TPW-iOpgJi+LE#6fe^JkqOU~x`#axur^+M zy=Nuaz}+B0-8kBt)Vvj`CICL=-UEF~9N)xh<@c*pX&$6E=$n!?#?oDn?~4v5$2J28 zQsCOj+ap|7Y;=qVxG+a@f#UJ;Y)!gTNIvl3h8^_5Ek#`NtOTz<~B5&K>f@!ha1RKa(+?>Fw;@@ucl%zCOn%VPXZp&0(U zaYQQ426*Md#Jplm-MyOa9UG}&r0JDhF>;A2l78$a>FtCoxAKT)f;Cd%&V5*hAQ5`{ z)A=RD_JSw%ITa?+m-L{0tevMsWbZB-no>4#(;-vDDU`y)4OGm`7x)R59LqM^njY8e zUg--vs&MCf=0C9_J-|B-qluYowOu?fTX@EK3Cou1fXPyQlkJ};^@M@nrug>E;UEyt zr>1CJ7}cZ%_=V#4NjDP5Bd^AIXA-+NO9`nYMpL-B=CK0r1t`LVwi)osxKxS^nA523 zI-{A=wH2X1TDa{jG1QTr2Uuryuik}o8F;`KIOmJaV&~O=7x)zCq56_AA{|*u@nZCa zg|2rowA1XPEJZf0NeY&tN}Fn>p9Z+6A9uH3P;_PfAr)aS@We>nJoZ?Opg&X-RU+>T zZJ}}03ghI1wWtq$*w0!jNt7v5zUBg>Goh#n@*}I&jk^^4q8fb>0*_ymc+t7%2KAC1 z2vzbuAzQYDj4$UyHOQg1h^=)b-$ZP`9WEL2$*(I`nZ&`zd2k_#SlZd$D#07-8 z#7|*-??Rh1(O}^=*5Ry?s-l^CvfTesVR1tjp8R-wp+9jm8or2_j~cO&FP3@XbQ02~ z890>|I*i?UEp|dyEzyJVvS@~@*{*vC&%wOs?7>~%^O54Y{;4<271<9} z&&e__98dZA{bAV$vl3Z~s&nw;;1@}XaEsXUkRF$qN2Z3RojoU^&j2xdI+>U7=&=FI z(4ds48qQF7KsD0vW@qfM0Kj_qbJS1 z_*ze!`%bw3%pF=GhxG{;z}Y)XFw|x6{uixL2>VbQn%Ts-adiHJ9n=Q036 z@{pSwK`3V`u?&C2C$5qBX}sm1?)hTs^nRm|Q*IOh(Zm1I0w7O0Lc1x-fHksE>WWXg z;vPX@2ZrkyqnujAhaW!m2gj#$@RC94B&8GsZ6&*jh0)pTt)cPtFOv8w#it<*8-r1z zq;+fygQXjG??%d8V+31#<`c41HZ|z(cOckf)(Y7d>P(DD`F(ia=zka<77I`NPM#yCX^d<#; z2@UE^dZPhx$(r!M+!lVf8KRmOon_z(7u`}DfazHuZH2sLB_U7HRCMB~$SDS>pi8Iw z5o+rY%C3o@qE%X_S=4k}LV?4W@_u<;66y7c7UUNbUH@V6Q#2+wE>UOLtHG>Gm)^-c zYAin%x9_dPlLik0hb$boYLidu!^e%10(f8d7d?X{}^$bcedqmF=jL=*rojopofX_c);MvNK z^iE{r^J&EZ@*ozSB(`Ml$nCJrqE|bGrkpLl^{fpi2!K|g(072A0{my9zcK(&W!FN^ z3p+u=l#{GSBtN2~qR@t>V#LR>ARkTyltWw$UTBy(u6SL%{CJ9HTvnhvscae18xFMg zE^?e^M~Lvh2wyT+ZDcZb@A<1Y0znLJHEE*%>`hMiUiM#&TZtc8#-T)4_sGEPFKI2d zw@Kr_LU}O>iAfv3nT#SrJC(c=jvAByQ5W>I)FRl#rr#RSk1I3EDEuq;E5?nPYh{)y z4|^jbeOun(Drq+jnE^oD?Ven{u%v-cEO8HU81sFB0VBQBNYsDG1|8U2D=M*3U>n40 zQd()Nu<6Y-13MtTNv9C6;?tFj+Mv?`33v-1GJpR$Ol7!bfb+8i*i(r7IB_SJH#qG_ z|BG2>$zG?JBHOv!)_?FRWqJCO58LBchq!Ak{=K>cJfiuJV`nX0iFXI M-JO|pAI zQYP0Cq7;6Ia9H;RTEbi})cY@tk78+T@YUvZ#HhOVg0-SC6)No?eZ(Go?lFOf@QV3C z)fkzI3NLpN%fri>(z$jK7b=f==*s>XF%&M~7DS-`BROf%nKVE}NSQaj<#>K+1LEyv zTry(`j*m}3k5$Fc>XiE1J-N8}UxvR2l0Cgtmy0;egvW5qmkxZWeZ@?$K#;rGLJ_S1 zMk)q z{trZvmsL(WSTvY8s4KEVnADw|!9@utK0@h&D}fUFRYklv8eUGyA8-;7ceBPL1I<20 zA-=q|)v>~>bVf@Cwh%!6@q+n@5zrBrNXu~wvmR=Z(yeJ*+M zW_Oi7fuy>szS~|35Xkf2cz-wNzUh9Sb^INjF-L+U__Jvhj|w4z-i&@sqFq#Dbe||H zx+E8|6E3isoBvOd)(q=!#*f@q{{Szc#wpRz&i706_ z8gm`&JnrGqh?{UGg-_;H92EN_I4>EJi5EFEq2L18BksSM+12yQRBX%rJ5_|Jvt>Nz z*+#-{qC!c*0sc8`@xj=enYz+Qx&nTU(vg;g0s)6V8+Wil1%9)Q5jQkum)*xLbmnO~ zWLbjH3$ZjABX%3T&SRsZfK43ArYq(O4P0aCMi? z#M^2d)MuS-ohNi2c;e)jiB}4h*ms&zFTb=>Fgk7+T|`k`e4$0{oQ<&kFg8b!^NSiW zEXSRzv`NCHs1ikjNpjs1xFyE^Pv%X3xIV9nG$>M#{#buSodAy2$f2+Mn1GID!{8Y| zU9)AdFJ&kIv5Q8>`aIuglFZ{3r;|`9gRS--}%Lx4Bhb;&c2XbsF>F zDlBYS)JZh{HqclZ7%GcZoiLrfc2@NF@58ao?&vf(zifq2#^;$ zKHj0SB;SN1i1R0W$AR{bknb~@2FwBNZZ6Od;4aB=VH}jXC19}BbI)~BH#JOx zV@eux$C8xfJP?cznOf}$G9nS50dqgEQjA=sxSJD&lx=VI3P!sqbshTOuTrgU5%6FO4Pj;7whVo}@ z-&*R0OHUnmgWSihwW#@{6U2|W;{}GqNVdNP;f>9Wo!t3Pg&!fiC+3Klb#b^Dpef=KWF87^WxNFiOq%+c@I{X^bnrWLRkI@rS#va z6pF2{0+%5N92l^%JGsQ~@q)#- zLL2wnF`rbJwY4$}?T5k)FQjM0!y10;n{d2jg)waZHD>}F1M6`8IDv{o{nkmIMhZJM z4ad=3=vZ->D;k-0vZ<^QS_TF!#|;9LqgIOdaGA)WI9k0v2V=fcJ#F}WYtv!;vv6OudJg$fiJoRr zfH#0AbD`ps`x5#6&-gMCDKF9QC`usb@j~FwT!^U_OxfPq%i)K#w;QiwDhjg)C@b&WwV}M}8oOgkxM{+rq6f%IB z5Ah{rJxKW`WhVl6`eIF&cgww?{59$=tpM4s*8xP8Bqag-E=*VyHq`T1rNSgd?cE!; z&$3Ot8!QN@z>rwSPYU{VU(pplqrv%?`M4wEBPQdh;^?kh>)kD}h_{)*jO3naxp|kS z+KpzMj@|3XEsmxhKzmMnn{lEo%`0Ro{J!vp{G*2wN9Q-&Jj{Smkb#a`ef9%bxUZ2! zT5vKkp8_5))JPd3cc|MO;sJkv>FVO{#v=D+%%cb$)6Zv~Ci-F8pWO^h#*3&>kaF|d zSA2!rVdIsR-4x9rQQzwuKZw7nB_W(zI5IoglQ(|M`whgPbPLWYQumBW`!(-RC3@n2 z@VuBtWrn6KCQ~9~leM7;#O)^1EaVD1sSxT?(&d!kf36i$ZO0D2=hgVsGPvdUg$^Ei z8v+=R^{XWb6Bp)GNo5*QJ6>W1l$KX}^ow%5#8%X)<4J7M%S@z(vZqY=+32bWCK$A= zHO`k+x3Fi?Yw1*zQIq>=KH-j+vSjeYs3z0tB<6k^%}b^UsQ|3)i?l20vY& zn3-b)S>OwqmEL>Xr4)2Qew`g88RQPN9oh($8@Bfj`7;b zk!X14xa?UkzaPx@c^$GAf*)h>!N?z%7oPf!IWC>AH2d=d@dmxgg`j{9$i|4cF5qAG z79m#Y*V%r1EcSPFTVmu1P71a1SN*~+Dp)gj6@e()a>XGDx^b$KgJYWciqs8;jJNOa z^iAt)w|{uN>36KQxXlvak+7|5(At(&JIA;-+ z2S!C3Qko&p@p7>%wKE|=GW8}(PyY<$qHQL2c5a0d5<*OB8CLAPo{;o?LhiRIn;bDz z@{pir!NcEHWFJJ%EKNDrbiB)*SKK{Adh(qo`|+eH(dJJzuPDi$4a2$Ubg>Y|DCHY+ z`8YY#>72xuJh2|Y+x0m3V{P7}j5iB)Mz`w5BFTEqFt;*@)2&=OES+V-nJtnlB|RT} zr5ZXtO(g_>Y%Ft4%r5`wrN@!|yc-qWpAa>|BUn(5fq$McFpI&z^wS=0sFC4)@5QPO;I>vaJ+NSsT-sOqO}o4Xn(BHNIgRg{Z#G2^(HliKhB_BQux}q}a49DMWa)Db ze9ay;{E&RhOrX|4d56Vyb&U_=Wu}RaHM0*jAO(_9Jn>>oE;So@i8SM6hI^6K@TAJZ zn}@6GO@t$mB7QlOH4EQ*XZA)o^;qdQRq;LHOX26)w=&MwU%?e|gPE648rvuU5vo-$x=WALxh4r*sn^fGzQFI|GZr5Q^o?ne2}1K6$tF?6Wktc7{%x{Rnb&8A#*igabpN>lCvopY0Y;yWd(a%2V;3mk;j60H$!*`wyC$q!Ym z(+U!mKgvGQj#_fsIMGC7?&gyM<_{Gd+1Qd8w^Fm`$jsRvdj|+~Qa{$PlphebilX zmGQ`z3XHxhCl#02EHDJyT3#^o3zOS#(!^hLZBS5XF%lJ^C9a>^XIJAle{`EMtH@6^ zJW=-AepYj+C^4<_&WoR(q9hCIh^Tx+o8ohkJ^>CAfP(2@M(~Pt5}n1`g5OQrJiPQ` zy2YxGanr%N+PE%jOF*X}zmf+EzlG!q?m`ee;3@Nhd+ejmWkRu6TA-|1q{*vun1IZzLcEiYaV$WqLpn9_iB z_qHLHb4HbUm3sO5JMYenHOvA|epYXzS&$iq=_VE&Xb^#Z>M|UY9-fcvKU@=7E0n#a zX!wPC;{q9%^Azso+QvC#AL>D7@BrOybD^=TGUwYw*vPUr6HOLD(F<=mZ7;<-f+#`> z!0koj-e{M+r9$~FKtR7wbk(J#!*beSW4qHWet_BO*K$_Vd~#;FH&QHB>X@h&8wlYT zd{)6r*zll$Q*#6J(F}K=o37*Ph6!#I^-sIn54_36u`BmC+9nrObT{JB$ zDQ40HBX+8`3)El{>v}1q#}^l_wnq&i+fkm?iRae0uHg;bCEmEUB_v5vDZJ7?rtr7}CIC|x&`)bco~#ZA$`rTEbstfqXzI6OSA!nHuWFClWkHi$>}deKF(nP@h}D=pxvvc`UrTgH)9fm}StP$Lix$iVT2; z@Yl6IKpjI0E$q(f*PtgW+R0AKz}f6^W+oS>q8w#V1e2#g(@k%#j@Xbd(>GBe#OtoD zf%)!mF|#?yB<5ecv9Q{z<7we;A;{vF@;r>LQK1Fa2BDWBg0`h|12T}1KF4(leccKq zfbhk}9j4#+ih^bwIu*EoUF2?aA*uAr@tMjlujLOnfOw|L5__odD;=BiJ1j~7mP}6$OLqA&f11}J;(`Jl$?y98 z3lG4je?N~-1;C=8B(g{vjO20~kAmdom? zk40|%VZ~&o^}c6An5O_j=^BA3*Ms#LO!_v`?gFz;bmp;dks&-FxfeO-e#)8pnF}83 z4bulrevyR-;KyD!{_vSL5{ig88d-1MB-^qhT&;s7R2=rbGOKt0yB8z1=70M6IRRGH z^Wd3V3uuj#qw|f+3NJ`TaMQOMO8O9Om2szt72jU|MGhCt`M1~?*o4Jf*4N>=DECWl zarm8snzbQ7$!27lUuq6?P!3r z42*21-t_3!Z9RWCR<$Xr{k+JGm2rv9Eqb;Xu&8{zEwL>c|B87@v}98(=5sr|M1c>G z-Z4!9z)vE%Ck{kf1(Cm1-PNM#MsMsx_(AbdnmS!@@kvX5Eeqpl zD)p|L@O7Lf0JqQIecJ}qT`R$n53`f5`1GJ3|JsNS-Xx^Ku$V6%v=xKHgFI_o$Z?o0 zz~j4-CG3CX2fdSei(>DfxAr;i3B++T)>$xB3)nlNA060pwpHb7#ruy8L@M3Wx1)Ht z{$C9^+b5o@N7omd_T0-erqE>3&iIljM5~MrR&*W8+y-~!RWnrQ`z;_)awXi~YV{v` zcyD?t0r!YzzA1LIrX&+WhGh90%C#sef(P%LKWsLi9#7UaCM1*9zP$R7rmao2pl7!L z5TeOeeoQ|P|F13gU@fwMu2lwF=85UUy2a}v5FZLipAs7=(yyN09_rNwiYby|HioKq za~$KqEtTLf!05{nwjQ{;)kh^mkEmKR6rQHS$m7ongzByDN1_ZC*+9X*%@@Bb+f%9qTss zwGnr%ISo6Eon~{#I0F5;O&54LssQbX8TlnY%%~+bo}|G#Y3G|k0krfCH6Os>C;Dx( z2cO1lLE+p^XvzM#asEr9$g^&I^Zv_PR>#S_;|K!r7eRegoAzU~;kSo#$Aj+_-TnW& zezRsskLWCIDNXCn0YYy7{<%^eY^t#o>Aqzk&nqb&fiB?LI0UO50?a!hiZr804m{Di z3BeBXh86Fgo4@_Giv1ol?70hpSsm0tVfDN1KCzKpvb*E;q_1Gy#fd|>ei`vccA8<-~ z#AzmbL*9`wgX}Q%v9v|Bp->J+pmGMmaRFUgm=57$F2wdGb*q*8z1*?Hk(CAj^%|ZX zmKVhl`?|0z1+WBVn{^Vuxt_}e_VANA4traNy7;%w+{fOv#Fqdx1 z#G*_TSto(UK{Fm-UGSkJGWCBJDgI3FolNJf(j^eL6nJ~|{R4S(7DY@shCAxgu#^r& zyeCzU)TDGLG+(MUf_J;x-=v3wbml42m8XX!#6pZpjQ1mw-F_S?(B?&{Je|5UDk`v? zM)>$v9bEp2TZEoS={~(ud_l=uJ=RY>kK^nAi^^u%nE4Yliu*qzVI;qwqpBa1ZLpuC z_#cZ*g|L6TRBHX03Y2|8YU!xe%mMo>l7b{!<$7!cEh4Q`%i$m}e$e%sos#@#rBOR6t$@NqEdxP&t*kVH+764NVJAN?OZ4T=b6Yn4acWulu|-V(%PD4_Ai{7M$SQ( zT1{5C$_aWAIea7SrwRe|H;uxZ!e6s!xB`sug&<&D$6QT7bpR`xP>a|d+ymPG-H~TV zN+vhhOZfroaOe4@rNzm#=+UDv^a=BTZS-8974#YZ0yFkB0*H*jB#BE>C{g4!XrH%J z=Ni$d6ybA_DEiHUeO6cL#QxK#`au0i$73fGVBPy&p&iGZ;x*lAuPW-F(+mM@BSp2; zB2$Y8a>v@Di7B(2i|yogc_TrQO0|3z7ADHc79j7l zZR6Tt5NxX5d)3DXfGuP_1+tE6kiKl807?K0UgZ!VIN-<-d{jZ- znPXR3iy)1oGOuFA`Yks;$%QYmT3f>|_YV%F6|{*2trUDY)8TyGEf2`$zGCA>eN@Ds zs0LHMO>PT@soiMUmOyvqhu=zUl!qS$x?p?dayirjE_TaszSc1Kb^ z>g1~M@nXpT`rX@+Vh{f7ihRno(H+;{eGs>+-vS%Pel_@uVGg570Vyt(u{pk}Li&uo zSZ=f)+uS~jN%ypd2Y-RyiT^r}RvXyb2ty!w>uz;6RsaG_w;hKf2;cije?D37%DDIr zK22Y?&bk)2bUj!W%ZoiZ*tnJZOXFk?J@`3=2e{g5zL@*o?ISWGDkAjL`6IM==(+H^ z`vh?-4-X{F0u|?;FSL|ZC$)W@PX!O2(~38oq9a4=-QyhH$b+K=QO zaM;eu3R$g@QoQjPgnPD|?LPN6*`Bbig!ezgWNt@v-j{;=--WzaYy6o8%4Hw=* z(sO>q`}H@1&4>AVQpxqRc3W;u{zj{O-EKrj z_gOV}Etdv!1>KHmb&2&`g{{`vy!JfFgDta91Y!eIx8^{--)Ydw^94SY*-a_MiBhy; ztOwByGe!EX3!iRg_kj)?h}fTZi~Kd=YZQg)jiY3*eKZK?Z z6I9|k`ZE9F1_wTGF@Rv?DJZ*i14J@7e!U2Hce-uPUOh~}@}Xy4OAMv^+#k=?)cVe3 zm}~XR8E??f=z}h{iKowuCc~vN;4V&4(%tpx=C`5PPE{1ap$qb?_Survf$irK1k8iZ zhWNla0m;s|>zIUmq)p3T!mr8E45TAAvvr$j22qhGZR2k*lkgU0%_tn^HWWZsll$m< ze@fmRy#JMI>$6wJa5SQUQ3V(K5%hI$UWb9qW3jbNYLa_@iNp9Eh#syg-Vpo#L>@MO zfMouEdZRxQb> zT(HGJqVQSAA-cc5z-e%5R^ngZToUtE&9PmUmYfa53?xCETg{w7{78b&S;ij#z9d_m z%Z<)gq!$Fr@RSWIEMgm{Z4aqclF)W*chl4S2ZBO>_K84BiEg2<3xN*+GakUR>{Mtb zde6-xr3gRBH|j0s+%_1#lu&NQzSO zmdK-n&2Y;=;w-|!n-w&lDM0XAiku!4MP#=-kv1;B{lBn%iFFPw7wa;ROk_zv$6xzn zH1{!R0JpeRyPE9Xdgj^1{*?%KQS_I7|It2`m)Vr}2$r>L`*m+tg28pX|A{eO1tvC+ zfx@EVnnupnh2oU4v4l~z^~sa#>3>0NX)LBS$0pGJmTnr3sw5S$>}!hW5r|J&d+p?nDbX$ z>mHI5`xwegxg|hQ4srBKKk`jSfo&>^Z9-N<#3eSiiLxfYN&V9AKNqFS_+oe%Wo`2A6*ElXF@1NWUZ=sG#GT-=&Sa+E5~gG z9`aga3B4~H_p0S|)Cr*AFr6lB_O24X6>&rau>hUoVY846M6>aNR9Dh>*oBC4o0yny z;R?Z_O~*G+ZTcX@dR;i(V0pnOMQJeoSlZBA@F)_Ff2!;8yT8SEUPOMYZ&sJ`B3$!ZAhNO;wcht7A*{f%pC z;7Psse-Z&wSYN_474svxgzpgu@<3dU>p#G#BL-L7Evl7Y)!Wi(No5{P zj7>WH3mUtLUXb(&?r#EU|0w%wGn8;rlUaW~gCd?8ANJ04Xoe3}t zw5o=<_-Hif{?=$(2uL_j7%k=3F-g5!`E4;e7=Gb*nkpaMcZ?#49svnEpUl}r&?*fyxFja}7Is}IB zTi11PSGz2(d^i7_3FP;zc*IZ<)c{F-#Af`r-nJj@j0>A2-0aj>EaKH;FnC%ucnnV z&o(!jjPQ}K;AGJWjD1J;7HQ?d2_81wA^rCO1`H z0a{=S519{~f7RH^%|fP_x9?ny2bmdI97aS@&+;ZB1VvTf@3FY`P1+0s!xl$(d@eEA zjD82g{v|Sv#P4?Cv{5J#v%oEW_|2g8@8@csNqN1|Mu~rg55uR)i9Qb>Q;*g3CI1Dy zj~lQtBUg9434hbvJ2PGUb^VtEXV=r)^ca`R!0w8$lJ#q+MeNTO;71y}C%ln6N2K6a zLOd!cotQ2Ke_}C`IT5_d^U24wju%hcr;CuU@`vRHVe1R?%s#mxqq2hh8j7K>V!vh| zP?lD^@VY&yngOWyN46Pxbmt#Vh%vGy{{vN$ANINw)|CTHo;+Yn8c%-QM6oBCe!upY zl-g+0d-C#5c6uk;76mbMC;?6piQsu}rnaa5_ag+K1>obV7>tZgecH!Wq4P>7! zMr2w{L_eQ@OCaq~>uF`{eyYtD?V)*aew<~_ed-dX=wbOFNLYq_k+j>vQ{NIz0j%a@ zf8^Rr`5e9QiI|fuPzO>T{y=~sXOayMy%wDU0nT9;L0{iQ1t>2As(cFJMhTW=OsN`_ z1F{Zo#E#x|5b_KsTc4G$kYPOLDIo~5E4LhbyyewP$G_Cpb7PZXw@zYq?)04s6Yb5O zKi5+;l}#!BNRQ!Pk4nivpmbMo!b7zNd;$~?%VQ-XOn3*e>|RQcSJ-^@j-mmNx0^J*qdWoh+hOU zF4P9Go!_t5q+gePBLY^ok|y%*a8KGkI;Vji|LwDLRdbmzKC&%^@w85N&9ba*vHzTm zBd9UdLcM>u*sWSX{VaIORnV3d^OhnL8{3TQ=mHEbQ%fD_qrk7U52rJxzXAF4sjFhG z2Akgdr_X(InBvKxdYfHk-9}zT9Yk*qIH?jEmsuJ#yjh+l>{n>mR=-bIg4J=EZnXF* znY?9jkC)$49}OEMWD&D$@hkfbchH+^9e3q#VH+(#k4!Bkp%gO>MJZsF*_?v59B;E( z$zZ*5BAUXl+JmFO@@!~8`J%v-JG1nVV1xyyOYa^qDU)L1Ct$l#T7DMg*U>u(W} z8azK+5&$JoeV$=6i%MenBa0szh-G+HkdLzKtY9#Zu(KaOd>MD}_ro7iNxGmjuiVw8 z_!8ThenBkEd(^{4GmH=+W?xzZ_u?apu~)))T)F`45KulLIBR}Wkis)>bb5iF%!4{p zwlY2Djmk{G?&(kakN9S-fCR?gP>Rc83Wlqb2LK~uOMg>46359*zBa+-eX2eX=u9C2 zmw*5HZ{N|wNcUhQ9=A6~K61GK!%cV9c=sd2Ehz>W@UZ;fNQrZ@=a>ui3i4D0d!J6E*NTxi-6l9gwd&DmbMY%^GkxNkMAMcm5|f0wbKlQf4I^@^HMg+Yli zbsBveSCCtE+j^{thm~OD;N2CC(@D;}c?NQbXA!I#K8W$Uv6#q(R>yxe{-Dweb2%t8 zYNVLxJ8bGoOczKzhI-Iy3r}T5BCD+TwbSMZQVjuCS6m2T>S_hm64S7%wY-1*4so5h zRRB|GmcpzD^6Z$=ZVrxzaEQR6c7aXmXDiLz%ih7YKa8TE2t|f`QD&Tnvb6X$n;KFD zw0^gajbxUgc6-`^d%h*UIta*h9)@|pdcBIx+{&;i)mm|eVL-d$!yNi~{<|`c4*zx* zlPan_`4WSAX_de6i?jqU>T@(0mb)*eMCfPEC?5AKP=d_0tXo`1Gi--6HwjPCVFnB! zaSB#$OwPzty?KrPZ%lod(Rv+Z0=173iJ=unI+PB2z-r{RmodI3B8PSo@7{7~fBn34 z@vJV%W|zSC0ybR zv)1rF2{2}xz*>a-w~3I`4~Mdt>I~usvEx73LGs7!@W-7lkbb|qp#5;F(=BkkK#L5l zE^+uqG7QSBJZE+zZlVt+WNzpKWQR{;%Q=@a4aex4yZmUN4#bG>`AY?j#2LuRcE4Ni z%kv7vV$X^P{lFDy`5gJ8Dli9-Jj|At+Sf%`8BJ-vka~=x6#tenSavUT^)!$Z=)~0% zf@6ydHs8ao$7$!)ttdKxkRrdmB!5cQ?HwvOR~5*DY;T(Eu3+P|Y^L0Y>p)EZOA#4x zPRKjs)w5O@aF~BISyM#cS<$CR9?$!kFNx#XC^i*$H}fr13huc!irv`D?MUw)5~ai} zzctAke|3shRpT^e+`xJ&c$$82y{W2_hCV_~#V`1ROqqnqS9nv^pK_!mHhZ!7wf}39 z3**U~zfz|8>Q7W>a{NypNj(^&itT{asac>YETJR`v-Ed7J7b1-l=qvucC0FBDPv z438-vj-~7)Tm1`jUR77gSCk#D>S#gqhbLuh9v8kfXp#0G%eMkad;Mn>l5uLBzx}Sq zb3;TLoAbO;>gfw41In3kL8OBM1$AWbmE0jMvze-rMXf_2G@r{Mq0R$7rxrQ{E&X4% zHiiP4kq1{gI;=3SLc7AE7r9}=^R~a`#8Y%vPq)lOfv(zOdTKYRJ~e?eo3J-V z`;6iU?x~~!CV!Op*5}3&#q(pvWwZ4_aopU#(40R`;L}`Fmq*s~MQd({ts1u}i`VU* zPqtF|$Y(l$t=Aim8l*xEmDj@I=(DTfO+_%=1C7%=434hjgYVJxSL7b+?TBXqE0>|K zq&k#6_pt_V!@Tm&sK#Nm7p97vFe5&$8JNS_9q!i6m;nuePdP#m{}seoD^bb!ZuGv(EtT2WxJn^PE#%E6g^y)?n5RFA z5-rzD5`MepP4(=+42}4RC5{t&oG~7ZzH3;yvo4Pes3Dmp;~ryxs%FBATU9FT+&1hz zz5GuZpCuTI#P=w{^gAgvv;tmy@y1kc(+rdG?)R#(TWFI;$L7-8s;?ha|BTe@4M zI|Zb>hVDi{x&;MMX%QHtr9)D>yBmfx`2NoKoqxHwVDJ5`xYvEJwI0CF{F5O6_k7ZzI@nTPULDeCx(r3Sz# zhV`T1<);i^k{R=B!Ede^JWER%fXVG4SyP+Gl0pF2$YVJ@Bm?A+(iLf<@Q`ekYJ|>S z{!u?4Uho7nN}fo|pF)S&&ru)y_{sc4IWK`vZv)`U5p|^c{bsX@GA(<(Rz3Lrvaxi| zoWPEIT2iC8^@Q(?8&|Q#b5ATf`xt8gPHvx!DmsxS61V}?Rbv09<{#beAWNM}Jx=Tk z^$DoPUW*X=C+VVsyV5!`I=*!GF_VY`^-hyIC}_6?hwCXn$-%7B+N=n2GfjW`Uiqg3 zC>w|o*~QUyFgTfVMc#15TQd~Ib7}at={UjlVJjeqs^-IK7 zWECx}CbITmW_JpE@Wu$Iz|P*=EuZ1{R`Uk_XA(sCDEaL1$58}%TxYfgYssI9w?-C@ zt^vS=YvsS0#@$XykDsDm(NS!^+j|6B82Tchx}iQh9MPKr*nHg~92H8pjFX4VXl2a) z)@>|JNMCybi8tj~W=r_??e1YH?u_V@xEDeT7z$VGfnbcPA>zF*bs;>Lg@^Eu_eYuA zYvR^#?_~Md?{IYJ${)v03aSB6PNol!*u1+n!G<)@J+__%Yjp9+yOV3G&Fg|4{~rjd z{g2C>dQWGmCE;LH?C9{%Mpl4V`KS9=Z(<}zspA4zxPn#Gh#2l|@!$@M%F>V3J@oeW zq3k9NWUKU~7K}5O|0#~yn~+cA7f6QrCom;9Q6=LJ+hP7-?&UV8bDs(MmKj;P+0)^uGFXTqdQ$;N^qct zn+`|PqwmUzDdXeZ(E!KtfYr>lr-P~hD2r_7VhQ2x7nX+%?{Goi=*)Rp>#4Zw#+{{F zIsEj;N58gbr0}h@)Ad3z72(XFL>D|n8hJWPX=wnXK}t$+c<{*_A$RO5@fMq43>);= ziVlK*+Qz={ncv>t)sb#wE#eCpXg918UcqYqIDbVa>_=vvC?WuSKOD?rBaV<9lJx!bS{jf37GRbHM`v({-+Nk?av-j zf_CZRVrdBJc3O#oon*Asp)mvJG?lYk$t1abHCte!v z&i!=G0gim+Kvi+#qU(%N(N2BT69?{or>WbpvN{va$e5j)=irIkbCG!8Sm6ECmQ(lw zylh?chO3#I^x@Mdz{pdF1EmbCO=6gkEWn>IU2TLQO0*Ig=+ce%QVNrk-)6Dhhi~(7 z4IQDc++i%w{H8xyeM{Nl?h3vHA^d+|k`HU1BBjMDAiZ=e^|+l-v6JLMnZ363c0eM9 zRx+xdUH(GkSXksF`kcsv63LS>`;_xzu;8PmH9+XS=9UzSc8M;&M$2R=nkOQKcm7M^ zn2>0N563md#mCr+JSNj5i+nmzq7?{K(uK9x`Fbrn3-s`#6?m;fB#EDGn~|k7n>4_S z-JNsMRf8);=*#!zQLuSZ3E+wP7=PKmJ(;%FkPe*dLGd!g0%~HpAVku4 zEO-Bv73=5`QzYLW8(~k_Zl1 z?q~Z|!@XOBzsFp=>0xn=6CZRB!#!pJv`hd_N0x%bN7wMuM#LZ9XDpJx{p?D!Vmjbb zs#F#{tnC40S52J7+o{(fHzC~(4=H|GG@5Y*Soj@eO8}t(q))Y|SIw;j zAsZ5iC;LhWrVTb!dt-c@F88U>t@-5o)4|F@@6%S{7><>c6!8Mo)XSQDvfYBs0!hs!mAh5ls0>5-{GI`*QH{ zgQ2eW#}eaLRXSuc|6j8T;%O-$%XB1+%%!f*BKboF7UE#q0_&3d)egtkjB@t`D?_;` zIoKl~YY2-&-xSN3Qfa*L|3DwB94Nb>$g`uqZvXA|p3v4EZ?d6p1Y!j8yFq?Xg+!7_ z=NrXwF(IXPh4I`!z_!shKBY-#WfjYuWz~0;i3ohY-Y_gp08!}3&F$|{sTEi98|Q6Y zKdG5eNn5jF7qXA2GZ}B3taB{yGi@{&%ty{&rN z^)rRlFDO`6E2e+gmr*uQ0W(hGfFV?zk4t?!rpMP*>42?ieYZu+a|vc(xwndd1y=rW z6g2WWZC!^BZu+}{m;_1LG9WQesponZ(L^?qhE?82>4o!To@QkyeNVFf_;0|CYerC| zE8N|FP3@GeIu7+tCU2fKuyh=|RARIY0pcqXKWKYZ4{sTHW*q6MfsrF785aV>Q0d-=UXFPNv=alt7hVk{1|{X@A=qZm^TU>;^Y z`MLo~CjgLOW%1#;(?kuvkrA?!V>zajA6gyC`YyzGN5uWzDLOSu7T3%;?j`W%wYVBw zRXbjt>B0!!>tOu4d$w?Fr!^z&RQBc79e*4YTk{>^V}vqi4Mw4gnm&nSgOKUXZveXV z?Rvtht5b7zn~k(_Wo^;fD;gLYaMMLj+9HGoOH8eK&5y^u{a;gN2!%8t+ehGVhV^>w zkOh=_;Z!QuTwL^jcL7i#L5WFjwwj_AWB>+Xm5-95dg>tbTAc*qt-EO;eS}v z79u0wl6yGIdb_vJ1KKUYy$m}pc=38t{9S4oDXgs1__Ill7rn`6PRe-PU}SS13abKZ zfF_ntweFkGw>qQE81xFcTfPT-!8cdI4JVScRR+8(5oYrw8ris9X?f!#I`1yR7)dut5?;=4;KQ2-rE3rV zLr{TI%U^cd`~K$}FJxhI7wD_MF7>!ijrQdi2YRetu+P492VassS+MuR?fmO#K%SbZ z`w*j>??R>nz6$yVakpP-tpmmMGrQ6cgpvW3XRa;3dXKCA73O+qAryS+3;YT zDFq#lXtre>-ixYxow=!RVz*Oju$zl>zZ1V5W${C&D5&-y5`0YuIC){0<#hV|gYCcS zM>SXogPpg>FkVQxs{dK6PJsaLQHbG3-|NVKzn??{zzm7{b;5(hd3wV%pzOmg`Z$1U zwgMToO9-;^Kc^n@t0I>`@H#J^c%8}YtE+snkUL@0K{(TDxF$w?y_1@K7mg*v|FE)? z9Duc)_~VFu_`4rh%gC7v3D_q4O#et(mEBxt8>p>7- znhiXExj0$TFaE+Fqifo;d(OD@R3~%mt50^(2Sy54N79l@4FV89noXXT2B5IgMoDr( zn58I@7Fr)=F)H+5de8|4+#f9nO7|qwFtR;cX+O3tw&W?UU$atbP>%?eFE9XqJwi}J z_~2`$;wV3H;`s(^-d|qVF#@$ef{ zu?sANoov!%jFfd_zMWTevJJ;y)t`8sg16d7>sN=)fC-m8EdNR^ActF zgI^>b$xt=10$+Uc7N!-ZC!Z+HD=*pQEwi_uN?nP$2Ti_Jtd-%{kNaOo*lTKN5H2bB zAZ9H5l!+$_t*F9r2SpxQrT6xRS(?n8b*u{e`?%0=*vgbb${!5{AvwyP4`~P=#sUN9 z9u|D4YfN?=EL2I50{&=pZMXV-Zc~ACKOFs@#^ZhgT&>U7g#v!HhX-CjOqJ08zEQQK zPI)-5#_#+X*iT51zN+tti#Xz$;@=hPntll;<4Wx+yaL9e3zggUhGaEed9A7Z>Y8_? zg7i<$iXAbRhmY#!tJ#0B6A`cfbc7_s1R& ztj$UlHcQj1{FT^*JPY`%Tlg;szWt<@4dpkIgAG%8qlDL zEM$m!HH4ZPR}q77NlgQ$Xf*DnIwr`~#D)Em2FIR+1=#kf#>eYD!?YH_D6a8G1~xQb z3)vtmXs1XDEHm7lmtU!UBel~^dfa>)6Jl^`pITH>9Yt=`#Uxm<)oa)rkpeiN+O-8F?_XhI9;f3HWOZD}w zr}zs!>H@zh3y9n9RB6^FPx`fg*gET1pyv`}qfi-bW(u9kEL{4t{rK2IoE@=YtsdGA z!TuGjKDEuvDy9<)(}>@(7gr4a_};i}_-t-Ep^l^V9sfRdl$ceVlJeOgf8tY;FnRVz z9G0^_v_4&LdX=^=tn7#RT+Fa~Do!!m)4^9*cZ)agyRK7z)CV$(H&&55jf`4=_%4}F z@=hEs(q6vstVpp(&nKj9<_j)l^C{qx0 z7Y@ya|5{oWU-`PM=gblx70Oa?UB3wfP?XDdGj!DsMhOG`H=GI z%~BS1Vsqgad!^fD7U3O?U)7`U%txN8E<}8%3QZiUD!#50Y$(C@*{YIR7!~)ElB^4mb9IP}0 zVriV|DXHD?J!@ch>A#*(bQ+v~Opo3SN^2{X6sN3BA9&GVk;EwdbGU5sF%nEnF2|-w zU6v$W-Iqr0jK|Fl?F#2IC_oI-#ZNhRbI@>5?JnlY<-nqW5t&IM+Ros80ke3U|Stge7pN<7ET*oo~vA$C80u`#+$mLRQeGyU?dIfYz z6Azj$!Cqsc2>Fc*n^(IodEHz6JMIJ{QwHz@$Zl@Ze*Zw?6cYSB@I*_qIp#Pib>ivO z*^&@en9)1xE@%ce3*AYMZ1!X49=)9wH4v`wJKZ{SbsPJ_Wb0JjQc+Z&mXYhkr=<;g z3g%A)jVuoI7uW7$rmCZN76Z`(o6pdc3rVk87L{chjf5cEfn9ehoe*e0nq$Qb9n|A` z0}SAj=_n|B^!tIrNc;Llucr29o&zF%=g3GdCRHyv^%H0*uYL|&{=LV^xcmm14 z2AyF(xQLjz6U{FDB0t6#KJH-9Re(1Z9^}TlJ*Yx*CyNNh$JB`M?t?=ru>u)P1jIXI zaE;3dbJHd|EDO(gAqy3+S3Vs>m}DVCe0aCewYdn|YSNZ*mUJ8kw^+=Wme%%ULbOi# z+pDd}ypD4r55<|5M+1pnNEKPgXoDpg_Rf!jj)5G^kP2{gz;l-0-C5tA@MEa#-v3MR zPXU+h+0whee=F?ACQCyi%BN0LIJG`h3}{z-)pyxud(?%g=B73Y~e8_=Uw_Ao12vlhz>eBlLT7_P{D^#Xc z?2u9v1MtQKChk3snOScmvlTRD=+FLIO5lq7c;-dXM~l3DYR=@Tr#Bmqro+|XuA)5Wjii@hdzEfP&akX2U zZw?usvgMFp&R%70NQK=?5`n#;FKwsjfx9kF*`MvWTa=ORC z&)qcC=4@CbFB@XU-E4aTjEM3711^JcfU5ZgK?h0|^;q2Ou1l2=9?un&4Y$$%qJlcL z1|xyRd&k@HnL&71f3TjKerP+h>dhR}_lt>I8frcFX1_|$4N^-s;e5pt|H_q5hd#x! zMDjL`!iH)=e}>or6W@wPIbkm?nrtkh$%m`eKP&&f_9zeo= zl{<9Wkk0ltyMBF6<+h3F*Z8-ZJQk?1NkzmAbn+$reZfZBEzzfVZBDI%)~6h)K~QTUVzK}NhC15uPchfD;SX(_03-Wm2{w?*D>Dyk zuk418F4e2`J>#cJeIx1&7r}Yz<27W8(tN3yE%i23AIu3N{PO4jZKC=tu6xRh*a)D1 zs;gb40>p`ZO6W)@f3DSeMsbII5#t@g`~ZrI=h78XMRIFH;@R&un>*ML)8j8oUJele zj}TM^O1Pw1p{&ID@F)@OJmHNmz05`lyXK3Ir3~gjV*4du113Q*G%Sxe9@L4`0qhOZ zAwc3IqVkrMq#kD|bDaAUw49msdor_q*Fp8!cK^9VDqgwJtY@P6dkAsd3p;gO_~XXX z^c=BQr6@0i2evl{SoT3gV}=v|gD~Mv|Ik8PJuyh?`|&j=PttOYnP|KJJ+>M7 zehs3+tlK@R!f;{1bLA+8!PE!dAnUyrDr8{HqEN|8&#zDqn-_mkJ-7{G##jxXlCL9wGr6q}fWN}(^L)0># za0`cuteltgw49RdIXsISs)uwh0B^Sfus?+B(Qfe6JKuiOeLYOM9+re>*BP<>$6w_k zL3g9y|G*TB=b$z_X7e$wf#ha1iRGE2EjD0gkX1zt{`p#)5B|zv0&1FH(9-)jq|?`s zzKaHRs*K^2koUC)U@nm>GR+K6^vBG=3J?U^!0X71*IXoy%nW1H?Ly5$nPFrX{^b~U zvCmijYHEAw?`~xIn_m%jcS`%{iGh|KrD6pen;3=T>3+3OckG^A;n_&TwxeA;lC=1S;%2Hdj$^Q>al=$#bm=>*MsNSSjN(S zbo(A~5bPUjUoJrtvX7J_>-+2R^ zVTL|?2t!H^%-KZH%n2cNmVsB-?OYZ9t|f@PP)xcN9P>n^-?QB~23*8BFEz;J&AHGL zT^OK!WhVo02N@voCRvJ{*&cd_FH(`mVyO@k+N|~{Ea;goxUlsYau6F@!0zPvxUqgr zinI*Xk=F-gNy$YPgP>07AjSp9&B~uT^2>@~6$la1(2rS}?dXL3g))XZ>%TH0?Pk&ed#?p-1xH7qt8JfWQGYn<*~3kLN8Yn z%&78P+h~D2hP}FM|0DhX4h4y;FG>!Jkw3vgL6{P!iBRJoAUsQ9273h|urrin9hl|E z>`HxpS|Lj!$;?*nJos3{Qog%wrWSgZQ#@f<%+~HH1g1@TMM@{svo^4C=q&>JLk9>% zrAy-aQ#K}_NEGVZJUNr0h#N}05{0~S!yt@gRj`(uG@T~@A>*6+GV!2jNIK-~umgAI z1=z07+p-&5p|+ijmBYgMI}(#!#EsNA@^9&eZ(ykUk21h=b#q529wa(5Gich!AN3JY zV=a;gai$b4{a|{^f9fyBO)ZIVAU3s|2f7%jCEom|x_Jc@6XLzguZaWsGviy14dC%l zoP-~^IQ*4jnNw99?F3@VloEM`a@7Krh3 zPRU4jwTy-aF>A~}^Chkg$JI+Hp;$mr$wA?kDLV;oBV+^5SCuyz|Hd85oK8;Z>-XkJ zW+_5T<~KxNSDp$YEG*#WnY45vu8KCP9ZK5cXY&jXf&=2-ZWL|+p8a44Vuk$v6B2#{ zs3LLUJmkWatX19eV&`o1Y=+o;xzWhIN)d)#J!BUPOWlBp`5gGMuM|uf&)hvfoDE0K zTHRMugcw4yC~RHnpT8buqwOxl@3TUd#809KKN-(+^=RiPe$EzG$X+F-rEEaUMaWR7 zOjUWXoLR~+H~BWX5g&!|4@%mCR>KQcW=g>x_nEWk;e-ovOBSK-39^Gv_BlVtfH;Uz znM4?z4oZzJX3=kn*aQjGWvB7lIC=ajpf>!a>uOtcI(+Y)-ETPEy7M-~kr44Nj)cLS z50W^AFS{lWzDrYl6W;c=S8R6N9uvQ36 zL4M@X5oL+yRb`falNblOoxmRGWeev-$0eLf#CoHb7P69AM!+l?ZiTEu^o>$Ne?JE+js zk?*UZ!~*Pq_ojp*HCqiG>_)VprfUeGdKV-4iu2GqzKBI;49hZ~4s?>=RKu(H{JoYU z9CCTmg!-`WZp1Xhl;Z>cf>&q+o}Z||^eObANIj_Zes7{_45AfK+iR-n7k1h%23s6`$#OwP>D4*GgV=Tv|s( z)F+%mI`V$}LZ+N~l{VmVv3?|aFn}4tRhGwdDSRO6@wyM8w!+@gz?col+36UHC3!b* zzP$-~$7}(24w9`)NNX3QFjLa;z@1$DQtKcaqa(*i8jkd8U_E8r2g~ktRw7KHpd2*w zO~>0+*B60_FEK*wTRt8Tje!@${6~CEQQ)|uTn|Tay)>8W_em@Oa@Gk>u5*oNAcJ2b zsy_@)Yf#QLtj5l3WPR1;ckBR3rm`&gy+KdCzYi6?N{bdVhbq~Mb8l=uetoM2ikFq3 zKmN!>OGg!xATn-_*wPZ}WtPDm)_H^JzSa}*u`03@uJ%$787BD>PaenrP}Bp8==%!^ zdj&{LfV@ojmFz_JYJ$HvACI4!s_$s5R^l8i3%FyP-V4Au5gx8ZToJ0Jo8Yh}n^YRI zgB>4i^bKm+K?b7}xMh(yBf?#_)R|<$v#_5R*BPd=V=m-|+fa8}!@o|?uc+}plC=^x+lL5>7aEMJ(V28fU_S$akf zwu)saA5!J=rnU$FQ84jBlQ;fBy4k|<2YmqP`#%*43km3B0S=TK<+$rmoT_JwbktE* z;X{b0YdIvX@-x9fI_inp=HZXsnnRa{2|v<-o~YM+3HMuWXnN`tm3KN<~rr=8zwZ8E%L>`(kW!c>E@Zo zu7d>OGv3W_B=I>de~YK>A~7zXG#(1x?ERq^Y8l4cCOqGrH<{_FVP2yo1ZSuM1$jqf zr)wA$8LpXvlUa_4c3BpG;{h08lHpAfyBnsAh>_e+EWPftC{JYscD>%1$j|>fenGM+ zrK*Z})C=C4b{ri(Eq z6R=Ez_z6lQpb3RdqD}%bI(5NcG(^PWd^H)>ECKm@$G8oQW`6dO5D@Vz)jsK1{8*sT zMc8HV@R?g%*y`BIpEGTeBe>%4_XCPAN3!*K4yUpoM!MHVGV@GZN1jZgz`+~tJ@7Oh zQ4@6H)abcTnZPK$wlsI(_h!GK9C@Z`_`dm;;))h(&PGA+{_qVT3FIP76W5KhKU?z< zKTit5y=pr#A$};N`?>^yG51yoL@6Zjc;b9QcxPC6Z}f<~Qjn;LXmF#b8X^FB5rph~eB-FnE__g?NkVaJTU;}?LUr|W6L()0 zn|7@l4Kf);!A$iqn62k6er~6vZ-uFKD+G}v7QG-cM*^Tgi2Ib~n9Ff#)qds;uW-YX z%k?~B3-uH0+t3z!54<7(UWAoD>($pa^?KwQ@F+UsJ6lyzbA!+2(X+Fo%-FxhS7VY8 zp89Ddw}lq@m*eVbbbiJGT=-mO^ZsQfA#!&5Mjf$v9LAM2HTC{lcY8_Ey`|8N<-132 z581oT1*#pB5YuwhIx;?OQ66uqUTFHnWXvj0%jF^VNUuBBoHt)`Wlg*)39626zoPO8 z%T}1tKcVwmDm3nquIZYMd(-J=w%FL~Fl@4w9r2!22NR^Dj7b0lT>mF1K8`~Pd< z6u`CJ9XQXoo^NzAB%>3**xz&-xD~xOIw=&xN5Z5V8i%HpEbEJW-NPzIaIWMaP>)&M zXZ~6P7LZx~tnTSLL?l!sG*B7zm;ymXgEr9Kk#nM7HdsK1`7}Wq73K7NyU%mP5wHql zl)IRPr4+oZn~w5fd#Y*}jcV|(`qQK6(&4lRgW+G&0p%FX>FGyNACUR#V*99HmO;l* zjZp&4=bxBGN8~5qgzU}v2bT-H4LHegT6C#*!UA+lejZ7GEjjw>qeZbMK%wYeJl|mb z|B$cQr?vFLEGcxGA{5eg0rPeadkMMcaP$^BX*^WK#!0FjilOm7t%ID@WmHVSNQe|D zq+qY;O?YLh)YiX`L`hqiwA2=?trqDV#0+PUsfy-tVwJ+cA4H`lQ+qDifhfjtpQ0k} zOsj@`;r87m(11$MBm6u`E0+ij%JnPrEj{K$F!A0hPt?d0*aZXIZiqEOe-lprO1t2@ zwI-~}%R@^M=tBRCZO&_u6**Tz!&}Ll;wZFn*L?OD05dP9i~gk>7WOP0Qh z-_7|LOd&awlw#@lB&Q-))=MI=(Yz+M;RC+C*hm-WuF6i$A^ENT@o=kP=K> z$JdH(buwI&$eRU$Epj+Mo^7pqi$+V{SG>3A7G%lblDi@L$ih8`&3>%G2hy1Gs4$W* zKDsv0QQ)k%coR&UFSdVILwC&e#QqbLNzj1}>aTqHeZOq%Gh0yCwS8yx{nywj~_^Mn~Ck9F*oRXB$ma%hauO17{ zWdLxdDb9^BmT>>{f6w4&au@Z)@YblY29`YaZ>j%Vr4^hK`#CM+>ixEgVMIXj$xgv; zALpMxh_yDBPxGQ5+G`fiz6{4`Ot)$N}$g>1vYc>U>ZY%0mO%`=)XeW z4a+@ulmV70a1(@nNocZ)9xVJI6jritz_tsPO6q19IG(2nvBoL2p_kA8=r4*3;L+5# z;*XmbW8F{1v;S~_p5#t}OBv}tGLXBezBggwHf$)WVRbpvi*Ij!RHve5joFe7*01Vf zh5AGtU1_^x3Kj#bz&HR}&|7H4hn$w2oIsm*Up|$W_xMbVd6o)Xa4h@NVIY@#uax^8 z2M`Bp=Fe6V!n23)`XdZ~f8|t$uCcTLnA2QBV01LF8P(th3bu2VUy*oYE~wrT;A@Vv zwvx(d_t%f@M)9>vRM-cC37YzsnwGliRIWW=T@Wh|K0Jb||9X?^OE6DM;Ho=#(Bi{Oo72UYiFA2bhV>ds^4~Jsj+ftq+vYeam*q4eTeL&%RlzvA(GJp563~ z?NQ`Gay9~B_N?ONT@nop-QMLSB|`AXULF9soCuSf$`m)Iu`tjweGceT@YZEb^*d}B z;~PcyvBMArhxaP>*8F~j$M|PUhB~xmr@3kun4(Vo6%6{*mUsIT!+z&kQ!);~B4-qA zY0)IX*V)q~85~IxwPVXhlc@e6O2F2NWYnusaJYie%XMD5u&ra(^Eu`B1X+?QVy{+6 zOi%e(X-Wd0iEEodjp13#$tzb{96b!+ZRSPbg)s#Gxz~G8&9;A>xzEJwpS>Ev0ckOG zS<}No-_Iq6+A?vVv%z3MuEd*YLb{{iruQ^lT1SVd_t@Qamud>_M5;4?sKqqqR;&cY zL%mFACumEmWvIab!`Z6OpC{+{57vhSoPW%6woxHQH!lNwG zM-?y|L11XghgPY0@o+J-B)%~Iaq+o90MA{oqo*{L(XctWOHR?pcU+PkhF2B*!urI%dn?UdAatCoSIM3<9+4PrLku z8m6upQ#&V`=L@#=Yl2xzdf3^5+{NYrHh6&e_m|X1`>GK!=aK;MZ+JW*V9%gR{l&Y7 z>x?$?W%f`>i|sMca%;ghg)Q5UWc=k)f%k-#cmPQv9$f|?;7z3AGth2laWneR@m3Vq zV_3&}!6X_}wLz_2;$5f8phVr>i+loKM@|q8U}mwwSbxZo`erIV{=ENuD6`!H`3I{d zT`E5h3dK8X2t*Yi`y-JWFs%XUtZ{an6PGVSH`+%FYIt9$h(oUt{U!-$D?7YG=b=88 z-&Wi!!yf`0hv+pPcG9Gq*->+EZuXUw9_-K9q!Xf?8+s2;og@vmWtzf=tz~wf6kti4N z?=nbP>UVy?Z?g<+-w1RtC?r7&+L$*RY=^f>7vU2ggvQZ2Jj-1a&?HHXqelgn0(=#B z`XYS*VoGU)+bq zfGIqe+AxChuow-{b8V#=is!O)oa*JnN`WzTCQ+m^B%lz7RvqVc2T#Gf9C%x(as8x1m z6h()bG=;Yp?8FiRPWx+f7wm9vk>_Ge+53`BnG%l!2Lk3t1}?f22Tt89&;Dg(@-uJ} zhrK+j;fA`x9MiT9dTLxkB&oj!J;khtIjaNZNeU0Sn(Q0R4E1(`KUYb`D_*=kR29qn z$c4s0d`>n>5LBdr(arPGF|qzaN;UFv*c!0B);$^T?Row!g3CE3_1zRY-R@_(z0fah zbjPlS)W=;ANzSuFX{$Br@)MltEuQZidk*jEZA50#%)J&Uj>K!>fiU5R;}$l^v`cyQ^-M3 z91t=7$#X~*0m94lK5x#%{|b13@2%9d-(IYY%!x3S>?{Wcj#sIcr-fCS{7kR5eNFqQ zB%yLljRR=}g9-&oz8+8qI?5}Fj(Z6&<;BOJG*$!4K%*gr(}q6>&P|>SXvPO@_DwUV zAR1}|;R-dFlh_J#^%|#&nNdS-s9@&hp92!WmxvwLYMv)Ktv!pp_hJCdOb`agljPb? zql1BiQ)`guS`nDe&N^z#@E4$J8I4TEUTFF-MfL{kQ9ZA*gvM-7nw9_CLC3;;PjQ9N zxWz<{WI)kRhJp7?RH(oIJAEWzc!C|B@wTzVE={i0M_PM0^MML7z(2c!IuQyZUxOU^ z_%@N$$7>;j=945LfQI%2s^Z9Q_}#-dFXV08gyA}Y0}|BfxAnYbX#*OC{GTT9@np=K z{?Nc2$*FYd4`%?d=In*td_19Npb(fg9c*6g=!5!Ny|+3E2|A%C*VSPfVEI-=$QJ2` zn&DsUFYWhAFD%}R zw~TmE@H_VS*KY&FRV)&4fNbZ}E8Wt;9O7&tQg;y2E)4wV(Nj0cu{`^fC5BLD&~rGqvqa))z>LmVScTg-(kfFNt9L<&~bQ9ctQqo!~^}@ z;u@&>V7(CRcce{jF+aQ`x-%`*16T~6yvVb6F8;-N)Z5dyXG6wu&C-jB{g7WI>p zB~8hO4gD&A2I)sqfd}Z{XN~Ut!YG>OF>}n`UG&)6w^za>f>aR#{ic7K9z552_w)G~ zoF@NF$9GP|xNU|NrwJPr0HXe3eoeD38$||u*HfR) zpFN3(SXMwkEo=0j(V>T&e_>*s*YMPv@0lO>WBT%W-GpIS1d&@?9~mJ!Qrc5e-0PW% zY1xF+G?1osc+8rhdZd?@Wh@wl(WPs~PBjudfnloJ6;xCS=g!bXOPv!+#C4R0BhsWX6~r)A z;#C!Kk{yv8jTZ`j;GP-vn4z)gulJHpJyxW+SJCF#l^)vEuj*+_JZQFrX;2$cBly0Qh?OR8wp(X{eb0d?4|1>zCm4=e)XS)LZj7x3p7N^PVKPNYhFQLOAOOi<*m>&ep>@i4t%P9J?DWceclOKWt@0H7Y`?50 z8ma7{{w)vjC)du>sGZxJlaMtVzHb%Q{-h2j9&cyWO(tv0<8M24}{frdSD=$f<-S*K^9JamG|LQMSZvihLRoF!Jr8f?Rq8T9O`M>0Z8FG%5Sqt|o zFh+t+O})fvx{7t(SE~_I!P31mw787DE!llNcyACSW)r-R8VE?Y$=}6~@>{{l`@wt%3Fd z#tZF)Q>KzlpSh|7kCwGpk9+%0v>1)wZ#65Up8Ntk!f~ApQbi1mMvJy1FJfGBj7-*- zy5hl1h9rCF-y--*U3bpM`kc;<0j|RItKRJJ0BZ5u`hPtG2JGHto`0oH2SAQ zwV+$_H-h=xx&rIA7o$SxpD(Br)=vLoHp;tEf8~bwn0|>Q)4qO&W>8J!8cq%YIW4qD zV@s)`o8^x;pQZ2zDRkhkIYBUAsgSrWz}A>HfYfOUHh{`W2`V zQiGPuWExge5PLiU5ex`FY#x}9$mdkSf^hvPeSC zLWCP^XJT)byto(MJ2z#&fL{Jumo~5LO$zp`s-c)J^5f}qd!M%p!4~kPpfkg803ylw zKUu4P2a$jXNRK8DLwLfVWprhlsq=vLQAZn?o7tnt8q| zmq+$>R+8(#3@-}qVr2>?lOo&vpb>=o$3X54#uW{@Re~4@P`g^w1kci>L!3_>?h*V5y8XYr6Rs2Y5X@03*Yrj~aK>GL71!%Jy7XMj11aY|H zK#g1zj1cqhQo`?kzpyu1U$V!apUy*}#r@M8%gLJ+Q?4mm2OvZ`fBxp=*|KFkCe*~< z%7M4vdY#q&nVF-*Cq5V(s>>VW`8|8wC(FI1PL4jHAKi<&lLtul?KfA>?gnmV=a9$K zMM`q7_d2%PgK&0<7X$TsQums%BMa`isH15+G-5=f&IC3TxlblTPD?6vrlF;n%8vXg zCwm#6wufz3Vdia`&prk66rh503{-EOcRN`!HaoPPI7Xb_9%wbE%)V0ZL;i*eyrtX5 z@@<{wz(4l(Z6kB`8Y(C}*SjByZ9@*XY<-N{RhPOQUw!9v=Lw3<;P=`GYEbSD(Rqh0{f`kfrusw;2XvF7 zT4w8{ctMHW^9`nxq0KKSyFT|$n^W2+#Y0IGyuHJd)uOp@j&gE#YG=Hs~Zz_u;W`HQ4*F z&|GkP(o+(2aYocXF3u1-Bpe@BR>bW$?IgDCAc&2hR-M|c5Q-vm?vuJ5@r26&Q;ODL1|yheN!_&^tQl9I1HDlWGU}HZTt{=zc$0WF z%}6HWSO|peOSGga$l23`Do%rD?&rS9VwTg6-}+r8m5tue#Xa5~Wn0=E)HB=TTdX~S z&mMu5Ii12$7;pcgb?*yYasRW88m#PtDO-acM72Z>pJe(mRX&`L z37~lsp~+<_Di;pMZLV}8-w29?M6dQhnU1*T9k}m$@TiE;O7**QT+MqZZSq_Tt3L$) zcSXqJa$Bm``lF%I;~R`1J2dMGnan!cGE4wqcb)}`nO?KWXv7)5#Ukm_qV zRbyG4*@ z=F&DYIro0JKViT7jkTUy9yFlytzEdN!Gl)Y9zerizo7vM5%H3J z$`zitQi54Vc~M^GT?qM00z)Uf^b zK%zmo-3H5i!K=RBVfJ}!a3C+EGQxhksQD_fm;hESxcLv?7OprsOuv%kg0wtw81nQC z){nWc5IC5}!q#Pc>K^}HSrEk}=EgK31_k(&}|!bV59SJ(F&P`b$%w!n9=FP0~|`;a3$=&pca? z+>+%@BC(W>ntjocN$YYc%L-AK5JKdFB!yYa`gk9Fs9~EZnJypqc73 zBkw!=QlKU>qWpVpaxv;?DB{Cx;Obv5+XglT9BR`{ZaXxtmPVEpTvWUC-kN%?cL|S~ zdSJUgt9h57BI8f$A|@G^YS7%qZ4ebfLh40vRXRHOVi>+~xex>K--k*3`>+K=)io&m z^h`A_t<`{Q>-yffh647YiD&qh#S}$l&O^t6ccCQ>2r{d4RB7h4g->*J;1zoR@-lj%$LQHl^EZh_x7{)eZm7Vv11qD=jYV~s1w7e z{mqI@WVq#$5srM^DRX-5gA{iw%VwtPn$-fNJwSeMJk5bOov9h zs%teB6<879FiM>s*BjO(Xbh~DWB09&=){Y9$4Zt1nNh7Z2bE?Sjs0!?b=}lE0|=Edb3wg49pz*OT;AM zl_?ryl5Apdxibr;mLp2>T5X{4YI^CB{qDbI{J$m6STtAN7VYYo@cJ=G*|z)<*B{=a zh>j8>U`>cMjp?**tzdUzFYmyg+?KIsIyVUqE~ym{Rfsyk+sbR90Bojy&sX8jC7i4e z<;1Y15T|F9rZrMOEatiy*Ny+UWAdGX!oRer2z)S@G4n}%PC(#XH3ipXNW*umjsh@L zi;Qo(HJ?z=WDB^C!*r~GJ#|7*Vg`@{Aw~E;sjotau($n^m^`*F(Ke^sO{g?=R0E0Z z-xS&^+K9ODKQG)CXVS9ko&4luDDw%X=QXP;jb=roLd2N(aQ?S+8}!aCr8(!ap4#rb z?ez14OFKe$=*c;&!WnE200?7YTp4Xx86DzEI<`b-WFWxz{H2u{EZf6Z57XLq?w{xg zjd85Fn@t*3praXY#AJEI@lG*=IZ&iT*#|8(YQ$EzqMz~kd08P@m&>40l`6gd$oV~Us2tcL}9ZJd?nr%CTGi0o8VWw|wVst6Wr3EVHlDAgdV@~~yRH_P`u#IP^T^>k++ z`Zyu>Z8>eNed7!;QWy@ENYT|j-PgU;+hO?!vzR99efK$DIvj_(Bh7u!OLoX_jx2X{ z^1H>isswYHBRTtr#QVF=?+2aD5n7TdGe%f{B<&lHo8PGEvFXd{qf`CqtcW&0mIZ)S zLLO%;Y4OzT9(c#!D-Q-^y%<5EM~hZ}`P|%kzvRF_e9IOs($7mpXN-3x<3}&qAu};2 zByMw^Sy0XO3P88lGh7O9@xPl-66|3ApYoCwXuhEr^O1T4 zAq~y>Gsh<`%|Mq2vHkpUu8h5pS8W@0{DOCv+8gL&uTS}5-a|AWF(LHS6W@uHE66OqGX|z<0QaVWa4pm1^=XsV%n|*w9P!q|pV+8e zM#%h7C!HOd)kpXv=#41_f=ZRc(@Gn=6dgN=MriNm_P#-oaym~3M=R{ zm!jldTrM!HDdv4LD86Dk3-N5Cr&KwPIZ}Wcyq6hxY-AiJiRAi%&PzeDra&-1zIXYf zl68rachajI*{)orvo7CzJSdVv6ap<@xhsdc%etb{iv(*2L^r}KJGy8~HgJMYy6ko{p zg42{IW_J%=?0%F-McYX!0h@1BNfZuC5)4ifokKEE?feH00k~s-)3w{qVaiMmk!hwa zk0ot4zL<3PX*;HRnZI^p7UejarMj!RHIJI$f2`v%)WBfno=etcpU?Ga25k<$3-R9M zKeXLR94L;%*Y(CKH%Lv6nJCUD+vu#J`^9;3GS;c}nFnR0fq60@3Q-!Hz(iDx$BC(x zbxM;Fi>b`h&u8-z*%5#)J8~@eA z;6bIXh8!m<6GyLoJ3va9v5t~am;Bri3g2#a=SD?BCfsJQUOE%F-hb+9r2-aCPs4f1lx@B82t z`A{677`cWY;P(ipvLvSmF%4RNq6J*VhxrxxH9y=@A>mz z4oXt$FVWJ>ZzX@R4w3v#JeIZSWONx?2@e5?l9%^%e-SlY;JTTV&Lrr@Ge%OOJvnew zCUc3b+Pa<|ob=5KyX39 zTAlWA@uHEi5Kcgy)tV)=#3#XDS9CQ5a!H2}O_6XV>9c4e(Hg1FpYxFmc852`~Bk+6T#xQe;H31~#blPou+vMRyPb6o5!m!3Z z^0R}Fkd)_}D~pA{@yGtrOebf3WrogwRcf%ZIc53nJ#y+U_~|P*L@1pYqAa#udy%%2xCI%WlKqqNNWUj94&D||s zq9E_vqG}HD0+J`awM5mj<^nZ{CjOA)8vSl0sKthtVC&YyShcOWd5V$fMH(O*WA4K? z1|t(!IkN_{J|py;RNbcEMsq~GS9|?M9=ShHm9l4Lcp9F2PJPKtdb2lCI~e<`!V1I#DwB0rfwbPT;d=2BGnkDw9}S8$Uw zRZqN4{&sSuwnM8k?t+E4DM|AV6d`t2Hdv%=d#8qOp$TY^e$LRu1eB=FC9QlMbUbiQ z%L3zf^EBm`%T6EoTIN5?dSk<=r~J@MBhb* z3WH#SIAP#N2y)sVE;Z4;rb4}61|Dfdweogs;uf^>u)W(O~` zrz@LehpH9GH?w4Crahoc@(v@+S%I;KLncjYG)u!F#?CHDBEX&qQ`QtfC%g08q&M0S zRx#`1l^gde+|S!d=TeMx*wRl9ANg*QrT?;F0sg)>iA)-3&S0o;COR(BnFo=dAr87t zxX&Xx4~jbtIJ6TkU(+y05;=;JF{3vzz+Q{F`{F6$m--B`d4G9ZeY9T3sd?H!KxJHR z3Z7CkbG^LH*q{W0pu95@bi24=r|wYrb?s0Z27Z5#5nf}(VKpdk|MZT)Rsty|qb_-<^?PeP6>cvW@HC}-);!-N`EH!A3A#RrLT|7>05jCy_W z7d5Eqc=X^9S|PytDN6tAOV<5WUqy4n!NPk|UkJ@gu=s_66w~}H)gqx`ity@UGI%R0|z#RY<6S%fRwriJVDG+7* zM33YrFp*F$FYJ2*MG~oOE-e;vZcC>JF7ZjIql#+wF4#*>U3Gi5zUC5q9!;o-_Zx{q zH~Bc+C4&ibx$k;fRz|itQ}t=_E#x-&&Q)@>HxOWLGGHLMFPK>sJKwHweJ?fd+aa#w zrO-G~7G|+NyHLNPFKn4Z6CrTO0E)ht31Q+9xAT}}LK^hmq-qb8J~W&Br3CigN_ zz6UOl`bipE^DALfNm41OGmAeN#7Jpr;(FWdDM=MplV_uZ3=d&r|2SYa$;LDyKeqys zqf=0(^;-nvQJIg?$I(1qgzupLmL`6wpnY$}KURC^OJMGsIL$G)a%Z-jO~R-@!n=vA zHw%U^k=Sf3{=Oh>378s4UHBnw(u-h3++?VGvy46VJ+ zo6)&LRrbsAC#t71JeTnSWS*>JkVqgcGvhgXIlbh4b$-nbM33#s8dII>sI(%DP4F}Z zB9b}eSWnO9ca#M84h3O5b4zH}1+OQ7rjV|c-8}T|D$O-?z7T|a?Eo93>G9tGT{KVk zw7naO@x-~K1=4~=LdYlkE%&E4H}VCnbiQaTd_@&uKD@}%6WslWlx!O+{~T$ z?>N$1q(m?_5<=fgZZA8Fv;$2NdF^@TbdYn}6Ib$G`s0yiP5V9sMiFkAOr=DiSn}#_|2@ntj-XC7lw#r>8_NCX|mT zhxH@qE)g~lr1Fju)^M%WGEnQo$D26lE`rIO2o08_paQUpu=23hr9ie1PV4r)*eRRB z*+nEtg`W&i-51fLt5Fafdg8w|Fj>7TNaF^`a!f{G#|WhHDaoo1>>{PVOCaFbSSD|)@QNx zbfh;-){Ka^h5oe5AHG?_P@@9MLVR=`8yk|s{QuF(GyjwpRzustqJ|LkO^Gwf65ARA zF~nIz$zY@n#3d$c{+&kd{6cE5%3g9v}_Fcjy zSMc?DmGqAZW&p;6UziEty<0MkU&!nDM)C%K_SHAf68iKj6Y1)!!zQ`k)NY zDCMSi>>cviS!U2-$*ZvT##~I$s4#UU8uU71GI}LYVg0bVDFoIM|8*h1`)HK>^$Q|{Ji~FR%(8TPu zR3Ni#aN!7zA093>G|hS{inb&vswO*fwJho0%^4F!L>BB(C~hfQUY0Ls=|fEuUb7ga1>&1w zQ%F(Ia;&7k{IpdeK?j=tiHexfy;t9$vZrCPqbutIdl8rM*h47s+C$?M;o(H>w%!{V^LFyX&XIg*`46ITjIj~FqYC)V4Ks$f* zZBKR5KRb{dwdLEpE|Ffw&C#!ms0T7b3c^-ykJ#%Zi(g0vuhokM4l>-BGL-an3)rMa z3i6H^tE_|P$4L3~D<>@)_u4sp__kfkIE8=Cpb1>7yT%p#7dz)ND&NQDl&m@R9JC@*K-ul z3P1o7v%Zx^XLgRelNEPtTA#&j*LgPGNCGhTE@On>!lZost-r`i>i$-JVP}Fd^yMqR=P$9}Fns5As>1#$xx3lmCVc za7KLWH`JGp4=fTKs}u@g!E<8Qf!nuvp+maa^EReg#rGyrOK~=TGrO;(MFpFF#Hb)P zU)@`JH$QrhAl^*8hHY`{Ta|?Wsu$ED?_bf1S3Nee%9J8P7l3fTi;)=Ckb6=s&{a#B z`o@iFUr|Qjm6eH$#YPl9xd{2p?BLk|w&gOVRxV)Ievi)$%-0<;5{%g=3}DRYjJD_4f&dz7rnxC*5$7v^C#iBF9|GS1!zVZt_Vm9!1-^eg?VZR zLOG{KD=x1;lwOBUN%rR2blg%_3WrB8;#cr69|+|lyRT;*86*J=V_MUTp1vhKjg#e_ z(bHfY)J!t8)wJ(xk=*)Q5>#cPo38%|CIMk5*Ll$MsY}(9`j0u=y*ItjgWTK3 z!2OPLr>C)P=YGb+c4Am_C;bkqT zQ=y8Ok!&-{KUf=lWDJO{R_b`ltt`ME4Gg2Vp0$FVYNz@a5t>0-J^M>pY+W&9D5`k) zEd{4J_bsM>9H7=8vfzQg0;=Q8fVZcesZhIA?L(9rREkq^Kv~p`P=WpCusw;0@A6p3 z^Kf*3%{AKW%T-F$F+_=J73V$Z))5gQLU;%by?1+`weZ6_Q;@`m*PGGg72-Xf6Iij| z@0CNA@px?JpYufxtCQhGZOx$%8(zev0K4~0hu5vy4oHrorpO>O+nQ;Gub zpYxvFi#{a`M57hVY8DX3Wp0YDAHu?{Q>~=G`s9+3V%PJI3f%^NQuEK|geE2>-akR8 zymW!+&mUYo+9wZzSoK$5jtoMdnlJ5q*8InXq8jZ#TmNxHqWGiLIV5bkn8cy=BrX54>{JBuQ>f2A70Z133g=@INLFUmUD+N__GXB31ynDDGw)W~F1y8V74#OtWqt$wa5!n);g0+sC`i$GO{FK;WQ*i{ai!Da~UV+zR^Oca$d%a4Ft=$b< zzW3KaK^+6)YCUD9>2Z+Mc_F=x#UdavkqIrt91U$ttbPMa2gDS2o6WeB zF^YamV(FhMBQBs)iG;BBJ$;N)IsajgZzPP#~boEk8nQ*rruJfNGj zUoz|xy)u>DCcPO90JHY1|Hv}vBs9=W*iTA+H2H+yBn3qA9)DUj8^TWyEQ?eHt^b8w ztIc?qd+S%+JJ!#qeu}~WyN|y2_V)Vrn)CMJs2q_A=hy#lGaK{vtD{AVHgf=;_fZAl zEHVB@GJ0+H*jn8}GtWIf8eio}(}2+dIO?I_LzN*i_V|JbpA-0L&`j9QUNXBeM0lti z)ihQ4$0AQOS@x>KR76QsYm48ln}ZHVg-0rJB%;t2Z`48RSo_na3DnR$eW|a5*Ig>) z+Wb{Ed}i{|KI zeozIn%fGt#DE&JJ11Fo%r0rTckHZQ9xZ{_5p<|h7SE#!8b(Sm}3?MS-qn1cx_`aCXyzwl1?KIx~afZ~W1TVNETGOvqq5Q&;@abM$n}oT){7=XEr!t#tg7I&)z*jar;y z0`uKmgl^()%i14aNgMenW#1Er(}XTvUeIZ?Mn2Z7 zG=q_tLxJpF9F57^4%St-O80Nle1GV2(wC8fViCl^Nf`~@DR%aDPhsLj3$UX=Ko3?P z($bb0ea`^hq`!2$?}Md0x4G6NT>dXAZ{|`D58TemthTj4UJ=q?YC6l;v&H3goil3# z0e42NuPu>kuD9~b=$0P_L=XX?3Apxr!19XG+uQqZe72MGkrIc=d-xgW3l>S+W1S>N zgHLz2bHdjDbf{PV){iuf*9Q|gF{t&Uc$(LCuixRiJsGO=mL@xf^9~bE5dZ5tmpNlW zzto+8ZBIlxBAw;2geFDySF#fTn$z^VPnxAis4Fw^gAkDd0 zZl4sU8)^0~<@BQ!`p2{AM;d704hQXV-O{qSm33RYMzVKDZ!}D4_vhC204Gcw-413A z_E1xUNmOTqIE4qp+c!7C+E^2VyB7y`*ufiO=QA0nI$Zlz%$OgQQ~^FuCmy#C`n`|T z^fw@7AD^i)pW4-G-oZIch6mQA(gA0H@OGX?v5uC(@lbkMlTUgb3svkG_=mL1iRXtb zDx6vtMiidNtpj4I*~sOon@65;>hKD6(gW>&k!Lf*hWB3|H(!q-w~P}@eFCGkaLL@= z9|NRk5@oEfuitsm7awDmOgfG$c4=u{2av#R^Nz^2QvGFPD!oj7^`eIJ4a~gLBlYp+ zl#wDYG>!sv)#1+}{9WYX!x=rifyQ-qx~VpHM2d;7q;igob8QJn0a!nHB3ZlCCn197 z>9RA(gIRg|7Q5{4Y@t?+ToFI{|1Vjfq)IfP};%P9nL}S20luu zif_(%*yAWm=Fi9kpq}M&PG)So#U1zgi(E$*)MnX(V+W#xEzLcp$rG2wk%P}-ifivHV@=k{B5g=ZBLj(v zoGP9~>icmO=!37(0d4CJguMuGp%`tG{m+n5Sf{z8QY?ikCeJ4McX)k9aibr3&QM!1 zs?ErjZZc#=zvb#7wXBT$X*s2`ny_B)5MsGHJ;WkPQT-{2PS-;j=6rf(WIJAK?NvfI zzC5xv6_2)S&_#2INS7VEEChhloEr+Md#`Lr2-stH5MzE1$l`qGMEM3L;%{Z#Ks@^3D5N{m&Z5=3w%nUuKKM7zgra56^_**aHipdYHDvsO^_A|M z$$|n_FiCqTf4xsJA|E_!#)9bqTodlvzt^S zMJ`hYKpx=5;U^b@p%VWKngAw0LzJJXW$d;4{_}$*$4vfCJ;gkGD)x{fVxEKhK z;~Ymnhlq4qKS{J?2AA9B)`VY#JG3G);P9Z*M43fqv{2uSAL)mmgwR>zg9HWsHSNdg zUUm$bt_b%eQhdV4bi{>PD)!YjdTLa3ZB~d$ul~7e#E(4kGGh@9h2#t+9mZ;phsg;%WPU3$ z{>Xgsku|}hXr`hQi!#-OsEIQoZGt84C|?(;&8mS5MktO3U?<&N@jNG%9Y-v6YfW{a za1i7AzqA|57dL`?&ih)YdodS)Z%wWl_h<6m#(PPb)QKGr4mS$S-fl;Il z^Dr|!r%kC^-VYI&81r)hZwiSG2kM)Z7t}=tqZiRLJvE(BXl9r2Q`Ys+p1haba*MuT zBh>3>h=$&k?J+)MUy}y%ue0wPvpXVlnytN(@#FqkZn&WY3p`@Vdqh!&AQW8ULyCKr z)>3EEqRf*U9XQ&;a$Si49(9Q1~aqMxqE>u<>mIjVz6P#KX)n7vJPZbe{L+h=nD+s;Rd3E{36Ehd%-*^jUkFk zqo_1JaSO%7x*_D0h~{#F6>0?`s@TMn`DoEeXyP9pVXOkj_VQuGNzmaxWT1gO<3mb& zdTkHhGzPES+Sp#qo@k9?R@z6dofQup##xnw5nqW-Id2|2~FDbU?) zf%mWnPYxVkys}jq$CFqsyZ^{|Yf?#Me5$4tOw&tntze_W@p!0-Af8-h1?t}578R-6 zZ^5ghKt{E1HM4@*Y*rWAk3UFiv}t&ZFZTJhDy%q2<6CU-U*A!n`pU10n8LUR{LMHuybbZN`ug+xuR^tJ z+ekR)%zrmXBXD6Sn`2pFgX1sb2b|etEC~TEel{Nktk*4ioj$|tYZua@(fQ1O;AYtp zPcyu{{j#aaelFc?2sC>2Z7Jp?n9R>)^=pgaK)%yBFMFsQRjs#7Bnb92xFI^3(cprF zqrnNqPf5x@1=$>;1*iDX^xzD*?5?_%yA2)zSNS;&-Ks~jPEpaR@OHM z3W0!(!!zijg%_1=W2EX)38IK&@L=(g;C$j^Z7~BUnh0S(z)n*wGGGuyJA-0V$V`{9 z%;Jx^9-_D9Om2jK5(C&wLNWi=#t?u6qPO%N*>}k@Lys$#2Jfj4R_}f|MAj}(4TKR+ zU0Zub({Oz(6Uh!a??Cx;kWaQtVDX#R3=<$IMnP77YZK?B9G`1)6Y+yE6x)2zV;vOOND5*?Ay;{Z%5uIqAJ;UNBrqV^Jb+x42G^E?O3etAfTz& zsTZ|TD!w9H2CFxXn+kEnG7UEPoF&Yzy!QxGfcsmELX{lAO}~C@`=&e+B=P#YtRZOw zh+9OWI{F$D5Paw~eE;wnz|eT9ExoUeo2ckjX65;88q@HxHs@ECcM5O-FN1m9NG2XS z!RSim#m^NWF*to#1{bYIwetkIK#~8jioz#iy*G*xz7HYpVsA>H5Nf(i{H-ZOQ_1Y8 z^`7L)Yh~5grxN9W$Y;Hm8fscRA~E~4d6fpg)Va_Qylx?qu(^Lj>dz1=O`q+ z_DH^IH?C3C_J&kjifw*yFnEPeMoaH-qA2{|i!!VUwe~@dKY%(ew~6 zi~|F<3!G8j!5Q_kg+RS4W*DqRK@<5(=Jd5lSX9|{@_mhF5>6*j@M&fJaKvYbJDGRk z${Cr%u{0-HYAX>BerIK6OI*;is7se%2@*-5NpCf9%^)t)*u-wK%0Ks*#^ekGu6WRB zK?V%XozeDb4NcPi>Z!L%=4W#sgWvi6^(WigF9f3&*F47Aew%aoo9@(0s?59-t(=(r zmuddbmytF7e`NS=Qgq*_bp0AuUUe<+4*HH?*?TTmKQHzw#gDx-E!Ug1jp#Z@xxd`s z&`}OdI4~-sH%*DFb{<~}V>&q2iA=h5WAkdYG62aY+?%`D*U~9PX;bf%zkXps`gDHA zBThfSB)F4?T++n7MvpPISTb}&kmhQ3{pUL;F=@~GtMth+w)QhGZ4Mn}O z+Yo;OpmG5!Lo)A|r8ae0m?jMX#1)Wz?VXk*34(%OBh9= zMNyJh`v8K89BvMpzVOje$J);pEUQIcNn$$kynp&0afXkLC}} z-nwc^rc9WpsV3Dk{Vu4SxBiBk{rsBh2tlGLF!mV2Er+aQ(A!V8mkiQ(DgkPUw z9>Y;R12r9VhA}1wT3wOGIz2z3qV)zw^i*)cUdOnG5o@sJ@gsLjC1?BO7&e;63{VsHbWjnxJP4}Cw*rKFsU8w|q zg@ZGsq+EWG#qyZd6qkD8!7C-UR|j<+=Tlm8e5VwoPXj9VM{HLZP($ptv09AIN!PqUthg+}8^Ly6&lgDL=s<3;{EtJA} zEfG|wG)8$aMQ}~PvS)L?&`IItMqHjq^zXk!_e$yXSr<`==9U*X&SYw9iam66 zY;4&W9%0>h2lV0W41TH1ax0XKZ&mgtwW#k)Z&`}`2=>FQR&ylK^u>es)TV^!G>E`6 z)}$F7y$t83hgslmtxy2$w!Jy~$$mOHjNxhKPU_!X>tt@of*b+QF@)Lvrz+=WIMc>GhS-PoplOhU(RfH-lC zK82HtBd~SR+cLCJ^_1kZEJklr0MJ2Yj_Al^<;J^hdmN>dQb+7_1CPF%1erqeQE^L& zT5^)Ge#(>pxV|xN_z7|;41OcWDDNFbKyLf0HhnfoD9>yr=10~p_GxJrsd^&?z9v2x zZpXvbI%)@%dr^Kp46pz;|IHqE#}#-Bl&Zr3Ou!92r!m#}65fu$barfIGRo{s!iP8A z6Se+Ue7XA)sVvo*k$t(RDp+dhIK$LvAFtzs#=r&J=fquVBzTaT1uXd%xXZ!-mg4c* zKEo%?)yp!2m0W%LvSnAx>F@v}H|+vN`BdnnMt<=bQBJlnJPvA>s$S0<=7|GuCC&V~0@^eurF1G0hj zQK9sT_7n7@AUK+-hDDOlJCCmL&DvU_}>qm}q&!@=iNa zZGHrGbjvPvxI7{ijnW0@uSksG*DDl`>omsSXT#Oh-V>o;5VJfKR1Z=sex1@W&{ORpAf+XW4=pdgM|3w0D&esa)qCPU# zqnF}i3&fs|aEInIas58m*jh2u>#_|$|73JUTX9@vz&$nr8wo-|YGrt?>O`@_!vwN7 z043r?vftbB1dA_@8M078b_D!cTghViO8hHKfU+rQdZ!fjtVxWPlX#e~&nL>Om_BoL z_4%&qHJn;6i_vHzzH~x7QH7v)3cN+m*94~(xK_=Wee-Bh;2WzOtksVASMg4D%E4=A zH`|xVHHpR#V{d5twYR=(!xQxI8f>*5UToa?MY<+tGckx7W$!YRI4L72V#y8y{JJ}R zRpiOSrG(~?1*SIF$!@~YertyymO~GX_P;|@Z=3uP8+6>cLH48b@b*rn*jnzym-^-n zN*rCOj~5b<=ed&C^8;r{kE{kn>)bw5nQCZA^7h?JuEngv8pi2t@lTX7i2teE1$AL> zFMVdmOXw~0I+#Ie*0@-;#X(^w7C@;P&dT)IBi-I@&kk}K95tn~j}i^BNpTKN^t5mi z$S)VfipJSTAK38v5Owq`<=Y{R#aQQIiLc4A&i1j&gZaW;koZLYWhbq9EP+#{vmeaG z1N-`Qri*ehX}sOnjOn6ym{fEid{c#$_dsHDppbtzC$*(-Y9b*X{di3^H!T^Vs5|o- z`q~xUe$-)}fr#(%K@NyczdlD50Vrtu5CQ-w0_L7EUdd662>_8IR=Q>OHC!ieh$Ri% zoK$0NzTx51dU|kxd0+FF>Ax9^fNJ~k7?-)?j|U5a2`4grfAB&+n?EY~gU^b_jb}1V zckC;cLxO*&r%Qcmc>fPi)Jw2e4ysOm+t=b*1_M174brl>m%==YGrWA9H6yqz?o-*meAa>jjWfb9ZPFNz z<`qo!o_mqae>L>W?LD1F+v09|@K8}sHDWreFTM=-O();nX}u#gxc_Z`k%+ru02;(ohwG`o%R~)KHSHX2(LZ^+ zJbSycQe03stJM+^d&tbmi8>bsiFS2evjzZa$TTNIp-)8xt8l*n^nnIlHncK11Hg>6Qw>IVah~GA0q+^!mj&!r?p0U zi~ijZMx125oD#MxSAC~-#KS=a7K%Z;`Lg_FgUI$1>xLBmi;1ph3Sbv6apZ+YhxD9$ z(`E_TdETLYs|4#P%it}zgz8N0gCXN-juX59HAt5KkT{ad95L5cNo%$Csgl#deM0H) zAR58!uV~@jQ+8J^DG^Oni(O61-AaCe&qSW|#bG?o^)-4qD{0&tOcrnBxZrsvD?iS8 z1av?(5SZe_L@076+I7xyc6aafcUkkvzu24J6ub86KCZE2*Ga&_LL_PY_vF>s$nVG> zZ36hLX0kX=vw&D!z!{$2+sKnjdvm<9B;F@cG7?lhqYGcvw(S;|f6n@&IYQOse*goR zro;mTZ;qa-*hPflpfEv*%^hCN1rkl|W$XoESZH8(12m=wjbp2~l??(zxq5WAo+B4< zwr~An^K5xdsIMhL`CG|W`F%(ry)ZXhsVDC*O5B{weLZ6Y>rir*W46>62#A4E^+I+N ze2~Um-chQVXw*BSiVCl!{jvCl6qJCDCY#rjB>fx^H7pseHzDovVOLx zT6ats#eH78;#8ptkW}ChEl_wh=MjE!sREiX!x#m4X<13JD>4Y691~u5<)qtbBl?e_ z7##$LR$f2JphBuH9H9Av-=|be3t8ykDv^aiopf37LQqqXeb6wS)dqumpBQ;H3hXrn ze%|2(pd58ku-YyuWE@~VI*+3$L7rmfHjDi+OdANJX8YxQUy_+3Yc3{*ZVGj==o>w> zKs5^}mkOYwy+XQ$>bq`U;FLKa(f|GU#ZcOA+=X}+O@9QmS-J2` zO9hriud@>nPMoTcHhe*TR@1?nA#E1tS}rWE;}o*eYz0{0e!H%4@0^OdYYQdt9gu{w z=^WO%K#p8`IxY5sFeyJ=WIc-emRYJlBP%05Cizv;MDN?F#X`=0ol9?Q!L_+NR85?5O0UEb>K&qYYbmmwG3nO;rMqgJ}e_hIzC_v+rMt|ma2$UPV7?KkU*9P7iL$U+;Sj}(c$QtFI%t`7su@V#g9{L_lLb4Zp3V?x!L1;lP4u0nX#)RVj00^1wWFjPo|o>I}-DJ2-dtZQZvDP-#+3myl~o@ zg?Egwtc5Or{wnFWo6H-%4|#i&OBV7!uiIwVsh@U+S)UenjBfWKVQjCF#gTEk@^Am~ zbNK;4f$*Z&0Z>@B$sa1l3svzCjbFGaga?nm+L$Z#DXMXNXSEbl()DA0e;NuHi**w> zN&Qro&e0v8%4$9#olwUCIMUMDzH}|1)&DaFN_o;|D7V>5^(xrt}a#ioLh zP;O&3ucR1=PP1Wrge;Irp}o~6h;Qp6?=%Jg#O`{a!-KaV&I6|!hp6x+4`)-^g@yq0 z^+3K)RBTVgHe*I>Mtq<=0!AE|;B9c`zXc<8l{2TX?Zo^jiv7+B8z1iW z5UMr6Popoy%o4T<4-8Ba(+FmE6N~U?ll>-R;$=zez%YZMYpra5gT^$i;zCX! z+F6q9q53piUP6iW8;ouh-sLAEGax*BvSgAj|z5`zC?qemNb8_nPw6)-Z#R7EZiC1d@(4*kjiQM zi@I>YfxH^=pNarWn{I?x!qA^9qsa2b15!49|3%`m2HwcBJo$^iz~kO@F$R1fR8s17 zd5(@JJFg;H0W#Q@1?4t%g~$wuUBgFZREKWNlW#_1r*E-tjTKM=(EAI0Qt`z&j=8(L zv_B7Pq;Lh33BjbcTuz0W`y96w7Lm1ziyVxwDYF8FvbdSu{)t7qv`D?C^%3=`f zshTWu6GDu1cS02lvUfBK%49*sfhWk_(f}U{!HD9=KGM9*K89S9=lohU zYO<3A`*56*)n~^AVx}BJB12Fhng3 zwsq&gGBZjtBN!RT4TABWfI?3h0bLKgN2i>GA>(cLQK7YyrndVWQN4Eao5*plz6_YB zDdA+cN>K}~1ydL$*v7g{i#-3s)?0@~xpn`;_Y8xKl%UcgrGOxzq~y>_2uSxJ(jX`) z0yBt+fPjgD5`uJhGb2i;bf<)L_sqQasOLQA_r2bKyu8Glz4qEGKWpu^&&<4!E&L&A zB8iga9h4hwH@V@=P0pAsfy?p1C3`MMpr7;Gd>JCU2s!^_MPSwNzEGpLE_gw-@ElN z-8$%s;IjJ+7 zd>Sdv?#VWX9-ic~_{?^$2Orp8tnb;TlwR*j0rgBMI%86DwmeE@hD7q{iU=+}*l|zy z)U~ej*Vp3tWmX+mF}EL19GBBg2eobi{bqw##eTiToQphEAeHGu$h{QaG-hYh4sWzZ z$*ko$e@!tD!V;T^rlj;aU0rDzMWt%zb+UVwbA6IWye|d2I?to5ehwO?&Bo55((6M3 zq>2EXgDT~nMh_GR(0=Sgmgc^D3YER;5^*R*?i!r7Efzq_r2m{Oo}Z?EoC(4xI5Za{ zbzbB|asc^lXwX@U!ukL?QJ{A1rPvCENr1hoCgDz~mh<<=>a&aqT{VH9B)PR2uiJF) zARed5mVbg6s?A@BXKJiVu@Lr;lo(_VPN$R_p(BAnnqu;teVVm7n$~;`D;j73zOVKo zD0d5Kh&*AVs;xtzy~<@Wm2VcEoKInc>>CT z0iU}*=@&owZfanJM%u+s15Z*A6bOnb&l(;j7g=4VcF~u{z09WY$r#<~D&9Dxf!0RO zc969y_#83^1gJ-YFHH7I?7DFjTe++by!ndg!)Wn>(_9oG9pMVXz0s#D>4OydFYm6t zel$4iD=5-+N3I~&LLKXZ&nr38jpksrOI$`C7f>LYuSIp=X`Ow`jA`QpqMjVJ`YL|x z<$>ErkrdRuUW_9t7;j1KJ<=j1`0tio_p1A~Vk|$*v0eJT$t6*syYJf(0f8T}1orbOsmu&P&}UaEA-~*q_CDiFGdRH9fz&&hn+n2qff{ew^U}!zHkSsf_iR zoqlE41+}w+Y8}(_N76*Hyu@{S70vVqU$8~ELTIhQicU!OiMarRnXVQZ@uZ_BT4 zCefZOm7Oeop6GSr9T~_hCCm{V2p=DoyA()c4N@mV0Tcb?&{wf@L}__revI$JVZnT& zN#M+o^w;8fO#1F4Spmq@hi$HqG8mHXOq2}q#+5ViUc-R+>UO5FPd8Jo)4ZCA9L~_H z3H7!KJ{2X`AdJW8oFE*fs3B-!dES_;o5FHhV+8xa2tu>Z6i0lK6>MO}<#gsv$x>OP7| zHVB2tORT73E^8a%d?0|M!)ehkbULe}{r;SFcz6aAc$1O`F+SX?pM)`sG1qu@ zviB%(Cg2)g_{8EG-X2%D+VhRN1%7&nmh>@?1Lf~|7>FsNm2p3J{Z-vHIFfN=&G?9|{iU~kNer`^%C9ppC}n@z30Hx-t# z9F@Phb@xPLdGR<-9GA_Ip`{UD<$s%IOar+1e&RI5ZuMGw=w~?}Mv4-=DaX?rW#8M_ zcU^nDmgmMDpo=%bk&0ZFkDl_VF@hr2bsoZE;}Y-+=spnE_2+Z5=^z&_JlPEn!swkI z`*HWK{kgiYY+Y)@XGT~Kg~khC5DF!Q0upVQu;ggs{3`)sOPARMw(E-@ankC8y@>T= zlN1qFE!$+vFORD&)}1W1&5F}mtEG)Yd`uPj)|V&rT1t$|PuWley6 zzIniq3{qse!BrkYBRt_Br2x)5F`rSaY)}FFcWK}`5a`WohepliWqh!k?HGML-B$ov zLHr4fo*0csaMBGDFThbk&R^h=Gk*Vk6LYHfEO{uWBF!nRZ|oqev9&#?*Q0*N6u*0g zvb3@pV$2ZbE9~KS6G$$BA=U3S^l5o*1M_0LuDrfF%rz(0KN=HxY}Bk&XK-=feOP88 zf8YNgFZrk|J*GPn|DauL+Asu7%iwapw|}ok)*2iy@sj?R&ysdiiod8seAzHlsJo<0D6P< zGn(mSTV2@~1=})jJRe?YjJMf+!&Rzk#Cq!bf^arnQ{2R2^a$%cS+KwxNxWOfGVZT? z0A8G+MPqNlT12y3#-K0cWC-qYqh)CO>-zW1NW@D+;=+xZ5y-q(jr&GcCEs7l`wmN8 z7h<_tT#;xdnqNWe6a@*LI%G#`)&-hxQ)-FHOmZC4Us-$NSjY2$@^VeYw~J9X=5G0W z`@e5*eOz-@&?WR2VfyY}P5B2c&>|1T0DldWud)gB<()Jy%~c-6ybh!(nB95Pu}|}WQbmG3FtWwgwLIK(zjuaJ z3#a6MOZjEYX?=K}ih73!d!Xu(wIf7?z3e;UK~M0a39O-l`#0GOVQ9GYl+ zjA`cR%UH|sh=0H-*Lf=@t(|y90J~}NX)c8&J@tH$+p5l;jg*K7d*hqk`UF~l*F<_# z(?tTP3!OH6hStY#=b3I;==R|s-1$kkI@j1+qpKNL8o#EC>b64SUgEDz%CNxZWBsGp>JQ5! zyK#JpN(m0*9F)jLw#2%^qw7cCj1IB(pWdHWz7%}rsrN*pUGJK^r1d=Ei|SN6Nu-_V^~{VM$WQj$*yhK)0HPG0e%)34P4 zH1y)&guy8C$8}lE^1UCtU8hWR-*97p{#=#7sKCJiyx=2-UaVP54+yg|8p`Xg#vJ`QDbPnlATv7DigKymI!f)RVr0G`_ zjPRMP`MiF$o;^l(_hA1r-1o9TKI4wxU{-so26?n$-47235F-tMX+t5e&0U&ebiwPs{QNt*iMnYp1 zHo%mSU*DAO8EGmUeinR}2OOVKz_fZk)%;?RqFE`WC(FfW^1r7pdYg81sZf4e48B3j zimc&Xs2^v8Z0@Nh6{yf&qWAXX=@_FVN$!D1&Ul801Qtx}4B>*pF8npm10;|)V*#2wF!GZi>`Y5;9Pz&w=36~U#rU9b2C7r_C zjbHp^+3GeGV}gHg|2>)Ow@F2&&~BVzf5#50&zHM_wRP{{ou%f5pXRhDZ`UQZ?@eA= zS4kk`YLqi=cwvP{xL)1S&|AK~6FhhPJ8K;Nhr{^edjv z{~qSu`gO|6icPx=`W;}00(nf$TZtA7XVDr5YfO52ntj~Knp;XA2w#y~+O1n(44mmZ z9-uyi9S%m_c_{#rlS?GWZ19WjXJV94q4?%48I6E%fVI6U@G-6)eNq_vf?q;5Fm{%B z$?CUD#n#eh1h#=MgSfSxJX=1nQ>SK!RMmZ4fhE1vIJ6UdyROw0vBN^sT&O0~9ByjV z*V+O;+z~;A1}pFbTwt`A>o${tqzf`Pv~YgZ7?-b?s=*iyo4B`~%h?rogdNBpR6ALN zOV)B8U8WVG5#4$s^O5gqQY-_^JoZB6amsAxUosF=~pBdjey$UCp)UoaNFUB7m z_Cw#M$M!e1%D*qtqkb?LrNoF#a1MLF&kOfqfQilu9VtnLRldLlIT)W{C|*41@@5Aq zT6Jj3XyAlC@O!E`U|zEg%ChlkQ2X=UUeP<xbAZ4YXtnG>A=~lT3xL+z=_j$E0z}V0fh#~r1795d;hY`2Z5Mm88_%vd$vyd zIUs%Iy~qWD^(-IS#Qnj!X0apD)(tD}Z>V2p!@W)(+i`~y%LHypcPjLfUS8X^p8G1; zvIBn6wnVnf;WH}7k(XhTy3snO-jbc%x~xsSfLAdnHn4r+QaJN-FtSXFT6qJfBf z>(;OGBSk`r+`cp%@EnXf5h}eV%TzL_1OR9EJ(~v$?~er^GL$LlN`uK$u4)3#6O&NO z2+n(2g#jzm>kw}h z@i!$D3AJz5vsrca*Ud&s9`&(`)lW@sH8--dgoCC{ykt6gsEuobhzw!yOM{{E&g-9E zqy_Kq-YOOcr*IYdZ5Dmyxgq4eqkCX}gNugfsK<~$!g83bnv-Zp310Ug?&+~G;CNML z)UjWAsPDz$g1^!o;Q6KJCGP*qGaqfnFBUzu?yGfiGs*xh;iP8#n98md6m9)k0Ek%mdDrQQ2PJf-UHQm8SM#)l2dlgAwU~x$h}_GW<~p6+$jGiLVQ^!40l% z2>2K(Q^Gox&r@?0A5k^1B)b$mq@32{RR+7d%z~^L1ouBwNc~9}z$b9PX`nl@KQGSk zd+c4y{%MFyqiuS3=I3aW4^5C{I|1x(yglTjy8!=|3x_z!Wa1+Ruy9Y~6D!)BR~>f5 zcAXBFa^Br=8Zg@`xt=3(Wn}e@pP_VL`QCo(5rSRdL923`$EJu89g5GGxbLDz2GyU4-~JHL%DY@_M=n|y(M+mRd*!+$zZdrT>l1>Bk%_4%txw*s;<{N-1Jbb*}+u zyV@>d^(vI#a`z5ISU@BQEbl?EN=eL*?o&cnC=29ScZ&@13XK2ZJAM<;O5B^Ba&?n8v)Or07Bx^Jj6dt!hs{KvxR!o*Y9*Ta0q!)D^b^R#d(^dse&gBzSWH_suH z!e>G8AzOsp?mCdV`8+ZAhYq;zTYG^+iwK-u(VoH2c|sS)n>TP1M161~Yxw3^!~_ zN$unn>|A!{F!TKDh=S(uI)p5JZ~kgL#XSA(FQiHF30t$)EYs*!p!8WtG$$!EIvBLe z7s(cU)xk}{A${x|o}EXtD|dnI#DQ@i@Vym{L-|C1kHoM(QD0IsWZB8R5;x)HZ)-pUw2!U*$iK8!uiPq45bVqj+ zF?E^->8%tkrlfYZ#~J#)%!=8G0z1vP9tR2Al+b7%19R#>%0M%OfE_&LfFYMLb#)II z&fedqx^$lzkP4j|Jw!#gA!We^cs-MyRgqEomg4oITR8ohi;jTivy`s|<%EvxYmJG6 z88yTL!w(`joT7B`3?(c<2-?{ul9C?@*dvmt9TkCLNOvdNMC&_kJ*39lMp(XDlJj5B zWI=;W!z{JTLhK2-&yV^(&qo`wFarxm%^YL%j9lnX!M_Gk6?E+Eq-UlsJolFTJ6-+& zLWh6a4t8?HJi_#ofGW7V22TXEElUMQ2JM^@BYp8xVN<;Ca0Wo{F7u@?UboUBR1 znp37aU)a5Cv4R4vE0eF$>D^}XV3xK!lH_p`u4UnbASGAon{ksjacuq6`g|lyX2N}pu=H*+iSY$6u z)ID2HtgVtS8ZE;VxeyxqBfRhJ22|J8^oc;t1Gaaw7j$7X2;?jj28iEoCEvG5VymT` zduXf@?EBf%Gjp#?WVK1`;|nR=*J$&CTT5ee!^_8Yr5#r=rNJ)7Kl=ehKo&Zoz#ZSO z;h_$`RD=K^Fq#UQ0^A3SrC8VVr!&;5Ej()dCZJ>4`^e`Ybz|=U2)g?9yvLsERDOeS zbv_p%g#?vDsnE)qt$e+cb#a3ryQ`SUcXq=@cJC&O=+MVVCtro!Uira-X2J(LK@0sn z2IsR@pGRq5zh~xl9Q4M>Jxm_mb%an&a)UE!!R>dmLslzF*?e9#uB(hB4wdfK)usmb z&a#y(;*JPgT`HmSy_sN9Ig$hu^0xlyH@F88hXCuA`)BOOSIUmUy($h5ZUjEMPblq(-9e*qV!J-Tx zl}eq|l&->saao=*XV`z-0L1BDmxifdrYLc)MqP_QQ#BzKh-^Yk$&%)zpmy)Nl^?Se z^c{254%(e9@EUrzntOG6l0SZ#jlUJCH$L(RFGM#~Htcpsh3rlT+J+VH=ZWs=%U6Z? zBWQgohzG6!o}S$ENQqJt#IUu7&rFtfg8%i4RPzM)?JshZn8|g5$I3Uo)n9s~(3K?7 zwr8*$$i349ODEGWsdgj8A|sX`YU?4k+Jc1);~N>^r8b z#&o9T?T~Vz7UQL zh0_2BG-4CIq(SbU4@>%i#)MkX21U^UA7IOsGg8BNki zOQWt{nd-NT6H5>rHT!t=)UqOxSq`5OxAz7GN6X6Sk*^k7 z(1Wtw%?lvO1+EY%FiLNSgR{FkLnxp_4fOFao&7D>4_{>OiCoJ$%V#0;@UZ@<|K#r( za0j&s;q&I^8 zY2Htsig~=D$h^&B&Mim5{Nn2q0GfZA2JoeNS`99|4=roUW6^qFi;|vMUrr(TnLl3l z%Sc;eU}oLeZ3ld;H05ZC0)UJrEn&%A*eKmTTDcp5dhDVXv08uCWHnprN8*zKTYWNM zrA{`mGGer^sW8D$*{M_?`s$BusU*~^b68ctxbdf-KDsuHlt=>*<#_Zmj__7?jMw}-Ul zeD{+L5}Y!o3I+B0Q*Vz`+C)3T7+s9_ zb1SNA&JnrvZKMUJOJ%XO0~0!vCbHkez{3O0mjs2entTub(vQ^iadqv%vdO|GaWV3# zyBVlCymx{#J)q(6_<`w1R-6@YZ*B(-wn#Tflj>Pv+G#1nyoSlAH1U_^s9*6HtHgAS zZGqT2P5egFk}uVHRXCy7*C$P#z5N>J8iLz0V(}tp-Zio<4R@V9-Lm z@&l|5-?O^0rJ{+U_*oS~u;7FlOb13uEFvFaQN5 zP9Q^B91;}3kg?yHZBdeEB(c1j++VWZuW?8Va(Et|G3ZN6TA+iqV;Hp<&Jh>?AaKV{ zN_xlTSVX$C!!K*4Q+(MXzvI+H2Xx45d_0>R!KExj8~F}iavNmea@Jd_kr;FrhVL6v zIKtvym|b1>+6h;@B%?E1qJsnhk(iCrp4cKUBr*`>BBB?Tvkuyoxl#8ESg8|llSpZ+ z@p5N+@U)XogQg-*hvKcZ5UePOpZZf`R)NjxgqoA+i@y7ei**+b6NZzPBfi%^E|fig z_K$pY!uwcJm*4hR9`0Th@y@k;6mM_$O#zUOcP&+4KFGiAw){ejB^-c8(z65Ii%Xx~ zBhP-|@fh3}GS3;dql0zp)CJui8~jjrJkq~>chC!wq}z}t{QI!so+enSSu39%S1GjL zMTEtuwV%bXhg7`3YjQucdhU>%hZi|owyr7%$B{!c0>*l102_y{sa1lua##Dx+`z9D z5Efb+aGYmwEjoSk2@^Z@BI$vv{8WPr1}BLPF^peuHu3uDQ&1B|)lf^lY+V+vDtpbSckZczRq=MQsNED`Yp z9bX2MP>jyL)q-`EhppR9v9lh!0u-v&Yjsh)UlTz4JoF|!`NN+Nev{w{J{zOJKt7Pc{aj`Ahu4!peRRzQFnsU&E?{m*m{pTxocOPhAGA%Hxb7+-0n;-k3 zZ!`K`qzM<$+N#~-FIib#v~df*Li`YyC$Ap;mu=uLK#Y=#La{vXadkntUjZhTQ1uBdu z(cIwRr{mT+eB}d(}vCetYW{O!pfwvzdh+p@Sku9F{KKL7YSrqsjaBn43R0+ z2x1YzGZm;f5?6%=a`sq}eYgNhyYTaB`8a=R0gXWmukr;zV^{v_0K z=it`k68GxDecVD-ex|r_Bt4JR@KnxV2syf|7RG7tWu9q3pt|X|$QP7DkC>u8Z|$VJ zN$m7mE{)t~E^k`l2wY+@sFuo@Nq17h-FD5+%yORRVnv>|Hhp%lhL_xy0O~-{89H`g z`hA-a>S%kLS6Q8$v9-#p(C^7|Be}8BBz?7|h2j2c+;4}9 z>>?FJEee!9-o4}2Kj-ndE<}JYkTp!GcH|Co?t{RClVXzy@hB|?R|V6kebyTqcW#ZsAHemieqpSV4Y7Nsi zpfCLGQouhdL?9n=PeF!>^CCq^Di+T47_DW$k?~0sDmwA4yh=b0yY!FOd}2Zdvo&Yk z7b;l7<@@2Xoz1>2RgUII8vV(1ezT^)2sE!l*-Oa<11z<(@UQC+W5uiDc`Y0cTq+ndsigXsULx2}6+#{q^=`rl9f5&VC=M^uMho zWkNsqz|qMM4dQH5qfV+#qsng_HQQgb*<*qt$$ODzEYp7M$D}Kb0Pw#!3}5> zuU(f!Bt1jkXRM2?K zgabMVKiQm?LCHM&!FFU-$#)^6R~}x+tRm<)lf0oHghm-GpuYHwg#5dW-$V>m0O^3s zr?+&R8)@jvg||{HWuffrmBl^BdQ`xHq7B4lVl;EY+T{K%&uwtl$6r7FTMe+!V2qFg zj+=(TSZUaA*PgE%O1S%I?_l;Z8E#^~c3*@QXQYYXA$>`TZ1^X{{Y$kmNcA%X6ILi( z)Cx&wU&z7WeI;vZtLR}YH)pt*U{+^u5`)1jpCgpbwuoq8T&UkS(w>7S;sohEztf)s zEWV8IAM${&&lsTnx%i8w{8*7zxC6I)xj8>^q&f8dY5Q|oBU>}L-S(TAy_&byF>(_@ z0F;dr0Zb|N`JREoOH{FOTuaCN0L@qtTrgae>($7oxDP`O?=Wz5=bW(cQm|2BD@Ryg-keM?mfT()n3~BUVz#062rpN-c@{tNv{f zwnLf@FYO?}R(We}%ubmvtEPbI&A=gS0<<3Gpfh)u=CKR`psqB=Vt~*`#tq_g`jQNgg;ZV5kmyMsjJhffrpWZpB~nlz-O&YT>q&F<@$2HQ!M-9u|pC z+J6|?87hD8lXuvzh3Pncqja;An<59lI)w2uGu#(i+uSdM?qbg2!e9>1-hV0&IA+Oj z>pxChII11w&97onm)NFCS(Qhs;lC+z?AgR`9w;VC7&Zt7{7lPw>&Wzavx0F%Fcy@T zva1LH*@n%z2!@FqI_3L09F7u#W^Z;kP^G8rDWu!=D~wF$j>rMy6py}gBx2==9w?YN zW{wE>8Fv^|4#&?TDF+V$ot%CgNO3ulr?TgI|R#M)xC9YoUgmV-UFw_8Q)+n|uW>dAJisWbZ{Z+zynS}HfJp7KfMnLkQ?1!VI1 zX2!21w->d1>kajz0+SsVGf=Y1LQLdA$x=i1FICr+aVdzOOiJOIpD)gv=#UwI!}pJv zUvM~&n7jf{YGKYLt!ljM7F}}ItUT9wym5tDrE)XZkelC(@DPTr8ugzg^V&Kcf17hu zL3=lUb>HiJ)*G7K$wet_dR3HAu&j6D9N1%vk9QcVBfX?jVMn>g*%cv0*n6zKPQWeO zoLkqCjcN7F;W`O)i6_hzodoY;iv#;8hNrSyWl@rYFK+#W48g>g5mT(7L!5wo?*)Me z!5{-lY*}lXMZxjz4~}?hWN2JYfJ6Lb&>9c3pc9q$a!r2mP&HTXkW}FA>aJn=ZQc8U z`K|Zj`}`hzk1l)o89H*UkUD!%o;%yeDq3Q)k*Cx@ zUQra2bh~=I;9SF?Ne**HxED-f*V4_P($o8=RXpL_LSlz_sog*@D7Fls*godB48t~( z))Y--i69rg$2-qS8GoV9q^4i;*6yHVMOha27jK>|cMcgiERQI?1|o1j{Oo|;C$&&W zVXZ^QJ|E7O-@N6GXw$l=%M-BnlP zjLWc+)E?L6FT;l;12=O#0GFgDddC2U8$gatca5dO_ygTSQT5ujr2EJ>&f0i$PuTK$ z-<+;>bw`gS^9xrVk$>-nieB*v_#B1G>aFd3x2U!8AM|EypkqANGkRNM3;p%*gZ$!Q*mi%W)FH-*YXJ)doSqps5!6P;9{ZWw zC!Yg`4EqNy0&!RRq)JrN8UXU$&YVKWUVuC90yb*QjnmOgw#2fSd}v`PH7vqaq8M|6Ch^0 zLDk=+doXX|kpT}`pVseW`5Y*aQo0>3_s)^3@}zL0WMcP@TQW_O0)m&?_Y9iJC8G4wGbT9MT5k^kx%dfDdPR#h^_q(iEZXrV%ertG_0@7-PD+Yy@2R|y^x&d))`Y8mV4snXPm2(r)P5l$mF zIWm{Un1mqjuCLuyS$?wAG#H3ly_-_)VzO!$jKh!GFq|3e+9U(Lw?i=)B#I9Tjt=++ z`L%x3v+td_no~dwzjA))X5B-#zHT$O9?iq%k3P;Wn;bl|_-1+3h;7K*(HOK$ptP2g z9#G-S$o9clAZQm1KyuugnQ4YTAmsg+_z1POLOW?v8xPXxMpuA+&mqY>lE^P2^qBPH z7<$L?Sq>I{5&qx6Akl}{(PlIm89$8ICli||pS|aIKP{%Ma^!mN*n)=hy8pJ{eGW9% z0@Mv+5>6511HTt+-R}`Noh;nn-6Zmn3E@^43)yiqXo1ADWUY6#1MBWT9f{BwG3fL8aiSnnjj82wn(fOF{?>>il&EmyGz-fs_1;t z^f&558bPH-Cl6CQR|j_*c0X0AA1v6nTFU5gW2?4N2PY!ccA%J_0E^ZbV063O~bBQ6nf)Bw;PKHZ3ws{7+j`1c&h5sYQf=J)HI6LWm!{61Jb#p1H zjV8-+|6TkU(O;ditOWV))$}-@{idZu`5uwr+VPv@QUCn~W}ura@VYApYBcO(f?Rtq zkK@7ZUsj7(t5xVKzhu?dW^8e>Ze#L$A?MjPp0ORO5pa0O`?F1fSZl0xh9u+in zt-zBf@91X>&+NLiL`zovv!lu7{n^KbXSDkdwdvqG&;)T8fzf|d;P*C0!E#N3fXP4v z*6t73mR%C@c<;a_9-n^|=oHu~FKIIVD~5kEkf3rRZvTt9s@e-AbUO)cfXnQVR- zdOiM0Mqy}Rn(p(3xQ`C82UmYea-b2j;NghYh4Zrihk4TU8EO}v>Br;4q;NJ~1PW4! z9rMc%hMkcNdCg-lb;G3dF3VjE@&S*>mxmV2@cxz2q*!h{qE_az8V2#btCfBMj*XG9&at-j2E6E z<;|<#LZ?NsMj+3S7Jbz@ z?PKv^>O_Cg#tX03S^diIm}ETg%V^cFb1J`PWyfT~U;IBZ2O>2Er22+o*fP?dIbk+~ z!?E3WUJ?T?HRMoUjxL16+H!FspF%X9!dCzaoFdk0{zfk_DjT%_TNu6)0p^`68d|!yVH4-!%%=P!fijw-2aEo!2>l5&wtf)#mv#vX-dRi>ZRKsz_P&H}1=3P} z^P65V)7Ljmn%x3F@89ih2twAyMqkF?VN6@viC)dXtff1tIm~m&m+n3D z*nOeAZhEeZ3yt_jw4ynRywuQTxPv$PR{jDCw3NnV1hrNQ#_UHawj z)n)%v>wgiOpco36#2|fLw`BdSPNlY&)U@%kRn8J>Km*?L~z_g12DE)dUvvwEdwOYuldm2k3N5m3PJ+5wD)9$%1KmH>3L@$E`1W8%3{`O$k`nTNrSkcSc^fh z=zpp}{F||=J23|RpOql~&7m4xx@=H*^mnEEKOdk$R&I#>pZ|?WA|VE_IxQ}o|B?9L z3&DT`>i@GKdp886Wd(7$IBc9A^f>1Tm*W}-Q#%i1_Z2)KPEXhvI2Xose9ps5jzK)^ z_uAdJAnWQ3W9F+t>Z`C)mOe{&4yp-(b8f3}kaUL#BCUOwMuV_aP!FSfvJ1T+T-mof zh`Rq}4UZ*7(U|Yr)UG1lf2DF;u>QMMhhnrBzJGXM(C;$|2}S9-n1RFn&P08|7Yt5n zC6Bg;#bH(42q4K>taHy>8=25Po+R-=*DL4SiA}>U*~r()!B>Ri@C~rc1Ouw}8;}uy z5XYAY_sS8HbEOnfUb6kTOzQJjcfSw4!-ur>MS-RGpaEj2p=D6S0XG+Vsao6M5UX|k z4gX*+OE<$m2f%V^LARO@w=%u=#G&YaM5XGAPDZgj6^EQ54T6mPq#|62sgk$G`nQgScczMt_3O?RR1JK|q4 zFxnar?% zXZ2TC>2acIsFf@}5N@FT(G5fOpp9ur=Fg_8xRW|jyfqO$Qx>0bsLXgmrH`CIRt+d8 zWZ<&@Mj_Ne%cMm?QiM*SofQy~G~+^0LA?%ei+5pY>F#eZAa7xo*gpg6=AJ=PcVOUW zTA++zr?Q=|Wal%`x$Jmd$_N~t(Zuh)hR%aI!wu=U9Im8z1oBoPP| zAbR(iR@+{hid#s}>wG;tzh3ra}Gm?}h`Bl<*6Xik7IRKSwcS1BnC* zXwlZ@e~NC(9l%2C)kr@IXSS8iGF?v)o(}<)*NlDhENq0^eUTdMDN%PF^euG-vpfIw zq{-vDXD}y8ByMc!O5WeoO=5+Xty?XPp;$=W_m<^xdqS&2SIeLf*nNHwb1b`4Lmfm7 zKk#Fm5)&bjOr`Yv>=p)<#$x}9Qa2RhwJjuqcV2g8MG5S-%ilY}V5fwj@x*dli<5l{ zt*;)jGs$+hcG>>a#=Epr_w3LT{l2i@-Z-d{-2cs{e1&Gv2?D&FGeAeGG}Q^p#2Awp z+fzg_Dc+Q_SnbyJwZn}}ZdeT)pO2MmNRT@2W++Wy&W$IDOMA6;ZltMyLGvIDh}Q0M z`Ur|}d%vj_kJ)*ZhNJp_s$_X!#JFf0SzHSrW8c`=v9^>;&@>;D$M3ZRg?6V39D{JE zA8cSf_Fz-^k6Lw{-SF_j^L!4E<~K?>ph=~Nx$Bnj7P}N_HXlsp93_jw$08A)5^Uen zLC4)o`j)Yvh!V?}-a+YZ=!arfJPENSAF$18YM~t4QUBuq$N2hlZFn`JMiWN^$M@F# z{4^U0ZZ9iB5Q88h-CfFT|CqMIg#AMNg<(p-t&?&Jr+EVqJO>}IfhPQ84;tbA@tg8( zMMBhxC~;@a7G5|MzM=`nd!37iNZ6hd*6!5v8nZ>2dXGq02FsaN{tMYVU~!7g;O3Rz zM{X*VBwM_#zO%5kd>vmNxIK&j#io=fHhPeNE9mmwRlK!_RxD> z<10OvdoJ+JJKuI%0Y)Pn)HS{^ZT|*!38;AfOh_EoMELXYL;57b3)?&R3pOCe`ek{&@X zR61M6>u|!DFY8D!-(F?{Ru-QSa~*q+(~sF=1byQlHvbdrR?}@Lv=Bixb8eEK4}3LtGGpkTSm3rHb_k9!14kpNr*PSw;c+|(LXhvPO%sD`b{jI zPbdbaG3l|8*L(+roL(q(-@d&a+SLfQOzgs;Sxi71K@(`my$;jEi?sU=^F{E>dqaC| zaYa6_oaM)58Os>|V^Sb6r=UUmQwVD6WB?#@xuvY2uah#1XyOvICv(@{<+M9BRsDwr zSaH~sVi&x(`h*x0Vf+dcp**Nliq_#hpyWAfkmy1M_2Kychg^6c_6v!gonqkouJ-)q@O5PDnd@DeXz(HVU^%F`5myV-`QZF#gv7*>o| z2JByOql1gfCLRBnWDsOB+;*}gJ9`|Ub&6(GR9T%ii85%{B;3t{O8c*S#U8_XtE2Yv z6_ieJzw38%n(K+X+m`MPqAc-pCBN{CCzvyQ`6@{^#4yir+C>ohk*4Cl9a^%}{QPB+ z4h5G;+EhqI9Zol$@8RcsE_zup-4EfO=jiIC{` zZJFIY5Q>JI)&m+Vu z)2_`_(iFbX`*d)ce~xUTYgs>A{rjU$$p?Rvl)nxr7V|ArhmQ-PXVn05Ne^t=A8Q)BE&9$ameN8sIFNy;BT z>%3k>MEn05{Q8%>#T41*+VZy0kBp$EAcW_D(Tb-|5&ju^H>6cq{~Uq6_%KC@K+yw!Z=iE zCKgwD+DKBYCi67J_w1-5jhU=FwYX8LW^rzf>+IUsTh=T3A8$8)B?Q?B0j~kd8oC?< z*0mlm|K1wO-djhA<<#yiWfQ6C3bOwiWPeS2w<3pTs*t=^D=aRxF{_*bF_>;DHyGr%6gC*`wun z?$c}MU2ugL*`O~SB2$jrEQSP-H)-6>7G>UO4pl6HL*S*C+$4y5rVnAxD8Ip5>vq%Fb zr`U%Ne*yr&L<|C~=n3-A_ zLIbu3n~P?oy(ADN;*1BSoQGANKcj|SKIi+mSyeKG=F;R^ovM`Tbm2-+&gy(M&-rJR zmZz}F!IP{wr5P3gT?Pj7(k>9**y5`chTxytve!?z{Scb?IBZ_wG&WvA)~so}!aQhm zQlqpu8Pd|qriZMvV-;tMFMcLZWS9qrrC$1qF8&hGX;xW#w-k9N%kpPR9fQ6AOAFA&OQR zjZbDx!tujWOrGAj6+_BMG^)75e<_L`o{vPq;hZ#apx(_HIIL=3P~wnL_75GMDWDiE zBqu6=Ne(czFnX9L#;I!MJv1l9HM^MXstY<9QyH)qmS(98TA%}E>80+<)8X7eRWayC zeCBu7pJnieym9m&dlvE3U!aPyy)?M-QA^QKrq-$7h}Hh$d^dvdaecrM7=kJT2Nls1rA@=q}4PL#v$1YQ|<~k-!-XJHOUk?C0#Y( zQLjW2ZhF@ZLR2In;15kaU}Bk3#TTMmXGPP#d-INCr(}m0si<+O&tjw2>9F6ozY%b{ zobxE;ksE^e?T}ALr@C2Fe^uT14$)yz+bWEzYAuVto+-8pr%esJ0V|cpxkZ8dIJG`G zV=X~i6DN@+9Byti!*Ut10n(XknUB!$Wz)L+wSZ=K=gwNnk;Q<`6KBO77y_d)Ro)Wp zXHfXEg=cg}e67ilF6PPND9TxL;s^LZr((SPdtboRK~|&*n|9kX+Qk5M%)>0A>@@9L z+b8b>=SeCAsCS2p-y6{=g79EC-A+yzN9sg74(Y}$<3HTBzv_Gs+LCgF#O)SX3IR(u zxJ$ZZBwu#NH)r#xB+O_>V&DJ4I6OEsIAQ=cym^yfiPxbFdvWnxJMmA4ioBxBO@)3w z)KwpnDrb|gD^Nn?GNv7#2fa@K3f_NG+JBRA(_~EZgFpO$9#Jc-XcaUZqI2g zJjK4=eO^-dIA<03L_hG+U0^_L1C&7Q$U{G~{MNClyxQ8K;8mPy8k>(fJh*iP_~ji4 z=wpcR+vw@A<XvvbCo>SZ z%KWrvMd2UBujT!uRp+}_)B}PPxZh3542`_4eE<1I+oCGli5GQ5Ag_4P+lxT?+e@_~ zm(%*R#}auEny&<Sb?GET{3+bL(!Zk_sIQT0b9z z%cCsY<(vs`#>29Up&z3yBw01=h)G^Few_DH14=H|8tK;NU;U8|&!qe_%_F1A@ofo@ z%SyN#=e^RdTE-D;OTVzd>v<*|AEQvI@1g z=cEkN>+6PoF;t2pf!wZ7#QDr=V9Kr3tf=4H(r=G_MK8p5d1>x6O-rF;)n-C!BA0X;?6BvO7An zdt4ZXKBr(?Q1;wa6lvt_@RNf#h&YJ2Sb|aA5B0btpWK5t6Ptv{w6nz&KS%*q4CWusn=IrKX5n$6UF)%uh@=|KK9qeJ`) z$X*clP0TLklp5_J+Of4$xX%``+ZX13r<9XCPfqK!J zXU*{fL81%RCTGg;LG>MMAC^9QwMrG|z@2DVUwd9ORcE0Jp=8$jyd>S=u>eo(&8x8T zQMf_{vj^XrPmf5irHAswa+2Qdv0L_VnF7uM{VPcJpaTcJ-3rN*$zwtIn|eT zEZ-S%>Fs;-)hxq+!=6jaNe>#jV|*oY|LNj@bTP^4qI6<7K7K+WAitH@{Al&hJtn;v(1Zvz?3%0C_1u18SahL@ z`jx;FJnfc549K#w0L6?zQZo!4K4!G2uAj6*QL|I!xsDBs1h|_f8>z1ZQlN1fup)Cd zkf!(g64T~fE1}ZCN0=!(qBQDD?tJi*U`5Ls>oT9y z?|oWFaZhx(xL6%uK;8Er1d=7!-W-^WGPiJ(PZJ}AU#W2ygv zVE&}vg4hM#L}EHlHDO|-x;Pe!Ho}haIV|{+hD$)hhMw zdVeV{sc1nVQH?X2yGoE`euEH!d3lw*p$V}motl+i;^ys@}Xs^ zJ~rHgTeH4$T*{AMdA+*YQT}u#OIqXFcC}qP3sbzH09I5~RJCg6UWQ>~`1W|8AZ1>` zQ$LD#TdF-uJx4*7L$SKkaYQ~Dp;flJ5)HhyD?TPzdnAIR%GyKm&wKYs-RxQjZbR{& zL^y*C)4NQbAFq)w4oWkRH9h&4C1V|My5QrdxA4|~#_1;-?M|H3XI+f8PK_@JHEMp3{BTww^xMoq$lrXvb zwO=(q13~{lVs}RmGIE03d%9w8zsJZxW5Q)HPDW zpm5}l*m*z56%;blHPZX512in$5Me19q_%;Pg`Kmfe`rKhbZlHgQd(J6Lt|4*$Kb@$ U>KUiV+&Kf5X94m%`zQVJFD104SpWb4 literal 0 HcmV?d00001 diff --git a/info.plist b/info.plist new file mode 100644 index 0000000000000..e1267df8c1788 --- /dev/null +++ b/info.plist @@ -0,0 +1,16 @@ + + + + + CFBundleExecutable + telegraf_entry_mac + CFBundleIconFile + icon.icns + CFBundleIdentifier + com.influxdata.telegraf + NSHighResolutionCapable + + LSUIElement + + + \ No newline at end of file diff --git a/scripts/mac-signing.sh b/scripts/mac-signing.sh new file mode 100644 index 0000000000000..dc0630fc82873 --- /dev/null +++ b/scripts/mac-signing.sh @@ -0,0 +1,72 @@ +# Acquire the necessary certificates. +base64 -D -o MacCertificate.p12 <<< $MacCertificate +sudo security import MacCertificate.p12 -k /Library/Keychains/System.keychain -P $MacCertificatePassword -A +base64 -D -o AppleSigningAuthorityCertificate.cer <<< $AppleSigningAuthorityCertificate +sudo security import AppleSigningAuthorityCertificate.cer -k '/Library/Keychains/System.keychain' -A + +# Extract the built mac binary and sign it. +cd dist +tarFile=$(find . -name "*darwin_amd64.tar*") +tar -xzvf $tarFile +baseName=$(basename $tarFile .tar.gz) +cd $(find . -name "*telegraf-*" -type d) +cd usr/bin +codesign -s "Developer ID Application: InfluxData Inc. (M7DN9H35QT)" --timestamp --options=runtime telegraf +codesign -v telegraf + +# Reset back out to the main directory. +cd +cd project/dist +extractedFolder=$(find . -name "*telegraf-*" -type d) + +# Sign the 'telegraf entry' script, which is required to open Telegraf upon opening the .app bundle. +codesign -s "Developer ID Application: InfluxData Inc. (M7DN9H35QT)" --timestamp --options=runtime ../scripts/telegraf_entry_mac +codesign -v ../scripts/telegraf_entry_mac + +# Create the .app bundle. +mkdir Telegraf +cd Telegraf +mkdir Contents +cd Contents +mkdir MacOS +mkdir Resources +cd ../.. +cp ../info.plist Telegraf/Contents +cp -R "$extractedFolder"/ Telegraf/Contents/Resources +cp ../scripts/telegraf_entry_mac Telegraf/Contents/MacOS +cp ../assets/icon.icns Telegraf/Contents/Resources +chmod +x Telegraf/Contents/MacOS/telegraf_entry_mac +mv Telegraf Telegraf.app + +# Sign the entire .app bundle, and wrap it in a DMG. +codesign -s "Developer ID Application: InfluxData Inc. (M7DN9H35QT)" --timestamp --options=runtime --deep --force Telegraf.app +hdiutil create -size 500m -volname Telegraf -srcfolder Telegraf.app "$baseName".dmg +codesign -s "Developer ID Application: InfluxData Inc. (M7DN9H35QT)" --timestamp --options=runtime "$baseName".dmg + +# Send the DMG to be notarized. +uuid=$(xcrun altool --notarize-app --primary-bundle-id "com.influxdata.telegraf" --username "$AppleUsername" --password "$ApplePassword" --file "$baseName".dmg | awk '/RequestUUID/ { print $NF; }') +echo $uuid +if [[ $uuid == "" ]]; then + echo "Could not upload for notarization." + exit 1 +fi + +# Wait until the status returns something other than 'in progress'. +request_status="in progress" +while [[ "$request_status" == "in progress" ]]; do + sleep 10 + request_status=$(xcrun altool --notarization-info $uuid --username "$AppleUsername" --password "$ApplePassword" 2>&1 | awk -F ': ' '/Status:/ { print $2; }' ) +done + +if [[ $request_status != "success" ]]; then + echo "Failed to notarize." + exit 1 +fi + +# Attach the notarization to the DMG. +xcrun stapler staple "$baseName".dmg +rm -rf Telegraf.app +rm -rf $extractedFolder +ls + +echo "Signed and notarized!" diff --git a/scripts/telegraf_entry_mac b/scripts/telegraf_entry_mac new file mode 100644 index 0000000000000..2031d6c1fc309 --- /dev/null +++ b/scripts/telegraf_entry_mac @@ -0,0 +1,13 @@ +#!/bin/bash +currentDir="$( cd "$(dirname "$0")" ; pwd -P )" + +if [[ $currentDir == *"AppTranslocation"* || $currentDir == *"Volumes"* ]]; then + osascript -e "display alert \"Please copy Telegraf to somewhere on your machine. It can't be run from the image.\" as critical" +else + cd $currentDir + osascript< Date: Fri, 5 Mar 2021 07:20:24 -0800 Subject: [PATCH 268/761] Add field key length output by field type in schema_sizing Starlark script (#8934) --- .../starlark/testdata/schema_sizing.star | 22 ++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/plugins/processors/starlark/testdata/schema_sizing.star b/plugins/processors/starlark/testdata/schema_sizing.star index 6bc53377f7bd4..d382749cb06a5 100644 --- a/plugins/processors/starlark/testdata/schema_sizing.star +++ b/plugins/processors/starlark/testdata/schema_sizing.star @@ -5,7 +5,7 @@ # logstash,environment_id=EN456,property_id=PR789,request_type=ingress,stack_id=engd asn=1313i,cache_response_code=202i,colo_code="LAX",colo_id=12i,compute_time=28736i,edge_end_timestamp=1611085500320i,edge_start_timestamp=1611085496208i,id="1b5c67ed-dfd0-4d30-99bd-84f0a9c5297b_76af1809-29d1-4b35-a0cf-39797458275c",parent_ray_id="00",processing_details="ok",rate_limit_id=0i,ray_id="76af1809-29d1-4b35-a0cf-39797458275c",request_bytes=7777i,request_host="engd-08364a825824e04f0a494115.reactorstream.dev",request_id="1b5c67ed-dfd0-4d30-99bd-84f0a9c5297b",request_result="succeeded",request_uri="/ENafcb2798a9be4bb7bfddbf35c374db15",response_code=200i,subrequest=false,subrequest_count=1i,user_agent="curl/7.64.1" 1611085496208 # # Example Output: -# sizing,measurement=logstash,environment_id=EN456,property_id=PR789,request_type=ingress,stack_id=engd tag_count=4,tag_key_avg_length=11.25,tag_value_avg_length=5.25,int_avg_length=4.9,int_count=10,bool_avg_length=5,bool_count=1,str_avg_length=25.4,str_count=10 1611085496208 +# sizing,measurement=logstash,environment_id=EN456,property_id=PR789,request_type=ingress,stack_id=engd tag_count=4,tag_key_avg_length=11.25,tag_value_avg_length=5.25,int_key_avg_length=13.4,int_avg_length=4.9,int_count=10,bool_key_avg_length=10,bool_avg_length=5,bool_count=1,str_key_avg_length=10.5,str_avg_length=25.4,str_count=10 1611085496208 def apply(metric): new_metric = Metric("sizing") @@ -20,7 +20,7 @@ def apply(metric): ints, floats, bools, strs = [], [], [], [] for field in metric.fields.items(): - value = field[1] + key, value = field[0], field[1] if type(value) == "int": ints.append(field) elif type(value) == "float": @@ -31,26 +31,38 @@ def apply(metric): strs.append(field) if len(ints) > 0: + int_keys = [i[0] for i in ints] int_vals = [i[1] for i in ints] + produce_pairs(new_metric, int_keys, "int", key=True) produce_pairs(new_metric, int_vals, "int") if len(floats) > 0: + float_keys = [i[0] for i in floats] float_vals = [i[1] for i in floats] + produce_pairs(new_metric, float_keys, "float", key=True) produce_pairs(new_metric, float_vals, "float") if len(bools) > 0: + bool_keys = [i[0] for i in bools] bool_vals = [i[1] for i in bools] + produce_pairs(new_metric, bool_keys, "bool", key=True) produce_pairs(new_metric, bool_vals, "bool") if len(strs) > 0: + str_keys = [i[0] for i in strs] str_vals = [i[1] for i in strs] + produce_pairs(new_metric, str_keys, "str", key=True) produce_pairs(new_metric, str_vals, "str") + return new_metric -def produce_pairs(metric, li, field_type): +def produce_pairs(metric, li, field_type, key=False): lens = elem_lengths(li) counts = count_lengths(lens) + metric.fields["{}_count".format(field_type)] = float(len(li)) + if key: + metric.fields["{}_key_avg_length".format(field_type)] = float(mean(lens)) + else: + metric.fields["{}_avg_length".format(field_type)] = float(mean(lens)) - metric.fields["{}_avg_length".format(field_type)] = float(mean(lens)) - metric.fields["{}_count".format(field_type)] = float(len(li)) def elem_lengths(li): if type(li[0]) in ("int", "float", "bool"): From 1b7f445ee52748e8eb056259316a53eb3dc0ef31 Mon Sep 17 00:00:00 2001 From: "Peter (Stig) Edwards" Date: Fri, 5 Mar 2021 15:25:45 +0000 Subject: [PATCH 269/761] Correct Q+A about state (#8918) --- plugins/processors/starlark/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index 03d9f7a939250..c14c3e8bca2ed 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -165,8 +165,8 @@ def apply(metric): **How can I save values across multiple calls to the script?** -Telegraf freezes the global scope, which prevents it from being modified. -Attempting to modify the global scope will fail with an error. +A shared global dictionary named `state` exists, this can be used by the `apply` function. +See an example of this in [compare with previous metric](/plugins/processors/starlark/testdata/compare_metrics.star) **How to manage errors that occur in the apply function?** From aabec054a7a378781db2c183e797967a2dce1c73 Mon Sep 17 00:00:00 2001 From: Paul Choi Date: Fri, 5 Mar 2021 09:30:45 -0800 Subject: [PATCH 270/761] Set $HOSTIP in default URL (#8721) Telegraf daemonset pod cannot contact kube-apiserver via localhost. Should be `$HOSTIP`, and the default port is 6443. Co-authored-by: Paul Choi --- plugins/inputs/kube_inventory/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index 06c84a92ef89d..79adb3c05a994 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -38,7 +38,7 @@ avoid cardinality issues: ```toml [[inputs.kube_inventory]] ## URL for the Kubernetes API - url = "https://127.0.0.1" + url = "https://$HOSTIP:6443" ## Namespace to use. Set to "" to use all namespaces. # namespace = "default" From d7df2c546ba706bcdb3405d3ca0d2495e563b845 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Mon, 8 Mar 2021 08:00:56 -0800 Subject: [PATCH 271/761] Prometheus [Input] plugin - Optimizing for bigger kubernetes clusters (500+ pods) when scraping thru 'monitor_kubernetes_pods' (#8762) --- docs/LICENSE_OF_DEPENDENCIES.md | 2 + plugins/inputs/prometheus/README.md | 21 +++ plugins/inputs/prometheus/kubernetes.go | 181 ++++++++++++++++++- plugins/inputs/prometheus/kubernetes_test.go | 59 ++++++ plugins/inputs/prometheus/prometheus.go | 96 +++++++++- plugins/inputs/prometheus/prometheus_test.go | 48 +++++ 6 files changed, 396 insertions(+), 11 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 66e19e2b49683..0aff4fb299f4c 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -204,6 +204,8 @@ following works: - gopkg.in/tomb.v2 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v2/LICENSE) - gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE) - gopkg.in/yaml.v3 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v3/LICENSE) +- k8s.io/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) +- k8s.io/klog [Apache License 2.0](https://github.com/kubernetes/klog/blob/master/LICENSE) - modernc.org/libc [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/libc/-/blob/master/LICENSE) - modernc.org/memory [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/memory/-/blob/master/LICENSE) - modernc.org/sqlite [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/sqlite/-/blob/master/LICENSE) diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index e9dd119cc12d4..57a1753536888 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -33,6 +33,16 @@ in Prometheus format. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation # monitor_kubernetes_pods = true + ## Get the list of pods to scrape with either the scope of + ## - cluster: the kubernetes watch api (default), no need to specify + ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. + # pod_scrape_scope = "cluster" + ## Only for node scrape scope: node IP of the node that telegraf is running on. + ## Either this config or the environment variable NODE_IP must be set. + # node_ip = "10.180.1.1" + ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping + ## Default is 60 seconds. + # pod_scrape_interval = 60 ## Restricts Kubernetes monitoring to a single namespace ## ex: monitor_kubernetes_pods_namespace = "default" # monitor_kubernetes_pods_namespace = "" @@ -88,6 +98,17 @@ Currently the following annotation are supported: Using the `monitor_kubernetes_pods_namespace` option allows you to limit which pods you are scraping. +Using `pod_scrape_scope = "node"` allows more scalable scraping for pods which will scrape pods only in the node that telegraf is running. It will fetch the pod list locally from the node's kubelet. This will require running Telegraf in every node of the cluster. Note that either `node_ip` must be specified in the config or the environment variable `NODE_IP` must be set to the host IP. ThisThe latter can be done in the yaml of the pod running telegraf: +``` +env: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + ``` + +If using node level scrape scope, `pod_scrape_interval` specifies how often (in seconds) the pod list for scraping should updated. If not specified, the default is 60 seconds. + #### Bearer Token If set, the file specified by the `bearer_token` parameter will be read on diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 16f69cbd14228..2ed08dd3fca7b 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -2,10 +2,12 @@ package prometheus import ( "context" + "encoding/json" "fmt" "io/ioutil" "log" "net" + "net/http" "net/url" "os/user" "path/filepath" @@ -15,6 +17,8 @@ import ( "github.com/ericchiang/k8s" corev1 "github.com/ericchiang/k8s/apis/core/v1" "github.com/ghodss/yaml" + "github.com/kubernetes/apimachinery/pkg/fields" + "github.com/kubernetes/apimachinery/pkg/labels" ) type payload struct { @@ -22,6 +26,20 @@ type payload struct { pod *corev1.Pod } +type podMetadata struct { + ResourceVersion string `json:"resourceVersion"` + SelfLink string `json:"selfLink"` +} + +type podResponse struct { + Kind string `json:"kind"` + ApiVersion string `json:"apiVersion"` + Metadata podMetadata `json:"metadata"` + Items []*corev1.Pod `json:"items,string,omitempty"` +} + +const cAdvisorPodListDefaultInterval = 60 + // loadClient parses a kubeconfig from a file and returns a Kubernetes // client. It does not support extensions or client auth providers. func loadClient(kubeconfigPath string) (*k8s.Client, error) { @@ -66,9 +84,16 @@ func (p *Prometheus) start(ctx context.Context) error { case <-ctx.Done(): return case <-time.After(time.Second): - err := p.watch(ctx, client) - if err != nil { - p.Log.Errorf("Unable to watch resources: %s", err.Error()) + if p.isNodeScrapeScope { + err = p.cAdvisor(ctx, client) + if err != nil { + p.Log.Errorf("Unable to monitor pods with node scrape scope: %s", err.Error()) + } + } else { + err = p.watch(ctx, client) + if err != nil { + p.Log.Errorf("Unable to watch resources: %s", err.Error()) + } } } } @@ -126,6 +151,147 @@ func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error { } } +func (p *Prometheus) cAdvisor(ctx context.Context, client *k8s.Client) error { + // Set InsecureSkipVerify for cAdvisor client since Node IP will not be a SAN for the CA cert + tlsConfig := client.Client.Transport.(*http.Transport).TLSClientConfig + tlsConfig.InsecureSkipVerify = true + + // The request will be the same each time + podsUrl := fmt.Sprintf("https://%s:10250/pods", p.NodeIP) + req, err := http.NewRequest("GET", podsUrl, nil) + if err != nil { + return fmt.Errorf("Error when creating request to %s to get pod list: %w", podsUrl, err) + } + client.SetHeaders(req.Header) + + // Update right away so code is not waiting the length of the specified scrape interval initially + err = updateCadvisorPodList(ctx, p, client, req) + if err != nil { + return fmt.Errorf("Error initially updating pod list: %w", err) + } + + scrapeInterval := cAdvisorPodListDefaultInterval + if p.PodScrapeInterval != 0 { + scrapeInterval = p.PodScrapeInterval + } + + for { + select { + case <-ctx.Done(): + return nil + case <-time.After(time.Duration(scrapeInterval) * time.Second): + err := updateCadvisorPodList(ctx, p, client, req) + if err != nil { + return fmt.Errorf("Error updating pod list: %w", err) + } + } + } +} + +func updateCadvisorPodList(ctx context.Context, p *Prometheus, client *k8s.Client, req *http.Request) error { + + resp, err := client.Client.Do(req) + if err != nil { + return fmt.Errorf("Error when making request for pod list: %w", err) + } + + // If err is nil, still check response code + if resp.StatusCode != 200 { + return fmt.Errorf("Error when making request for pod list with status %s", resp.Status) + } + + defer resp.Body.Close() + + cadvisorPodsResponse := podResponse{} + + // Will have expected type errors for some parts of corev1.Pod struct for some unused fields + // Instead have nil checks for every used field in case of incorrect decoding + json.NewDecoder(resp.Body).Decode(&cadvisorPodsResponse) + pods := cadvisorPodsResponse.Items + + // Updating pod list to be latest cadvisor response + p.lock.Lock() + p.kubernetesPods = make(map[string]URLAndAddress) + + // Register pod only if it has an annotation to scrape, if it is ready, + // and if namespace and selectors are specified and match + for _, pod := range pods { + if necessaryPodFieldsArePresent(pod) && + pod.GetMetadata().GetAnnotations()["prometheus.io/scrape"] == "true" && + podReady(pod.GetStatus().GetContainerStatuses()) && + podHasMatchingNamespace(pod, p) && + podHasMatchingLabelSelector(pod, p.podLabelSelector) && + podHasMatchingFieldSelector(pod, p.podFieldSelector) { + registerPod(pod, p) + } + + } + p.lock.Unlock() + + // No errors + return nil +} + +func necessaryPodFieldsArePresent(pod *corev1.Pod) bool { + return pod.GetMetadata() != nil && + pod.GetMetadata().GetAnnotations() != nil && + pod.GetMetadata().GetLabels() != nil && + pod.GetSpec() != nil && + pod.GetStatus() != nil && + pod.GetStatus().GetContainerStatuses() != nil +} + +/* See the docs on kubernetes label selectors: + * https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + */ +func podHasMatchingLabelSelector(pod *corev1.Pod, labelSelector labels.Selector) bool { + if labelSelector == nil { + return true + } + + var labelsSet labels.Set = pod.GetMetadata().GetLabels() + return labelSelector.Matches(labelsSet) +} + +/* See ToSelectableFields() for list of fields that are selectable: + * https://github.com/kubernetes/kubernetes/release-1.20/pkg/registry/core/pod/strategy.go + * See docs on kubernetes field selectors: + * https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ + */ +func podHasMatchingFieldSelector(pod *corev1.Pod, fieldSelector fields.Selector) bool { + if fieldSelector == nil { + return true + } + + podSpec := pod.GetSpec() + podStatus := pod.GetStatus() + + // Spec and Status shouldn't be nil. + // Error handling just in case something goes wrong but won't crash telegraf + if podSpec == nil || podStatus == nil { + return false + } + + fieldsSet := make(fields.Set) + fieldsSet["spec.nodeName"] = podSpec.GetNodeName() + fieldsSet["spec.restartPolicy"] = podSpec.GetRestartPolicy() + fieldsSet["spec.schedulerName"] = podSpec.GetSchedulerName() + fieldsSet["spec.serviceAccountName"] = podSpec.GetServiceAccountName() + fieldsSet["status.phase"] = podStatus.GetPhase() + fieldsSet["status.podIP"] = podStatus.GetPodIP() + fieldsSet["status.nominatedNodeName"] = podStatus.GetNominatedNodeName() + + return fieldSelector.Matches(fieldsSet) +} + +/* + * If a namespace is specified and the pod doesn't have that namespace, return false + * Else return true + */ +func podHasMatchingNamespace(pod *corev1.Pod, p *Prometheus) bool { + return !(p.PodNamespace != "" && pod.GetMetadata().GetNamespace() != p.PodNamespace) +} + func podReady(statuss []*corev1.ContainerStatus) bool { if len(statuss) == 0 { return false @@ -180,14 +346,19 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { return } podURL := p.AddressToURL(URL, URL.Hostname()) - p.lock.Lock() + + // Locks earlier if using cAdvisor calls - makes a new list each time + // rather than updating and removing from the same list + if !p.isNodeScrapeScope { + p.lock.Lock() + defer p.lock.Unlock() + } p.kubernetesPods[podURL.String()] = URLAndAddress{ URL: podURL, Address: URL.Hostname(), OriginalURL: URL, Tags: tags, } - p.lock.Unlock() } func getScrapeURL(pod *corev1.Pod) *string { diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 8568ac946437e..5a6860191f589 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -9,6 +9,9 @@ import ( v1 "github.com/ericchiang/k8s/apis/core/v1" metav1 "github.com/ericchiang/k8s/apis/meta/v1" + + "github.com/kubernetes/apimachinery/pkg/fields" + "github.com/kubernetes/apimachinery/pkg/labels" ) func TestScrapeURLNoAnnotations(t *testing.T) { @@ -142,6 +145,62 @@ func TestPodSelector(t *testing.T) { } } +func TestPodHasMatchingNamespace(t *testing.T) { + prom := &Prometheus{Log: testutil.Logger{}, PodNamespace: "default"} + + pod := pod() + pod.Metadata.Name = str("Pod1") + pod.Metadata.Namespace = str("default") + shouldMatch := podHasMatchingNamespace(pod, prom) + assert.Equal(t, true, shouldMatch) + + pod.Metadata.Name = str("Pod2") + pod.Metadata.Namespace = str("namespace") + shouldNotMatch := podHasMatchingNamespace(pod, prom) + assert.Equal(t, false, shouldNotMatch) +} + +func TestPodHasMatchingLabelSelector(t *testing.T) { + labelSelectorString := "label0==label0,label1=label1,label2!=label,label3 in (label1,label2, label3),label4 notin (label1, label2,label3),label5,!label6" + prom := &Prometheus{Log: testutil.Logger{}, KubernetesLabelSelector: labelSelectorString} + + pod := pod() + pod.Metadata.Labels = make(map[string]string) + pod.Metadata.Labels["label0"] = "label0" + pod.Metadata.Labels["label1"] = "label1" + pod.Metadata.Labels["label2"] = "label2" + pod.Metadata.Labels["label3"] = "label3" + pod.Metadata.Labels["label4"] = "label4" + pod.Metadata.Labels["label5"] = "label5" + + labelSelector, err := labels.Parse(prom.KubernetesLabelSelector) + assert.Equal(t, err, nil) + assert.Equal(t, true, podHasMatchingLabelSelector(pod, labelSelector)) +} + +func TestPodHasMatchingFieldSelector(t *testing.T) { + fieldSelectorString := "status.podIP=127.0.0.1,spec.restartPolicy=Always,spec.NodeName!=nodeName" + prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} + pod := pod() + pod.Spec.RestartPolicy = str("Always") + pod.Spec.NodeName = str("node1000") + + fieldSelector, err := fields.ParseSelector(prom.KubernetesFieldSelector) + assert.Equal(t, err, nil) + assert.Equal(t, true, podHasMatchingFieldSelector(pod, fieldSelector)) +} + +func TestInvalidFieldSelector(t *testing.T) { + fieldSelectorString := "status.podIP=127.0.0.1,spec.restartPolicy=Always,spec.NodeName!=nodeName,spec.nodeName" + prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} + pod := pod() + pod.Spec.RestartPolicy = str("Always") + pod.Spec.NodeName = str("node1000") + + _, err := fields.ParseSelector(prom.KubernetesFieldSelector) + assert.NotEqual(t, err, nil) +} + func pod() *v1.Pod { p := &v1.Pod{Metadata: &metav1.ObjectMeta{}, Status: &v1.PodStatus{}, Spec: &v1.PodSpec{}} p.Status.PodIP = str("127.0.0.1") diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 8ec316bb8aaf6..f94fcf1043bfa 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -8,6 +8,8 @@ import ( "net" "net/http" "net/url" + "os" + "strings" "sync" "time" @@ -16,6 +18,8 @@ import ( "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" parser_v2 "github.com/influxdata/telegraf/plugins/parsers/prometheus" + "github.com/kubernetes/apimachinery/pkg/fields" + "github.com/kubernetes/apimachinery/pkg/labels" ) const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1` @@ -57,12 +61,21 @@ type Prometheus struct { client *http.Client // Should we scrape Kubernetes services for prometheus annotations - MonitorPods bool `toml:"monitor_kubernetes_pods"` - PodNamespace string `toml:"monitor_kubernetes_pods_namespace"` - lock sync.Mutex - kubernetesPods map[string]URLAndAddress - cancel context.CancelFunc - wg sync.WaitGroup + MonitorPods bool `toml:"monitor_kubernetes_pods"` + PodScrapeScope string `toml:"pod_scrape_scope"` + NodeIP string `toml:"node_ip"` + PodScrapeInterval int `toml:"pod_scrape_interval"` + PodNamespace string `toml:"monitor_kubernetes_pods_namespace"` + lock sync.Mutex + kubernetesPods map[string]URLAndAddress + cancel context.CancelFunc + wg sync.WaitGroup + + // Only for monitor_kubernetes_pods=true and pod_scrape_scope="node" + podLabelSelector labels.Selector + podFieldSelector fields.Selector + nodeIP string + isNodeScrapeScope bool } var sampleConfig = ` @@ -94,6 +107,16 @@ var sampleConfig = ` ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation # monitor_kubernetes_pods = true + ## Get the list of pods to scrape with either the scope of + ## - cluster: the kubernetes watch api (default, no need to specify) + ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. + # pod_scrape_scope = "cluster" + ## Only for node scrape scope: node IP of the node that telegraf is running on. + ## Either this config or the environment variable NODE_IP must be set. + # node_ip = "10.180.1.1" + ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. + ## Default is 60 seconds. + # pod_scrape_interval = 60 ## Restricts Kubernetes monitoring to a single namespace ## ex: monitor_kubernetes_pods_namespace = "default" # monitor_kubernetes_pods_namespace = "" @@ -137,6 +160,43 @@ func (p *Prometheus) Init() error { p.Log.Warnf("Use of deprecated configuration: 'metric_version = 1'; please update to 'metric_version = 2'") } + // Config proccessing for node scrape scope for monitor_kubernetes_pods + p.isNodeScrapeScope = strings.EqualFold(p.PodScrapeScope, "node") + if p.isNodeScrapeScope { + + // Need node IP to make cAdvisor call for pod list. Check if set in config and valid IP address + if p.NodeIP == "" || net.ParseIP(p.NodeIP) == nil { + p.Log.Infof("The config node_ip is empty or invalid. Using NODE_IP env var as default.") + + // Check if set as env var and is valid IP address + envVarNodeIP := os.Getenv("NODE_IP") + if envVarNodeIP == "" || net.ParseIP(envVarNodeIP) == nil { + errorMessage := "The node_ip config and the environment variable NODE_IP are not set or invalid. Cannot get pod list for monitor_kubernetes_pods using node scrape scope" + return errors.New(errorMessage) + } + + p.NodeIP = envVarNodeIP + } + + // Parse label and field selectors - will be used to filter pods after cAdvisor call + var err error + p.podLabelSelector, err = labels.Parse(p.KubernetesLabelSelector) + if err != nil { + return fmt.Errorf("Error parsing the specified label selector(s): %s", err.Error()) + } + p.podFieldSelector, err = fields.ParseSelector(p.KubernetesFieldSelector) + if err != nil { + return fmt.Errorf("Error parsing the specified field selector(s): %s", err.Error()) + } + isValid, invalidSelector := fieldSelectorIsSupported(p.podFieldSelector) + if !isValid { + return fmt.Errorf("The field selector %s is not supported for pods", invalidSelector) + } + + p.Log.Infof("Using pod scrape scope at node level to get pod list using cAdvisor.") + p.Log.Infof("Using the label selector: %v and field selector: %v", p.podLabelSelector, p.podFieldSelector) + } + return nil } @@ -372,6 +432,30 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return nil } +/* Check if the field selector specified is valid. + * See ToSelectableFields() for list of fields that are selectable: + * https://github.com/kubernetes/kubernetes/release-1.20/pkg/registry/core/pod/strategy.go + */ +func fieldSelectorIsSupported(fieldSelector fields.Selector) (bool, string) { + supportedFieldsToSelect := map[string]bool{ + "spec.nodeName": true, + "spec.restartPolicy": true, + "spec.schedulerName": true, + "spec.serviceAccountName": true, + "status.phase": true, + "status.podIP": true, + "status.nominatedNodeName": true, + } + + for _, requirement := range fieldSelector.Requirements() { + if !supportedFieldsToSelect[requirement.Field] { + return false, requirement.Field + } + } + + return true, "" +} + // Start will start the Kubernetes scraping if enabled in the configuration func (p *Prometheus) Start(a telegraf.Accumulator) error { if p.MonitorPods { diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index 1c1411b881a1d..a727af49cab34 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -6,11 +6,13 @@ import ( "net/http" "net/http/httptest" "net/url" + "os" "testing" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/kubernetes/apimachinery/pkg/fields" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -234,3 +236,49 @@ func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") assert.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) } + +func TestUnsupportedFieldSelector(t *testing.T) { + fieldSelectorString := "spec.containerName=container" + prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} + + fieldSelector, _ := fields.ParseSelector(prom.KubernetesFieldSelector) + isValid, invalidSelector := fieldSelectorIsSupported(fieldSelector) + assert.Equal(t, false, isValid) + assert.Equal(t, "spec.containerName", invalidSelector) +} + +func TestInitConfigErrors(t *testing.T) { + p := &Prometheus{ + MetricVersion: 2, + Log: testutil.Logger{}, + URLs: nil, + URLTag: "url", + MonitorPods: true, + PodScrapeScope: "node", + PodScrapeInterval: 60, + } + + // Both invalid IP addresses + p.NodeIP = "10.240.0.0.0" + os.Setenv("NODE_IP", "10.000.0.0.0") + err := p.Init() + expectedMessage := "The node_ip config and the environment variable NODE_IP are not set or invalid. Cannot get pod list for monitor_kubernetes_pods using node scrape scope" + assert.Equal(t, expectedMessage, err.Error()) + os.Setenv("NODE_IP", "10.000.0.0") + + p.KubernetesLabelSelector = "label0==label0, label0 in (=)" + err = p.Init() + expectedMessage = "Error parsing the specified label selector(s): unable to parse requirement: found '=', expected: ',', ')' or identifier" + assert.Equal(t, expectedMessage, err.Error()) + p.KubernetesLabelSelector = "label0==label" + + p.KubernetesFieldSelector = "field," + err = p.Init() + expectedMessage = "Error parsing the specified field selector(s): invalid selector: 'field,'; can't understand 'field'" + assert.Equal(t, expectedMessage, err.Error()) + + p.KubernetesFieldSelector = "spec.containerNames=containerNames" + err = p.Init() + expectedMessage = "The field selector spec.containerNames is not supported for pods" + assert.Equal(t, expectedMessage, err.Error()) +} From 372004a99376663dfdb53e2791d5383f28151e10 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Mon, 8 Mar 2021 11:38:35 -0500 Subject: [PATCH 272/761] Fix max open requests to one if idempotent writes is set to true (#8954) --- plugins/common/kafka/config.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/common/kafka/config.go b/plugins/common/kafka/config.go index 1ed01d95b78a5..56e70a26b4a95 100644 --- a/plugins/common/kafka/config.go +++ b/plugins/common/kafka/config.go @@ -38,6 +38,9 @@ func (k *WriteConfig) SetConfig(config *sarama.Config) error { config.Producer.MaxMessageBytes = k.MaxMessageBytes } config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks) + if config.Producer.Idempotent { + config.Net.MaxOpenRequests = 1 + } return k.Config.SetConfig(config) } From 188163c1e7207b4d862f71a4e82878027b65a71d Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Mon, 8 Mar 2021 09:52:57 -0800 Subject: [PATCH 273/761] NFS Client input plugin README cleanup (#8943) * NFS Client input plugin README cleanup * reorder --- plugins/inputs/nfsclient/README.md | 186 ++++++++++++++--------------- 1 file changed, 93 insertions(+), 93 deletions(-) diff --git a/plugins/inputs/nfsclient/README.md b/plugins/inputs/nfsclient/README.md index 965bcc5240fc5..1ed1a08424bbb 100644 --- a/plugins/inputs/nfsclient/README.md +++ b/plugins/inputs/nfsclient/README.md @@ -1,20 +1,11 @@ -#### Description +# NFS Client Input Plugin -The NFSClient plugin collects data from /proc/self/mountstats. By default, only a limited number of general system-level metrics are collected, including basic read/write counts. +The NFS Client input plugin collects data from /proc/self/mountstats. By default, only a limited number of general system-level metrics are collected, including basic read/write counts. If `fullstat` is set, a great deal of additional metrics are collected, detailed below. **NOTE** Many of the metrics, even if tagged with a mount point, are really _per-server_. Thus, if you mount these two shares: `nfs01:/vol/foo/bar` and `nfs01:/vol/foo/baz`, there will be two near identical entries in /proc/self/mountstats. This is a limitation of the metrics exposed by the kernel, not the telegraf plugin. -#### Plugin arguments: -- **fullstat** bool: Collect per-operation type metrics. Defaults to false. -- **include_mounts** list(string): gather metrics for only these mounts. Default is to watch all mounts. -- **exclude_mounts** list(string): gather metrics for all mounts, except those listed in this option. Excludes take precedence over includes. -- **include_operations** list(string): List of specific NFS operations to track. See /proc/self/mountstats (the "per-op statistics" section) for complete lists of valid options for NFSv3 and NFSV4. The default is to gather all metrics, but this is almost certainly *not* what you want (there are 22 operations for NFSv3, and well over 50 for NFSv4). A suggested 'minimal' list of operations to collect for basic usage: `['READ','WRITE','ACCESS','GETATTR','READDIR','LOOKUP','LOOKUP']` -- **exclude_operations** list(string): Gather all metrics, except those listed. Excludes take precedence over includes. - -*N.B.* the `include_mounts` and `exclude_mounts` arguments are both applied to the local mount location (e.g. /mnt/NFS), not the server export (e.g. nfsserver:/vol/NFS). Go regexp patterns can be used in either. - -#### Examples +### Configuration ```toml [[inputs.nfsclient]] @@ -44,28 +35,14 @@ If `fullstat` is set, a great deal of additional metrics are collected, detailed # include_operations = [] # exclude_operations = [] ``` +#### Configuration Options +- **fullstat** bool: Collect per-operation type metrics. Defaults to false. +- **include_mounts** list(string): gather metrics for only these mounts. Default is to watch all mounts. +- **exclude_mounts** list(string): gather metrics for all mounts, except those listed in this option. Excludes take precedence over includes. +- **include_operations** list(string): List of specific NFS operations to track. See /proc/self/mountstats (the "per-op statistics" section) for complete lists of valid options for NFSv3 and NFSV4. The default is to gather all metrics, but this is almost certainly *not* what you want (there are 22 operations for NFSv3, and well over 50 for NFSv4). A suggested 'minimal' list of operations to collect for basic usage: `['READ','WRITE','ACCESS','GETATTR','READDIR','LOOKUP','LOOKUP']` +- **exclude_operations** list(string): Gather all metrics, except those listed. Excludes take precedence over includes. -Example output for basic metrics showing server-wise read and write data: - -``` -nfsstat,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS ops=600i,retrans=1i,bytes=1207i,rtt=606i,exe=607i 1612651512000000000 -nfsstat,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS bytes=1407i,rtt=706i,exe=707i,ops=700i,retrans=1i 1612651512000000000 - -``` - -Example output for `fullstat=true` metrics, which includes additional measurements for `nfs_bytes`, `nfs_events`, and `nfs_xprt_tcp` (and `nfs_xprt_udp` if present). -Additionally, per-OP metrics are collected, with examples for READ, LOOKUP, and NULL shown. -Please refer to `/proc/self/mountstats` for a list of supported NFS operations, as it changes as it changes periodically. - -``` -nfs_bytes,mountpoint=/home,serverexport=nfs01:/vol/home directreadbytes=0i,directwritebytes=0i,normalreadbytes=42648757667i,normalwritebytes=0i,readpages=10404603i,serverreadbytes=42617098139i,serverwritebytes=0i,writepages=0i 1608787697000000000 -nfs_events,mountpoint=/home,serverexport=nfs01:/vol/home attrinvalidates=116i,congestionwait=0i,datainvalidates=65i,delay=0i,dentryrevalidates=5911243i,extendwrite=0i,inoderevalidates=200378i,pnfsreads=0i,pnfswrites=0i,setattrtrunc=0i,shortreads=0i,shortwrites=0i,sillyrenames=0i,vfsaccess=7203852i,vfsflush=117405i,vfsfsync=0i,vfsgetdents=3368i,vfslock=0i,vfslookup=740i,vfsopen=157281i,vfsreadpage=16i,vfsreadpages=86874i,vfsrelease=155526i,vfssetattr=0i,vfsupdatepage=0i,vfswritepage=0i,vfswritepages=215514i 1608787697000000000 -nfs_xprt_tcp,mountpoint=/home,serverexport=nfs01:/vol/home backlogutil=0i,badxids=0i,bind_count=1i,connect_count=1i,connect_time=0i,idle_time=0i,inflightsends=15659826i,rpcreceives=2173896i,rpcsends=2173896i 1608787697000000000 - -nfs_ops,mountpoint=/NFS,operation=NULL,serverexport=1.2.3.4:/storage/NFS trans=0i,timeouts=0i,bytes_sent=0i,bytes_recv=0i,queue_time=0i,response_time=0i,total_time=0i,ops=0i 1612651512000000000 -nfs_ops,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS bytes=1207i,timeouts=602i,total_time=607i,exe=607i,trans=601i,bytes_sent=603i,bytes_recv=604i,queue_time=605i,ops=600i,retrans=1i,rtt=606i,response_time=606i 1612651512000000000 -nfs_ops,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS ops=700i,bytes=1407i,exe=707i,trans=701i,timeouts=702i,response_time=706i,total_time=707i,retrans=1i,rtt=706i,bytes_sent=703i,bytes_recv=704i,queue_time=705i 1612651512000000000 -``` +*N.B.* the `include_mounts` and `exclude_mounts` arguments are both applied to the local mount location (e.g. /mnt/NFS), not the server export (e.g. nfsserver:/vol/NFS). Go regexp patterns can be used in either. #### References 1. [nfsiostat](http://git.linux-nfs.org/?p=steved/nfs-utils.git;a=summary) @@ -75,9 +52,9 @@ nfs_ops,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS ops=70 -#### Measurements & Fields +### Metrics -Always collected: +#### Fields - nfsstat - bytes (integer, bytes) - The total number of bytes exchanged doing this operation. This is bytes sent *and* received, including overhead *and* payload. (bytes = OP_bytes_sent + OP_bytes_recv. See nfs_ops below) @@ -111,57 +88,56 @@ Please refer to `/proc/self/mountstats` for a list of supported NFS operations, - nfs_bytes - fields: - - normalreadbytes - (int, bytes) - Bytes read from the server via `read()` - - normalwritebytes - (int, bytes) - Bytes written to the server via `write()` - - directreadbytes - (int, bytes) - Bytes read with O_DIRECT set - - directwritebytes - (int, bytes) -Bytes written with O_DIRECT set - - serverreadbytes - (int, bytes) - Bytes read via NFS READ (via `mmap()`) - - serverwritebytes - (int, bytes) - Bytes written via NFS WRITE (via `mmap()`) - - readpages - (int, count) - Number of pages read - - writepages - (int, count) - Number of pages written - -- nfs_events - Per-event metrics + - normalreadbytes (int, bytes): Bytes read from the server via `read()` + - normalwritebytes (int, bytes): Bytes written to the server via `write()` + - directreadbytes (int, bytes): Bytes read with O_DIRECT set + - directwritebytes (int, bytes): Bytes written with O_DIRECT set + - serverreadbytes (int, bytes): Bytes read via NFS READ (via `mmap()`) + - serverwritebytes (int, bytes): Bytes written via NFS WRITE (via `mmap()`) + - readpages (int, count): Number of pages read + - writepages (int, count): Number of pages written + +- nfs_events (Per-event metrics) - fields: - - inoderevalidates - (int, count) - How many times cached inode attributes have to be re-validated from the server. - - dentryrevalidates - (int, count) - How many times cached dentry nodes have to be re-validated. - - datainvalidates - (int, count) - How many times an inode had its cached data thrown out. - - attrinvalidates - (int, count) - How many times an inode has had cached inode attributes invalidated. - - vfsopen - (int, count) - How many times files or directories have been `open()`'d. - - vfslookup - (int, count) - How many name lookups in directories there have been. - - vfsaccess - (int, count) - Number of calls to `access()`. (formerly called "vfspermission") - - - vfsupdatepage - (int, count) - Count of updates (and potential writes) to pages. - - vfsreadpage - (int, count) - Number of pages read. - - vfsreadpages - (int, count) - Count of how many times a _group_ of pages was read (possibly via `mmap()`?). - - vfswritepage - (int, count) - Number of pages written. - - vfswritepages - (int, count) - Count of how many times a _group_ of pages was written (possibly via `mmap()`?) - - vfsgetdents - (int, count) - Count of directory entry reads with getdents(). These reads can be served from cache and don't necessarily imply actual NFS requests. (formerly called "vfsreaddir") - - vfssetattr - (int, count) - How many times we've set attributes on inodes. - - vfsflush - (int, count) - Count of times pending writes have been forcibly flushed to the server. - - vfsfsync - (int, count) - Count of calls to `fsync()` on directories and files. - - vfslock - (int, count) - Number of times a lock was attempted on a file (regardless of success or not). - - vfsrelease - (int, count) - Number of calls to `close()`. - - congestionwait - (int, count) - Believe unused by the Linux kernel, but it is part of the NFS spec. - - setattrtrunc - (int, count) - How many times files have had their size truncated. - - extendwrite - (int, count) - How many times a file has been grown because you're writing beyond the existing end of the file. - - sillyrenames - (int, count) - Number of times an in-use file was removed (thus creating a temporary ".nfsXXXXXX" file) - - shortreads - (int, count) - Number of times the NFS server returned less data than requested. - - shortwrites - (int, count) - Number of times NFS server reports it wrote less data than requested. - - delay - (int, count) - Occurances of EJUKEBOX ("Jukebox Delay", probably unused) - - pnfsreads - (int, count) - Count of NFS v4.1+ pNFS reads. - - pnfswrites - (int, count) - Count of NFS v4.1+ pNFS writes. - - - nfs_xprt_tcp + - inoderevalidates (int, count): How many times cached inode attributes have to be re-validated from the server. + - dentryrevalidates (int, count): How many times cached dentry nodes have to be re-validated. + - datainvalidates (int, count): How many times an inode had its cached data thrown out. + - attrinvalidates (int, count): How many times an inode has had cached inode attributes invalidated. + - vfsopen (int, count): How many times files or directories have been `open()`'d. + - vfslookup (int, count): How many name lookups in directories there have been. + - vfsaccess (int, count): Number of calls to `access()`. (formerly called "vfspermission") + - vfsupdatepage (int, count): Count of updates (and potential writes) to pages. + - vfsreadpage (int, count): Number of pages read. + - vfsreadpages (int, count): Count of how many times a _group_ of pages was read (possibly via `mmap()`?). + - vfswritepage (int, count): Number of pages written. + - vfswritepages (int, count): Count of how many times a _group_ of pages was written (possibly via `mmap()`?) + - vfsgetdents (int, count): Count of directory entry reads with getdents(). These reads can be served from cache and don't necessarily imply actual NFS requests. (formerly called "vfsreaddir") + - vfssetattr (int, count): How many times we've set attributes on inodes. + - vfsflush (int, count): Count of times pending writes have been forcibly flushed to the server. + - vfsfsync (int, count): Count of calls to `fsync()` on directories and files. + - vfslock (int, count): Number of times a lock was attempted on a file (regardless of success or not). + - vfsrelease (int, count): Number of calls to `close()`. + - congestionwait (int, count): Believe unused by the Linux kernel, but it is part of the NFS spec. + - setattrtrunc (int, count): How many times files have had their size truncated. + - extendwrite (int, count): How many times a file has been grown because you're writing beyond the existing end of the file. + - sillyrenames (int, count): Number of times an in-use file was removed (thus creating a temporary ".nfsXXXXXX" file) + - shortreads (int, count): Number of times the NFS server returned less data than requested. + - shortwrites (int, count): Number of times NFS server reports it wrote less data than requested. + - delay (int, count): Occurances of EJUKEBOX ("Jukebox Delay", probably unused) + - pnfsreads (int, count): Count of NFS v4.1+ pNFS reads. + - pnfswrites (int, count): Count of NFS v4.1+ pNFS writes. + +- nfs_xprt_tcp - fields: - - bind_count - (int, count) - Number of _completely new_ mounts to this server (sometimes 0?) - - connect_count - (int, count) - How many times the client has connected to the server in question - - connect_time - (int, jiffies) - How long the NFS client has spent waiting for its connection(s) to the server to be established. - - idle_time - (int, seconds) - How long (in seconds) since the NFS mount saw any RPC traffic. - - rpcsends - (int, count) - How many RPC requests this mount has sent to the server. - - rpcreceives - (int, count) - How many RPC replies this mount has received from the server. - - badxids - (int, count) - Count of XIDs sent by the server that the client doesn't know about. - - inflightsends - (int, count) - Number of outstanding requests; always >1. (See reference #4 for comment on this field) - - backlogutil - (int, count) - Cumulative backlog count + - bind_count (int, count): Number of _completely new_ mounts to this server (sometimes 0?) + - connect_count (int, count): How many times the client has connected to the server in question + - connect_time (int, jiffies): How long the NFS client has spent waiting for its connection(s) to the server to be established. + - idle_time (int, seconds): How long (in seconds) since the NFS mount saw any RPC traffic. + - rpcsends (int, count): How many RPC requests this mount has sent to the server. + - rpcreceives (int, count): How many RPC replies this mount has received from the server. + - badxids (int, count): Count of XIDs sent by the server that the client doesn't know about. + - inflightsends (int, count): Number of outstanding requests; always >1. (See reference #4 for comment on this field) + - backlogutil (int, count): Cumulative backlog count - nfs_xprt_udp - fields: @@ -169,13 +145,37 @@ Please refer to `/proc/self/mountstats` for a list of supported NFS operations, - nfs_ops - fields (In all cases, the `operations` tag is set to the uppercase name of the NFS operation, _e.g._ "READ", "FSINFO", _etc_. See /proc/self/mountstats for a full list): - - ops - (int, count) - Total operations of this type. - - trans - (int, count) - Total transmissions of this type, including retransmissions: `OP_ops - OP_trans = total_retransmissions` (lower is better). - - timeouts - (int, count) - Number of major timeouts. - - bytes_sent - (int, count) - Bytes received, including headers (should also be close to on-wire size). - - bytes_recv - (int, count) - Bytes sent, including headers (should be close to on-wire size). - - queue_time - (int, milliseconds) - Cumulative time a request waited in the queue before sending this OP type. - - response_time - (int, milliseconds) - Cumulative time waiting for a response for this OP type. - - total_time - (int, milliseconds) - Cumulative time a request waited in the queue before sending. - - errors - (int, count) - Total number operations that complete with tk_status < 0 (usually errors). This is a new field, present in kernel >=5.3, mountstats version 1.1 + - ops (int, count): Total operations of this type. + - trans (int, count): Total transmissions of this type, including retransmissions: `OP_ops - OP_trans = total_retransmissions` (lower is better). + - timeouts (int, count): Number of major timeouts. + - bytes_sent (int, count): Bytes received, including headers (should also be close to on-wire size). + - bytes_recv (int, count): Bytes sent, including headers (should be close to on-wire size). + - queue_time (int, milliseconds): Cumulative time a request waited in the queue before sending this OP type. + - response_time (int, milliseconds): Cumulative time waiting for a response for this OP type. + - total_time (int, milliseconds): Cumulative time a request waited in the queue before sending. + - errors (int, count): Total number operations that complete with tk_status < 0 (usually errors). This is a new field, present in kernel >=5.3, mountstats version 1.1 + + +### Example Output +For basic metrics showing server-wise read and write data. +``` +nfsstat,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS ops=600i,retrans=1i,bytes=1207i,rtt=606i,exe=607i 1612651512000000000 +nfsstat,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS bytes=1407i,rtt=706i,exe=707i,ops=700i,retrans=1i 1612651512000000000 + +``` + +For `fullstat=true` metrics, which includes additional measurements for `nfs_bytes`, `nfs_events`, and `nfs_xprt_tcp` (and `nfs_xprt_udp` if present). +Additionally, per-OP metrics are collected, with examples for READ, LOOKUP, and NULL shown. +Please refer to `/proc/self/mountstats` for a list of supported NFS operations, as it changes as it changes periodically. + +``` +nfs_bytes,mountpoint=/home,serverexport=nfs01:/vol/home directreadbytes=0i,directwritebytes=0i,normalreadbytes=42648757667i,normalwritebytes=0i,readpages=10404603i,serverreadbytes=42617098139i,serverwritebytes=0i,writepages=0i 1608787697000000000 +nfs_events,mountpoint=/home,serverexport=nfs01:/vol/home attrinvalidates=116i,congestionwait=0i,datainvalidates=65i,delay=0i,dentryrevalidates=5911243i,extendwrite=0i,inoderevalidates=200378i,pnfsreads=0i,pnfswrites=0i,setattrtrunc=0i,shortreads=0i,shortwrites=0i,sillyrenames=0i,vfsaccess=7203852i,vfsflush=117405i,vfsfsync=0i,vfsgetdents=3368i,vfslock=0i,vfslookup=740i,vfsopen=157281i,vfsreadpage=16i,vfsreadpages=86874i,vfsrelease=155526i,vfssetattr=0i,vfsupdatepage=0i,vfswritepage=0i,vfswritepages=215514i 1608787697000000000 +nfs_xprt_tcp,mountpoint=/home,serverexport=nfs01:/vol/home backlogutil=0i,badxids=0i,bind_count=1i,connect_count=1i,connect_time=0i,idle_time=0i,inflightsends=15659826i,rpcreceives=2173896i,rpcsends=2173896i 1608787697000000000 + +nfs_ops,mountpoint=/NFS,operation=NULL,serverexport=1.2.3.4:/storage/NFS trans=0i,timeouts=0i,bytes_sent=0i,bytes_recv=0i,queue_time=0i,response_time=0i,total_time=0i,ops=0i 1612651512000000000 +nfs_ops,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS bytes=1207i,timeouts=602i,total_time=607i,exe=607i,trans=601i,bytes_sent=603i,bytes_recv=604i,queue_time=605i,ops=600i,retrans=1i,rtt=606i,response_time=606i 1612651512000000000 +nfs_ops,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS ops=700i,bytes=1407i,exe=707i,trans=701i,timeouts=702i,response_time=706i,total_time=707i,retrans=1i,rtt=706i,bytes_sent=703i,bytes_recv=704i,queue_time=705i 1612651512000000000 +``` + From 3af16aa8853876f4caf8a410aa7e1a6af8a554ef Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 8 Mar 2021 16:29:11 -0600 Subject: [PATCH 274/761] Support Go version 1.16 (#8882) * Support 1.16 * Use 1.16 in mod file * update go.sum * update go.mod after updating to 1.16 locally * Fix go vet * Remove exit * Use require.NoError * Only run tidy explicitly in "dep" job * install 1.16 for go * typo * update xcode * specific version not working * Get 1.16 go formula * Change brew call * brew update * typo * Resolve go.sum conflict * go mod tidy --- .circleci/config.yml | 67 ++++++++++--------- Makefile | 9 ++- go.mod | 3 +- go.sum | 20 +----- .../inputs/hddtemp/go-hddtemp/hddtemp_test.go | 6 +- .../riemann_listener/riemann_listener.go | 1 - scripts/alpine.docker | 2 +- scripts/buster.docker | 2 +- scripts/{ci-1.14.docker => ci-1.16.docker} | 2 +- 9 files changed, 47 insertions(+), 65 deletions(-) rename scripts/{ci-1.14.docker => ci-1.16.docker} (95%) diff --git a/.circleci/config.yml b/.circleci/config.yml index 4bd732967ab27..0877b3c91b87b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,21 +3,21 @@ orbs: win: circleci/windows@2.4.0 executors: - go-1_14: + go-1_15: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.14.9' + - image: 'quay.io/influxdb/telegraf-ci:1.15.8' environment: GOFLAGS: -p=8 - go-1_15: + go-1_16: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.15.8' + - image: 'quay.io/influxdb/telegraf-ci:1.16.0' environment: GOFLAGS: -p=8 mac: macos: - xcode: 12.1.0 + xcode: 12.4.0 working_directory: '~/go/src/github.com/influxdata/telegraf' environment: HOMEBREW_NO_AUTO_UPDATE: 1 @@ -72,7 +72,7 @@ commands: - 'dist' jobs: linter: - executor: go-1_15 + executor: go-1_16 steps: - checkout - restore_cache: @@ -80,7 +80,7 @@ jobs: - run: wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.37.0 - run: make lint deps: - executor: go-1_15 + executor: go-1_16 steps: - checkout - restore_cache: @@ -102,7 +102,8 @@ jobs: - checkout - restore_cache: key: mac-go-mod-v2-{{ checksum "go.sum" }} - - run: 'brew install go' # latest + - run: 'brew update' + - run: 'brew install go@1.16' - run: 'make deps' - run: 'make tidy' - save_cache: @@ -120,21 +121,21 @@ jobs: - 'usr/local/Cellar/go' - 'usr/local/bin/gofmt' - 'Users/distiller/go' - test-go-1_14: - executor: go-1_14 + test-go-1_15: + executor: go-1_15 steps: - test-go - test-go-1_14-386: - executor: go-1_14 + test-go-1_15-386: + executor: go-1_15 steps: - test-go: goarch: "386" - test-go-1_15: - executor: go-1_15 + test-go-1_16: + executor: go-1_16 steps: - test-go - test-go-1_15-386: - executor: go-1_15 + test-go-1_16-386: + executor: go-1_16 steps: - test-go: goarch: "386" @@ -154,15 +155,15 @@ jobs: - run: make test-windows package: - executor: go-1_15 + executor: go-1_16 steps: - package release: - executor: go-1_15 + executor: go-1_16 steps: - package nightly: - executor: go-1_15 + executor: go-1_16 steps: - package: nightly: true @@ -217,25 +218,25 @@ workflows: filters: tags: only: /.*/ - - 'test-go-1_14': + - 'test-go-1_15': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1_14-386': + - 'test-go-1_15-386': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1_15': + - 'test-go-1_16': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1_15-386': + - 'test-go-1_16-386': requires: - 'deps' filters: @@ -255,18 +256,18 @@ workflows: requires: - 'test-go-windows' - 'test-go-darwin' - - 'test-go-1_14' - - 'test-go-1_14-386' - 'test-go-1_15' - 'test-go-1_15-386' + - 'test-go-1_16' + - 'test-go-1_16-386' - 'release': requires: - 'test-go-windows' - 'test-go-darwin' - - 'test-go-1_14' - - 'test-go-1_14-386' - 'test-go-1_15' - 'test-go-1_15-386' + - 'test-go-1_16' + - 'test-go-1_16-386' filters: tags: only: /.*/ @@ -289,16 +290,16 @@ workflows: - 'linter' - 'deps' - 'macdeps' - - 'test-go-1_14': + - 'test-go-1_15': requires: - 'deps' - - 'test-go-1_14-386': + - 'test-go-1_15-386': requires: - 'deps' - - 'test-go-1_15': + - 'test-go-1_16': requires: - 'deps' - - 'test-go-1_15-386': + - 'test-go-1_16-386': requires: - 'deps' - 'test-go-darwin': @@ -309,10 +310,10 @@ workflows: requires: - 'test-go-windows' - 'test-go-darwin' - - 'test-go-1_14' - - 'test-go-1_14-386' - 'test-go-1_15' - 'test-go-1_15-386' + - 'test-go-1_16' + - 'test-go-1_16-386' triggers: - schedule: cron: "0 7 * * *" diff --git a/Makefile b/Makefile index 3c6e5f0291f8a..24004ae27d1aa 100644 --- a/Makefile +++ b/Makefile @@ -151,7 +151,6 @@ tidy: .PHONY: check check: fmtcheck vet - @$(MAKE) --no-print-directory tidy .PHONY: test-all test-all: fmtcheck vet @@ -184,10 +183,10 @@ ci-1.15: docker build -t quay.io/influxdb/telegraf-ci:1.15.8 - < scripts/ci-1.15.docker docker push quay.io/influxdb/telegraf-ci:1.15.8 -.PHONY: ci-1.14 -ci-1.14: - docker build -t quay.io/influxdb/telegraf-ci:1.14.9 - < scripts/ci-1.14.docker - docker push quay.io/influxdb/telegraf-ci:1.14.9 +.PHONY: ci-1.16 +ci-1.16: + docker build -t quay.io/influxdb/telegraf-ci:1.16.0 - < scripts/ci-1.16.docker + docker push quay.io/influxdb/telegraf-ci:1.16.0 .PHONY: install install: $(buildbin) diff --git a/go.mod b/go.mod index fcc1fc6cc662f..705ed742ccc09 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/influxdata/telegraf -go 1.15 +go 1.16 require ( cloud.google.com/go v0.53.0 @@ -128,7 +128,6 @@ require ( github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect github.com/tidwall/gjson v1.6.0 github.com/tinylib/msgp v1.1.5 github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect diff --git a/go.sum b/go.sum index b51f150f6182e..6a079a0556f98 100644 --- a/go.sum +++ b/go.sum @@ -22,7 +22,6 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+ cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= @@ -103,7 +102,6 @@ github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/antchfx/xmlquery v1.3.3 h1:HYmadPG0uz8CySdL68rB4DCLKXz2PurCjS3mnkVF4CQ= github.com/antchfx/xmlquery v1.3.3/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= -github.com/antchfx/xpath v1.1.10 h1:cJ0pOvEdN/WvYXxvRrzQH9x5QWKpzHacYO8qzCcDYAg= github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= github.com/antchfx/xpath v1.1.11 h1:WOFtK8TVAjLm3lbgqeP0arlHpvCEeTANeWZ/csPpJkQ= github.com/antchfx/xpath v1.1.11/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= @@ -336,7 +334,6 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -441,9 +438,7 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea h1:g2k+8WR7cHch4g0tBDhfiEvAp7fXxTNBiD1oC1Oxj3E= github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= -github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI= github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= -github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b h1:Rrp0ByJXEjhREMPGTt3aWYjoIsUGCbt21ekbeJcTWv0= github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= @@ -665,8 +660,6 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= -github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg= -github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= @@ -791,18 +784,13 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc h1:zK/HqS5bZxDptfPJNq8v7vJfXtkU7r9TLIoSr1bXaP4= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 h1:lwlPPsmjDKK0J6eG6xDWd5XPehI0R024zxjDnw3esPA= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -851,14 +839,12 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -868,11 +854,9 @@ golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fq golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= @@ -929,7 +913,6 @@ golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:U gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.7.0 h1:Hdks0L0hgznZLG9nzXb8vZ0rRvqNvAcgAp84y7Mwkgw= gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= @@ -1053,6 +1036,7 @@ k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= modernc.org/httpfs v1.0.0 h1:LtuKNg6JMiaBKVQHKd6Phhvk+2GFp+pUcmDQgRjrds0= diff --git a/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go index a3fda2abd2013..57d53270b44c1 100644 --- a/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go @@ -4,6 +4,8 @@ import ( "net" "reflect" "testing" + + "github.com/stretchr/testify/require" ) func TestFetch(t *testing.T) { @@ -104,9 +106,7 @@ func serve(t *testing.T, data []byte) net.Listener { go func(t *testing.T) { conn, err := l.Accept() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) conn.Write(data) conn.Close() diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index 50ef6a9a880e6..662e74287db51 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -386,7 +386,6 @@ func processOsSignals(cancelFunc context.CancelFunc) { func (rsl *RiemannSocketListener) Stop() { rsl.wg.Done() rsl.wg.Wait() - os.Exit(0) } func newRiemannSocketListener() *RiemannSocketListener { diff --git a/scripts/alpine.docker b/scripts/alpine.docker index 7be9a39e6b361..b97bd9628c45e 100644 --- a/scripts/alpine.docker +++ b/scripts/alpine.docker @@ -1,4 +1,4 @@ -FROM golang:1.15.8 as builder +FROM golang:1.16.0 as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/buster.docker b/scripts/buster.docker index e64f7680124ad..e868ad4d7237c 100644 --- a/scripts/buster.docker +++ b/scripts/buster.docker @@ -1,4 +1,4 @@ -FROM golang:1.15.8-buster as builder +FROM golang:1.16.0-buster as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/ci-1.14.docker b/scripts/ci-1.16.docker similarity index 95% rename from scripts/ci-1.14.docker rename to scripts/ci-1.16.docker index af3559460b3bd..b4866457c29cf 100644 --- a/scripts/ci-1.14.docker +++ b/scripts/ci-1.16.docker @@ -1,4 +1,4 @@ -FROM golang:1.14.9 +FROM golang:1.16.0 RUN chmod -R 755 "$GOPATH" From 380911ffb3fd42f321dc6c5374176c6ea189d919 Mon Sep 17 00:00:00 2001 From: Sam Dillard Date: Tue, 9 Mar 2021 08:24:57 -0800 Subject: [PATCH 275/761] Add Starlark parsing example of nested JSON (#8675) * Add Starlark parsing example of nested JSON * add correct Example Input and Output fields to make script testable * removed logging from script * remove logging.star load statement --- .../starlark/testdata/json_nested.star | 45 +++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 plugins/processors/starlark/testdata/json_nested.star diff --git a/plugins/processors/starlark/testdata/json_nested.star b/plugins/processors/starlark/testdata/json_nested.star new file mode 100644 index 0000000000000..3ffa20d0cbfb2 --- /dev/null +++ b/plugins/processors/starlark/testdata/json_nested.star @@ -0,0 +1,45 @@ +# +# This code assumes the value parser with data_type='string' is used +# in the input collecting the JSON data. The entire JSON obj/doc will +# be set to a Field named `value` with which this code will work. + +# JSON: +# ``` +# { +# "fields": { +# "LogEndOffset": 339238, +# "LogStartOffset": 339238, +# "NumLogSegments": 1, +# "Size": 0, +# "UnderReplicatedPartitions": 0 +# }, +# "name": "partition", +# "tags": { +# "host": "CUD1-001559", +# "jolokia_agent_url": "http://localhost:7777/jolokia", +# "partition": "1", +# "topic": "qa-kafka-connect-logs" +# }, +# "timestamp": 1591124461 +# } ``` +# +# Example Input: +# json value="[{\"fields\": {\"LogEndOffset\": 339238, \"LogStartOffset\": 339238, \"NumLogSegments\": 1, \"Size\": 0, \"UnderReplicatedPartitions\": 0}, \"name\": \"partition\", \"tags\": {\"host\": \"CUD1-001559\", \"jolokia_agent_url\": \"http://localhost:7777/jolokia\", \"partition\": \"1\", \"topic\": \"qa-kafka-connect-logs\"}, \"timestamp\": 1591124461}]" + +# Example Output: +# partition,host=CUD1-001559,jolokia_agent_url=http://localhost:7777/jolokia,partition=1,topic=qa-kafka-connect-logs LogEndOffset=339238i,LogStartOffset=339238i,NumLogSegments=1i,Size=0i,UnderReplicatedPartitions=0i 1610056029037925000 + + +load("json.star", "json") + +def apply(metric): + j_list = json.decode(metric.fields.get('value')) # input JSON may be an arrow of objects + metrics = [] + for obj in j_list: + new_metric = Metric("partition") # We want a new InfluxDB/Telegraf metric each iteration + for tag in obj["tags"].items(): # 4 Tags to iterate through + new_metric.tags[str(tag[0])] = tag[1] + for field in obj["fields"].items(): # 5 Fields to iterate through + new_metric.fields[str(field[0])] = field[1] + metrics.append(new_metric) + return metrics From a6d2c4f254dbe9f7353961d892f8b91d907423ea Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Tue, 9 Mar 2021 14:35:18 -0500 Subject: [PATCH 276/761] Reset the flush interval timer when flush is requested or batch is ready. (#8953) * Reset the flush interval timer when flush is requested or batch is ready, so that timer doesn't expire while one of those flushes is occurring. * Update tick.go --- agent/agent.go | 12 ++++-------- agent/tick.go | 19 ++++++++++++------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index e7ffee322ff20..1ac5f2b0bebb2 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -793,7 +793,7 @@ func (a *Agent) runOutputs( func (a *Agent) flushLoop( ctx context.Context, output *models.RunningOutput, - ticker Ticker, + ticker *RollingTicker, ) { logError := func(err error) { if err != nil { @@ -822,15 +822,11 @@ func (a *Agent) flushLoop( case <-ticker.Elapsed(): logError(a.flushOnce(output, ticker, output.Write)) case <-flushRequested: + ticker.Reset() logError(a.flushOnce(output, ticker, output.Write)) case <-output.BatchReady: - // Favor the ticker over batch ready - select { - case <-ticker.Elapsed(): - logError(a.flushOnce(output, ticker, output.Write)) - default: - logError(a.flushOnce(output, ticker, output.WriteBatch)) - } + ticker.Reset() + logError(a.flushOnce(output, ticker, output.WriteBatch)) } } } diff --git a/agent/tick.go b/agent/tick.go index 91b99712a73b4..6afef2fa70edd 100644 --- a/agent/tick.go +++ b/agent/tick.go @@ -216,6 +216,7 @@ type RollingTicker struct { ch chan time.Time cancel context.CancelFunc wg sync.WaitGroup + timer *clock.Timer } func NewRollingTicker(interval, jitter time.Duration) *RollingTicker { @@ -232,12 +233,12 @@ func newRollingTicker(interval, jitter time.Duration, clock clock.Clock) *Rollin } d := t.next() - timer := clock.Timer(d) + t.timer = clock.Timer(d) t.wg.Add(1) go func() { defer t.wg.Done() - t.run(ctx, timer) + t.run(ctx) }() return t @@ -247,24 +248,28 @@ func (t *RollingTicker) next() time.Duration { return t.interval + internal.RandomDuration(t.jitter) } -func (t *RollingTicker) run(ctx context.Context, timer *clock.Timer) { +func (t *RollingTicker) run(ctx context.Context) { for { select { case <-ctx.Done(): - timer.Stop() + t.timer.Stop() return - case now := <-timer.C: + case now := <-t.timer.C: select { case t.ch <- now: default: } - d := t.next() - timer.Reset(d) + t.Reset() } } } +// Reset the ticker to the next interval + jitter. +func (t *RollingTicker) Reset() { + t.timer.Reset(t.next()) +} + func (t *RollingTicker) Elapsed() <-chan time.Time { return t.ch } From 60e86a914bed2b4ad0f2a5b4f7e2c3dae2fd925c Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 9 Mar 2021 12:21:46 -0800 Subject: [PATCH 277/761] New plugins readme cleanup (#8956) * New plugins readme cleanup * update signalfx output readme --- plugins/outputs/sensu/README.md | 4 ++-- plugins/outputs/signalfx/README.md | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/sensu/README.md b/plugins/outputs/sensu/README.md index 82e6302767d08..bca7a4392577b 100644 --- a/plugins/outputs/sensu/README.md +++ b/plugins/outputs/sensu/README.md @@ -1,6 +1,6 @@ -# HTTP Output Plugin +# Sensu Go Output Plugin -This plugin writes metrics events to [Sensu Go](https://sensu.io) via its +This plugin writes metrics events to [Sensu Go](https://sensu.io) via its HTTP events API. ### Configuration: diff --git a/plugins/outputs/signalfx/README.md b/plugins/outputs/signalfx/README.md index 4736e4bbceb0f..00b39cf30e229 100644 --- a/plugins/outputs/signalfx/README.md +++ b/plugins/outputs/signalfx/README.md @@ -1,5 +1,8 @@ # SignalFx Output Plugin +The SignalFx output plugin sends metrics to [SignalFx](https://docs.signalfx.com/en/latest/). + +### Configuration ```toml [[outputs.signalfx]] ## SignalFx Org Access Token From ed468f4aa7ab1e06b1b3604a81fbf45021d03306 Mon Sep 17 00:00:00 2001 From: Todd Campbell Date: Tue, 9 Mar 2021 17:39:57 -0500 Subject: [PATCH 278/761] Fix output name to match directory name for sensu output plugin (#8960) Signed-off-by: Todd Campbell --- README.md | 2 +- etc/telegraf.conf | 10 +++++----- plugins/outputs/sensu/README.md | 10 +++++----- plugins/outputs/sensu/sensu.go | 10 +++++----- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 59aa7672c5dc4..909e7dfec49c5 100644 --- a/README.md +++ b/README.md @@ -445,7 +445,7 @@ For documentation on the latest development code see the [documentation index][d * [prometheus](./plugins/outputs/prometheus_client) * [riemann](./plugins/outputs/riemann) * [riemann_legacy](./plugins/outputs/riemann_legacy) -* [sensu-go](./plugins/outputs/sensu) +* [sensu](./plugins/outputs/sensu) * [signalfx](./plugins/outputs/signalfx) * [socket_writer](./plugins/outputs/socket_writer) * [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 949b0cd5f9e39..88217ac2de40c 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1328,7 +1328,7 @@ # # Send aggregate metrics to Sensu Monitor -# [[outputs.sensu-go]] +# [[outputs.sensu]] # ## BACKEND API URL is the Sensu Backend API root URL to send metrics to # ## (protocol, host, and port only). The output plugin will automatically # ## append the corresponding backend API path @@ -1393,7 +1393,7 @@ # ## Check specification # ## The check name is the name to give the Sensu check associated with the event # ## created. This maps to check.metatadata.name in the event. -# [outputs.sensu-go.check] +# [outputs.sensu.check] # name = "telegraf" # # ## Entity specification @@ -1404,17 +1404,17 @@ # ## backend_api_url and entity_name is not set, the value returned by # ## os.Hostname() will be used; if the output plugin is configured to send # ## events to an agent_api_url, entity_name and entity_namespace are not used. -# # [outputs.sensu-go.entity] +# # [outputs.sensu.entity] # # name = "server-01" # # namespace = "default" # # ## Metrics specification # ## Configure the tags for the metrics that are sent as part of the Sensu event -# # [outputs.sensu-go.tags] +# # [outputs.sensu.tags] # # source = "telegraf" # # ## Configure the handler(s) for processing the provided metrics -# # [outputs.sensu-go.metrics] +# # [outputs.sensu.metrics] # # handlers = ["influxdb","elasticsearch"] diff --git a/plugins/outputs/sensu/README.md b/plugins/outputs/sensu/README.md index bca7a4392577b..f21159c6426e0 100644 --- a/plugins/outputs/sensu/README.md +++ b/plugins/outputs/sensu/README.md @@ -6,7 +6,7 @@ HTTP events API. ### Configuration: ```toml -[[outputs.sensu-go]] +[[outputs.sensu]] ## BACKEND API URL is the Sensu Backend API root URL to send metrics to ## (protocol, host, and port only). The output plugin will automatically ## append the corresponding backend API path @@ -71,7 +71,7 @@ HTTP events API. ## Check specification ## The check name is the name to give the Sensu check associated with the event ## created. This maps to check.metatadata.name in the event. - [outputs.sensu-go.check] + [outputs.sensu.check] name = "telegraf" ## Entity specification @@ -82,16 +82,16 @@ HTTP events API. ## backend_api_url and entity_name is not set, the value returned by ## os.Hostname() will be used; if the output plugin is configured to send ## events to an agent_api_url, entity_name and entity_namespace are not used. - # [outputs.sensu-go.entity] + # [outputs.sensu.entity] # name = "server-01" # namespace = "default" ## Metrics specification ## Configure the tags for the metrics that are sent as part of the Sensu event - # [outputs.sensu-go.tags] + # [outputs.sensu.tags] # source = "telegraf" ## Configure the handler(s) for processing the provided metrics - # [outputs.sensu-go.metrics] + # [outputs.sensu.metrics] # handlers = ["influxdb","elasticsearch"] ``` diff --git a/plugins/outputs/sensu/sensu.go b/plugins/outputs/sensu/sensu.go index 3674765ef9b44..a3857b2cfceb9 100644 --- a/plugins/outputs/sensu/sensu.go +++ b/plugins/outputs/sensu/sensu.go @@ -167,7 +167,7 @@ var sampleConfig = ` ## Check specification ## The check name is the name to give the Sensu check associated with the event ## created. This maps to check.metatadata.name in the event. - [outputs.sensu-go.check] + [outputs.sensu.check] name = "telegraf" ## Entity specification @@ -178,17 +178,17 @@ var sampleConfig = ` ## backend_api_url and entity_name is not set, the value returned by ## os.Hostname() will be used; if the output plugin is configured to send ## events to an agent_api_url, entity_name and entity_namespace are not used. - # [outputs.sensu-go.entity] + # [outputs.sensu.entity] # name = "server-01" # namespace = "default" ## Metrics specification ## Configure the tags for the metrics that are sent as part of the Sensu event - # [outputs.sensu-go.tags] + # [outputs.sensu.tags] # source = "telegraf" ## Configure the handler(s) for processing the provided metrics - # [outputs.sensu-go.metrics] + # [outputs.sensu.metrics] # handlers = ["influxdb","elasticsearch"] ` @@ -401,7 +401,7 @@ func (s *Sensu) Init() error { } func init() { - outputs.Add("sensu-go", func() telegraf.Output { + outputs.Add("sensu", func() telegraf.Output { // Default configuration values // make a string from the defaultUrl const From 06e97756c8f5f38e74745c1f28ba55adff19161d Mon Sep 17 00:00:00 2001 From: Jeff Ashton Date: Wed, 10 Mar 2021 11:39:09 -0500 Subject: [PATCH 279/761] Writing unit tests for Kinesis output plugin Write method (#8930) --- plugins/outputs/kinesis/kinesis.go | 6 +- plugins/outputs/kinesis/kinesis_test.go | 350 ++++++++++++++++++++++++ 2 files changed, 354 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index fd233e5b80bd8..47d7aa10fb210 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -13,6 +13,9 @@ import ( "github.com/influxdata/telegraf/plugins/serializers" ) +// Limit set by AWS (https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecords.html) +const maxRecordsPerRequest uint32 = 500 + type ( KinesisOutput struct { Region string `toml:"region"` @@ -243,8 +246,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { r = append(r, &d) - if sz == 500 { - // Max Messages Per PutRecordRequest is 500 + if sz == maxRecordsPerRequest { elapsed := k.writeKinesis(r) k.Log.Debugf("Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) sz = 0 diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 49cfcedd5dfc0..4c7063c407521 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -8,8 +8,12 @@ import ( "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" "github.com/gofrs/uuid" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/serializers" + "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const zero int64 = 0 @@ -227,6 +231,272 @@ func TestWriteKinesis_WhenServiceError(t *testing.T) { }) } +func TestWrite_NoMetrics(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + svc := &mockKinesisPutRecords{} + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: "partitionKey", + }, + StreamName: "stream", + serializer: serializer, + svc: svc, + } + + err := k.Write([]telegraf.Metric{}) + assert.Nil(err, "Should not return error") + + svc.AssertRequests(assert, []*kinesis.PutRecordsInput{}) +} + +func TestWrite_SingleMetric(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + partitionKey := "partitionKey" + streamName := "stream" + + svc := &mockKinesisPutRecords{} + svc.SetupGenericResponse(1, 0) + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: partitionKey, + }, + StreamName: streamName, + serializer: serializer, + svc: svc, + } + + metric, metricData := createTestMetric(t, "metric1", serializer) + err := k.Write([]telegraf.Metric{metric}) + assert.Nil(err, "Should not return error") + + svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + { + StreamName: &streamName, + Records: []*kinesis.PutRecordsRequestEntry{ + { + PartitionKey: &partitionKey, + Data: metricData, + }, + }, + }, + }) +} + +func TestWrite_MultipleMetrics_SinglePartialRequest(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + partitionKey := "partitionKey" + streamName := "stream" + + svc := &mockKinesisPutRecords{} + svc.SetupGenericResponse(3, 0) + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: partitionKey, + }, + StreamName: streamName, + serializer: serializer, + svc: svc, + } + + metrics, metricsData := createTestMetrics(t, 3, serializer) + err := k.Write(metrics) + assert.Nil(err, "Should not return error") + + svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + { + StreamName: &streamName, + Records: createPutRecordsRequestEntries( + metricsData, + &partitionKey, + ), + }, + }) +} + +func TestWrite_MultipleMetrics_SingleFullRequest(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + partitionKey := "partitionKey" + streamName := "stream" + + svc := &mockKinesisPutRecords{} + svc.SetupGenericResponse(maxRecordsPerRequest, 0) + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: partitionKey, + }, + StreamName: streamName, + serializer: serializer, + svc: svc, + } + + metrics, metricsData := createTestMetrics(t, maxRecordsPerRequest, serializer) + err := k.Write(metrics) + assert.Nil(err, "Should not return error") + + svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + { + StreamName: &streamName, + Records: createPutRecordsRequestEntries( + metricsData, + &partitionKey, + ), + }, + }) +} + +func TestWrite_MultipleMetrics_MultipleRequests(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + partitionKey := "partitionKey" + streamName := "stream" + + svc := &mockKinesisPutRecords{} + svc.SetupGenericResponse(maxRecordsPerRequest, 0) + svc.SetupGenericResponse(1, 0) + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: partitionKey, + }, + StreamName: streamName, + serializer: serializer, + svc: svc, + } + + metrics, metricsData := createTestMetrics(t, maxRecordsPerRequest+1, serializer) + err := k.Write(metrics) + assert.Nil(err, "Should not return error") + + svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + { + StreamName: &streamName, + Records: createPutRecordsRequestEntries( + metricsData[0:maxRecordsPerRequest], + &partitionKey, + ), + }, + { + StreamName: &streamName, + Records: createPutRecordsRequestEntries( + metricsData[maxRecordsPerRequest:], + &partitionKey, + ), + }, + }) +} + +func TestWrite_MultipleMetrics_MultipleFullRequests(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + partitionKey := "partitionKey" + streamName := "stream" + + svc := &mockKinesisPutRecords{} + svc.SetupGenericResponse(maxRecordsPerRequest, 0) + svc.SetupGenericResponse(maxRecordsPerRequest, 0) + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: partitionKey, + }, + StreamName: streamName, + serializer: serializer, + svc: svc, + } + + metrics, metricsData := createTestMetrics(t, maxRecordsPerRequest*2, serializer) + err := k.Write(metrics) + assert.Nil(err, "Should not return error") + + svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + { + StreamName: &streamName, + Records: createPutRecordsRequestEntries( + metricsData[0:maxRecordsPerRequest], + &partitionKey, + ), + }, + { + StreamName: &streamName, + Records: createPutRecordsRequestEntries( + metricsData[maxRecordsPerRequest:], + &partitionKey, + ), + }, + }) +} + +func TestWrite_SerializerError(t *testing.T) { + assert := assert.New(t) + serializer := influx.NewSerializer() + partitionKey := "partitionKey" + streamName := "stream" + + svc := &mockKinesisPutRecords{} + svc.SetupGenericResponse(2, 0) + + k := KinesisOutput{ + Log: testutil.Logger{}, + Partition: &Partition{ + Method: "static", + Key: partitionKey, + }, + StreamName: streamName, + serializer: serializer, + svc: svc, + } + + metric1, metric1Data := createTestMetric(t, "metric1", serializer) + metric2, metric2Data := createTestMetric(t, "metric2", serializer) + + // metric is invalid because of empty name + invalidMetric := testutil.TestMetric(3, "") + + err := k.Write([]telegraf.Metric{ + metric1, + invalidMetric, + metric2, + }) + assert.Nil(err, "Should not return error") + + // remaining valid metrics should still get written + svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + { + StreamName: &streamName, + Records: []*kinesis.PutRecordsRequestEntry{ + { + PartitionKey: &partitionKey, + Data: metric1Data, + }, + { + PartitionKey: &partitionKey, + Data: metric2Data, + }, + }, + }, + }) +} + type mockKinesisPutRecordsResponse struct { Output *kinesis.PutRecordsOutput Err error @@ -253,6 +523,35 @@ func (m *mockKinesisPutRecords) SetupResponse( }) } +func (m *mockKinesisPutRecords) SetupGenericResponse( + successfulRecordCount uint32, + failedRecordCount uint32, +) { + + errorCode := "InternalFailure" + errorMessage := "Internal Service Failure" + shard := "shardId-000000000003" + + records := []*kinesis.PutRecordsResultEntry{} + + for i := uint32(0); i < successfulRecordCount; i++ { + sequenceNumber := fmt.Sprintf("%d", i) + records = append(records, &kinesis.PutRecordsResultEntry{ + SequenceNumber: &sequenceNumber, + ShardId: &shard, + }) + } + + for i := uint32(0); i < failedRecordCount; i++ { + records = append(records, &kinesis.PutRecordsResultEntry{ + ErrorCode: &errorCode, + ErrorMessage: &errorMessage, + }) + } + + m.SetupResponse(int64(failedRecordCount), records) +} + func (m *mockKinesisPutRecords) SetupErrorResponse(err error) { m.responses = append(m.responses, &mockKinesisPutRecordsResponse{ @@ -323,3 +622,54 @@ func (m *mockKinesisPutRecords) AssertRequests( } } } + +func createTestMetric( + t *testing.T, + name string, + serializer serializers.Serializer, +) (telegraf.Metric, []byte) { + + metric := testutil.TestMetric(1, name) + + data, err := serializer.Serialize(metric) + require.NoError(t, err) + + return metric, data +} + +func createTestMetrics( + t *testing.T, + count uint32, + serializer serializers.Serializer, +) ([]telegraf.Metric, [][]byte) { + + metrics := make([]telegraf.Metric, count) + metricsData := make([][]byte, count) + + for i := uint32(0); i < count; i++ { + name := fmt.Sprintf("metric%d", i) + metric, data := createTestMetric(t, name, serializer) + metrics[i] = metric + metricsData[i] = data + } + + return metrics, metricsData +} + +func createPutRecordsRequestEntries( + metricsData [][]byte, + partitionKey *string, +) []*kinesis.PutRecordsRequestEntry { + + count := len(metricsData) + records := make([]*kinesis.PutRecordsRequestEntry, count) + + for i := 0; i < count; i++ { + records[i] = &kinesis.PutRecordsRequestEntry{ + PartitionKey: partitionKey, + Data: metricsData[i], + } + } + + return records +} From 76516518677681775be2177fb06c71e41b4145b2 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 10 Mar 2021 12:11:19 -0500 Subject: [PATCH 280/761] add more logging to ping plugin --- plugins/inputs/ping/ping.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 203d9c481d54b..94705484555ab 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -208,6 +208,7 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { stats, err := p.nativePingFunc(destination) if err != nil { + p.Log.Errorf("ping failed: %s", err.Error()) if strings.Contains(err.Error(), "unknown") { fields["result_code"] = 1 } else { @@ -224,12 +225,14 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { } if stats.PacketsSent == 0 { + p.Log.Debug("no packets sent") fields["result_code"] = 2 acc.AddFields("ping", fields, tags) return } if stats.PacketsRecv == 0 { + p.Log.Debug("no packets received") fields["result_code"] = 1 fields["percent_packet_loss"] = float64(100) acc.AddFields("ping", fields, tags) From 7726f94621317abc8aa8e7100676cc79e98450fb Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 10 Mar 2021 16:55:39 -0500 Subject: [PATCH 281/761] fix ping tests --- plugins/inputs/ping/ping_test.go | 2 ++ plugins/inputs/ping/ping_windows_test.go | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 752714a868ad7..daebeb1f55635 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -269,6 +269,7 @@ func TestPingGatherIntegration(t *testing.T) { var acc testutil.Accumulator p, ok := inputs.Inputs["ping"]().(*Ping) + p.Log = testutil.Logger{} require.True(t, ok) p.Urls = []string{"localhost", "influxdata.com"} err := acc.GatherError(p.Gather) @@ -491,6 +492,7 @@ func TestPingGatherNative(t *testing.T) { func TestNoPacketsSent(t *testing.T) { p := &Ping{ + Log: testutil.Logger{}, Urls: []string{"localhost", "127.0.0.2"}, Method: "native", Count: 5, diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index 4618ec4db4942..0986d58bc74a8 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -113,6 +113,7 @@ func mockErrorHostPinger(binary string, timeout float64, args ...string) (string func TestBadPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.amazon.com"}, pingHost: mockErrorHostPinger, } @@ -133,6 +134,7 @@ func TestBadPingGather(t *testing.T) { func TestArguments(t *testing.T) { arguments := []string{"-c", "3"} p := Ping{ + Log: testutil.Logger{}, Count: 2, Timeout: 12.0, Arguments: arguments, @@ -169,6 +171,7 @@ func mockLossyHostPinger(binary string, timeout float64, args ...string) (string func TestLossyPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.google.com"}, pingHost: mockLossyHostPinger, } @@ -229,6 +232,7 @@ func mockFatalHostPinger(binary string, timeout float64, args ...string) (string func TestFatalPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.amazon.com"}, pingHost: mockFatalHostPinger, } @@ -274,6 +278,7 @@ func mockUnreachableHostPinger(binary string, timeout float64, args ...string) ( func TestUnreachablePingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.google.com"}, pingHost: mockUnreachableHostPinger, } @@ -321,6 +326,7 @@ func mockTTLExpiredPinger(binary string, timeout float64, args ...string) (strin func TestTTLExpiredPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.google.com"}, pingHost: mockTTLExpiredPinger, } @@ -351,6 +357,7 @@ func TestTTLExpiredPingGather(t *testing.T) { func TestPingBinary(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.google.com"}, Binary: "ping6", pingHost: func(binary string, timeout float64, args ...string) (string, error) { From 6b8d3601fe42413ed934c64a4ca69930a86b7e83 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 10 Mar 2021 17:07:40 -0500 Subject: [PATCH 282/761] Update changelog (cherry picked from commit 5698d96f66197404df3d9bfaeb7e9bd065ece20a) --- CHANGELOG.md | 17 ++++++++++++++--- etc/telegraf.conf | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ed4494c54ce8..88534431b60c4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,12 @@ -## v1.18.0-rc0 [2021-03-03] +## v1.18.0-rc1 [2021-03-10] + +#### Release Notes + + - Support Go version 1.16 #### Bugfixes - - [#7312](https://github.com/influxdata/telegraf/pull/7312) `inputs.docker` [plugins/input/docker] Make perdevice affect also cpu and add class granularity through perdevice_include/total_include + - [#7312](https://github.com/influxdata/telegraf/pull/7312) `inputs.docker` CPU stats respect perdevice - [#8397](https://github.com/influxdata/telegraf/pull/8397) `outputs.dynatrace` Dynatrace Plugin: Make conversion to counters possible / Changed large bulk handling - [#8655](https://github.com/influxdata/telegraf/pull/8655) `inputs.sqlserver` SqlServer - fix for default server list - [#8703](https://github.com/influxdata/telegraf/pull/8703) `inputs.docker` Use consistent container name in docker input plugin @@ -15,6 +19,9 @@ - [#8873](https://github.com/influxdata/telegraf/pull/8873) `processors.ifname` Fix mutex locking around ifname cache - [#8720](https://github.com/influxdata/telegraf/pull/8720) `parsers.influx` fix: remove ambiguity on '\v' from line-protocol parser - [#8678](https://github.com/influxdata/telegraf/pull/8678) `inputs.redis` Fix Redis output field type inconsistencies + - [#8953](https://github.com/influxdata/telegraf/pull/8953) `agent` Reset the flush interval timer when flush is requested or batch is ready. + - [#8954](https://github.com/influxdata/telegraf/pull/8954) `common.kafka` Fix max open requests to one if idempotent writes is set to true + - [#8721](https://github.com/influxdata/telegraf/pull/8721) `inputs.kube_inventory` Set $HOSTIP in default URL #### Features @@ -33,6 +40,8 @@ - [#8772](https://github.com/influxdata/telegraf/pull/8772) `processors.starlark` Allow to provide constants to a starlark script - [#8749](https://github.com/influxdata/telegraf/pull/8749) `outputs.newrelic` Add HTTP proxy setting to New Relic output plugin - [#8543](https://github.com/influxdata/telegraf/pull/8543) `inputs.elasticsearch` Add configurable number of 'most recent' date-stamped indices to gather in Elasticsearch input + - [#8675](https://github.com/influxdata/telegraf/pull/8675) `processors.starlark` Add Starlark parsing example of nested JSON + - [#8762](https://github.com/influxdata/telegraf/pull/8762) `inputs.prometheus` Optimize for bigger kubernetes clusters (500+ pods) #### New Input Plugins @@ -40,6 +49,8 @@ - [#8525](https://github.com/influxdata/telegraf/pull/8525) Add CSGO SRCDS input plugin - [#8751](https://github.com/influxdata/telegraf/pull/8751) Adding a new directory monitor input plugin. - [#6653](https://github.com/influxdata/telegraf/pull/6653) Add Beat input plugin + - [#4615](https://github.com/influxdata/telegraf/pull/4615) Add NFS client input + - [#8931](https://github.com/influxdata/telegraf/pull/8931) Add XML parser using XPath queries #### New Output Plugins @@ -126,7 +137,7 @@ #### New External Plugins - - [#8646](https://github.com/influxdata/telegraf/pull/8646) [Open Hardware Monitoring](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) Input Plugin + - [#8646](https://github.com/influxdata/telegraf/pull/8646) [Open Hardware Monitoring](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) Input Plugin ## v1.17.0 [2020-12-18] diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 88217ac2de40c..d7705c5239925 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -4822,6 +4822,35 @@ # # no configuration +# # Read per-mount NFS client metrics from /proc/self/mountstats +# [[inputs.nfsclient]] +# ## Read more low-level metrics (optional, defaults to false) +# # fullstat = false +# +# ## List of mounts to explictly include or exclude (optional) +# ## The pattern (Go regexp) is matched against the mount point (not the +# ## device being mounted). If include_mounts is set, all mounts are ignored +# ## unless present in the list. If a mount is listed in both include_mounts +# ## and exclude_mounts, it is excluded. Go regexp patterns can be used. +# # include_mounts = [] +# # exclude_mounts = [] +# +# ## List of operations to include or exclude from collecting. This applies +# ## only when fullstat=true. Symantics are similar to {include,exclude}_mounts: +# ## the default is to collect everything; when include_operations is set, only +# ## those OPs are collected; when exclude_operations is set, all are collected +# ## except those listed. If include and exclude are set, the OP is excluded. +# ## See /proc/self/mountstats for a list of valid operations; note that +# ## NFSv3 and NFSv4 have different lists. While it is not possible to +# ## have different include/exclude lists for NFSv3/4, unused elements +# ## in the list should be okay. It is possible to have different lists +# ## for different mountpoints: use mulitple [[input.nfsclient]] stanzas, +# ## with their own lists. See "include_mounts" above, and be careful of +# ## duplicate metrics. +# # include_operations = [] +# # exclude_operations = [] + + # # Read Nginx's basic status information (ngx_http_stub_status_module) # [[inputs.nginx]] # # An array of Nginx stub_status URI to gather stats. @@ -7549,6 +7578,16 @@ # ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. # ## - prometheus.io/port: If port is not 9102 use this annotation # # monitor_kubernetes_pods = true +# ## Get the list of pods to scrape with either the scope of +# ## - cluster: the kubernetes watch api (default, no need to specify) +# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. +# # pod_scrape_scope = "cluster" +# ## Only for node scrape scope: node IP of the node that telegraf is running on. +# ## Either this config or the environment variable NODE_IP must be set. +# # node_ip = "10.180.1.1" +# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. +# ## Default is 60 seconds. +# # pod_scrape_interval = 60 # ## Restricts Kubernetes monitoring to a single namespace # ## ex: monitor_kubernetes_pods_namespace = "default" # # monitor_kubernetes_pods_namespace = "" From c1a6312ce3175aa18a8cf3fe8158d25ce61d2a08 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Thu, 11 Mar 2021 12:19:35 -0500 Subject: [PATCH 283/761] Include DMG files when syncing release artifacts to AWS (#8972) --- scripts/release.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/release.sh b/scripts/release.sh index 41cb0cd7fddac..b445efc0494b3 100644 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -178,4 +178,5 @@ aws s3 sync ./ "s3://$BUCKET/" \ --include "*.zip" \ --include "*.DIGESTS" \ --include "*.asc" \ + --include "*.dmg" \ --acl public-read From 1b7a52d0b645ea718f0ebc746698aee5fb993493 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Thu, 11 Mar 2021 15:09:22 -0500 Subject: [PATCH 284/761] Mac entry script: Add space before calling path so that zsh doesn't remove first character of path when prompting for update. (#8976) * Add space before calling path so that zsh doesn't remove first character of path when prompting for update. * Updated config.yml * Update config.yml * Update config.yml --- scripts/telegraf_entry_mac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/telegraf_entry_mac b/scripts/telegraf_entry_mac index 2031d6c1fc309..887631e549f73 100644 --- a/scripts/telegraf_entry_mac +++ b/scripts/telegraf_entry_mac @@ -7,7 +7,7 @@ else cd $currentDir osascript< Date: Thu, 11 Mar 2021 14:35:10 -0600 Subject: [PATCH 285/761] Move golangci-lint from circle-ci to github actions (#8975) * Move lint to github actions * Update version * timeout and scheduled trigger --- .circleci/config.yml | 10 ---------- .github/workflows/golangci-lint.yml | 23 +++++++++++++++++++++++ 2 files changed, 23 insertions(+), 10 deletions(-) create mode 100644 .github/workflows/golangci-lint.yml diff --git a/.circleci/config.yml b/.circleci/config.yml index 0877b3c91b87b..76739fdd14abb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -71,14 +71,6 @@ commands: paths: - 'dist' jobs: - linter: - executor: go-1_16 - steps: - - checkout - - restore_cache: - key: go-mod-v1-{{ checksum "go.sum" }} - - run: wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.37.0 - - run: make lint deps: executor: go-1_16 steps: @@ -209,7 +201,6 @@ workflows: version: 2 check: jobs: - - 'linter' - 'macdeps': filters: tags: @@ -287,7 +278,6 @@ workflows: only: /.*/ nightly: jobs: - - 'linter' - 'deps' - 'macdeps' - 'test-go-1_15': diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000000000..e4154182649a0 --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,23 @@ +name: golangci-lint +on: + push: + branches: + - master + pull_request: + branches: + - master + schedule: + # Trigger every day at 16:00 UTC + - cron: '0 16 * * *' +jobs: + golangci-pr: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: golangci-lint + uses: golangci/golangci-lint-action@v2 + with: + version: v1.38 + only-new-issues: true + args: --timeout=5m0s From 35b75e959cc708b6b918ab4f6663bd7e1a7afb44 Mon Sep 17 00:00:00 2001 From: Madhushree Sreenivasa Date: Thu, 11 Mar 2021 13:04:09 -0800 Subject: [PATCH 286/761] Filter data out from system databases for Azure SQL DB only (#8849) * Excluding data from system databases like msdb,model which are not relevant for monitoring in Azure SQL Please enter the commit message for your changes. Lines starting * Addressing review comments to handle null scenarios --- plugins/inputs/sqlserver/azuresqlqueries.go | 203 ++++++++++---------- 1 file changed, 104 insertions(+), 99 deletions(-) diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go index 03da02e879642..41c0d384ba557 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqlqueries.go @@ -438,106 +438,111 @@ WITH PerfCounters AS ( ELSE d.[physical_database_name] END WHERE - counter_name IN ( - 'SQL Compilations/sec' - ,'SQL Re-Compilations/sec' - ,'User Connections' - ,'Batch Requests/sec' - ,'Logouts/sec' - ,'Logins/sec' - ,'Processes blocked' - ,'Latch Waits/sec' - ,'Full Scans/sec' - ,'Index Searches/sec' - ,'Page Splits/sec' - ,'Page lookups/sec' - ,'Page reads/sec' - ,'Page writes/sec' - ,'Readahead pages/sec' - ,'Lazy writes/sec' - ,'Checkpoint pages/sec' - ,'Table Lock Escalations/sec' - ,'Page life expectancy' - ,'Log File(s) Size (KB)' - ,'Log File(s) Used Size (KB)' - ,'Data File(s) Size (KB)' - ,'Transactions/sec' - ,'Write Transactions/sec' - ,'Active Transactions' - ,'Log Growths' - ,'Active Temp Tables' - ,'Logical Connections' - ,'Temp Tables Creation Rate' - ,'Temp Tables For Destruction' - ,'Free Space in tempdb (KB)' - ,'Version Store Size (KB)' - ,'Memory Grants Pending' - ,'Memory Grants Outstanding' - ,'Free list stalls/sec' - ,'Buffer cache hit ratio' - ,'Buffer cache hit ratio base' - ,'Backup/Restore Throughput/sec' - ,'Total Server Memory (KB)' - ,'Target Server Memory (KB)' - ,'Log Flushes/sec' - ,'Log Flush Wait Time' - ,'Memory broker clerk size' - ,'Log Bytes Flushed/sec' - ,'Bytes Sent to Replica/sec' - ,'Log Send Queue' - ,'Bytes Sent to Transport/sec' - ,'Sends to Replica/sec' - ,'Bytes Sent to Transport/sec' - ,'Sends to Transport/sec' - ,'Bytes Received from Replica/sec' - ,'Receives from Replica/sec' - ,'Flow Control Time (ms/sec)' - ,'Flow Control/sec' - ,'Resent Messages/sec' - ,'Redone Bytes/sec' - ,'XTP Memory Used (KB)' - ,'Transaction Delay' - ,'Log Bytes Received/sec' - ,'Log Apply Pending Queue' - ,'Redone Bytes/sec' - ,'Recovery Queue' - ,'Log Apply Ready Queue' - ,'CPU usage %' - ,'CPU usage % base' - ,'Queued requests' - ,'Requests completed/sec' - ,'Blocked tasks' - ,'Active memory grant amount (KB)' - ,'Disk Read Bytes/sec' - ,'Disk Read IO Throttled/sec' - ,'Disk Read IO/sec' - ,'Disk Write Bytes/sec' - ,'Disk Write IO Throttled/sec' - ,'Disk Write IO/sec' - ,'Used memory (KB)' - ,'Forwarded Records/sec' - ,'Background Writer pages/sec' - ,'Percent Log Used' - ,'Log Send Queue KB' - ,'Redo Queue KB' - ,'Mirrored Write Transactions/sec' - ,'Group Commit Time' - ,'Group Commits/Sec' - ,'Distributed Query' - ,'DTC calls' - ,'Query Store CPU usage' - ) OR ( - spi.[object_name] LIKE '%User Settable%' - OR spi.[object_name] LIKE '%SQL Errors%' - OR spi.[object_name] LIKE '%Batch Resp Statistics%' - ) OR ( - spi.[instance_name] IN ('_Total') - AND spi.[counter_name] IN ( - 'Lock Timeouts/sec' - ,'Lock Timeouts (timeout > 0)/sec' - ,'Number of Deadlocks/sec' - ,'Lock Waits/sec' + /*filter out unnecessary SQL DB system database counters, other than master and tempdb*/ + NOT (spi.object_name LIKE 'MSSQL%:Databases%' AND spi.instance_name IN ('model','model_masterdb','model_userdb','msdb','mssqlsystemresource')) + AND + ( + counter_name IN ( + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' + ) OR ( + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' + ) OR ( + spi.[instance_name] IN ('_Total') + AND spi.[counter_name] IN ( + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' + ) ) ) ) From 30e189df16e72543522f9a1b1ecc047d293d776f Mon Sep 17 00:00:00 2001 From: Connor Quagliana Date: Thu, 11 Mar 2021 15:07:38 -0600 Subject: [PATCH 287/761] Add an optional health metric for the sqlserver input plugin (#8544) --- etc/telegraf.conf | 5 + plugins/inputs/sqlserver/README.md | 22 +++ plugins/inputs/sqlserver/connectionstring.go | 100 +++++++++++ plugins/inputs/sqlserver/sqlserver.go | 85 +++++++++- plugins/inputs/sqlserver/sqlserver_test.go | 164 +++++++++++++++++++ 5 files changed, 372 insertions(+), 4 deletions(-) create mode 100644 plugins/inputs/sqlserver/connectionstring.go diff --git a/etc/telegraf.conf b/etc/telegraf.conf index d7705c5239925..c6774d5a30ef7 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -5859,6 +5859,11 @@ # ## If you are using AzureDB, setting this to true will gather resource utilization metrics # # azuredb = false +# ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health". +# ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers". +# ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues. +# ## This setting/metric is optional and is disabled by default. +# # health_metric = false # # Gather timeseries from Google Cloud Platform v3 monitoring API # [[inputs.stackdriver]] diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index db15c4af755a6..e69a2d41f9e21 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -101,6 +101,12 @@ GO ## If you are using AzureDB, setting this to true will gather resource utilization metrics # azuredb = false + ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health". + ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers". + ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues. + ## This setting/metric is optional and is disabled by default. + # health_metric = false + ## Possible queries accross different versions of the collectors ## Queries enabled by default for specific Database Type @@ -323,4 +329,20 @@ Version 2 queries have the following tags: - `sql_instance`: Physical host and instance name (hostname:instance) - `database_name`: For Azure SQLDB, database_name denotes the name of the Azure SQL Database as server name is a logical construct. +#### Health Metric +All collection versions (version 1, version 2, and database_type) support an optional plugin health metric called `sqlserver_telegraf_health`. This metric tracks if connections to SQL Server are succeeding or failing. Users can leverage this metric to detect if their SQL Server monitoring is not working as intended. + +In the configuration file, toggling `health_metric` to `true` will enable collection of this metric. By default, this value is set to `false` and the metric is not collected. The health metric emits one record for each connection specified by `servers` in the configuration file. + +The health metric emits the following tags: +- `sql_instance` - Name of the server specified in the connection string. This value is emitted as-is in the connection string. If the server could not be parsed from the connection string, a constant placeholder value is emitted +- `database_name` - Name of the database or (initial catalog) specified in the connection string. This value is emitted as-is in the connection string. If the database could not be parsed from the connection string, a constant placeholder value is emitted + +The health metric emits the following fields: +- `attempted_queries` - Number of queries that were attempted for this connection +- `successful_queries` - Number of queries that completed successfully for this connection +- `database_type` - Type of database as specified by `database_type`. If `database_type` is empty, the `QueryVersion` and `AzureDB` fields are concatenated instead + +If `attempted_queries` and `successful_queries` are not equal for a given connection, some metrics were not successfully gathered for that connection. If `successful_queries` is 0, no metrics were successfully gathered. + [cardinality]: /docs/FAQ.md#user-content-q-how-can-i-manage-series-cardinality diff --git a/plugins/inputs/sqlserver/connectionstring.go b/plugins/inputs/sqlserver/connectionstring.go new file mode 100644 index 0000000000000..54b5cd8ae6460 --- /dev/null +++ b/plugins/inputs/sqlserver/connectionstring.go @@ -0,0 +1,100 @@ +package sqlserver + +import ( + "net/url" + "strings" +) + +const ( + emptySqlInstance = "" + emptyDatabaseName = "" +) + +// getConnectionIdentifiers returns the sqlInstance and databaseName from the given connection string. +// The name of the SQL instance is returned as-is in the connection string +// If the connection string could not be parsed or sqlInstance/databaseName were not present, a placeholder value is returned +func getConnectionIdentifiers(connectionString string) (sqlInstance string, databaseName string) { + if len(connectionString) == 0 { + return emptySqlInstance, emptyDatabaseName + } + + trimmedConnectionString := strings.TrimSpace(connectionString) + + if strings.HasPrefix(trimmedConnectionString, "odbc:") { + connectionStringWithoutOdbc := strings.TrimPrefix(trimmedConnectionString, "odbc:") + return parseConnectionStringKeyValue(connectionStringWithoutOdbc) + } + if strings.HasPrefix(trimmedConnectionString, "sqlserver://") { + return parseConnectionStringURL(trimmedConnectionString) + } + return parseConnectionStringKeyValue(trimmedConnectionString) +} + +// parseConnectionStringKeyValue parses a "key=value;" connection string and returns the SQL instance and database name +func parseConnectionStringKeyValue(connectionString string) (sqlInstance string, databaseName string) { + sqlInstance = "" + databaseName = "" + + keyValuePairs := strings.Split(connectionString, ";") + for _, keyValuePair := range keyValuePairs { + if len(keyValuePair) == 0 { + continue + } + + keyAndValue := strings.SplitN(keyValuePair, "=", 2) + key := strings.TrimSpace(strings.ToLower(keyAndValue[0])) + if len(key) == 0 { + continue + } + + value := "" + if len(keyAndValue) > 1 { + value = strings.TrimSpace(keyAndValue[1]) + } + if strings.EqualFold("server", key) { + sqlInstance = value + continue + } + if strings.EqualFold("database", key) { + databaseName = value + } + } + + if sqlInstance == "" { + sqlInstance = emptySqlInstance + } + if databaseName == "" { + databaseName = emptyDatabaseName + } + + return sqlInstance, databaseName +} + +// parseConnectionStringURL parses a URL-formatted connection string and returns the SQL instance and database name +func parseConnectionStringURL(connectionString string) (sqlInstance string, databaseName string) { + sqlInstance = emptySqlInstance + databaseName = emptyDatabaseName + + u, err := url.Parse(connectionString) + if err != nil { + return emptySqlInstance, emptyDatabaseName + } + + sqlInstance = u.Hostname() + + if len(u.Path) > 1 { + // There was a SQL instance name specified in addition to the host + // E.g. "the.host.com:1234/InstanceName" or "the.host.com/InstanceName" + sqlInstance = sqlInstance + "\\" + u.Path[1:] + } + + query := u.Query() + for key, value := range query { + if strings.EqualFold("database", key) { + databaseName = value[0] + break + } + } + + return sqlInstance, databaseName +} diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index c789ace9b3994..75e52e6e8ed9f 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -21,6 +21,7 @@ type SQLServer struct { DatabaseType string `toml:"database_type"` IncludeQuery []string `toml:"include_query"` ExcludeQuery []string `toml:"exclude_query"` + HealthMetric bool `toml:"health_metric"` queries MapQuery isInitialized bool } @@ -36,8 +37,29 @@ type Query struct { // MapQuery type type MapQuery map[string]Query +// HealthMetric struct tracking the number of attempted vs successful connections for each connection string +type HealthMetric struct { + AttemptedQueries int + SuccessfulQueries int +} + const defaultServer = "Server=.;app name=telegraf;log=1;" +const ( + typeAzureSQLDB = "AzureSQLDB" + typeAzureSQLManagedInstance = "AzureSQLManagedInstance" + typeSQLServer = "SQLServer" +) + +const ( + healthMetricName = "sqlserver_telegraf_health" + healthMetricInstanceTag = "sql_instance" + healthMetricDatabaseTag = "database_name" + healthMetricAttemptedQueries = "attempted_queries" + healthMetricSuccessfulQueries = "successful_queries" + healthMetricDatabaseType = "database_type" +) + const sampleConfig = ` ## Specify instances to monitor with a list of connection strings. ## All connection parameters are optional. @@ -124,7 +146,7 @@ func initQueries(s *SQLServer) error { // Constant defintiions for type "AzureSQLDB" start with sqlAzureDB // Constant defintiions for type "AzureSQLManagedInstance" start with sqlAzureMI // Constant defintiions for type "SQLServer" start with sqlServer - if s.DatabaseType == "AzureSQLDB" { + if s.DatabaseType == typeAzureSQLDB { queries["AzureSQLDBResourceStats"] = Query{ScriptName: "AzureSQLDBResourceStats", Script: sqlAzureDBResourceStats, ResultByRow: false} queries["AzureSQLDBResourceGovernance"] = Query{ScriptName: "AzureSQLDBResourceGovernance", Script: sqlAzureDBResourceGovernance, ResultByRow: false} queries["AzureSQLDBWaitStats"] = Query{ScriptName: "AzureSQLDBWaitStats", Script: sqlAzureDBWaitStats, ResultByRow: false} @@ -135,7 +157,7 @@ func initQueries(s *SQLServer) error { queries["AzureSQLDBPerformanceCounters"] = Query{ScriptName: "AzureSQLDBPerformanceCounters", Script: sqlAzureDBPerformanceCounters, ResultByRow: false} queries["AzureSQLDBRequests"] = Query{ScriptName: "AzureSQLDBRequests", Script: sqlAzureDBRequests, ResultByRow: false} queries["AzureSQLDBSchedulers"] = Query{ScriptName: "AzureSQLDBSchedulers", Script: sqlAzureDBSchedulers, ResultByRow: false} - } else if s.DatabaseType == "AzureSQLManagedInstance" { + } else if s.DatabaseType == typeAzureSQLManagedInstance { queries["AzureSQLMIResourceStats"] = Query{ScriptName: "AzureSQLMIResourceStats", Script: sqlAzureMIResourceStats, ResultByRow: false} queries["AzureSQLMIResourceGovernance"] = Query{ScriptName: "AzureSQLMIResourceGovernance", Script: sqlAzureMIResourceGovernance, ResultByRow: false} queries["AzureSQLMIDatabaseIO"] = Query{ScriptName: "AzureSQLMIDatabaseIO", Script: sqlAzureMIDatabaseIO, ResultByRow: false} @@ -145,7 +167,7 @@ func initQueries(s *SQLServer) error { queries["AzureSQLMIPerformanceCounters"] = Query{ScriptName: "AzureSQLMIPerformanceCounters", Script: sqlAzureMIPerformanceCounters, ResultByRow: false} queries["AzureSQLMIRequests"] = Query{ScriptName: "AzureSQLMIRequests", Script: sqlAzureMIRequests, ResultByRow: false} queries["AzureSQLMISchedulers"] = Query{ScriptName: "AzureSQLMISchedulers", Script: sqlAzureMISchedulers, ResultByRow: false} - } else if s.DatabaseType == "SQLServer" { //These are still V2 queries and have not been refactored yet. + } else if s.DatabaseType == typeSQLServer { //These are still V2 queries and have not been refactored yet. queries["SQLServerPerformanceCounters"] = Query{ScriptName: "SQLServerPerformanceCounters", Script: sqlServerPerformanceCounters, ResultByRow: false} queries["SQLServerWaitStatsCategorized"] = Query{ScriptName: "SQLServerWaitStatsCategorized", Script: sqlServerWaitStatsCategorized, ResultByRow: false} queries["SQLServerDatabaseIO"] = Query{ScriptName: "SQLServerDatabaseIO", Script: sqlServerDatabaseIO, ResultByRow: false} @@ -222,18 +244,33 @@ func (s *SQLServer) Gather(acc telegraf.Accumulator) error { } var wg sync.WaitGroup + var mutex sync.Mutex + var healthMetrics = make(map[string]*HealthMetric) for _, serv := range s.Servers { for _, query := range s.queries { wg.Add(1) go func(serv string, query Query) { defer wg.Done() - acc.AddError(s.gatherServer(serv, query, acc)) + queryError := s.gatherServer(serv, query, acc) + + if s.HealthMetric { + mutex.Lock() + s.gatherHealth(healthMetrics, serv, queryError) + mutex.Unlock() + } + + acc.AddError(queryError) }(serv, query) } } wg.Wait() + + if s.HealthMetric { + s.accHealth(healthMetrics, acc) + } + return nil } @@ -323,6 +360,46 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e return nil } +// gatherHealth stores info about any query errors in the healthMetrics map +func (s *SQLServer) gatherHealth(healthMetrics map[string]*HealthMetric, serv string, queryError error) { + if healthMetrics[serv] == nil { + healthMetrics[serv] = &HealthMetric{} + } + + healthMetrics[serv].AttemptedQueries++ + if queryError == nil { + healthMetrics[serv].SuccessfulQueries++ + } +} + +// accHealth accumulates the query health data contained within the healthMetrics map +func (s *SQLServer) accHealth(healthMetrics map[string]*HealthMetric, acc telegraf.Accumulator) { + for connectionString, connectionStats := range healthMetrics { + sqlInstance, databaseName := getConnectionIdentifiers(connectionString) + tags := map[string]string{healthMetricInstanceTag: sqlInstance, healthMetricDatabaseTag: databaseName} + fields := map[string]interface{}{ + healthMetricAttemptedQueries: connectionStats.AttemptedQueries, + healthMetricSuccessfulQueries: connectionStats.SuccessfulQueries, + healthMetricDatabaseType: s.getDatabaseTypeToLog(), + } + + acc.AddFields(healthMetricName, fields, tags, time.Now()) + } +} + +// getDatabaseTypeToLog returns the type of database monitored by this plugin instance +func (s *SQLServer) getDatabaseTypeToLog() string { + if s.DatabaseType == typeAzureSQLDB || s.DatabaseType == typeAzureSQLManagedInstance || s.DatabaseType == typeSQLServer { + return s.DatabaseType + } + + logname := fmt.Sprintf("QueryVersion-%d", s.QueryVersion) + if s.AzureDB { + logname += "-AzureDB" + } + return logname +} + func (s *SQLServer) Init() error { if len(s.Servers) == 0 { log.Println("W! Warning: Server list is empty.") diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index 9af7003e08c84..b271c08d69519 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -138,6 +138,7 @@ func TestSqlServer_MultipleInstanceIntegration(t *testing.T) { require.NoError(t, err) assert.Equal(t, s.isInitialized, true) assert.Equal(t, s2.isInitialized, true) + // acc includes size metrics, and excludes memory metrics assert.False(t, acc.HasMeasurement("Memory breakdown (%)")) assert.True(t, acc.HasMeasurement("Log size (bytes)")) @@ -147,6 +148,89 @@ func TestSqlServer_MultipleInstanceIntegration(t *testing.T) { assert.False(t, acc2.HasMeasurement("Log size (bytes)")) } +func TestSqlServer_MultipleInstanceWithHealthMetricIntegration(t *testing.T) { + // Invoke Gather() from two separate configurations and + // confirm they don't interfere with each other. + // This test is intentionally similar to TestSqlServer_MultipleInstanceIntegration. + // It is separated to ensure that the health metric code does not affect other metrics + t.Skip("Skipping as unable to open tcp connection with host '127.0.0.1:1433") + + testServer := "Server=127.0.0.1;Port=1433;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" + s := &SQLServer{ + Servers: []string{testServer}, + ExcludeQuery: []string{"MemoryClerk"}, + } + s2 := &SQLServer{ + Servers: []string{testServer}, + ExcludeQuery: []string{"DatabaseSize"}, + HealthMetric: true, + } + + var acc, acc2 testutil.Accumulator + err := s.Gather(&acc) + require.NoError(t, err) + assert.Equal(t, s.isInitialized, true) + assert.Equal(t, s2.isInitialized, false) + + err = s2.Gather(&acc2) + require.NoError(t, err) + assert.Equal(t, s.isInitialized, true) + assert.Equal(t, s2.isInitialized, true) + + // acc includes size metrics, and excludes memory metrics and the health metric + assert.False(t, acc.HasMeasurement(healthMetricName)) + assert.False(t, acc.HasMeasurement("Memory breakdown (%)")) + assert.True(t, acc.HasMeasurement("Log size (bytes)")) + + // acc2 includes memory metrics and the health metric, and excludes size metrics + assert.True(t, acc2.HasMeasurement(healthMetricName)) + assert.True(t, acc2.HasMeasurement("Memory breakdown (%)")) + assert.False(t, acc2.HasMeasurement("Log size (bytes)")) + + sqlInstance, database := getConnectionIdentifiers(testServer) + tags := map[string]string{healthMetricInstanceTag: sqlInstance, healthMetricDatabaseTag: database} + assert.True(t, acc2.HasPoint(healthMetricName, tags, healthMetricAttemptedQueries, 9)) + assert.True(t, acc2.HasPoint(healthMetricName, tags, healthMetricSuccessfulQueries, 9)) +} + +func TestSqlServer_HealthMetric(t *testing.T) { + fakeServer1 := "localhost\\fakeinstance1;Database=fakedb1" + fakeServer2 := "localhost\\fakeinstance2;Database=fakedb2" + + s1 := &SQLServer{ + Servers: []string{fakeServer1, fakeServer2}, + IncludeQuery: []string{"DatabaseSize", "MemoryClerk"}, + HealthMetric: true, + } + + s2 := &SQLServer{ + Servers: []string{fakeServer1}, + IncludeQuery: []string{"DatabaseSize"}, + } + + // acc1 should have the health metric because it is specified in the config + var acc1 testutil.Accumulator + s1.Gather(&acc1) + assert.True(t, acc1.HasMeasurement(healthMetricName)) + + // There will be 2 attempted queries (because we specified 2 queries in IncludeQuery) + // Both queries should fail because the specified SQL instances do not exist + sqlInstance1, database1 := getConnectionIdentifiers(fakeServer1) + tags1 := map[string]string{healthMetricInstanceTag: sqlInstance1, healthMetricDatabaseTag: database1} + assert.True(t, acc1.HasPoint(healthMetricName, tags1, healthMetricAttemptedQueries, 2)) + assert.True(t, acc1.HasPoint(healthMetricName, tags1, healthMetricSuccessfulQueries, 0)) + + sqlInstance2, database2 := getConnectionIdentifiers(fakeServer2) + tags2 := map[string]string{healthMetricInstanceTag: sqlInstance2, healthMetricDatabaseTag: database2} + assert.True(t, acc1.HasPoint(healthMetricName, tags2, healthMetricAttemptedQueries, 2)) + assert.True(t, acc1.HasPoint(healthMetricName, tags2, healthMetricSuccessfulQueries, 0)) + + // acc2 should not have the health metric because it is not specified in the config + var acc2 testutil.Accumulator + s2.Gather(&acc2) + assert.False(t, acc2.HasMeasurement(healthMetricName)) +} + func TestSqlServer_MultipleInit(t *testing.T) { s := &SQLServer{} @@ -169,6 +253,86 @@ func TestSqlServer_MultipleInit(t *testing.T) { assert.Equal(t, s2.isInitialized, true) } +func TestSqlServer_ConnectionString(t *testing.T) { + // URL format + connectionString := "sqlserver://username:password@hostname.database.windows.net?database=databasename&connection+timeout=30" + sqlInstance, database := getConnectionIdentifiers(connectionString) + assert.Equal(t, "hostname.database.windows.net", sqlInstance) + assert.Equal(t, "databasename", database) + + connectionString = " sqlserver://hostname2.somethingelse.net:1433?database=databasename2" + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, "hostname2.somethingelse.net", sqlInstance) + assert.Equal(t, "databasename2", database) + + connectionString = "sqlserver://hostname3:1433/SqlInstanceName3?database=databasename3" + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, "hostname3\\SqlInstanceName3", sqlInstance) + assert.Equal(t, "databasename3", database) + + connectionString = " sqlserver://hostname4/SqlInstanceName4?database=databasename4&connection%20timeout=30" + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, "hostname4\\SqlInstanceName4", sqlInstance) + assert.Equal(t, "databasename4", database) + + connectionString = " sqlserver://username:password@hostname5?connection%20timeout=30" + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, "hostname5", sqlInstance) + assert.Equal(t, emptyDatabaseName, database) + + // odbc format + connectionString = "odbc:server=hostname.database.windows.net;user id=sa;database=master;Trusted_Connection=Yes;Integrated Security=true;" + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, "hostname.database.windows.net", sqlInstance) + assert.Equal(t, "master", database) + + connectionString = " odbc:server=192.168.0.1;user id=somethingelse;Integrated Security=true;Database=mydb " + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, "192.168.0.1", sqlInstance) + assert.Equal(t, "mydb", database) + + connectionString = " odbc:Server=servername\\instancename;Database=dbname;" + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, "servername\\instancename", sqlInstance) + assert.Equal(t, "dbname", database) + + connectionString = "server=hostname2.database.windows.net;user id=sa;Trusted_Connection=Yes;Integrated Security=true;" + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, "hostname2.database.windows.net", sqlInstance) + assert.Equal(t, emptyDatabaseName, database) + + connectionString = "invalid connection string" + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, emptySqlInstance, sqlInstance) + assert.Equal(t, emptyDatabaseName, database) + + // Key/value format + connectionString = " server=hostname.database.windows.net;user id=sa;database=master;Trusted_Connection=Yes;Integrated Security=true" + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, "hostname.database.windows.net", sqlInstance) + assert.Equal(t, "master", database) + + connectionString = " server=192.168.0.1;user id=somethingelse;Integrated Security=true;Database=mydb;" + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, "192.168.0.1", sqlInstance) + assert.Equal(t, "mydb", database) + + connectionString = "Server=servername\\instancename;Database=dbname; " + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, "servername\\instancename", sqlInstance) + assert.Equal(t, "dbname", database) + + connectionString = "server=hostname2.database.windows.net;user id=sa;Trusted_Connection=Yes;Integrated Security=true " + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, "hostname2.database.windows.net", sqlInstance) + assert.Equal(t, emptyDatabaseName, database) + + connectionString = "invalid connection string" + sqlInstance, database = getConnectionIdentifiers(connectionString) + assert.Equal(t, emptySqlInstance, sqlInstance) + assert.Equal(t, emptyDatabaseName, database) +} + func TestSqlServer_AGQueriesApplicableForDatabaseTypeSQLServer(t *testing.T) { // This test case checks where Availability Group (AG / HADR) queries return an output when included for processing for DatabaseType = SQLServer // And they should not be processed when DatabaseType = AzureSQLDB From ba8452d61dcd8c8ce750c4db5f1fcf527d9a4864 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Thu, 11 Mar 2021 16:19:23 -0500 Subject: [PATCH 288/761] resolved conflicts --- docs/LICENSE_OF_DEPENDENCIES.md | 5 + go.mod | 3 + go.sum | 19 + plugins/inputs/aliyuncms/README.md | 142 ++++++ plugins/inputs/aliyuncms/aliyuncms.go | 566 +++++++++++++++++++++ plugins/inputs/aliyuncms/aliyuncms_test.go | 410 +++++++++++++++ plugins/inputs/aliyuncms/discovery.go | 511 +++++++++++++++++++ plugins/inputs/all/all.go | 1 + 8 files changed, 1657 insertions(+) create mode 100644 plugins/inputs/aliyuncms/README.md create mode 100644 plugins/inputs/aliyuncms/aliyuncms.go create mode 100644 plugins/inputs/aliyuncms/aliyuncms_test.go create mode 100644 plugins/inputs/aliyuncms/discovery.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 0aff4fb299f4c..6b811a5a9bcb5 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -20,6 +20,7 @@ following works: - github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE) - github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE) - github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING) +- github.com/aliyun/alibaba-cloud-sdk-go [Apache License 2.0](https://github.com/aliyun/alibaba-cloud-sdk-go/blob/master/LICENSE) - github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) - github.com/antchfx/xmlquery [MIT License](https://github.com/antchfx/xmlquery/blob/master/LICENSE) - github.com/antchfx/xpath [MIT License](https://github.com/antchfx/xpath/blob/master/LICENSE) @@ -105,6 +106,7 @@ following works: - github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE) - github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) - github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE) +- github.com/json-iterator/go [MIT License](https://github.com/json-iterator/go/blob/master/LICENSE) - github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) - github.com/karrick/godirwalk [BSD 2-Clause "Simplified" License](https://github.com/karrick/godirwalk/blob/master/LICENSE) - github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) @@ -121,6 +123,8 @@ following works: - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) - github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE) - github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) +- github.com/modern-go/concurrent [Apache License 2.0](https://github.com/modern-go/concurrent/blob/master/LICENSE) +- github.com/modern-go/reflect2 [Apache License 2.0](https://github.com/modern-go/reflect2/blob/master/LICENSE) - github.com/multiplay/go-ts3 [BSD 2-Clause "Simplified" License](https://github.com/multiplay/go-ts3/blob/master/LICENSE) - github.com/naoina/go-stringutil [MIT License](https://github.com/naoina/go-stringutil/blob/master/LICENSE) - github.com/nats-io/jwt [Apache License 2.0](https://github.com/nats-io/jwt/blob/master/LICENSE) @@ -193,6 +197,7 @@ following works: - gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE) - gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE) - gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE) +- gopkg.in/ini.v1 [Apache License 2.0](https://github.com/go-ini/ini/blob/master/LICENSE) - gopkg.in/jcmturner/aescts.v1 [Apache License 2.0](https://github.com/jcmturner/aescts/blob/v1.0.1/LICENSE) - gopkg.in/jcmturner/dnsutils.v1 [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/v1.0.1/LICENSE) - gopkg.in/jcmturner/gokrb5.v7 [Apache License 2.0](https://github.com/jcmturner/gokrb5/tree/v7.5.0/LICENSE) diff --git a/go.mod b/go.mod index 705ed742ccc09..ff29eb2acf816 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( github.com/Shopify/sarama v1.27.2 github.com/aerospike/aerospike-client-go v1.27.0 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 + github.com/aliyun/alibaba-cloud-sdk-go v1.61.785 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 github.com/antchfx/xmlquery v1.3.3 github.com/antchfx/xpath v1.1.11 @@ -88,6 +89,7 @@ require ( github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect github.com/jackc/pgx v3.6.0+incompatible github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a + github.com/jmespath/go-jmespath v0.4.0 github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 @@ -128,6 +130,7 @@ require ( github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 + github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect github.com/tidwall/gjson v1.6.0 github.com/tinylib/msgp v1.1.5 github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect diff --git a/go.sum b/go.sum index 6a079a0556f98..7ec88378d2ad2 100644 --- a/go.sum +++ b/go.sum @@ -22,6 +22,7 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+ cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= @@ -98,6 +99,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.785 h1:3PVbcCSPY0f4timzlCQbDzL/7y/Z0d4YdEl23iAhSTE= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.785/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/antchfx/xmlquery v1.3.3 h1:HYmadPG0uz8CySdL68rB4DCLKXz2PurCjS3mnkVF4CQ= @@ -286,6 +289,7 @@ github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5 github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= @@ -334,6 +338,7 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -415,6 +420,7 @@ github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPI github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -427,6 +433,7 @@ github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+ github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= @@ -438,7 +445,9 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea h1:g2k+8WR7cHch4g0tBDhfiEvAp7fXxTNBiD1oC1Oxj3E= github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI= github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b h1:Rrp0ByJXEjhREMPGTt3aWYjoIsUGCbt21ekbeJcTWv0= github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= @@ -637,7 +646,11 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff h1:JcVn27VGCEwd33jyNj+3IqEbOmzAX9f9LILt3SoGPHU= github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= @@ -660,6 +673,8 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= +github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg= +github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= @@ -873,6 +888,7 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -913,6 +929,7 @@ golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:U gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.7.0 h1:Hdks0L0hgznZLG9nzXb8vZ0rRvqNvAcgAp84y7Mwkgw= gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= @@ -985,6 +1002,8 @@ gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= diff --git a/plugins/inputs/aliyuncms/README.md b/plugins/inputs/aliyuncms/README.md new file mode 100644 index 0000000000000..4304de593bbc2 --- /dev/null +++ b/plugins/inputs/aliyuncms/README.md @@ -0,0 +1,142 @@ +# Alibaba (aka Aliyun) CloudMonitor Service Statistics Input +Here and after we use `Aliyun` instead `Alibaba` as it is default naming across web console and docs. + +This plugin will pull Metric Statistics from Aliyun CMS. + +### Aliyun Authentication + +This plugin uses an [AccessKey](https://www.alibabacloud.com/help/doc-detail/53045.htm?spm=a2c63.p38356.b99.127.5cba21fdt5MJKr&parentId=28572) credential for Authentication with the Aliyun OpenAPI endpoint. +In the following order the plugin will attempt to authenticate. +1. Ram RoleARN credential if `access_key_id`, `access_key_secret`, `role_arn`, `role_session_name` is specified +2. AccessKey STS token credential if `access_key_id`, `access_key_secret`, `access_key_sts_token` is specified +3. AccessKey credential if `access_key_id`, `access_key_secret` is specified +4. Ecs Ram Role Credential if `role_name` is specified +5. RSA keypair credential if `private_key`, `public_key_id` is specified +6. Environment variables credential +7. Instance metadata credential + +### Configuration: + +```toml + ## Aliyun Credentials + ## Credentials are loaded in the following order + ## 1) Ram RoleArn credential + ## 2) AccessKey STS token credential + ## 3) AccessKey credential + ## 4) Ecs Ram Role credential + ## 5) RSA keypair credential + ## 6) Environment variables credential + ## 7) Instance metadata credential + + # access_key_id = "" + # access_key_secret = "" + # access_key_sts_token = "" + # role_arn = "" + # role_session_name = "" + # private_key = "" + # public_key_id = "" + # role_name = "" + + # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all + # metrics are made available to the 1 minute period. Some are collected at + # 3 minute, 5 minute, or larger intervals. + # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv + # Note that if a period is configured that is smaller than the minimum for a + # particular metric, that metric will not be returned by the Aliyun OpenAPI + # and will not be collected by Telegraf. + # + ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) + period = "5m" + + ## Collection Delay (required - must account for metrics availability via AliyunCMS API) + delay = "1m" + + ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid + ## gaps or overlap in pulled data + interval = "5m" + + ## Metric Statistic Project (required) + project = "acs_slb_dashboard" + + ## Maximum requests per second, default value is 200 + ratelimit = 200 + + ## Discovery regions set the scope for object discovery, the discovered info can be used to enrich + ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then + ## it will be reported on the start - foo example for 'acs_cdn' project: + ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) + ## Currently, discovery supported for the following projects: + ## - acs_ecs_dashboard + ## - acs_rds_dashboard + ## - acs_slb_dashboard + ## - acs_vpc_eip + ## + ## If not set, all regions would be covered, it can provide a significant load on API, so the recommendation here + ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm + discovery_regions = ["cn-hongkong"] + + ## how often the discovery API call executed (default 1m) + #discovery_interval = "1m" + + ## Metrics to Pull (Required) + [[inputs.aliyuncms.metrics]] + ## Metrics names to be requested, + ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + names = ["InstanceActiveConnection", "InstanceNewConnection"] + + ## Dimension filters for Metric (these are optional). + ## This allows to get additional metric dimension. If dimension is not specified it can be returned or + ## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + ## + ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled) + ## Values specified here would be added into the list of discovered objects. + ## You can specify either single dimension: + #dimensions = '{"instanceId": "p-example"}' + + ## Or you can specify several dimensions at once: + #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' + + ## Enrichment tags, can be added from discovery (if supported) + ## Notation is : + ## To figure out which fields are available, consult the Describe API per project. + ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers¶ms={}&tab=MOCK&lang=GO + #tag_query_path = [ + # "address:Address", + # "name:LoadBalancerName", + # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" + # ] + ## The following tags added by default: regionId (if discovery enabled), userId, instanceId. + + ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery + ## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage + ## of discovery scope vs monitoring scope + #allow_dps_without_discovery = false +``` + +#### Requirements and Terminology + +Plugin Configuration utilizes [preset metric items references](https://www.alibabacloud.com/help/doc-detail/28619.htm?spm=a2c63.p38356.a3.2.389f233d0kPJn0) + +- `discovery_region` must be a valid Aliyun [Region](https://www.alibabacloud.com/help/doc-detail/40654.htm) value +- `period` must be a valid duration value +- `project` must be a preset project value +- `names` must be preset metric names +- `dimensions` must be preset dimension values + +### Measurements & Fields: + +Each Aliyun CMS Project monitored records a measurement with fields for each available Metric Statistic +Project and Metrics are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) + +- aliyuncms_{project} + - {metric}_average (metric Average value) + - {metric}_minimum (metric Minimum value) + - {metric}_maximum (metric Maximum value) + - {metric}_value (metric Value value) + +### Example Output: + +``` +$ ./telegraf --config telegraf.conf --input-filter aliyuncms --test +> aliyuncms_acs_slb_dashboard,instanceId=p-example,regionId=cn-hangzhou,userId=1234567890 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875 +``` \ No newline at end of file diff --git a/plugins/inputs/aliyuncms/aliyuncms.go b/plugins/inputs/aliyuncms/aliyuncms.go new file mode 100644 index 0000000000000..794f398f7dcb5 --- /dev/null +++ b/plugins/inputs/aliyuncms/aliyuncms.go @@ -0,0 +1,566 @@ +package aliyuncms + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers" + "github.com/aliyun/alibaba-cloud-sdk-go/services/cms" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/limiter" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/jmespath/go-jmespath" + "github.com/pkg/errors" +) + +const ( + description = "Pull Metric Statistics from Aliyun CMS" + sampleConfig = ` + ## Aliyun Credentials + ## Credentials are loaded in the following order + ## 1) Ram RoleArn credential + ## 2) AccessKey STS token credential + ## 3) AccessKey credential + ## 4) Ecs Ram Role credential + ## 5) RSA keypair credential + ## 6) Environment variables credential + ## 7) Instance metadata credential + + # access_key_id = "" + # access_key_secret = "" + # access_key_sts_token = "" + # role_arn = "" + # role_session_name = "" + # private_key = "" + # public_key_id = "" + # role_name = "" + + # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all + # metrics are made available to the 1 minute period. Some are collected at + # 3 minute, 5 minute, or larger intervals. + # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv + # Note that if a period is configured that is smaller than the minimum for a + # particular metric, that metric will not be returned by the Aliyun OpenAPI + # and will not be collected by Telegraf. + # + ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) + period = "5m" + + ## Collection Delay (required - must account for metrics availability via AliyunCMS API) + delay = "1m" + + ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid + ## gaps or overlap in pulled data + interval = "5m" + + ## Metric Statistic Project (required) + project = "acs_slb_dashboard" + + ## Maximum requests per second, default value is 200 + ratelimit = 200 + + ## Discovery regions set the scope for object discovery, the discovered info can be used to enrich + ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then + ## it will be reported on the start - foo example for 'acs_cdn' project: + ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) + ## Currently, discovery supported for the following projects: + ## - acs_ecs_dashboard + ## - acs_rds_dashboard + ## - acs_slb_dashboard + ## - acs_vpc_eip + ## + ## If not set, all regions would be covered, it can provide a significant load on API, so the recommendation here + ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm + discovery_regions = ["cn-hongkong"] + + ## how often the discovery API call executed (default 1m) + #discovery_interval = "1m" + + ## Metrics to Pull (Required) + [[inputs.aliyuncms.metrics]] + ## Metrics names to be requested, + ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + names = ["InstanceActiveConnection", "InstanceNewConnection"] + + ## Dimension filters for Metric (these are optional). + ## This allows to get additional metric dimension. If dimension is not specified it can be returned or + ## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + ## + ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled) + ## Values specified here would be added into the list of discovered objects. + ## You can specify either single dimension: + #dimensions = '{"instanceId": "p-example"}' + + ## Or you can specify several dimensions at once: + #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' + + ## Enrichment tags, can be added from discovery (if supported) + ## Notation is : + ## To figure out which fields are available, consult the Describe API per project. + ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers¶ms={}&tab=MOCK&lang=GO + #tag_query_path = [ + # "address:Address", + # "name:LoadBalancerName", + # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" + # ] + ## The following tags added by default: regionId (if discovery enabled), userId, instanceId. + + ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery + ## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage + ## of discovery scope vs monitoring scope + #allow_dps_without_discovery = false +` +) + +type ( + // AliyunCMS is aliyun cms config info. + AliyunCMS struct { + AccessKeyID string `toml:"access_key_id"` + AccessKeySecret string `toml:"access_key_secret"` + AccessKeyStsToken string `toml:"access_key_sts_token"` + RoleArn string `toml:"role_arn"` + RoleSessionName string `toml:"role_session_name"` + PrivateKey string `toml:"private_key"` + PublicKeyID string `toml:"public_key_id"` + RoleName string `toml:"role_name"` + + DiscoveryRegions []string `toml:"discovery_regions"` + DiscoveryInterval internal.Duration `toml:"discovery_interval"` + Period internal.Duration `toml:"period"` + Delay internal.Duration `toml:"delay"` + Project string `toml:"project"` + Metrics []*Metric `toml:"metrics"` + RateLimit int `toml:"ratelimit"` + + Log telegraf.Logger `toml:"-"` + + client aliyuncmsClient + windowStart time.Time + windowEnd time.Time + dt *discoveryTool + dimensionKey string + discoveryData map[string]interface{} + measurement string + } + + // Metric describes what metrics to get + Metric struct { + ObjectsFilter string `toml:"objects_filter"` + MetricNames []string `toml:"names"` + Dimensions string `toml:"dimensions"` //String representation of JSON dimensions + TagsQueryPath []string `toml:"tag_query_path"` + AllowDataPointWODiscoveryData bool `toml:"allow_dps_without_discovery"` //Allow data points without discovery data (if no discovery data found) + + dtLock sync.Mutex //Guard for discoveryTags & dimensions + discoveryTags map[string]map[string]string //Internal data structure that can enrich metrics with tags + dimensionsUdObj map[string]string + dimensionsUdArr []map[string]string //Parsed Dimesnsions JSON string (unmarshalled) + requestDimensions []map[string]string //this is the actual dimensions list that would be used in API request + requestDimensionsStr string //String representation of the above + + } + + // Dimension describe how to get metrics + Dimension struct { + Value string `toml:"value"` + } + + aliyuncmsClient interface { + DescribeMetricList(request *cms.DescribeMetricListRequest) (response *cms.DescribeMetricListResponse, err error) + } +) + +// SampleConfig implements telegraf.Inputs interface +func (s *AliyunCMS) SampleConfig() string { + return sampleConfig +} + +// Description implements telegraf.Inputs interface +func (s *AliyunCMS) Description() string { + return description +} + +func (s *AliyunCMS) Init() error { + + if s.Project == "" { + return errors.New("project is not set") + } + + var ( + roleSessionExpiration = 600 + sessionExpiration = 600 + ) + configuration := &providers.Configuration{ + AccessKeyID: s.AccessKeyID, + AccessKeySecret: s.AccessKeySecret, + AccessKeyStsToken: s.AccessKeyStsToken, + RoleArn: s.RoleArn, + RoleSessionName: s.RoleSessionName, + RoleSessionExpiration: &roleSessionExpiration, + PrivateKey: s.PrivateKey, + PublicKeyID: s.PublicKeyID, + SessionExpiration: &sessionExpiration, + RoleName: s.RoleName, + } + credentialProviders := []providers.Provider{ + providers.NewConfigurationCredentialProvider(configuration), + providers.NewEnvCredentialProvider(), + providers.NewInstanceMetadataProvider(), + } + credential, err := providers.NewChainProvider(credentialProviders).Retrieve() + if err != nil { + return errors.Errorf("failed to retrieve credential: %v", err) + } + s.client, err = cms.NewClientWithOptions("", sdk.NewConfig(), credential) + if err != nil { + return errors.Errorf("failed to create cms client: %v", err) + } + + //check metrics dimensions consistency + for _, metric := range s.Metrics { + if metric.Dimensions != "" { + metric.dimensionsUdObj = map[string]string{} + metric.dimensionsUdArr = []map[string]string{} + err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdObj) + if err != nil { + err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdArr) + return errors.Errorf("Can't parse dimensions (it is neither obj, nor array) %q :%v", metric.Dimensions, err) + } + } + } + + s.measurement = formatMeasurement(s.Project) + + //Init discovery... + if s.dt == nil { //Support for tests + s.dt, err = NewDiscoveryTool(s.DiscoveryRegions, s.Project, s.Log, credential, int(float32(s.RateLimit)*0.2), s.DiscoveryInterval.Duration) + if err != nil { + s.Log.Errorf("Discovery tool is not activated: %v", err) + s.dt = nil + return nil + } + } + + s.discoveryData, err = s.dt.getDiscoveryDataAllRegions(nil) + if err != nil { + s.Log.Errorf("Discovery tool is not activated: %v", err) + s.dt = nil + return nil + } + + s.Log.Infof("%d object(s) discovered...", len(s.discoveryData)) + + //Special setting for acs_oss project since the API differs + if s.Project == "acs_oss" { + s.dimensionKey = "BucketName" + } + + return nil +} + +func (s *AliyunCMS) Start(telegraf.Accumulator) error { + //Start periodic discovery process + if s.dt != nil { + s.dt.Start() + } + + return nil +} + +// Gather implements telegraf.Inputs interface +func (s *AliyunCMS) Gather(acc telegraf.Accumulator) error { + + s.updateWindow(time.Now()) + + // limit concurrency or we can easily exhaust user connection limit + lmtr := limiter.NewRateLimiter(s.RateLimit, time.Second) + defer lmtr.Stop() + + var wg sync.WaitGroup + for _, metric := range s.Metrics { + //Prepare internal structure with data from discovery + s.prepareTagsAndDimensions(metric) + wg.Add(len(metric.MetricNames)) + for _, metricName := range metric.MetricNames { + + <-lmtr.C + go func(metricName string, metric *Metric) { + defer wg.Done() + acc.AddError(s.gatherMetric(acc, metricName, metric)) + }(metricName, metric) + } + wg.Wait() + } + + return nil +} + +func (s *AliyunCMS) Stop() { + if s.dt != nil { + s.dt.Stop() + } +} + +func (s *AliyunCMS) updateWindow(relativeTo time.Time) { + + //https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.6.701.54025679zh6wiR + //The start and end times are executed in the mode of + //opening left and closing right, and startTime cannot be equal + //to or greater than endTime. + + windowEnd := relativeTo.Add(-s.Delay.Duration) + + if s.windowEnd.IsZero() { + // this is the first run, no window info, so just get a single period + s.windowStart = windowEnd.Add(-s.Period.Duration) + } else { + // subsequent window, start where last window left off + s.windowStart = s.windowEnd + } + + s.windowEnd = windowEnd +} + +// Gather given metric and emit error +func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, metric *Metric) error { + + req := cms.CreateDescribeMetricListRequest() + req.Period = strconv.FormatInt(int64(s.Period.Duration.Seconds()), 10) + req.MetricName = metricName + req.Length = "10000" + req.Namespace = s.Project + req.EndTime = strconv.FormatInt(s.windowEnd.Unix()*1000, 10) + req.StartTime = strconv.FormatInt(s.windowStart.Unix()*1000, 10) + req.Dimensions = metric.requestDimensionsStr + + for more := true; more; { + resp, err := s.client.DescribeMetricList(req) + if err != nil { + return errors.Errorf("failed to query metricName list: %v", err) + } else if resp.Code != "200" { + s.Log.Errorf("failed to query metricName list: %v", resp.Message) + break + } + + var datapoints []map[string]interface{} + if err = json.Unmarshal([]byte(resp.Datapoints), &datapoints); err != nil { + return errors.Errorf("failed to decode response datapoints: %v", err) + } + + if len(datapoints) == 0 { + s.Log.Debugf("No metrics returned from CMS, response msg: %s", resp.Message) + break + } + + NextDataPoint: + for _, datapoint := range datapoints { + fields := map[string]interface{}{} + datapointTime := int64(0) + tags := map[string]string{} + for key, value := range datapoint { + switch key { + case "instanceId", "BucketName": + tags[key] = value.(string) + if metric.discoveryTags != nil { //discovery can be not activated + + //Skipping data point if discovery data not exist + if _, ok := metric.discoveryTags[value.(string)]; !ok && + !metric.AllowDataPointWODiscoveryData { + s.Log.Warnf("Instance %q is not found in discovery, skipping monitoring datapoint...", value.(string)) + continue NextDataPoint + } + + for k, v := range metric.discoveryTags[value.(string)] { + tags[k] = v + } + } + case "userId": + tags[key] = value.(string) + case "timestamp": + datapointTime = int64(value.(float64)) / 1000 + default: + fields[formatField(metricName, key)] = value + } + } + //Log.logW("Datapoint time: %s, now: %s", time.Unix(datapointTime, 0).Format(time.RFC3339), time.Now().Format(time.RFC3339)) + acc.AddFields(s.measurement, fields, tags, time.Unix(datapointTime, 0)) + } + + req.NextToken = resp.NextToken + more = req.NextToken != "" + } + + return nil +} + +//Tag helper +func parseTag(tagSpec string, data interface{}) (string, string, error) { + + tagKey := tagSpec + queryPath := tagSpec + + //Split query path to tagKey and query path + if splitted := strings.Split(tagSpec, ":"); len(splitted) == 2 { + tagKey = splitted[0] + queryPath = splitted[1] + } + + tagRawValue, err := jmespath.Search(queryPath, data) + if err != nil { + return "", "", errors.Errorf("Can't query data from discovery data using query path %q: %v", + queryPath, err) + } + + if tagRawValue == nil { //Nothing found + return "", "", nil + } + + tagValue, ok := tagRawValue.(string) + if !ok { + return "", "", errors.Errorf("Tag value %v parsed by query %q is not a string value", + tagRawValue, queryPath) + } + + return tagKey, tagValue, nil +} + +func (s *AliyunCMS) prepareTagsAndDimensions(metric *Metric) { + var ( + newData bool + defaulTags = []string{"RegionId:RegionId"} + ) + + if s.dt == nil { //Discovery is not activated + return + } + + //Reading all data from buffered channel +L: + for { + select { + case s.discoveryData = <-s.dt.dataChan: + newData = true + continue + default: + break L + } + } + + if newData || //new data arrives, process it + len(metric.discoveryTags) == 0 { //or this is the first call + + metric.dtLock.Lock() + defer metric.dtLock.Unlock() + + if metric.discoveryTags == nil { + metric.discoveryTags = make(map[string]map[string]string, len(s.discoveryData)) + } + + metric.requestDimensions = nil //erasing + metric.requestDimensions = make([]map[string]string, 0, len(s.discoveryData)) + + //Preparing tags & dims... + for instanceId, elem := range s.discoveryData { + + //Start filing tags + //Remove old value if exist + delete(metric.discoveryTags, instanceId) + metric.discoveryTags[instanceId] = make(map[string]string, len(metric.TagsQueryPath)+len(defaulTags)) + + for _, tagQueryPath := range metric.TagsQueryPath { + + tagKey, tagValue, err := parseTag(tagQueryPath, elem) + if err != nil { + s.Log.Errorf("%v", err) + continue + } + if err == nil && tagValue == "" { //Nothing found + s.Log.Debugf("Data by query path %q: is not found, for instance %q", tagQueryPath, instanceId) + continue + } + + metric.discoveryTags[instanceId][tagKey] = tagValue + } + + //Adding default tags if not already there + for _, defaultTagQP := range defaulTags { + tagKey, tagValue, err := parseTag(defaultTagQP, elem) + + if err != nil { + s.Log.Errorf("%v", err) + continue + } + + if err == nil && tagValue == "" { //Nothing found + s.Log.Debugf("Data by query path %q: is not found, for instance %q", + defaultTagQP, instanceId) + continue + } + + metric.discoveryTags[instanceId][tagKey] = tagValue + } + + //Preparing dimensions (first adding dimensions that comes from discovery data) + metric.requestDimensions = append( + metric.requestDimensions, + map[string]string{s.dimensionKey: instanceId}) + + } + + //Get final dimension (need to get full lis of + //what was provided in config + what comes from discovery + if len(metric.dimensionsUdArr) != 0 { + metric.requestDimensions = append(metric.requestDimensions, metric.dimensionsUdArr...) + } + if len(metric.dimensionsUdObj) != 0 { + metric.requestDimensions = append(metric.requestDimensions, metric.dimensionsUdObj) + } + + //Unmarshalling to string + reqDim, err := json.Marshal(metric.requestDimensions) + if err != nil { + s.Log.Errorf("Can't marshal metric request dimensions %v :%v", + metric.requestDimensions, err) + metric.requestDimensionsStr = "" + } else { + metric.requestDimensionsStr = string(reqDim) + } + + } +} + +// Formatting helpers +func formatField(metricName string, statistic string) string { + if metricName == statistic { + statistic = "value" + } + return fmt.Sprintf("%s_%s", snakeCase(metricName), snakeCase(statistic)) +} + +func formatMeasurement(project string) string { + project = strings.Replace(project, "/", "_", -1) + project = snakeCase(project) + return fmt.Sprintf("aliyuncms_%s", project) +} + +func snakeCase(s string) string { + s = internal.SnakeCase(s) + s = strings.Replace(s, "__", "_", -1) + return s +} + +func init() { + inputs.Add("aliyuncms", func() telegraf.Input { + return &AliyunCMS{ + RateLimit: 200, + DiscoveryInterval: internal.Duration{Duration: time.Minute}, + dimensionKey: "instanceId", + } + }) +} diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go new file mode 100644 index 0000000000000..37430bbddf79d --- /dev/null +++ b/plugins/inputs/aliyuncms/aliyuncms_test.go @@ -0,0 +1,410 @@ +package aliyuncms + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" + "github.com/aliyun/alibaba-cloud-sdk-go/services/cms" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +const inputTitle = "inputs.aliyuncms" + +type mockGatherAliyunCMSClient struct{} + +func (m *mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) { + + resp := new(cms.DescribeMetricListResponse) + + //switch request.Metric { + switch request.MetricName { + case "InstanceActiveConnection": + resp.Code = "200" + resp.Period = "60" + resp.Datapoints = ` + [{ + "timestamp": 1490152860000, + "Maximum": 200, + "userId": "1234567898765432", + "Minimum": 100, + "instanceId": "i-abcdefgh123456", + "Average": 150, + "Value": 300 + }]` + case "ErrorCode": + resp.Code = "404" + resp.Message = "ErrorCode" + case "ErrorDatapoint": + resp.Code = "200" + resp.Period = "60" + resp.Datapoints = ` + [{ + "timestamp": 1490152860000, + "Maximum": 200, + "userId": "1234567898765432", + "Minimum": 100, + "instanceId": "i-abcdefgh123456", + "Average": 150, + }]` + case "EmptyDatapoint": + resp.Code = "200" + resp.Period = "60" + resp.Datapoints = `[]` + case "ErrorResp": + return nil, errors.New("error response") + } + return resp, nil +} + +type mockAliyunSDKCli struct { + resp *responses.CommonResponse +} + +func (m *mockAliyunSDKCli) ProcessCommonRequest(req *requests.CommonRequest) (response *responses.CommonResponse, err error) { + return m.resp, nil +} + +func getDiscoveryTool(project string, discoverRegions []string) (*discoveryTool, error) { + var ( + err error + credential auth.Credential + ) + + configuration := &providers.Configuration{ + AccessKeyID: "dummyKey", + AccessKeySecret: "dummySecret", + } + credentialProviders := []providers.Provider{ + providers.NewConfigurationCredentialProvider(configuration), + providers.NewEnvCredentialProvider(), + providers.NewInstanceMetadataProvider(), + } + credential, err = providers.NewChainProvider(credentialProviders).Retrieve() + if err != nil { + return nil, errors.Errorf("failed to retrieve credential: %v", err) + } + + dt, err := NewDiscoveryTool(discoverRegions, project, testutil.Logger{Name: inputTitle}, credential, 1, time.Minute*2) + + if err != nil { + return nil, errors.Errorf("Can't create discovery tool object: %v", err) + } + return dt, nil +} + +func getMockSdkCli(httpResp *http.Response) (mockAliyunSDKCli, error) { + resp := responses.NewCommonResponse() + if err := responses.Unmarshal(resp, httpResp, "JSON"); err != nil { + return mockAliyunSDKCli{}, errors.Errorf("Can't parse response: %v", err) + } + return mockAliyunSDKCli{resp: resp}, nil +} + +func TestPluginDefaults(t *testing.T) { + require.Equal(t, &AliyunCMS{RateLimit: 200, + DiscoveryInterval: internal.Duration{Duration: time.Minute}, + dimensionKey: "instanceId", + }, inputs.Inputs["aliyuncms"]()) +} + +func TestPluginInitialize(t *testing.T) { + var err error + + plugin := new(AliyunCMS) + plugin.DiscoveryRegions = []string{"cn-shanghai"} + plugin.dt, err = getDiscoveryTool("acs_slb_dashboard", plugin.DiscoveryRegions) + if err != nil { + t.Fatalf("Can't create discovery tool object: %v", err) + } + + plugin.Log = testutil.Logger{Name: inputTitle} + + httpResp := &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewBufferString( + `{ + "LoadBalancers": + { + "LoadBalancer": [ + {"LoadBalancerId":"bla"} + ] + }, + "TotalCount": 1, + "PageSize": 1, + "PageNumber": 1 + }`)), + } + mockCli, err := getMockSdkCli(httpResp) + if err != nil { + t.Fatalf("Can't create mock sdk cli: %v", err) + } + plugin.dt.cli = map[string]aliyunSdkClient{plugin.DiscoveryRegions[0]: &mockCli} + + tests := []struct { + name string + project string + accessKeyID string + accessKeySecret string + expectedErrorString string + }{ + { + name: "Empty project", + expectedErrorString: "project is not set", + }, + { + name: "Valid project", + project: "acs_slb_dashboard", + accessKeyID: "dummy", + accessKeySecret: "dummy", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + plugin.Project = tt.project + plugin.AccessKeyID = tt.accessKeyID + plugin.AccessKeySecret = tt.accessKeySecret + + if tt.expectedErrorString != "" { + require.EqualError(t, plugin.Init(), tt.expectedErrorString) + } else { + require.Equal(t, nil, plugin.Init()) + } + }) + } +} + +func TestUpdateWindow(t *testing.T) { + duration, _ := time.ParseDuration("1m") + internalDuration := internal.Duration{ + Duration: duration, + } + + plugin := &AliyunCMS{ + Project: "acs_slb_dashboard", + Period: internalDuration, + Delay: internalDuration, + Log: testutil.Logger{Name: inputTitle}, + } + + now := time.Now() + + require.True(t, plugin.windowEnd.IsZero()) + require.True(t, plugin.windowStart.IsZero()) + + plugin.updateWindow(now) + + newStartTime := plugin.windowEnd + + // initial window just has a single period + require.EqualValues(t, plugin.windowEnd, now.Add(-plugin.Delay.Duration)) + require.EqualValues(t, plugin.windowStart, now.Add(-plugin.Delay.Duration).Add(-plugin.Period.Duration)) + + now = time.Now() + plugin.updateWindow(now) + + // subsequent window uses previous end time as start time + require.EqualValues(t, plugin.windowEnd, now.Add(-plugin.Delay.Duration)) + require.EqualValues(t, plugin.windowStart, newStartTime) +} + +func TestGatherMetric(t *testing.T) { + + plugin := &AliyunCMS{ + Project: "acs_slb_dashboard", + client: new(mockGatherAliyunCMSClient), + measurement: formatMeasurement("acs_slb_dashboard"), + Log: testutil.Logger{Name: inputTitle}, + } + + metric := &Metric{ + MetricNames: []string{}, + Dimensions: `"instanceId": "i-abcdefgh123456"`, + } + + tests := []struct { + name string + metricName string + expectedErrorString string + }{ + { + name: "Datapoint with corrupted JSON", + metricName: "ErrorDatapoint", + expectedErrorString: `failed to decode response datapoints: invalid character '}' looking for beginning of object key string`, + }, + { + name: "General CMS response error", + metricName: "ErrorResp", + expectedErrorString: "failed to query metricName list: error response", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc telegraf.Accumulator + require.EqualError(t, plugin.gatherMetric(acc, tt.metricName, metric), tt.expectedErrorString) + + }) + } +} + +func TestGather(t *testing.T) { + + metric := &Metric{ + MetricNames: []string{}, + Dimensions: `{"instanceId": "i-abcdefgh123456"}`, + } + plugin := &AliyunCMS{ + AccessKeyID: "my_access_key_id", + AccessKeySecret: "my_access_key_secret", + Project: "acs_slb_dashboard", + Metrics: []*Metric{metric}, + RateLimit: 200, + measurement: formatMeasurement("acs_slb_dashboard"), + DiscoveryRegions: []string{"cn-shanghai"}, + client: new(mockGatherAliyunCMSClient), + Log: testutil.Logger{Name: inputTitle}, + } + + //test table: + tests := []struct { + name string + hasMeasurment bool + metricNames []string + expected []telegraf.Metric + }{ + { + name: "Empty data point", + metricNames: []string{"EmptyDatapoint"}, + expected: []telegraf.Metric{ + testutil.MustMetric( + "aliyuncms_acs_slb_dashboard", + nil, + nil, + time.Time{}), + }, + }, + { + name: "Data point with fields & tags", + hasMeasurment: true, + metricNames: []string{"InstanceActiveConnection"}, + expected: []telegraf.Metric{ + testutil.MustMetric( + "aliyuncms_acs_slb_dashboard", + map[string]string{ + "instanceId": "i-abcdefgh123456", + "userId": "1234567898765432", + }, + map[string]interface{}{ + "instance_active_connection_minimum": float64(100), + "instance_active_connection_maximum": float64(200), + "instance_active_connection_average": float64(150), + "instance_active_connection_value": float64(300), + }, + time.Unix(1490152860000, 0)), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + plugin.Metrics[0].MetricNames = tt.metricNames + require.Empty(t, acc.GatherError(plugin.Gather)) + require.Equal(t, acc.HasMeasurement("aliyuncms_acs_slb_dashboard"), tt.hasMeasurment) + if tt.hasMeasurment { + acc.AssertContainsTaggedFields(t, "aliyuncms_acs_slb_dashboard", tt.expected[0].Fields(), tt.expected[0].Tags()) + } + }) + } +} + +func TestGetDiscoveryDataAllRegions(t *testing.T) { + + //test table: + tests := []struct { + name string + project string + region string + httpResp *http.Response + discData map[string]interface{} + totalCount int + pageSize int + pageNumber int + expectedErrorString string + }{ + { + name: "No root key in discovery response", + project: "acs_slb_dashboard", + region: "cn-hongkong", + httpResp: &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewBufferString(`{}`)), + }, + totalCount: 0, + pageSize: 0, + pageNumber: 0, + expectedErrorString: `Didn't find root key "LoadBalancers" in discovery response`, + }, + { + name: "1 object discovered", + project: "acs_slb_dashboard", + region: "cn-hongkong", + httpResp: &http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(bytes.NewBufferString( + `{ + "LoadBalancers": + { + "LoadBalancer": [ + {"LoadBalancerId":"bla"} + ] + }, + "TotalCount": 1, + "PageSize": 1, + "PageNumber": 1 + }`)), + }, + discData: map[string]interface{}{"bla": map[string]interface{}{"LoadBalancerId": "bla"}}, + totalCount: 1, + pageSize: 1, + pageNumber: 1, + expectedErrorString: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dt, err := getDiscoveryTool(tt.project, []string{tt.region}) + if err != nil { + t.Fatalf("Can't create discovery tool object: %v", err) + } + + mockCli, err := getMockSdkCli(tt.httpResp) + if err != nil { + t.Fatalf("Can't create mock sdk cli: %v", err) + } + dt.cli = map[string]aliyunSdkClient{tt.region: &mockCli} + data, err := dt.getDiscoveryDataAllRegions(nil) + + require.Equal(t, tt.discData, data) + if err != nil { + require.EqualError(t, err, tt.expectedErrorString) + } + + }) + } + +} diff --git a/plugins/inputs/aliyuncms/discovery.go b/plugins/inputs/aliyuncms/discovery.go new file mode 100644 index 0000000000000..39e0044b6e77e --- /dev/null +++ b/plugins/inputs/aliyuncms/discovery.go @@ -0,0 +1,511 @@ +package aliyuncms + +import ( + "encoding/json" + "github.com/influxdata/telegraf" + "reflect" + "regexp" + "strconv" + "sync" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" + "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs" + "github.com/aliyun/alibaba-cloud-sdk-go/services/rds" + "github.com/aliyun/alibaba-cloud-sdk-go/services/slb" + "github.com/aliyun/alibaba-cloud-sdk-go/services/vpc" + "github.com/influxdata/telegraf/internal/limiter" + "github.com/pkg/errors" +) + +// https://www.alibabacloud.com/help/doc-detail/40654.htm?gclid=Cj0KCQjw4dr0BRCxARIsAKUNjWTAMfyVUn_Y3OevFBV3CMaazrhq0URHsgE7c0m0SeMQRKlhlsJGgIEaAviyEALw_wcB +var aliyunRegionList = []string{ + "cn-qingdao", + "cn-beijing", + "cn-zhangjiakou", + "cn-huhehaote", + "cn-hangzhou", + "cn-shanghai", + "cn-shenzhen", + "cn-heyuan", + "cn-chengdu", + "cn-hongkong", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-5", + "ap-south-1", + "ap-northeast-1", + "us-west-1", + "us-east-1", + "eu-central-1", + "eu-west-1", + "me-east-1", +} + +type discoveryRequest interface { +} + +type aliyunSdkClient interface { + ProcessCommonRequest(req *requests.CommonRequest) (response *responses.CommonResponse, err error) +} + +type discoveryTool struct { + req map[string]discoveryRequest //Discovery request (specific per object type) + rateLimit int //Rate limit for API query, as it is limited by API backend + reqDefaultPageSize int //Default page size while querying data from API (how many objects per request) + cli map[string]aliyunSdkClient //API client, which perform discovery request + + respRootKey string //Root key in JSON response where to look for discovery data + respObjectIdKey string //Key in element of array under root key, that stores object ID + //for ,majority of cases it would be InstanceId, for OSS it is BucketName. This key is also used in dimension filtering// ) + wg sync.WaitGroup //WG for primary discovery goroutine + interval time.Duration //Discovery interval + done chan bool //Done channel to stop primary discovery goroutine + dataChan chan map[string]interface{} //Discovery data + lg telegraf.Logger //Telegraf logger (should be provided) +} + +//getRpcReqFromDiscoveryRequest - utility function to map between aliyun request primitives +//discoveryRequest represents different type of discovery requests +func getRpcReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, error) { + + if reflect.ValueOf(req).Type().Kind() != reflect.Ptr || + reflect.ValueOf(req).IsNil() { + return nil, errors.Errorf("Not expected type of the discovery request object: %q, %q", reflect.ValueOf(req).Type(), reflect.ValueOf(req).Kind()) + } + + ptrV := reflect.Indirect(reflect.ValueOf(req)) + + for i := 0; i < ptrV.NumField(); i++ { + + if ptrV.Field(i).Type().String() == "*requests.RpcRequest" { + if !ptrV.Field(i).CanInterface() { + return nil, errors.Errorf("Can't get interface of %v", ptrV.Field(i)) + } + + rpcReq, ok := ptrV.Field(i).Interface().(*requests.RpcRequest) + + if !ok { + return nil, errors.Errorf("Cant convert interface of %v to '*requests.RpcRequest' type", ptrV.Field(i).Interface()) + } + + return rpcReq, nil + } + } + return nil, errors.Errorf("Didn't find *requests.RpcRequest embedded struct in %q", ptrV.Type()) +} + +//NewDiscoveryTool function returns discovery tool object. +//The object is used to periodically get data about aliyun objects and send this +//data into channel. The intention is to enrich reported metrics with discovery data. +//Discovery is supported for a limited set of object types (defined by project) and can be extended in future. +//Discovery can be limited by region if not set, then all regions is queried. +//Request against API can inquire additional costs, consult with aliyun API documentation. +func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, credential auth.Credential, rateLimit int, discoveryInterval time.Duration) (*discoveryTool, error) { + var ( + dscReq = map[string]discoveryRequest{} + cli = map[string]aliyunSdkClient{} + parseRootKey = regexp.MustCompile(`Describe(.*)`) + responseRootKey string + responseObjectIdKey string + err error + noDiscoverySupportErr = errors.Errorf("no discovery support for project %q", project) + ) + + if len(regions) == 0 { + regions = aliyunRegionList + lg.Warnf("Discovery regions are not provided! Data will be queried across %d regions!", len(aliyunRegionList)) + } + + if rateLimit == 0 { //Can be a rounding case + rateLimit = 1 + } + + for _, region := range regions { + switch project { + case "acs_ecs_dashboard": + dscReq[region] = ecs.CreateDescribeInstancesRequest() + responseObjectIdKey = "InstanceId" + case "acs_rds_dashboard": + dscReq[region] = rds.CreateDescribeDBInstancesRequest() + responseObjectIdKey = "DBInstanceId" + case "acs_slb_dashboard": + dscReq[region] = slb.CreateDescribeLoadBalancersRequest() + responseObjectIdKey = "LoadBalancerId" + case "acs_memcache": + return nil, noDiscoverySupportErr + case "acs_ocs": + return nil, noDiscoverySupportErr + case "acs_oss": + //oss is really complicated + //it is on it's own format + return nil, noDiscoverySupportErr + + //As a possible solution we can + //mimic to request format supported by oss + + //req := DescribeLOSSRequest{ + // RpcRequest: &requests.RpcRequest{}, + //} + //req.InitWithApiInfo("oss", "2014-08-15", "DescribeDBInstances", "oss", "openAPI") + case "acs_vpc_eip": + dscReq[region] = vpc.CreateDescribeEipAddressesRequest() + responseObjectIdKey = "AllocationId" + case "acs_kvstore": + return nil, noDiscoverySupportErr + case "acs_mns_new": + return nil, noDiscoverySupportErr + case "acs_cdn": + //API replies are in its own format. + return nil, noDiscoverySupportErr + case "acs_polardb": + return nil, noDiscoverySupportErr + case "acs_gdb": + return nil, noDiscoverySupportErr + case "acs_ads": + return nil, noDiscoverySupportErr + case "acs_mongodb": + return nil, noDiscoverySupportErr + case "acs_express_connect": + return nil, noDiscoverySupportErr + case "acs_fc": + return nil, noDiscoverySupportErr + case "acs_nat_gateway": + return nil, noDiscoverySupportErr + case "acs_sls_dashboard": + return nil, noDiscoverySupportErr + case "acs_containerservice_dashboard": + return nil, noDiscoverySupportErr + case "acs_vpn": + return nil, noDiscoverySupportErr + case "acs_bandwidth_package": + return nil, noDiscoverySupportErr + case "acs_cen": + return nil, noDiscoverySupportErr + case "acs_ens": + return nil, noDiscoverySupportErr + case "acs_opensearch": + return nil, noDiscoverySupportErr + case "acs_scdn": + return nil, noDiscoverySupportErr + case "acs_drds": + return nil, noDiscoverySupportErr + case "acs_iot": + return nil, noDiscoverySupportErr + case "acs_directmail": + return nil, noDiscoverySupportErr + case "acs_elasticsearch": + return nil, noDiscoverySupportErr + case "acs_ess_dashboard": + return nil, noDiscoverySupportErr + case "acs_streamcompute": + return nil, noDiscoverySupportErr + case "acs_global_acceleration": + return nil, noDiscoverySupportErr + case "acs_hitsdb": + return nil, noDiscoverySupportErr + case "acs_kafka": + return nil, noDiscoverySupportErr + case "acs_openad": + return nil, noDiscoverySupportErr + case "acs_pcdn": + return nil, noDiscoverySupportErr + case "acs_dcdn": + return nil, noDiscoverySupportErr + case "acs_petadata": + return nil, noDiscoverySupportErr + case "acs_videolive": + return nil, noDiscoverySupportErr + case "acs_hybriddb": + return nil, noDiscoverySupportErr + case "acs_adb": + return nil, noDiscoverySupportErr + case "acs_mps": + return nil, noDiscoverySupportErr + case "acs_maxcompute_prepay": + return nil, noDiscoverySupportErr + case "acs_hdfs": + return nil, noDiscoverySupportErr + case "acs_ddh": + return nil, noDiscoverySupportErr + case "acs_hbr": + return nil, noDiscoverySupportErr + case "acs_hdr": + return nil, noDiscoverySupportErr + case "acs_cds": + return nil, noDiscoverySupportErr + default: + return nil, errors.Errorf("project %q is not recognized by discovery...", project) + } + + cli[region], err = sdk.NewClientWithOptions(region, sdk.NewConfig(), credential) + if err != nil { + return nil, err + } + } + + if len(dscReq) == 0 || len(cli) == 0 { + return nil, errors.Errorf("Can't build discovery request for project: %q,\nregions: %v", project, regions) + } + + //Getting response root key (if not set already). This is to be able to parse discovery responses + //As they differ per object type + //Discovery requests are of the same type per every region, so pick the first one + rpcReq, err := getRpcReqFromDiscoveryRequest(dscReq[regions[0]]) + //This means that the discovery request is not of proper type/kind + if err != nil { + return nil, errors.Errorf("Can't parse rpc request object from discovery request %v", dscReq[regions[0]]) + } + + /* + The action name is of the following format Describe, + For example: DescribeLoadBalancers -> for SLB project, or DescribeInstances for ECS project + We will use it to construct root key name in the discovery API response. + It follows the following logic: for 'DescribeLoadBalancers' action in discovery request we get the response + in json of the following structure: + { + ... + "LoadBalancers": { + "LoadBalancer": [ here comes objects, one per every instance] + } + } + As we can see, the root key is a part of action name, except first word (part) 'Describe' + */ + result := parseRootKey.FindStringSubmatch(rpcReq.GetActionName()) + if result == nil || len(result) != 2 { + return nil, errors.Errorf("Can't parse the discovery response root key from request action name %q", rpcReq.GetActionName()) + } + responseRootKey = result[1] + + return &discoveryTool{ + req: dscReq, + cli: cli, + respRootKey: responseRootKey, + respObjectIdKey: responseObjectIdKey, + rateLimit: rateLimit, + interval: discoveryInterval, + reqDefaultPageSize: 20, + dataChan: make(chan map[string]interface{}, 1), + lg: lg, + }, nil +} + +func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) (discData []interface{}, totalCount int, pageSize int, pageNumber int, err error) { + var ( + fullOutput = map[string]interface{}{} + data []byte + foundDataItem bool + foundRootKey bool + ) + + data = resp.GetHttpContentBytes() + if data == nil { //No data + return nil, 0, 0, 0, errors.Errorf("No data in response to be parsed") + } + + err = json.Unmarshal(data, &fullOutput) + if err != nil { + return nil, 0, 0, 0, errors.Errorf("Can't parse JSON from discovery response: %v", err) + } + + for key, val := range fullOutput { + switch key { + case dt.respRootKey: + foundRootKey = true + rootKeyVal, ok := val.(map[string]interface{}) + if !ok { + return nil, 0, 0, 0, errors.Errorf("Content of root key %q, is not an object: %v", key, val) + } + + //It should contain the array with discovered data + for _, item := range rootKeyVal { + + if discData, foundDataItem = item.([]interface{}); foundDataItem { + break + } + } + if !foundDataItem { + return nil, 0, 0, 0, errors.Errorf("Didn't find array item in root key %q", key) + } + case "TotalCount": + totalCount = int(val.(float64)) + case "PageSize": + pageSize = int(val.(float64)) + case "PageNumber": + pageNumber = int(val.(float64)) + } + + } + if !foundRootKey { + return nil, 0, 0, 0, errors.Errorf("Didn't find root key %q in discovery response", dt.respRootKey) + } + + return +} + +func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.CommonRequest, limiter chan bool) (map[string]interface{}, error) { + var ( + err error + resp *responses.CommonResponse + data []interface{} + discoveryData []interface{} + totalCount int + pageNumber int + ) + defer delete(req.QueryParams, "PageNumber") + + for { + if limiter != nil { + <-limiter //Rate limiting + } + + resp, err = cli.ProcessCommonRequest(req) + if err != nil { + return nil, err + } + + data, totalCount, _, pageNumber, err = dt.parseDiscoveryResponse(resp) + if err != nil { + return nil, err + } + discoveryData = append(discoveryData, data...) + + //Pagination + pageNumber++ + req.QueryParams["PageNumber"] = strconv.Itoa(pageNumber) + + if len(discoveryData) == totalCount { //All data received + //Map data to appropriate shape before return + preparedData := map[string]interface{}{} + + for _, raw := range discoveryData { + if elem, ok := raw.(map[string]interface{}); ok { + if objectId, ok := elem[dt.respObjectIdKey].(string); ok { + preparedData[objectId] = elem + } + } else { + return nil, errors.Errorf("Can't parse input data element, not a map[string]interface{} type") + } + + } + + return preparedData, nil + } + + } + +} + +func (dt *discoveryTool) getDiscoveryDataAllRegions(limiter chan bool) (map[string]interface{}, error) { + var ( + data map[string]interface{} + resultData = map[string]interface{}{} + ) + + for region, cli := range dt.cli { + //Building common request, as the code below is the same no matter + //which aliyun object type (project) is used + dscReq, ok := dt.req[region] + if !ok { + return nil, errors.Errorf("Error building common discovery request: not valid region %q", region) + } + + rpcReq, err := getRpcReqFromDiscoveryRequest(dscReq) + if err != nil { + return nil, err + } + + commonRequest := requests.NewCommonRequest() + commonRequest.Method = rpcReq.GetMethod() + commonRequest.Product = rpcReq.GetProduct() + commonRequest.Domain = rpcReq.GetDomain() + commonRequest.Version = rpcReq.GetVersion() + commonRequest.Scheme = rpcReq.GetScheme() + commonRequest.ApiName = rpcReq.GetActionName() + commonRequest.QueryParams = rpcReq.QueryParams + commonRequest.QueryParams["PageSize"] = strconv.Itoa(dt.reqDefaultPageSize) + commonRequest.TransToAcsRequest() + + //Get discovery data using common request + data, err = dt.getDiscoveryData(cli, commonRequest, limiter) + if err != nil { + return nil, err + } + + for k, v := range data { + resultData[k] = v + } + } + return resultData, nil +} + +func (dt *discoveryTool) Start() { + var ( + err error + data map[string]interface{} + lastData map[string]interface{} + ) + + //Initializing channel + dt.done = make(chan bool) + + dt.wg.Add(1) + go func() { + defer dt.wg.Done() + + ticker := time.NewTicker(dt.interval) + defer ticker.Stop() + + lmtr := limiter.NewRateLimiter(dt.rateLimit, time.Second) + defer lmtr.Stop() + + for { + select { + case <-dt.done: + return + case <-ticker.C: + + data, err = dt.getDiscoveryDataAllRegions(lmtr.C) + if err != nil { + dt.lg.Errorf("Can't get discovery data: %v", err) + continue + } + + if !reflect.DeepEqual(data, lastData) { + lastData = nil + lastData = map[string]interface{}{} + for k, v := range data { + lastData[k] = v + } + + //send discovery data in blocking mode + dt.dataChan <- data + } + + } + } + }() +} + +func (dt *discoveryTool) Stop() { + + close(dt.done) + + //Shutdown timer + timer := time.NewTimer(time.Second * 3) + defer timer.Stop() +L: + for { //Unblock go routine by reading from dt.dataChan + select { + case <-timer.C: + break L + case <-dt.dataChan: + } + } + + dt.wg.Wait() +} diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index d5eeead0a8bb6..9b22cd442a04c 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -4,6 +4,7 @@ import ( //Blank imports for plugins to register themselves _ "github.com/influxdata/telegraf/plugins/inputs/activemq" _ "github.com/influxdata/telegraf/plugins/inputs/aerospike" + _ "github.com/influxdata/telegraf/plugins/inputs/aliyuncms" _ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/apache" _ "github.com/influxdata/telegraf/plugins/inputs/apcupsd" From 67904b07ee862b7830d4c64d79091d48d120461c Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 11 Mar 2021 16:32:28 -0500 Subject: [PATCH 289/761] update external plugin docs --- docs/EXTERNAL_PLUGINS.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/EXTERNAL_PLUGINS.md b/docs/EXTERNAL_PLUGINS.md index abef068f50f48..83759ed72bb63 100644 --- a/docs/EXTERNAL_PLUGINS.md +++ b/docs/EXTERNAL_PLUGINS.md @@ -8,6 +8,8 @@ more flexibility compared to internal Telegraf plugins. - External plugins can access to libraries not written in Go - Utilize licensed software that isn't available to the open source community - Can include large dependencies that would otherwise bloat Telegraf +- You don't need to wait on the Telegraf team to publish your plugin and start working with it. +- using the [shim](/plugins/common/shim) you can easily convert plugins between internal and external use ### External Plugin Guidelines The guidelines of writing external plugins would follow those for our general [input](/docs/INPUTS.md), @@ -59,10 +61,9 @@ This is a guide to help you set up your plugin to use it with `execd` [openvpn](https://github.com/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](https://github.com/vipinvkmenon/awsalarms#installation) for examples. Include the following steps: 1. How to download the release package for your platform or how to clone the binary for your external plugin - 1. The commands to unpack or build your binary + 1. The commands to build your binary 1. Location to edit your `telegraf.conf` 1. Configuration to run your external plugin with [inputs.execd](/plugins/inputs/execd), [processors.execd](/plugins/processors/execd) or [outputs.execd](/plugins/outputs/execd) - 1. Note that restart or reload of Telegraf is required 1. Submit your plugin by opening a PR to add your external plugin to the [/EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) list. Please include the plugin name, link to the plugin repository and a short description of the plugin. From b263c1fedf28fd18cb25300175d6893cb6f0ed6f Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 11 Mar 2021 16:35:26 -0500 Subject: [PATCH 290/761] accurate state documentation for Starlark --- plugins/processors/starlark/README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index c14c3e8bca2ed..2922fc42ecb5a 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -165,9 +165,12 @@ def apply(metric): **How can I save values across multiple calls to the script?** -A shared global dictionary named `state` exists, this can be used by the `apply` function. +Telegraf freezes the global scope, which prevents it from being modified, except for a special shared global dictionary +named `state`, this can be used by the `apply` function. See an example of this in [compare with previous metric](/plugins/processors/starlark/testdata/compare_metrics.star) +Other than the `state` variable, attempting to modify the global scope will fail with an error. + **How to manage errors that occur in the apply function?** In case you need to call some code that may return an error, you can delegate the call From 21ca31c48182cd640d086c815de6baf8c4684c77 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 11 Mar 2021 15:48:16 -0600 Subject: [PATCH 291/761] Mute linting errors when running against master branch (#8977) * Mute errors when running on master branch * One config, with conditional statement * improve names --- .github/workflows/golangci-lint.yml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index e4154182649a0..651b838fda060 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -11,7 +11,8 @@ on: - cron: '0 16 * * *' jobs: golangci-pr: - name: lint + if: github.ref != 'refs/heads/master' + name: lint-pr-changes runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -21,3 +22,15 @@ jobs: version: v1.38 only-new-issues: true args: --timeout=5m0s + golangci-master: + if: github.ref == 'refs/heads/master' + name: lint-master-all + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: golangci-lint + uses: golangci/golangci-lint-action@v2 + with: + version: v1.38 + only-new-issues: true + args: --timeout=5m0s --issues-exit-code=0 From 9db703deea108dc16498d71028207b7554ca34c2 Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Thu, 11 Mar 2021 14:11:16 -0800 Subject: [PATCH 292/761] feat: Adding Plex Webhooks external plugin (#8898) --- EXTERNAL_PLUGINS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index 8f361c62915f9..ffefd065c4e11 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -9,6 +9,7 @@ Pull requests welcome. - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open Hardware Monitor](http://openhardwaremonitor.org) +- [plex](https://github.com/russorat/telegraf-webhooks-plex) - Listens for events from Plex Media Server [Webhooks](https://support.plex.tv/articles/115002267687-webhooks/). - [rand](https://github.com/ssoroka/rand) - Generate random numbers - [SMCIPMITool](https://github.com/jhpope/smc_ipmi) - Python script to parse the output of [SMCIPMITool](https://www.supermicro.com/en/solutions/management-software/ipmi-utilities) into [InfluxDB line protocol](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/). - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. From f32230259cbf63e3141dc4c473045a01dd962f09 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 11 Mar 2021 19:05:50 -0600 Subject: [PATCH 293/761] Update to 1.16.1 (#8978) --- .circleci/config.yml | 2 +- Makefile | 4 ++-- go.mod | 1 - go.sum | 8 -------- scripts/alpine.docker | 2 +- scripts/buster.docker | 2 +- scripts/ci-1.16.docker | 2 +- 7 files changed, 6 insertions(+), 15 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 76739fdd14abb..c4d0f8f4797f8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,7 +12,7 @@ executors: go-1_16: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.16.0' + - image: 'quay.io/influxdb/telegraf-ci:1.16.1' environment: GOFLAGS: -p=8 mac: diff --git a/Makefile b/Makefile index 24004ae27d1aa..d41cfe3646074 100644 --- a/Makefile +++ b/Makefile @@ -185,8 +185,8 @@ ci-1.15: .PHONY: ci-1.16 ci-1.16: - docker build -t quay.io/influxdb/telegraf-ci:1.16.0 - < scripts/ci-1.16.docker - docker push quay.io/influxdb/telegraf-ci:1.16.0 + docker build -t quay.io/influxdb/telegraf-ci:1.16.1 - < scripts/ci-1.16.docker + docker push quay.io/influxdb/telegraf-ci:1.16.1 .PHONY: install install: $(buildbin) diff --git a/go.mod b/go.mod index ff29eb2acf816..e0737bb89d167 100644 --- a/go.mod +++ b/go.mod @@ -130,7 +130,6 @@ require ( github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect github.com/tidwall/gjson v1.6.0 github.com/tinylib/msgp v1.1.5 github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect diff --git a/go.sum b/go.sum index 7ec88378d2ad2..44312d66131d5 100644 --- a/go.sum +++ b/go.sum @@ -22,7 +22,6 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+ cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= @@ -338,7 +337,6 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -445,9 +443,7 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea h1:g2k+8WR7cHch4g0tBDhfiEvAp7fXxTNBiD1oC1Oxj3E= github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= -github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI= github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= -github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b h1:Rrp0ByJXEjhREMPGTt3aWYjoIsUGCbt21ekbeJcTWv0= github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= @@ -649,7 +645,6 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff h1:JcVn27VGCEwd33jyNj+3IqEbOmzAX9f9LILt3SoGPHU= github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= @@ -673,8 +668,6 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= -github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg= -github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= @@ -929,7 +922,6 @@ golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:U gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.7.0 h1:Hdks0L0hgznZLG9nzXb8vZ0rRvqNvAcgAp84y7Mwkgw= gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= diff --git a/scripts/alpine.docker b/scripts/alpine.docker index b97bd9628c45e..ecf6c15573ea5 100644 --- a/scripts/alpine.docker +++ b/scripts/alpine.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.0 as builder +FROM golang:1.16.1 as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/buster.docker b/scripts/buster.docker index e868ad4d7237c..465d367f5b482 100644 --- a/scripts/buster.docker +++ b/scripts/buster.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.0-buster as builder +FROM golang:1.16.1-buster as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/ci-1.16.docker b/scripts/ci-1.16.docker index b4866457c29cf..a8e05b54600f1 100644 --- a/scripts/ci-1.16.docker +++ b/scripts/ci-1.16.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.0 +FROM golang:1.16.1 RUN chmod -R 755 "$GOPATH" From c905116ade5260c2c839406d23dc2acb2be83387 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Thu, 11 Mar 2021 21:53:32 -0500 Subject: [PATCH 294/761] Add 'field name' config option to customize field name when using Value parser. (#8979) --- config/config.go | 4 +- plugins/inputs/exec/exec_test.go | 6 +- .../kafka_consumer/kafka_consumer_test.go | 6 +- plugins/parsers/registry.go | 12 +- plugins/parsers/value/parser.go | 16 ++- plugins/parsers/value/parser_test.go | 111 +++++------------- 6 files changed, 61 insertions(+), 94 deletions(-) diff --git a/config/config.go b/config/config.go index 58483428adb36..be64fba4bcf48 100644 --- a/config/config.go +++ b/config/config.go @@ -1342,6 +1342,8 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys) + c.getFieldString(tbl, "value_field_name", &pc.ValueFieldName) + //for XML parser if node, ok := tbl.Fields["xml"]; ok { if subtbls, ok := node.([]*ast.Table); ok { @@ -1468,7 +1470,7 @@ func (c *Config) missingTomlField(typ reflect.Type, key string) error { "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", "tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates", - "wavefront_source_override", "wavefront_use_strict", "xml": + "value_field_name", "wavefront_source_override", "wavefront_use_strict", "xml": // ignore fields that are common to all plugins. default: diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index 5e614f61dd9ec..ba1bc2078c9f7 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -160,7 +160,7 @@ func TestCommandError(t *testing.T) { } func TestExecCommandWithGlob(t *testing.T) { - parser, _ := parsers.NewValueParser("metric", "string", nil) + parser, _ := parsers.NewValueParser("metric", "string", "", nil) e := NewExec() e.Commands = []string{"/bin/ech* metric_value"} e.SetParser(parser) @@ -176,7 +176,7 @@ func TestExecCommandWithGlob(t *testing.T) { } func TestExecCommandWithoutGlob(t *testing.T) { - parser, _ := parsers.NewValueParser("metric", "string", nil) + parser, _ := parsers.NewValueParser("metric", "string", "", nil) e := NewExec() e.Commands = []string{"/bin/echo metric_value"} e.SetParser(parser) @@ -192,7 +192,7 @@ func TestExecCommandWithoutGlob(t *testing.T) { } func TestExecCommandWithoutGlobAndPath(t *testing.T) { - parser, _ := parsers.NewValueParser("metric", "string", nil) + parser, _ := parsers.NewValueParser("metric", "string", "", nil) e := NewExec() e.Commands = []string{"echo metric_value"} e.SetParser(parser) diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index d7804a01b87e1..90c362b9c01ea 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -257,7 +257,7 @@ func (c *FakeConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage { func TestConsumerGroupHandler_Lifecycle(t *testing.T) { acc := &testutil.Accumulator{} - parser := &value.ValueParser{MetricName: "cpu", DataType: "int"} + parser := value.NewValueParser("cpu", "int", "", nil) cg := NewConsumerGroupHandler(acc, 1, parser) ctx, cancel := context.WithCancel(context.Background()) @@ -282,7 +282,7 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) { func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { acc := &testutil.Accumulator{} - parser := &value.ValueParser{MetricName: "cpu", DataType: "int"} + parser := value.NewValueParser("cpu", "int", "", nil) cg := NewConsumerGroupHandler(acc, 1, parser) ctx, cancel := context.WithCancel(context.Background()) @@ -392,7 +392,7 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { acc := &testutil.Accumulator{} - parser := &value.ValueParser{MetricName: "cpu", DataType: "int"} + parser := value.NewValueParser("cpu", "int", "", nil) cg := NewConsumerGroupHandler(acc, 1, parser) cg.MaxMessageLen = tt.maxMessageLen cg.TopicTag = tt.topicTag diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 44cde6c85b7ba..d01b0ee676565 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -152,6 +152,9 @@ type Config struct { // FormData configuration FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys"` + // Value configuration + ValueFieldName string `toml:"value_field_name"` + // XML configuration XMLConfig []XMLConfig `toml:"xml"` } @@ -182,7 +185,7 @@ func NewParser(config *Config) (Parser, error) { ) case "value": parser, err = NewValueParser(config.MetricName, - config.DataType, config.DefaultTags) + config.DataType, config.ValueFieldName, config.DefaultTags) case "influx": parser, err = NewInfluxParser() case "nagios": @@ -291,13 +294,10 @@ func NewGraphiteParser( func NewValueParser( metricName string, dataType string, + fieldName string, defaultTags map[string]string, ) (Parser, error) { - return &value.ValueParser{ - MetricName: metricName, - DataType: dataType, - DefaultTags: defaultTags, - }, nil + return value.NewValueParser(metricName, dataType, fieldName, defaultTags), nil } func NewCollectdParser( diff --git a/plugins/parsers/value/parser.go b/plugins/parsers/value/parser.go index a495033c47b28..95c87a2eae982 100644 --- a/plugins/parsers/value/parser.go +++ b/plugins/parsers/value/parser.go @@ -15,6 +15,7 @@ type ValueParser struct { MetricName string DataType string DefaultTags map[string]string + FieldName string } func (v *ValueParser) Parse(buf []byte) ([]telegraf.Metric, error) { @@ -46,7 +47,7 @@ func (v *ValueParser) Parse(buf []byte) ([]telegraf.Metric, error) { return nil, err } - fields := map[string]interface{}{"value": value} + fields := map[string]interface{}{v.FieldName: value} metric, err := metric.New(v.MetricName, v.DefaultTags, fields, time.Now().UTC()) if err != nil { @@ -73,3 +74,16 @@ func (v *ValueParser) ParseLine(line string) (telegraf.Metric, error) { func (v *ValueParser) SetDefaultTags(tags map[string]string) { v.DefaultTags = tags } + +func NewValueParser(metricName, dataType, fieldName string, defaultTags map[string]string) *ValueParser { + if fieldName == "" { + fieldName = "value" + } + + return &ValueParser{ + MetricName: metricName, + DataType: dataType, + DefaultTags: defaultTags, + FieldName: fieldName, + } +} diff --git a/plugins/parsers/value/parser_test.go b/plugins/parsers/value/parser_test.go index 667fb108cfbfb..5a74085d82980 100644 --- a/plugins/parsers/value/parser_test.go +++ b/plugins/parsers/value/parser_test.go @@ -7,10 +7,7 @@ import ( ) func TestParseValidValues(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) metrics, err := parser.Parse([]byte("55")) assert.NoError(t, err) assert.Len(t, metrics, 1) @@ -20,10 +17,7 @@ func TestParseValidValues(t *testing.T) { }, metrics[0].Fields()) assert.Equal(t, map[string]string{}, metrics[0].Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "float", - } + parser = NewValueParser("value_test", "float", "", nil) metrics, err = parser.Parse([]byte("64")) assert.NoError(t, err) assert.Len(t, metrics, 1) @@ -33,10 +27,7 @@ func TestParseValidValues(t *testing.T) { }, metrics[0].Fields()) assert.Equal(t, map[string]string{}, metrics[0].Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "string", - } + parser = NewValueParser("value_test", "string", "", nil) metrics, err = parser.Parse([]byte("foobar")) assert.NoError(t, err) assert.Len(t, metrics, 1) @@ -46,10 +37,7 @@ func TestParseValidValues(t *testing.T) { }, metrics[0].Fields()) assert.Equal(t, map[string]string{}, metrics[0].Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "boolean", - } + parser = NewValueParser("value_test", "boolean", "", nil) metrics, err = parser.Parse([]byte("true")) assert.NoError(t, err) assert.Len(t, metrics, 1) @@ -61,10 +49,7 @@ func TestParseValidValues(t *testing.T) { } func TestParseMultipleValues(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) metrics, err := parser.Parse([]byte(`55 45 223 @@ -80,11 +65,19 @@ func TestParseMultipleValues(t *testing.T) { assert.Equal(t, map[string]string{}, metrics[0].Tags()) } +func TestParseCustomFieldName(t *testing.T) { + parser := NewValueParser("value_test", "integer", "", nil) + parser.FieldName = "penguin" + metrics, err := parser.Parse([]byte(`55`)) + + assert.NoError(t, err) + assert.Equal(t, map[string]interface{}{ + "penguin": int64(55), + }, metrics[0].Fields()) +} + func TestParseLineValidValues(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) metric, err := parser.ParseLine("55") assert.NoError(t, err) assert.Equal(t, "value_test", metric.Name()) @@ -93,10 +86,7 @@ func TestParseLineValidValues(t *testing.T) { }, metric.Fields()) assert.Equal(t, map[string]string{}, metric.Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "float", - } + parser = NewValueParser("value_test", "float", "", nil) metric, err = parser.ParseLine("64") assert.NoError(t, err) assert.Equal(t, "value_test", metric.Name()) @@ -105,10 +95,7 @@ func TestParseLineValidValues(t *testing.T) { }, metric.Fields()) assert.Equal(t, map[string]string{}, metric.Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "string", - } + parser = NewValueParser("value_test", "string", "", nil) metric, err = parser.ParseLine("foobar") assert.NoError(t, err) assert.Equal(t, "value_test", metric.Name()) @@ -117,10 +104,7 @@ func TestParseLineValidValues(t *testing.T) { }, metric.Fields()) assert.Equal(t, map[string]string{}, metric.Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "boolean", - } + parser = NewValueParser("value_test", "boolean", "", nil) metric, err = parser.ParseLine("true") assert.NoError(t, err) assert.Equal(t, "value_test", metric.Name()) @@ -131,59 +115,38 @@ func TestParseLineValidValues(t *testing.T) { } func TestParseInvalidValues(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) metrics, err := parser.Parse([]byte("55.0")) assert.Error(t, err) assert.Len(t, metrics, 0) - parser = ValueParser{ - MetricName: "value_test", - DataType: "float", - } + parser = NewValueParser("value_test", "float", "", nil) metrics, err = parser.Parse([]byte("foobar")) assert.Error(t, err) assert.Len(t, metrics, 0) - parser = ValueParser{ - MetricName: "value_test", - DataType: "boolean", - } + parser = NewValueParser("value_test", "boolean", "", nil) metrics, err = parser.Parse([]byte("213")) assert.Error(t, err) assert.Len(t, metrics, 0) } func TestParseLineInvalidValues(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) _, err := parser.ParseLine("55.0") assert.Error(t, err) - parser = ValueParser{ - MetricName: "value_test", - DataType: "float", - } + parser = NewValueParser("value_test", "float", "", nil) _, err = parser.ParseLine("foobar") assert.Error(t, err) - parser = ValueParser{ - MetricName: "value_test", - DataType: "boolean", - } + parser = NewValueParser("value_test", "boolean", "", nil) _, err = parser.ParseLine("213") assert.Error(t, err) } func TestParseValidValuesDefaultTags(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) parser.SetDefaultTags(map[string]string{"test": "tag"}) metrics, err := parser.Parse([]byte("55")) assert.NoError(t, err) @@ -194,10 +157,7 @@ func TestParseValidValuesDefaultTags(t *testing.T) { }, metrics[0].Fields()) assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "float", - } + parser = NewValueParser("value_test", "float", "", nil) parser.SetDefaultTags(map[string]string{"test": "tag"}) metrics, err = parser.Parse([]byte("64")) assert.NoError(t, err) @@ -208,10 +168,7 @@ func TestParseValidValuesDefaultTags(t *testing.T) { }, metrics[0].Fields()) assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "string", - } + parser = NewValueParser("value_test", "string", "", nil) parser.SetDefaultTags(map[string]string{"test": "tag"}) metrics, err = parser.Parse([]byte("foobar")) assert.NoError(t, err) @@ -222,10 +179,7 @@ func TestParseValidValuesDefaultTags(t *testing.T) { }, metrics[0].Fields()) assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) - parser = ValueParser{ - MetricName: "value_test", - DataType: "boolean", - } + parser = NewValueParser("value_test", "boolean", "", nil) parser.SetDefaultTags(map[string]string{"test": "tag"}) metrics, err = parser.Parse([]byte("true")) assert.NoError(t, err) @@ -238,10 +192,7 @@ func TestParseValidValuesDefaultTags(t *testing.T) { } func TestParseValuesWithNullCharacter(t *testing.T) { - parser := ValueParser{ - MetricName: "value_test", - DataType: "integer", - } + parser := NewValueParser("value_test", "integer", "", nil) metrics, err := parser.Parse([]byte("55\x00")) assert.NoError(t, err) assert.Len(t, metrics, 1) From 5085f595acc893d5f5c32783be4a7a3d7c47c88e Mon Sep 17 00:00:00 2001 From: gkatzioura Date: Fri, 12 Mar 2021 17:16:45 +0000 Subject: [PATCH 295/761] Bigquery output Plugin (#8634) --- README.md | 1 + go.mod | 4 +- go.sum | 26 +-- plugins/outputs/bigquery/README.md | 56 +++++ plugins/outputs/bigquery/bigquery.go | 247 ++++++++++++++++++++++ plugins/outputs/bigquery/bigquery_test.go | 165 +++++++++++++++ 6 files changed, 477 insertions(+), 22 deletions(-) create mode 100644 plugins/outputs/bigquery/README.md create mode 100644 plugins/outputs/bigquery/bigquery.go create mode 100644 plugins/outputs/bigquery/bigquery_test.go diff --git a/README.md b/README.md index 909e7dfec49c5..5535b9527fe40 100644 --- a/README.md +++ b/README.md @@ -420,6 +420,7 @@ For documentation on the latest development code see the [documentation index][d * [aws kinesis](./plugins/outputs/kinesis) * [aws cloudwatch](./plugins/outputs/cloudwatch) * [azure_monitor](./plugins/outputs/azure_monitor) +* [bigquery](./plugins/outputs/bigquery) * [cloud_pubsub](./plugins/outputs/cloud_pubsub) Google Cloud Pub/Sub * [cratedb](./plugins/outputs/cratedb) * [datadog](./plugins/outputs/datadog) diff --git a/go.mod b/go.mod index e0737bb89d167..edb407d8ed09c 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.16 require ( cloud.google.com/go v0.53.0 - cloud.google.com/go/datastore v1.1.0 // indirect - cloud.google.com/go/pubsub v1.2.0 + cloud.google.com/go/bigquery v1.3.0 + cloud.google.com/go/pubsub v1.1.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.3.0 github.com/Azure/azure-event-hubs-go/v3 v3.2.0 diff --git a/go.sum b/go.sum index 44312d66131d5..369efcde64d88 100644 --- a/go.sum +++ b/go.sum @@ -7,21 +7,18 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= @@ -337,10 +334,10 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -734,7 +731,6 @@ golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm0 golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= @@ -789,7 +785,6 @@ golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -834,7 +829,6 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -842,7 +836,6 @@ golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -866,9 +859,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -900,10 +892,7 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9 h1:sEvmEcJVKBNUvgCUClbUQeHOAa9U0I2Ce1BooMvVCY4= @@ -955,9 +944,6 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= diff --git a/plugins/outputs/bigquery/README.md b/plugins/outputs/bigquery/README.md new file mode 100644 index 0000000000000..96e659956c815 --- /dev/null +++ b/plugins/outputs/bigquery/README.md @@ -0,0 +1,56 @@ +# BigQuery Google Cloud Output Plugin + +This plugin writes to the [Google Cloud BigQuery][bigquery] and requires [authentication][] +with Google Cloud using either a service account or user credentials + +This plugin accesses APIs which are [chargeable][pricing]; you might incur +costs. + +Requires `project` to specify where BigQuery entries will be persisted. + +Requires `dataset` to specify under which BigQuery dataset the corresponding metrics tables reside. + +Each metric should have a corresponding table to BigQuery. +The schema of the table on BigQuery: +* Should contain the field `timestamp` which is the timestamp of a telegraph metrics +* Should contain the metric's tags with the same name and the column type should be set to string. +* Should contain the metric's fields with the same name and the column type should match the field type. + +### Configuration + +```toml +[[outputs.bigquery]] + ## GCP Project + project = "erudite-bloom-151019" + + ## The BigQuery dataset + dataset = "telegraf" + + ## Timeout for BigQuery operations. + # timeout = "5s" + + ## Character to replace hyphens on Metric name + # replace_hyphen_to = "_" +``` + +### Restrictions + +Avoid hyphens on BigQuery tables, underlying SDK cannot handle streaming inserts to Table with hyphens. + +In cases of metrics with hyphens please use the [Rename Processor Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/processors/rename). + +In case of a metric with hyphen by default hyphens shall be replaced with underscores (_). +This can be altered using the `replace_hyphen_to` configuration property. + +Available data type options are: +* integer +* float or long +* string +* boolean + +All field naming restrictions that apply to BigQuery should apply to the measurements to be imported. + +Tables on BigQuery should be created beforehand and they are not created during persistence + +Pay attention to the column `timestamp` since it is reserved upfront and cannot change. +If partitioning is required make sure it is applied beforehand. diff --git a/plugins/outputs/bigquery/bigquery.go b/plugins/outputs/bigquery/bigquery.go new file mode 100644 index 0000000000000..fd1f3c7bc6f4b --- /dev/null +++ b/plugins/outputs/bigquery/bigquery.go @@ -0,0 +1,247 @@ +package bigquery + +import ( + "context" + "fmt" + "reflect" + "strings" + "sync" + "time" + + "cloud.google.com/go/bigquery" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" +) + +const timeStampFieldName = "timestamp" + +var defaultTimeout = internal.Duration{Duration: 5 * time.Second} + +const sampleConfig = ` + ## Credentials File + credentials_file = "/path/to/service/account/key.json" + + ## Google Cloud Platform Project + project = "my-gcp-project" + + ## The namespace for the metric descriptor + dataset = "telegraf" + + ## Timeout for BigQuery operations. + # timeout = "5s" + + ## Character to replace hyphens on Metric name + # replace_hyphen_to = "_" +` + +type BigQuery struct { + CredentialsFile string `toml:"credentials_file"` + Project string `toml:"project"` + Dataset string `toml:"dataset"` + + Timeout internal.Duration `toml:"timeout"` + ReplaceHyphenTo string `toml:"replace_hyphen_to"` + + Log telegraf.Logger `toml:"-"` + + client *bigquery.Client + + warnedOnHyphens map[string]bool +} + +// SampleConfig returns the formatted sample configuration for the plugin. +func (s *BigQuery) SampleConfig() string { + return sampleConfig +} + +// Description returns the human-readable function definition of the plugin. +func (s *BigQuery) Description() string { + return "Configuration for Google Cloud BigQuery to send entries" +} + +func (s *BigQuery) Connect() error { + if s.Project == "" { + return fmt.Errorf("Project is a required field for BigQuery output") + } + + if s.Dataset == "" { + return fmt.Errorf("Dataset is a required field for BigQuery output") + } + + if s.client == nil { + return s.setUpDefaultClient() + } + + s.warnedOnHyphens = make(map[string]bool) + + return nil +} + +func (s *BigQuery) setUpDefaultClient() error { + var credentialsOption option.ClientOption + + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, s.Timeout.Duration) + defer cancel() + + if s.CredentialsFile != "" { + credentialsOption = option.WithCredentialsFile(s.CredentialsFile) + } else { + creds, err := google.FindDefaultCredentials(ctx) + if err != nil { + return fmt.Errorf( + "unable to find Google Cloud Platform Application Default Credentials: %v. "+ + "Either set ADC or provide CredentialsFile config", err) + } + credentialsOption = option.WithCredentials(creds) + } + + client, err := bigquery.NewClient(ctx, s.Project, credentialsOption) + s.client = client + return err +} + +// Write the metrics to Google Cloud BigQuery. +func (s *BigQuery) Write(metrics []telegraf.Metric) error { + groupedMetrics := s.groupByMetricName(metrics) + + var wg sync.WaitGroup + + for k, v := range groupedMetrics { + wg.Add(1) + go func(k string, v []bigquery.ValueSaver) { + defer wg.Done() + s.insertToTable(k, v) + }(k, v) + } + + wg.Wait() + + return nil +} + +func (s *BigQuery) groupByMetricName(metrics []telegraf.Metric) map[string][]bigquery.ValueSaver { + groupedMetrics := make(map[string][]bigquery.ValueSaver) + + for _, m := range metrics { + bqm := newValuesSaver(m) + groupedMetrics[m.Name()] = append(groupedMetrics[m.Name()], bqm) + } + + return groupedMetrics +} + +func newValuesSaver(m telegraf.Metric) *bigquery.ValuesSaver { + s := make(bigquery.Schema, 0) + r := make([]bigquery.Value, 0) + timeSchema := timeStampFieldSchema() + s = append(s, timeSchema) + r = append(r, m.Time()) + + s, r = tagsSchemaAndValues(m, s, r) + s, r = valuesSchemaAndValues(m, s, r) + + return &bigquery.ValuesSaver{ + Schema: s.Relax(), + Row: r, + } +} + +func timeStampFieldSchema() *bigquery.FieldSchema { + return &bigquery.FieldSchema{ + Name: timeStampFieldName, + Type: bigquery.TimestampFieldType, + } +} + +func tagsSchemaAndValues(m telegraf.Metric, s bigquery.Schema, r []bigquery.Value) ([]*bigquery.FieldSchema, []bigquery.Value) { + for _, t := range m.TagList() { + s = append(s, tagFieldSchema(t)) + r = append(r, t.Value) + } + + return s, r +} + +func tagFieldSchema(t *telegraf.Tag) *bigquery.FieldSchema { + return &bigquery.FieldSchema{ + Name: t.Key, + Type: bigquery.StringFieldType, + } +} + +func valuesSchemaAndValues(m telegraf.Metric, s bigquery.Schema, r []bigquery.Value) ([]*bigquery.FieldSchema, []bigquery.Value) { + for _, f := range m.FieldList() { + s = append(s, valuesSchema(f)) + r = append(r, f.Value) + } + + return s, r +} + +func valuesSchema(f *telegraf.Field) *bigquery.FieldSchema { + return &bigquery.FieldSchema{ + Name: f.Key, + Type: valueToBqType(f.Value), + } +} + +func valueToBqType(v interface{}) bigquery.FieldType { + switch reflect.ValueOf(v).Kind() { + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + return bigquery.IntegerFieldType + case reflect.Float32, reflect.Float64: + return bigquery.FloatFieldType + case reflect.Bool: + return bigquery.BooleanFieldType + default: + return bigquery.StringFieldType + } +} + +func (s *BigQuery) insertToTable(metricName string, metrics []bigquery.ValueSaver) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, s.Timeout.Duration) + defer cancel() + + tableName := s.metricToTable(metricName) + table := s.client.DatasetInProject(s.Project, s.Dataset).Table(tableName) + inserter := table.Inserter() + + if err := inserter.Put(ctx, metrics); err != nil { + s.Log.Errorf("inserting metric %q failed: %v", metricName, err) + } +} + +func (s *BigQuery) metricToTable(metricName string) string { + if !strings.Contains(metricName, "-") { + return metricName + } + + dhm := strings.ReplaceAll(metricName, "-", s.ReplaceHyphenTo) + + if warned := s.warnedOnHyphens[metricName]; !warned { + s.Log.Warnf("Metric %q contains hyphens please consider using the rename processor plugin, falling back to %q", metricName, dhm) + s.warnedOnHyphens[metricName] = true + } + + return dhm +} + +// Close will terminate the session to the backend, returning error if an issue arises. +func (s *BigQuery) Close() error { + return s.client.Close() +} + +func init() { + outputs.Add("bigquery", func() telegraf.Output { + return &BigQuery{ + Timeout: defaultTimeout, + ReplaceHyphenTo: "_", + } + }) +} diff --git a/plugins/outputs/bigquery/bigquery_test.go b/plugins/outputs/bigquery/bigquery_test.go new file mode 100644 index 0000000000000..34d889fcb4c14 --- /dev/null +++ b/plugins/outputs/bigquery/bigquery_test.go @@ -0,0 +1,165 @@ +package bigquery + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "cloud.google.com/go/bigquery" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "google.golang.org/api/option" +) + +const ( + successfulResponse = "{\"kind\": \"bigquery#tableDataInsertAllResponse\"}" +) + +var testingHost string +var testDuration = internal.Duration{Duration: 5 * time.Second} +var receivedBody map[string]json.RawMessage + +type Row struct { + Tag1 string `json:"tag1"` + Timestamp string `json:"timestamp"` + Value float64 `json:"value"` +} + +func TestConnect(t *testing.T) { + srv := localBigQueryServer(t) + testingHost = strings.ReplaceAll(srv.URL, "http://", "") + defer srv.Close() + + b := &BigQuery{ + Project: "test-project", + Dataset: "test-dataset", + Timeout: testDuration, + } + + cerr := b.setUpTestClient() + require.NoError(t, cerr) + berr := b.Connect() + require.NoError(t, berr) +} + +func TestWrite(t *testing.T) { + srv := localBigQueryServer(t) + testingHost = strings.ReplaceAll(srv.URL, "http://", "") + defer srv.Close() + + b := &BigQuery{ + Project: "test-project", + Dataset: "test-dataset", + Timeout: testDuration, + } + + mockMetrics := testutil.MockMetrics() + + if err := b.setUpTestClient(); err != nil { + require.NoError(t, err) + } + if err := b.Connect(); err != nil { + require.NoError(t, err) + } + + if err := b.Write(mockMetrics); err != nil { + require.NoError(t, err) + } + + var rows []map[string]json.RawMessage + if err := json.Unmarshal(receivedBody["rows"], &rows); err != nil { + require.NoError(t, err) + } + + var row Row + if err := json.Unmarshal(rows[0]["json"], &row); err != nil { + require.NoError(t, err) + } + + pt, _ := time.Parse(time.RFC3339, row.Timestamp) + require.Equal(t, mockMetrics[0].Tags()["tag1"], row.Tag1) + require.Equal(t, mockMetrics[0].Time(), pt) + require.Equal(t, mockMetrics[0].Fields()["value"], row.Value) +} + +func TestMetricToTableDefault(t *testing.T) { + b := &BigQuery{ + Project: "test-project", + Dataset: "test-dataset", + Timeout: testDuration, + warnedOnHyphens: make(map[string]bool), + ReplaceHyphenTo: "_", + Log: testutil.Logger{}, + } + + otn := "table-with-hyphens" + ntn := b.metricToTable(otn) + + require.Equal(t, "table_with_hyphens", ntn) + require.True(t, b.warnedOnHyphens[otn]) +} + +func TestMetricToTableCustom(t *testing.T) { + log := testutil.Logger{} + + b := &BigQuery{ + Project: "test-project", + Dataset: "test-dataset", + Timeout: testDuration, + warnedOnHyphens: make(map[string]bool), + ReplaceHyphenTo: "*", + Log: log, + } + + otn := "table-with-hyphens" + ntn := b.metricToTable(otn) + + require.Equal(t, "table*with*hyphens", ntn) + require.True(t, b.warnedOnHyphens[otn]) +} + +func (b *BigQuery) setUpTestClient() error { + noAuth := option.WithoutAuthentication() + endpoints := option.WithEndpoint("http://" + testingHost) + + ctx := context.Background() + + c, err := bigquery.NewClient(ctx, b.Project, noAuth, endpoints) + + if err != nil { + return err + } + + b.client = c + + return nil +} + +func localBigQueryServer(t *testing.T) *httptest.Server { + srv := httptest.NewServer(http.NotFoundHandler()) + + srv.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/projects/test-project/datasets/test-dataset/tables/test1/insertAll": + decoder := json.NewDecoder(r.Body) + + if err := decoder.Decode(&receivedBody); err != nil { + require.NoError(t, err) + } + + w.WriteHeader(http.StatusOK) + if _, err := w.Write([]byte(successfulResponse)); err != nil { + require.NoError(t, err) + } + default: + w.WriteHeader(http.StatusNotFound) + } + }) + + return srv +} From 97da596275ec442087ad250d0f105d64ca02b6e1 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Fri, 12 Mar 2021 11:42:55 -0600 Subject: [PATCH 296/761] Update mac cache version number in circle-ci to avoid conflicts (#8982) * Have mac use 1.16.1 * minor version not necessary Co-authored-by: Bas <3441183+BattleBas@users.noreply.github.com> --- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index c4d0f8f4797f8..6a9162aa104cd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -93,14 +93,14 @@ jobs: steps: - checkout - restore_cache: - key: mac-go-mod-v2-{{ checksum "go.sum" }} + key: mac-go-mod-v3-{{ checksum "go.sum" }} - run: 'brew update' - run: 'brew install go@1.16' - run: 'make deps' - run: 'make tidy' - save_cache: name: 'go module cache' - key: mac-go-mod-v2-{{ checksum "go.sum" }} + key: mac-go-mod-v3-{{ checksum "go.sum" }} paths: - '~/go/pkg/mod' - '/usr/local/Cellar/go' From 9ddd189cd31536ec0d393029841d0d2a7e6a9b84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Fri, 12 Mar 2021 19:14:31 +0100 Subject: [PATCH 297/761] Proposal of enabled linters and their settings. (#8951) * Proposal of enabled linters and their settings. * Workaround to skip weird "plugins/parsers/influx/plugins/parsers/influx" directory * Update to reflect changes in github actions * Back to the last state * Fix Co-authored-by: Pawel Zak Co-authored-by: Sebastian Spaink --- .github/workflows/golangci-lint.yml | 3 +- .golangci.yml | 152 ++++++++++++++++++++++++---- Makefile | 2 +- 3 files changed, 133 insertions(+), 24 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 651b838fda060..a219934109fd5 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -21,7 +21,6 @@ jobs: with: version: v1.38 only-new-issues: true - args: --timeout=5m0s golangci-master: if: github.ref == 'refs/heads/master' name: lint-master-all @@ -33,4 +32,4 @@ jobs: with: version: v1.38 only-new-issues: true - args: --timeout=5m0s --issues-exit-code=0 + args: --issues-exit-code=0 diff --git a/.golangci.yml b/.golangci.yml index 0961510f943c9..8eeb577c52ff0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,36 +1,138 @@ linters: enable: + - bodyclose + - dogsled + - errcheck + - goprintffuncname + - gosimple + - govet + - ineffassign + - nakedret + - nilerr + - predeclared - revive + - sqlclosecheck + - staticcheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + disable: + - asciicheck + - deadcode + - depguard + - dupl + - exhaustive + - funlen + - gci + - gochecknoglobals + - gochecknoinits + - gocognit + - goconst + - gocritic + - gocyclo + - godot + - godox + - goerr113 + - gofmt + - gofumpt + - goheader + - goimports + - golint + - gomnd + - gomodguard + - gosec + - ifshort + - interfacer + - lll + - makezero + - maligned + - megacheck + - misspell + - nestif + - nlreturn + - noctx + - nolintlint + - paralleltest + - prealloc + - rowserrcheck + - scopelint + - structcheck + - stylecheck + - testpackage + - thelper + - tparallel + - wastedassign + - whitespace + - wrapcheck + - wsl linters-settings: revive: rules: + - name: argument-limit + arguments: [ 6 ] + - name: atomic + - name: bare-return - name: blank-imports + - name: bool-literal-in-expr + - name: call-to-gc + - name: confusing-naming + - name: confusing-results + - name: constant-logical-expr - name: context-as-argument - name: context-keys-type + - name: deep-exit + - name: defer - name: dot-imports + - name: duplicated-imports + - name: early-return + - name: empty-block + - name: empty-lines + - name: error-naming - name: error-return - name: error-strings - - name: error-naming - - name: exported + - name: errorf + - name: flag-parameter + - name: function-result-limit + arguments: [ 3 ] + - name: identical-branches - name: if-return + - name: imports-blacklist + arguments: [ "log" ] + - name: import-shadowing - name: increment-decrement - - name: var-naming - - name: var-declaration + - name: indent-error-flow + - name: modifies-parameter + - name: modifies-value-receiver - name: package-comments - name: range + - name: range-val-address + - name: range-val-in-closure - name: receiver-naming - - name: time-naming - - name: unexported-return - - name: indent-error-flow - - name: errorf - - name: empty-block + - name: redefines-builtin-id + - name: string-of-int + - name: struct-tag - name: superfluous-else - - name: unused-parameter + - name: time-naming + - name: unconditional-recursion + - name: unexported-naming + - name: unhandled-error + - name: unnecessary-stmt - name: unreachable-code - - name: redefines-builtin-id + - name: unused-parameter + - name: var-declaration + - name: var-naming + - name: waitgroup-by-value + nakedret: + # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 + max-func-lines: 1 run: + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 5m + # which dirs to skip: issues from them won't be reported; # can use regexp here: generated.*, regexp is applied on full path; # default value is empty list, but default dirs are skipped independently @@ -38,9 +140,10 @@ run: # "/" will be replaced by current OS file path separator to properly work # on Windows. skip-dirs: - - scripts + - assets - docs - etc + - scripts # which files to skip: they will be analyzed, but issues from them # won't be reported. Default value is empty list, but there is @@ -52,17 +155,24 @@ run: - plugins/parsers/influx/machine.go* issues: - # List of regexps of issue texts to exclude, empty list by default. - # But independently from this option we use default exclude patterns, - # it can be disabled by `exclude-use-default: false`. To list all - # excluded by default patterns execute `golangci-lint run --help` - exclude: - - don't use an underscore in package name - - exported.*should have comment.*or be unexported - - comment on exported.*should be of the form - # Maximum issues count per one linter. Set to 0 to disable. Default is 50. max-issues-per-linter: 0 # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. max-same-issues: 0 + + exclude: + - don't use an underscore in package name #revive:var-naming + + exclude-rules: + - path: plugins/parsers/influx + linters: + - govet + + # Show only new issues created after git revision `HEAD~` + # Great for CI setups + # It's not practical to fix all existing issues at the moment of integration: much better to not allow issues in new code. + # new-from-rev: "HEAD~" + +output: + format: tab diff --git a/Makefile b/Makefile index d41cfe3646074..ca2ed70647eea 100644 --- a/Makefile +++ b/Makefile @@ -138,7 +138,7 @@ ifeq (, $(shell which golangci-lint)) exit 1 endif - golangci-lint run --timeout 5m0s --issues-exit-code 0 + golangci-lint -v run .PHONY: tidy tidy: From 38c61c07efa77ea6f385cca9566cb3f3af629cd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Fri, 12 Mar 2021 21:21:51 +0100 Subject: [PATCH 298/761] Revive fixes - part 4: (#8981) empty-lines Co-authored-by: Pawel Zak --- agent/agent.go | 1 - config/config.go | 1 - internal/content_coding.go | 1 - internal/globpath/globpath_test.go | 1 - internal/http.go | 2 -- metric/tracking.go | 1 - models/filter_test.go | 3 --- models/running_output.go | 1 - plugins/aggregators/basicstats/basicstats.go | 1 - .../aggregators/basicstats/basicstats_test.go | 17 ----------------- .../aggregators/derivative/derivative_test.go | 1 - plugins/aggregators/histogram/histogram_test.go | 1 - plugins/common/kafka/sasl.go | 1 - plugins/common/shim/config.go | 2 -- plugins/inputs/activemq/activemq_test.go | 3 --- plugins/inputs/aerospike/aerospike.go | 7 ------- plugins/inputs/aerospike/aerospike_test.go | 8 -------- plugins/inputs/aliyuncms/aliyuncms.go | 16 ++-------------- plugins/inputs/aliyuncms/aliyuncms_test.go | 7 ------- plugins/inputs/aliyuncms/discovery.go | 10 ---------- plugins/inputs/apache/apache.go | 1 - plugins/inputs/apcupsd/apcupsd_test.go | 2 -- plugins/inputs/beat/beat_test.go | 1 - plugins/inputs/cassandra/cassandra.go | 6 +----- plugins/inputs/cassandra/cassandra_test.go | 4 +--- plugins/inputs/ceph/ceph.go | 3 --- plugins/inputs/ceph/ceph_test.go | 1 - plugins/inputs/chrony/chrony_test.go | 2 -- .../cisco_telemetry_mdt_test.go | 1 - plugins/inputs/clickhouse/clickhouse.go | 9 --------- plugins/inputs/clickhouse/clickhouse_test.go | 1 - plugins/inputs/couchbase/couchbase_test.go | 1 - plugins/inputs/csgo/csgo.go | 4 ++-- plugins/inputs/dcos/client.go | 1 - plugins/inputs/dcos/client_test.go | 3 --- plugins/inputs/dcos/dcos_test.go | 1 - plugins/inputs/diskio/diskio.go | 1 - plugins/inputs/diskio/diskio_linux_test.go | 1 - plugins/inputs/disque/disque.go | 1 - plugins/inputs/docker/docker_test.go | 3 --- plugins/inputs/dovecot/dovecot.go | 3 --- plugins/inputs/dovecot/dovecot_test.go | 2 -- plugins/inputs/ecs/client.go | 1 - plugins/inputs/ecs/client_test.go | 1 - plugins/inputs/ecs/stats.go | 1 - plugins/inputs/elasticsearch/elasticsearch.go | 2 -- plugins/inputs/exec/exec.go | 1 - plugins/inputs/execd/execd_test.go | 1 - plugins/inputs/fibaro/fibaro.go | 1 - plugins/inputs/filecount/filecount.go | 1 - plugins/inputs/filecount/filecount_test.go | 4 +--- plugins/inputs/filecount/filesystem_helpers.go | 1 - .../inputs/filecount/filesystem_helpers_test.go | 4 +--- plugins/inputs/fireboard/fireboard.go | 2 -- plugins/inputs/fluentd/fluentd.go | 4 ---- plugins/inputs/fluentd/fluentd_test.go | 3 --- plugins/inputs/gnmi/gnmi.go | 1 - plugins/inputs/haproxy/haproxy.go | 1 - plugins/inputs/hddtemp/hddtemp_test.go | 2 -- .../inputs/http_listener_v2/http_listener_v2.go | 1 - plugins/inputs/infiniband/infiniband_test.go | 1 - .../influxdb_listener/influxdb_listener.go | 1 - plugins/inputs/intel_powerstat/rapl.go | 5 ++--- plugins/inputs/intel_rdt/intel_rdt.go | 6 ++---- plugins/inputs/ipmi_sensor/ipmi_test.go | 2 -- plugins/inputs/jenkins/jenkins.go | 2 -- plugins/inputs/jenkins/jenkins_test.go | 1 - plugins/inputs/jolokia/jolokia.go | 1 - plugins/inputs/jolokia/jolokia_test.go | 8 ++------ plugins/inputs/jolokia2/gatherer.go | 1 - plugins/inputs/jolokia2/jolokia_agent.go | 3 +-- plugins/inputs/jolokia2/point_builder.go | 4 ---- .../openconfig_telemetry.go | 1 - plugins/inputs/kapacitor/kapacitor.go | 1 - plugins/inputs/kernel/kernel.go | 1 - plugins/inputs/kernel_vmstat/kernel_vmstat.go | 1 - plugins/inputs/kibana/kibana.go | 3 --- plugins/inputs/kubernetes/kubernetes.go | 1 - plugins/inputs/kubernetes/kubernetes_test.go | 2 -- plugins/inputs/lanz/lanz.go | 1 - plugins/inputs/lanz/lanz_test.go | 2 -- .../inputs/linux_sysctl_fs/linux_sysctl_fs.go | 1 - plugins/inputs/logparser/logparser.go | 2 -- plugins/inputs/logstash/logstash.go | 11 +++++------ plugins/inputs/logstash/logstash_test.go | 3 --- plugins/inputs/lustre2/lustre2_test.go | 2 -- plugins/inputs/mailchimp/mailchimp_test.go | 1 - plugins/inputs/marklogic/marklogic.go | 1 - plugins/inputs/marklogic/marklogic_test.go | 1 - plugins/inputs/mesos/mesos_test.go | 1 - plugins/inputs/minecraft/client.go | 1 - plugins/inputs/modbus/modbus.go | 1 - plugins/inputs/mongodb/mongostat.go | 3 +-- plugins/inputs/mongodb/mongostat_test.go | 3 --- plugins/inputs/monit/monit.go | 7 +------ plugins/inputs/monit/monit_test.go | 6 ------ plugins/inputs/mysql/mysql.go | 3 +-- .../inputs/neptune_apex/neptune_apex_test.go | 1 - plugins/inputs/nfsclient/nfsclient.go | 4 ---- plugins/inputs/nfsclient/nfsclient_test.go | 1 - plugins/inputs/nginx_plus/nginx_plus.go | 1 - plugins/inputs/nginx_plus/nginx_plus_test.go | 1 - .../nginx_plus_api_metrics_test.go | 1 - .../nginx_upstream_check.go | 3 --- .../nginx_upstream_check_test.go | 1 - plugins/inputs/nsd/nsd_test.go | 1 - .../inputs/nsq_consumer/nsq_consumer_test.go | 1 - plugins/inputs/opcua/opcua_util.go | 2 -- plugins/inputs/openntpd/openntpd.go | 7 ------- plugins/inputs/opensmtpd/opensmtpd.go | 1 - plugins/inputs/openweathermap/openweathermap.go | 1 - plugins/inputs/pf/pf.go | 1 - plugins/inputs/phpfpm/phpfpm_test.go | 1 - plugins/inputs/ping/ping.go | 1 - plugins/inputs/ping/ping_test.go | 1 - plugins/inputs/powerdns/powerdns_test.go | 1 - plugins/inputs/procstat/native_finder.go | 1 - plugins/inputs/procstat/procstat.go | 1 - plugins/inputs/prometheus/kubernetes.go | 4 ---- plugins/inputs/prometheus/kubernetes_test.go | 1 - plugins/inputs/prometheus/parser.go | 1 - plugins/inputs/prometheus/parser_test.go | 1 - plugins/inputs/prometheus/prometheus.go | 1 - plugins/inputs/prometheus/prometheus_test.go | 1 - plugins/inputs/puppetagent/puppetagent.go | 1 - plugins/inputs/raindrops/raindrops.go | 1 - plugins/inputs/redfish/redfish_test.go | 12 ------------ .../inputs/riemann_listener/riemann_listener.go | 4 ---- .../riemann_listener/riemann_listener_test.go | 2 -- plugins/inputs/sensors/sensors.go | 1 - plugins/inputs/sensors/sensors_test.go | 1 - plugins/inputs/sflow/sflow.go | 1 - plugins/inputs/snmp/snmp.go | 1 - plugins/inputs/snmp_trap/snmp_trap.go | 1 - plugins/inputs/snmp_trap/snmp_trap_test.go | 1 - plugins/inputs/solr/solr.go | 1 - plugins/inputs/solr/solr_test.go | 1 - plugins/inputs/sqlserver/sqlserver_test.go | 6 ------ plugins/inputs/stackdriver/stackdriver_test.go | 1 - plugins/inputs/suricata/suricata_test.go | 2 -- plugins/inputs/sysstat/sysstat.go | 2 -- plugins/inputs/temp/temp_test.go | 1 - plugins/inputs/trig/trig_test.go | 1 - plugins/inputs/unbound/unbound.go | 3 --- plugins/inputs/uwsgi/uwsgi.go | 1 - plugins/inputs/vsphere/endpoint.go | 2 -- plugins/inputs/vsphere/vsphere.go | 1 - plugins/inputs/vsphere/vsphere_test.go | 1 - .../webhooks/papertrail/papertrail_webhooks.go | 3 --- plugins/inputs/webhooks/webhooks.go | 1 - plugins/inputs/x509_cert/x509_cert_test.go | 1 - .../application_insights.go | 8 ++++---- .../application_insights_test.go | 2 -- plugins/outputs/cloud_pubsub/pubsub_test.go | 2 -- plugins/outputs/cloudwatch/cloudwatch.go | 9 --------- plugins/outputs/cratedb/cratedb.go | 1 - plugins/outputs/elasticsearch/elasticsearch.go | 9 --------- .../outputs/elasticsearch/elasticsearch_test.go | 3 --- plugins/outputs/graphite/graphite.go | 3 +-- plugins/outputs/graylog/graylog.go | 1 - plugins/outputs/health/health.go | 1 - plugins/outputs/influxdb/udp.go | 1 - plugins/outputs/kafka/kafka.go | 1 - plugins/outputs/kinesis/kinesis.go | 2 -- plugins/outputs/kinesis/kinesis_test.go | 12 ------------ plugins/outputs/librato/librato.go | 2 -- plugins/outputs/librato/librato_test.go | 1 - .../outputs/prometheus_client/v1/collector.go | 2 -- .../timestream/timestream_internal_test.go | 1 - plugins/outputs/warp10/warp10.go | 3 --- plugins/outputs/warp10/warp10_test.go | 1 - plugins/outputs/wavefront/wavefront.go | 3 --- plugins/outputs/wavefront/wavefront_test.go | 6 ------ plugins/parsers/dropwizard/parser.go | 4 ---- plugins/parsers/grok/parser.go | 1 - plugins/parsers/grok/parser_test.go | 1 - plugins/parsers/json/parser.go | 2 -- plugins/parsers/json/parser_test.go | 1 - plugins/parsers/wavefront/element.go | 1 - plugins/parsers/wavefront/parser.go | 3 --- plugins/parsers/wavefront/parser_test.go | 5 ----- plugins/parsers/wavefront/scanner.go | 1 - plugins/processors/date/date_test.go | 1 - plugins/processors/filepath/filepath.go | 1 - plugins/processors/port_name/port_name.go | 1 - plugins/processors/starlark/builtins.go | 1 - plugins/processors/topk/topk.go | 1 - plugins/processors/topk/topk_test.go | 14 -------------- plugins/serializers/msgpack/msgpack.go | 1 - plugins/serializers/prometheus/collection.go | 2 -- .../prometheusremotewrite.go | 3 --- .../serializers/splunkmetric/splunkmetric.go | 4 ---- testutil/accumulator.go | 1 - testutil/testutil_test.go | 2 -- 194 files changed, 28 insertions(+), 457 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 1ac5f2b0bebb2..fb2c75b902f37 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -364,7 +364,6 @@ func (a *Agent) testStartInputs( if err != nil { log.Printf("E! [agent] Starting input %s: %v", input.LogName(), err) } - } unit.inputs = append(unit.inputs, input) diff --git a/config/config.go b/config/config.go index be64fba4bcf48..55d101cbf8754 100644 --- a/config/config.go +++ b/config/config.go @@ -908,7 +908,6 @@ func loadConfig(config string) ([]byte, error) { // If it isn't a https scheme, try it as a file. } return ioutil.ReadFile(config) - } func fetchConfig(u *url.URL) ([]byte, error) { diff --git a/internal/content_coding.go b/internal/content_coding.go index daefa20eea633..b1a30bde1bfe1 100644 --- a/internal/content_coding.go +++ b/internal/content_coding.go @@ -65,7 +65,6 @@ func (r *GzipReader) Read(b []byte) (int, error) { return n, nil } return n, err - } // NewContentEncoder returns a ContentEncoder for the encoding type. diff --git a/internal/globpath/globpath_test.go b/internal/globpath/globpath_test.go index 4897ab2f8f879..33779f912a027 100644 --- a/internal/globpath/globpath_test.go +++ b/internal/globpath/globpath_test.go @@ -19,7 +19,6 @@ var ( ) func TestCompileAndMatch(t *testing.T) { - type test struct { path string matches int diff --git a/internal/http.go b/internal/http.go index 1c3dd49577557..12adfe729df34 100644 --- a/internal/http.go +++ b/internal/http.go @@ -37,7 +37,6 @@ func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) if !ok || subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.username)) != 1 || subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.password)) != 1 { - rw.Header().Set("WWW-Authenticate", "Basic realm=\""+h.realm+"\"") h.onError(rw) http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) @@ -73,7 +72,6 @@ func (h *genericAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request // Scheme checking authorization := req.Header.Get("Authorization") if subtle.ConstantTimeCompare([]byte(authorization), []byte(h.credentials)) != 1 { - h.onError(rw) http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) return diff --git a/metric/tracking.go b/metric/tracking.go index e370d9f2a7ccc..e0bf5ff8e6596 100644 --- a/metric/tracking.go +++ b/metric/tracking.go @@ -117,7 +117,6 @@ func newTrackingMetricGroup(group []telegraf.Metric, fn NotifyFunc) ([]telegraf. d: d, } group[i] = dm - } if finalizer != nil { runtime.SetFinalizer(d, finalizer) diff --git a/models/filter_test.go b/models/filter_test.go index d241244b9d704..7e82ba0007240 100644 --- a/models/filter_test.go +++ b/models/filter_test.go @@ -402,7 +402,6 @@ func TestFilter_FilterTagsMatches(t *testing.T) { // both parameters were defined // see: https://github.com/influxdata/telegraf/issues/2860 func TestFilter_FilterNamePassAndDrop(t *testing.T) { - inputData := []string{"name1", "name2", "name3", "name4"} expectedResult := []bool{false, true, false, false} @@ -422,7 +421,6 @@ func TestFilter_FilterNamePassAndDrop(t *testing.T) { // both parameters were defined // see: https://github.com/influxdata/telegraf/issues/2860 func TestFilter_FilterFieldPassAndDrop(t *testing.T) { - inputData := []string{"field1", "field2", "field3", "field4"} expectedResult := []bool{false, true, false, false} @@ -479,7 +477,6 @@ func TestFilter_FilterTagsPassAndDrop(t *testing.T) { for i, tag := range inputData { require.Equal(t, f.shouldTagsPass(tag), expectedResult[i]) } - } func BenchmarkFilter(b *testing.B) { diff --git a/models/running_output.go b/models/running_output.go index fd048df6f0d4d..b7f3fe03f9000 100644 --- a/models/running_output.go +++ b/models/running_output.go @@ -125,7 +125,6 @@ func (r *RunningOutput) Init() error { if err != nil { return err } - } return nil } diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index b335ec4f0a71d..4ad6c77056314 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -167,7 +167,6 @@ func (b *BasicStats) Push(acc telegraf.Accumulator) { for _, aggregate := range b.cache { fields := map[string]interface{}{} for k, v := range aggregate.fields { - if b.statsConfig.count { fields[k+"_count"] = v.count } diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go index 8b2e9c7397872..8e8ee6da7915a 100644 --- a/plugins/aggregators/basicstats/basicstats_test.go +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -184,7 +184,6 @@ func TestBasicStatsDifferentPeriods(t *testing.T) { // Test only aggregating count func TestBasicStatsWithOnlyCount(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"count"} aggregator.Log = testutil.Logger{} @@ -213,7 +212,6 @@ func TestBasicStatsWithOnlyCount(t *testing.T) { // Test only aggregating minimum func TestBasicStatsWithOnlyMin(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"min"} aggregator.Log = testutil.Logger{} @@ -242,7 +240,6 @@ func TestBasicStatsWithOnlyMin(t *testing.T) { // Test only aggregating maximum func TestBasicStatsWithOnlyMax(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"max"} aggregator.Log = testutil.Logger{} @@ -271,7 +268,6 @@ func TestBasicStatsWithOnlyMax(t *testing.T) { // Test only aggregating mean func TestBasicStatsWithOnlyMean(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"mean"} aggregator.Log = testutil.Logger{} @@ -300,7 +296,6 @@ func TestBasicStatsWithOnlyMean(t *testing.T) { // Test only aggregating sum func TestBasicStatsWithOnlySum(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"sum"} aggregator.Log = testutil.Logger{} @@ -331,7 +326,6 @@ func TestBasicStatsWithOnlySum(t *testing.T) { // implementations of sum were calculated from mean and count, which // e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8. func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) { - var sum1, _ = metric.New("m1", map[string]string{}, map[string]interface{}{ @@ -383,7 +377,6 @@ func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) { // Test only aggregating variance func TestBasicStatsWithOnlyVariance(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"s2"} aggregator.Log = testutil.Logger{} @@ -410,7 +403,6 @@ func TestBasicStatsWithOnlyVariance(t *testing.T) { // Test only aggregating standard deviation func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"stdev"} aggregator.Log = testutil.Logger{} @@ -437,7 +429,6 @@ func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) { // Test only aggregating minimum and maximum func TestBasicStatsWithMinAndMax(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"min", "max"} aggregator.Log = testutil.Logger{} @@ -473,7 +464,6 @@ func TestBasicStatsWithMinAndMax(t *testing.T) { // Test only aggregating diff func TestBasicStatsWithDiff(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"diff"} aggregator.Log = testutil.Logger{} @@ -499,7 +489,6 @@ func TestBasicStatsWithDiff(t *testing.T) { } func TestBasicStatsWithRate(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"rate"} aggregator.Log = testutil.Logger{} @@ -524,7 +513,6 @@ func TestBasicStatsWithRate(t *testing.T) { } func TestBasicStatsWithNonNegativeRate(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"non_negative_rate"} aggregator.Log = testutil.Logger{} @@ -548,7 +536,6 @@ func TestBasicStatsWithNonNegativeRate(t *testing.T) { acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) } func TestBasicStatsWithInterval(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"interval"} aggregator.Log = testutil.Logger{} @@ -575,7 +562,6 @@ func TestBasicStatsWithInterval(t *testing.T) { // Test only aggregating non_negative_diff func TestBasicStatsWithNonNegativeDiff(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"non_negative_diff"} aggregator.Log = testutil.Logger{} @@ -666,7 +652,6 @@ func TestBasicStatsWithAllStats(t *testing.T) { // Test that if an empty array is passed, no points are pushed func TestBasicStatsWithNoStats(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{} aggregator.Log = testutil.Logger{} @@ -683,7 +668,6 @@ func TestBasicStatsWithNoStats(t *testing.T) { // Test that if an unknown stat is configured, it doesn't explode func TestBasicStatsWithUnknownStat(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"crazy"} aggregator.Log = testutil.Logger{} @@ -703,7 +687,6 @@ func TestBasicStatsWithUnknownStat(t *testing.T) { // otherwise user's working systems will suddenly (and surprisingly) start // capturing sum without their input. func TestBasicStatsWithDefaultStats(t *testing.T) { - aggregator := NewBasicStats() aggregator.Log = testutil.Logger{} aggregator.getConfiguredStats() diff --git a/plugins/aggregators/derivative/derivative_test.go b/plugins/aggregators/derivative/derivative_test.go index 1549500f74003..9a2f34ac7dc20 100644 --- a/plugins/aggregators/derivative/derivative_test.go +++ b/plugins/aggregators/derivative/derivative_test.go @@ -119,7 +119,6 @@ func TestTwoFullEventsWithoutParameter(t *testing.T) { "value_rate": float64(5), }, ) - } func TestTwoFullEventsInSeperatePushes(t *testing.T) { diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go index dfb3f5d12dfa8..aa6214b3babab 100644 --- a/plugins/aggregators/histogram/histogram_test.go +++ b/plugins/aggregators/histogram/histogram_test.go @@ -210,7 +210,6 @@ func TestHistogramWithAllFieldsNonCumulative(t *testing.T) { // TestHistogramWithTwoPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates // getting added in different periods) for all fields func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) { - var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) histogram := NewTestHistogram(cfg, false, true) diff --git a/plugins/common/kafka/sasl.go b/plugins/common/kafka/sasl.go index e565aea5813ce..06ab64dab34be 100644 --- a/plugins/common/kafka/sasl.go +++ b/plugins/common/kafka/sasl.go @@ -105,5 +105,4 @@ func gssapiAuthType(authType string) int { default: return 0 } - } diff --git a/plugins/common/shim/config.go b/plugins/common/shim/config.go index 07888752707da..a0bb3ce0de696 100644 --- a/plugins/common/shim/config.go +++ b/plugins/common/shim/config.go @@ -53,14 +53,12 @@ func LoadConfig(filePath *string) (loaded loadedConfig, err error) { var data string conf := config{} if filePath != nil && *filePath != "" { - b, err := ioutil.ReadFile(*filePath) if err != nil { return loadedConfig{}, err } data = expandEnvVars(b) - } else { conf, err = DefaultImportedPlugins() if err != nil { diff --git a/plugins/inputs/activemq/activemq_test.go b/plugins/inputs/activemq/activemq_test.go index 407a381775adc..0cec2e3306a46 100644 --- a/plugins/inputs/activemq/activemq_test.go +++ b/plugins/inputs/activemq/activemq_test.go @@ -11,7 +11,6 @@ import ( ) func TestGatherQueuesMetrics(t *testing.T) { - s := ` @@ -57,7 +56,6 @@ func TestGatherQueuesMetrics(t *testing.T) { } func TestGatherTopicsMetrics(t *testing.T) { - s := ` @@ -104,7 +102,6 @@ func TestGatherTopicsMetrics(t *testing.T) { } func TestGatherSubscribersMetrics(t *testing.T) { - s := ` diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index e470b58a40f25..b5c13ddc45a7b 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -248,7 +248,6 @@ func (a *Aerospike) parseNodeInfo(stats map[string]string, hostPort string, node for k, v := range stats { key := strings.Replace(k, "-", "_", -1) fields[key] = parseAerospikeValue(key, v) - } acc.AddFields("aerospike_node", fields, tags, time.Now()) @@ -279,7 +278,6 @@ func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node) (map[string]s return stats, err } func (a *Aerospike) parseNamespaceInfo(stats map[string]string, hostPort string, namespace string, nodeName string, acc telegraf.Accumulator) { - nTags := map[string]string{ "aerospike_host": hostPort, "node_name": nodeName, @@ -348,7 +346,6 @@ func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node) (map[string]stri } func (a *Aerospike) parseSetInfo(stats map[string]string, hostPort string, namespaceSet string, nodeName string, acc telegraf.Accumulator) { - stat := strings.Split( strings.TrimSuffix( stats[fmt.Sprintf("sets/%s", namespaceSet)], ";"), ":") @@ -383,7 +380,6 @@ func (a *Aerospike) getTTLHistogram(hostPort string, namespace string, set strin } func (a *Aerospike) getObjectSizeLinearHistogram(hostPort string, namespace string, set string, n *as.Node, acc telegraf.Accumulator) error { - stats, err := a.getHistogram(namespace, set, "object-size-linear", n) if err != nil { return err @@ -406,11 +402,9 @@ func (a *Aerospike) getHistogram(namespace string, set string, histogramType str return nil, err } return stats, nil - } func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, namespace string, set string, histogramType string, nodeName string, acc telegraf.Accumulator) { - nTags := map[string]string{ "aerospike_host": hostPort, "node_name": nodeName, @@ -463,7 +457,6 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam nFields[strconv.Itoa(bucketName)] = bucketSum } } - } } } diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index efc10b5d99bae..57d37a06c5d4d 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -31,7 +31,6 @@ func TestAerospikeStatisticsIntegration(t *testing.T) { namespaceName := acc.TagValue("aerospike_namespace", "namespace") assert.Equal(t, namespaceName, "test") - } func TestAerospikeStatisticsPartialErrIntegration(t *testing.T) { @@ -165,7 +164,6 @@ func TestQuerySetsIntegration(t *testing.T) { assert.True(t, acc.HasMeasurement("aerospike_set")) assert.True(t, acc.HasTag("aerospike_set", "set")) assert.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes")) - } func TestSelectQuerySetsIntegration(t *testing.T) { @@ -215,7 +213,6 @@ func TestSelectQuerySetsIntegration(t *testing.T) { assert.True(t, acc.HasMeasurement("aerospike_set")) assert.True(t, acc.HasTag("aerospike_set", "set")) assert.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes")) - } func TestDisableTTLHistogramIntegration(t *testing.T) { @@ -264,7 +261,6 @@ func TestTTLHistogramIntegration(t *testing.T) { assert.True(t, acc.HasMeasurement("aerospike_histogram_ttl")) assert.True(t, FindTagValue(&acc, "aerospike_histogram_ttl", "namespace", "test")) - } func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) { if testing.Short() { @@ -287,7 +283,6 @@ func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) { assert.False(t, acc.HasMeasurement("aerospike_histogram_object_size_linear")) } func TestObjectSizeLinearHistogramIntegration(t *testing.T) { - if testing.Short() { t.Skip("Skipping aerospike integration tests.") } else { @@ -419,7 +414,6 @@ func TestParseHistogramSet(t *testing.T) { a.parseHistogram(stats, "127.0.0.1:3000", "test", "foo", "object-size-linear", "TestNodeName", &acc) acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags) - } func TestParseHistogramNamespace(t *testing.T) { a := &Aerospike{ @@ -450,7 +444,6 @@ func TestParseHistogramNamespace(t *testing.T) { a.parseHistogram(stats, "127.0.0.1:3000", "test", "", "object-size-linear", "TestNodeName", &acc) acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags) - } func TestAerospikeParseValue(t *testing.T) { // uint64 with value bigger than int64 max @@ -484,7 +477,6 @@ func FindTagValue(acc *testutil.Accumulator, measurement string, key string, val if ok && v == value { return true } - } } return false diff --git a/plugins/inputs/aliyuncms/aliyuncms.go b/plugins/inputs/aliyuncms/aliyuncms.go index 794f398f7dcb5..e5ce3824101dc 100644 --- a/plugins/inputs/aliyuncms/aliyuncms.go +++ b/plugins/inputs/aliyuncms/aliyuncms.go @@ -187,7 +187,6 @@ func (s *AliyunCMS) Description() string { } func (s *AliyunCMS) Init() error { - if s.Project == "" { return errors.New("project is not set") } @@ -275,7 +274,6 @@ func (s *AliyunCMS) Start(telegraf.Accumulator) error { // Gather implements telegraf.Inputs interface func (s *AliyunCMS) Gather(acc telegraf.Accumulator) error { - s.updateWindow(time.Now()) // limit concurrency or we can easily exhaust user connection limit @@ -288,7 +286,6 @@ func (s *AliyunCMS) Gather(acc telegraf.Accumulator) error { s.prepareTagsAndDimensions(metric) wg.Add(len(metric.MetricNames)) for _, metricName := range metric.MetricNames { - <-lmtr.C go func(metricName string, metric *Metric) { defer wg.Done() @@ -308,7 +305,6 @@ func (s *AliyunCMS) Stop() { } func (s *AliyunCMS) updateWindow(relativeTo time.Time) { - //https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.6.701.54025679zh6wiR //The start and end times are executed in the mode of //opening left and closing right, and startTime cannot be equal @@ -329,7 +325,6 @@ func (s *AliyunCMS) updateWindow(relativeTo time.Time) { // Gather given metric and emit error func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, metric *Metric) error { - req := cms.CreateDescribeMetricListRequest() req.Period = strconv.FormatInt(int64(s.Period.Duration.Seconds()), 10) req.MetricName = metricName @@ -368,7 +363,6 @@ func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, me case "instanceId", "BucketName": tags[key] = value.(string) if metric.discoveryTags != nil { //discovery can be not activated - //Skipping data point if discovery data not exist if _, ok := metric.discoveryTags[value.(string)]; !ok && !metric.AllowDataPointWODiscoveryData { @@ -401,7 +395,6 @@ func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, me //Tag helper func parseTag(tagSpec string, data interface{}) (string, string, error) { - tagKey := tagSpec queryPath := tagSpec @@ -452,9 +445,8 @@ L: } } - if newData || //new data arrives, process it - len(metric.discoveryTags) == 0 { //or this is the first call - + //new data arrives (so process it) or this is the first call + if newData || len(metric.discoveryTags) == 0 { metric.dtLock.Lock() defer metric.dtLock.Unlock() @@ -467,14 +459,12 @@ L: //Preparing tags & dims... for instanceId, elem := range s.discoveryData { - //Start filing tags //Remove old value if exist delete(metric.discoveryTags, instanceId) metric.discoveryTags[instanceId] = make(map[string]string, len(metric.TagsQueryPath)+len(defaulTags)) for _, tagQueryPath := range metric.TagsQueryPath { - tagKey, tagValue, err := parseTag(tagQueryPath, elem) if err != nil { s.Log.Errorf("%v", err) @@ -510,7 +500,6 @@ L: metric.requestDimensions = append( metric.requestDimensions, map[string]string{s.dimensionKey: instanceId}) - } //Get final dimension (need to get full lis of @@ -531,7 +520,6 @@ L: } else { metric.requestDimensionsStr = string(reqDim) } - } } diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go index 37430bbddf79d..f0ac60e932761 100644 --- a/plugins/inputs/aliyuncms/aliyuncms_test.go +++ b/plugins/inputs/aliyuncms/aliyuncms_test.go @@ -25,7 +25,6 @@ const inputTitle = "inputs.aliyuncms" type mockGatherAliyunCMSClient struct{} func (m *mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) { - resp := new(cms.DescribeMetricListResponse) //switch request.Metric { @@ -221,7 +220,6 @@ func TestUpdateWindow(t *testing.T) { } func TestGatherMetric(t *testing.T) { - plugin := &AliyunCMS{ Project: "acs_slb_dashboard", client: new(mockGatherAliyunCMSClient), @@ -255,13 +253,11 @@ func TestGatherMetric(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var acc telegraf.Accumulator require.EqualError(t, plugin.gatherMetric(acc, tt.metricName, metric), tt.expectedErrorString) - }) } } func TestGather(t *testing.T) { - metric := &Metric{ MetricNames: []string{}, Dimensions: `{"instanceId": "i-abcdefgh123456"}`, @@ -332,7 +328,6 @@ func TestGather(t *testing.T) { } func TestGetDiscoveryDataAllRegions(t *testing.T) { - //test table: tests := []struct { name string @@ -403,8 +398,6 @@ func TestGetDiscoveryDataAllRegions(t *testing.T) { if err != nil { require.EqualError(t, err, tt.expectedErrorString) } - }) } - } diff --git a/plugins/inputs/aliyuncms/discovery.go b/plugins/inputs/aliyuncms/discovery.go index 39e0044b6e77e..904f9d1948d3e 100644 --- a/plugins/inputs/aliyuncms/discovery.go +++ b/plugins/inputs/aliyuncms/discovery.go @@ -72,7 +72,6 @@ type discoveryTool struct { //getRpcReqFromDiscoveryRequest - utility function to map between aliyun request primitives //discoveryRequest represents different type of discovery requests func getRpcReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, error) { - if reflect.ValueOf(req).Type().Kind() != reflect.Ptr || reflect.ValueOf(req).IsNil() { return nil, errors.Errorf("Not expected type of the discovery request object: %q, %q", reflect.ValueOf(req).Type(), reflect.ValueOf(req).Kind()) @@ -81,7 +80,6 @@ func getRpcReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, ptrV := reflect.Indirect(reflect.ValueOf(req)) for i := 0; i < ptrV.NumField(); i++ { - if ptrV.Field(i).Type().String() == "*requests.RpcRequest" { if !ptrV.Field(i).CanInterface() { return nil, errors.Errorf("Can't get interface of %v", ptrV.Field(i)) @@ -323,7 +321,6 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) //It should contain the array with discovered data for _, item := range rootKeyVal { - if discData, foundDataItem = item.([]interface{}); foundDataItem { break } @@ -338,7 +335,6 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) case "PageNumber": pageNumber = int(val.(float64)) } - } if !foundRootKey { return nil, 0, 0, 0, errors.Errorf("Didn't find root key %q in discovery response", dt.respRootKey) @@ -390,14 +386,11 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com } else { return nil, errors.Errorf("Can't parse input data element, not a map[string]interface{} type") } - } return preparedData, nil } - } - } func (dt *discoveryTool) getDiscoveryDataAllRegions(limiter chan bool) (map[string]interface{}, error) { @@ -468,7 +461,6 @@ func (dt *discoveryTool) Start() { case <-dt.done: return case <-ticker.C: - data, err = dt.getDiscoveryDataAllRegions(lmtr.C) if err != nil { dt.lg.Errorf("Can't get discovery data: %v", err) @@ -485,14 +477,12 @@ func (dt *discoveryTool) Start() { //send discovery data in blocking mode dt.dataChan <- data } - } } }() } func (dt *discoveryTool) Stop() { - close(dt.done) //Shutdown timer diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index f6d5831702bd1..12edc62cc845c 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -162,7 +162,6 @@ func (n *Apache) gatherScores(data string) map[string]interface{} { var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0 for _, s := range strings.Split(data, "") { - switch s { case "_": waiting++ diff --git a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go index a7dbd2c7de1b7..7ab64ba114cc6 100644 --- a/plugins/inputs/apcupsd/apcupsd_test.go +++ b/plugins/inputs/apcupsd/apcupsd_test.go @@ -102,7 +102,6 @@ func TestConfig(t *testing.T) { } }) } - } func TestApcupsdGather(t *testing.T) { @@ -155,7 +154,6 @@ func TestApcupsdGather(t *testing.T) { ) for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) diff --git a/plugins/inputs/beat/beat_test.go b/plugins/inputs/beat/beat_test.go index 30dd48569f3a6..777fe6b98175c 100644 --- a/plugins/inputs/beat/beat_test.go +++ b/plugins/inputs/beat/beat_test.go @@ -224,5 +224,4 @@ func Test_BeatRequest(test *testing.T) { if err != nil { test.Logf("Can't gather stats") } - } diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index ea7001fe334a0..7f9fe98b2a49e 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -129,9 +129,7 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) { } } -func addCassandraMetric(mbean string, c cassandraMetric, - values map[string]interface{}) { - +func addCassandraMetric(mbean string, c cassandraMetric, values map[string]interface{}) { tags := make(map[string]string) fields := make(map[string]interface{}) tokens := parseJmxMetricRequest(mbean) @@ -139,11 +137,9 @@ func addCassandraMetric(mbean string, c cassandraMetric, tags["cassandra_host"] = c.host addValuesAsFields(values, fields, tags["mname"]) c.acc.AddFields(tokens["class"]+tokens["type"], fields, tags) - } func (c cassandraMetric) addTagsFields(out map[string]interface{}) { - r := out["request"] tokens := parseJmxMetricRequest(r.(map[string]interface{})["mbean"].(string)) diff --git a/plugins/inputs/cassandra/cassandra_test.go b/plugins/inputs/cassandra/cassandra_test.go index 43a9a0c1eb105..9b0798207ef16 100644 --- a/plugins/inputs/cassandra/cassandra_test.go +++ b/plugins/inputs/cassandra/cassandra_test.go @@ -198,9 +198,7 @@ func TestHttpJsonJavaMultiType(t *testing.T) { // Test that the proper values are ignored or collected func TestHttp404(t *testing.T) { - - jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, - []string{HeapMetric}) + jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, []string{HeapMetric}) var acc testutil.Accumulator err := acc.GatherError(jolokia.Gather) diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index 5a820a2382ba6..b4e83844fcfcb 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -211,17 +211,14 @@ var findSockets = func(c *Ceph) ([]*socket, error) { if strings.HasPrefix(f, c.OsdPrefix) { sockType = typeOsd sockPrefix = osdPrefix - } if strings.HasPrefix(f, c.MdsPrefix) { sockType = typeMds sockPrefix = mdsPrefix - } if strings.HasPrefix(f, c.RgwPrefix) { sockType = typeRgw sockPrefix = rgwPrefix - } if sockType == typeOsd || sockType == typeMon || sockType == typeMds || sockType == typeRgw { diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index f6cf8e8a946fc..e46a18049c354 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -105,7 +105,6 @@ func TestGather(t *testing.T) { acc := &testutil.Accumulator{} c := &Ceph{} c.Gather(acc) - } func TestFindSockets(t *testing.T) { diff --git a/plugins/inputs/chrony/chrony_test.go b/plugins/inputs/chrony/chrony_test.go index a5fd9dd028e57..4b8ad85ccf932 100644 --- a/plugins/inputs/chrony/chrony_test.go +++ b/plugins/inputs/chrony/chrony_test.go @@ -49,7 +49,6 @@ func TestGather(t *testing.T) { t.Fatal(err) } acc.AssertContainsTaggedFields(t, "chrony", fields, tags) - } // fackeExecCommand is a helper function that mock @@ -102,7 +101,6 @@ Leap status : Not synchronized } else { fmt.Fprint(os.Stdout, "command not found") os.Exit(1) - } os.Exit(0) } diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index ea200bc744a7d..f967044da881b 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -588,5 +588,4 @@ func TestGRPCDialoutMultiple(t *testing.T) { tags = map[string]string{"path": "type:model/other/path", "name": "str", "source": "hostname", "subscription": "subscription"} fields = map[string]interface{}{"value": int64(-1)} acc.AssertContainsTaggedFields(t, "other", fields, tags) - } diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index 187ead5cf6790..866ddeb8c164f 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -195,7 +195,6 @@ func (ch *ClickHouse) Gather(acc telegraf.Accumulator) (err error) { } for _, conn := range connects { - metricsFuncs := []func(acc telegraf.Accumulator, conn *connect) error{ ch.tables, ch.zookeeper, @@ -212,7 +211,6 @@ func (ch *ClickHouse) Gather(acc telegraf.Accumulator) (err error) { if err := metricFunc(acc, &conn); err != nil { acc.AddError(err) } - } for metric := range commonMetrics { @@ -342,7 +340,6 @@ func (ch *ClickHouse) replicationQueue(acc telegraf.Accumulator, conn *connect) } func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) error { - var detachedParts []struct { DetachedParts chUInt64 `json:"detached_parts"` } @@ -363,7 +360,6 @@ func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) err } func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) error { - var brokenDictionaries []struct { Origin string `json:"origin"` BytesAllocated chUInt64 `json:"bytes_allocated"` @@ -397,7 +393,6 @@ func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) erro } func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error { - var mutationsStatus []struct { Failed chUInt64 `json:"failed"` Running chUInt64 `json:"running"` @@ -424,7 +419,6 @@ func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error { } func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error { - var disksStatus []struct { Name string `json:"name"` Path string `json:"path"` @@ -448,14 +442,12 @@ func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error { }, tags, ) - } return nil } func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error { - var processesStats []struct { QueryType string `json:"query_type"` Percentile50 float64 `json:"p50"` @@ -479,7 +471,6 @@ func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error { }, tags, ) - } return nil diff --git a/plugins/inputs/clickhouse/clickhouse_test.go b/plugins/inputs/clickhouse/clickhouse_test.go index 68a4438442d12..c69c455b94c5e 100644 --- a/plugins/inputs/clickhouse/clickhouse_test.go +++ b/plugins/inputs/clickhouse/clickhouse_test.go @@ -583,5 +583,4 @@ func TestAutoDiscovery(t *testing.T) { ) defer ts.Close() ch.Gather(acc) - } diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index df7f1b4c14cf7..66a1d08abad1f 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -41,7 +41,6 @@ func TestGatherServer(t *testing.T) { } func TestSanitizeURI(t *testing.T) { - var sanitizeTest = []struct { input string expected string diff --git a/plugins/inputs/csgo/csgo.go b/plugins/inputs/csgo/csgo.go index fe82962669ced..0fa18cab21d43 100644 --- a/plugins/inputs/csgo/csgo.go +++ b/plugins/inputs/csgo/csgo.go @@ -74,8 +74,8 @@ func init() { func (s *CSGO) gatherServer( server []string, request func(string, string) (string, error), - acc telegraf.Accumulator) error { - + acc telegraf.Accumulator, +) error { if len(server) != 2 { return errors.New("incorrect server config") } diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index 8f171638a5844..32eab80867cd4 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -229,7 +229,6 @@ func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Conta containers := make([]Container, 0, len(list)) for _, c := range list { containers = append(containers, Container{ID: c}) - } return containers, nil diff --git a/plugins/inputs/dcos/client_test.go b/plugins/inputs/dcos/client_test.go index 0b7772dccb994..ece4b178f4556 100644 --- a/plugins/inputs/dcos/client_test.go +++ b/plugins/inputs/dcos/client_test.go @@ -142,7 +142,6 @@ func TestGetSummary(t *testing.T) { require.Equal(t, tt.expectedValue, summary) }) } - } func TestGetNodeMetrics(t *testing.T) { @@ -184,7 +183,6 @@ func TestGetNodeMetrics(t *testing.T) { require.Equal(t, tt.expectedValue, m) }) } - } func TestGetContainerMetrics(t *testing.T) { @@ -226,5 +224,4 @@ func TestGetContainerMetrics(t *testing.T) { require.Equal(t, tt.expectedValue, m) }) } - } diff --git a/plugins/inputs/dcos/dcos_test.go b/plugins/inputs/dcos/dcos_test.go index 3914fa5777714..828fd0af647ab 100644 --- a/plugins/inputs/dcos/dcos_test.go +++ b/plugins/inputs/dcos/dcos_test.go @@ -203,7 +203,6 @@ func TestAddNodeMetrics(t *testing.T) { } }) } - } func TestAddContainerMetrics(t *testing.T) { diff --git a/plugins/inputs/diskio/diskio.go b/plugins/inputs/diskio/diskio.go index 5250b704a5370..c586233a8e447 100644 --- a/plugins/inputs/diskio/diskio.go +++ b/plugins/inputs/diskio/diskio.go @@ -104,7 +104,6 @@ func (d *DiskIO) Gather(acc telegraf.Accumulator) error { } for _, io := range diskio { - match := false if d.deviceFilter != nil && d.deviceFilter.Match(io.Name) { match = true diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index 4d7dc5c821ee5..9362b195cd3da 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -71,7 +71,6 @@ func TestDiskInfo(t *testing.T) { assert.Equal(t, "myval1", di["MY_PARAM_1"]) assert.Equal(t, "myval2", di["MY_PARAM_2"]) assert.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) - // unfortunately we can't adjust mtime on /dev/null to test cache invalidation } diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index 021e865bfe4df..6cdff83ee3f16 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -102,7 +102,6 @@ const defaultPort = "7711" func (d *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { if d.c == nil { - _, _, err := net.SplitHostPort(addr.Host) if err != nil { addr.Host = addr.Host + ":" + defaultPort diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 3272abec066f9..b0c9f9791ec8b 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -1122,7 +1122,6 @@ func TestHostnameFromID(t *testing.T) { } }) } - } func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { @@ -1269,7 +1268,6 @@ func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { []string{"docker_container_cpu", "docker_container_net", "docker_container_blkio"}) }) testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.OnlyTags(), testutil.SortMetrics()) - }) } } @@ -1360,7 +1358,6 @@ func TestDocker_Init(t *testing.T) { t.Errorf("Total include: got '%v', want '%v'", d.TotalInclude, tt.wantTotalInclude) } } - }) } } diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index 6c85acadcb39d..b9875079d9feb 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -110,7 +110,6 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype stri } func gatherStats(buf *bytes.Buffer, acc telegraf.Accumulator, host string, qtype string) error { - lines := strings.Split(buf.String(), "\n") head := strings.Split(lines[0], "\t") vals := lines[1:] @@ -169,13 +168,11 @@ func splitSec(tm string) (sec int64, msec int64) { } func timeParser(tm string) time.Time { - sec, msec := splitSec(tm) return time.Unix(sec, msec) } func secParser(tm string) float64 { - sec, msec := splitSec(tm) return float64(sec) + (float64(msec) / 1000000.0) } diff --git a/plugins/inputs/dovecot/dovecot_test.go b/plugins/inputs/dovecot/dovecot_test.go index 86efdbb4f8e1d..97c1d2f88d964 100644 --- a/plugins/inputs/dovecot/dovecot_test.go +++ b/plugins/inputs/dovecot/dovecot_test.go @@ -10,7 +10,6 @@ import ( ) func TestDovecotIntegration(t *testing.T) { - if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -103,7 +102,6 @@ func TestDovecotIntegration(t *testing.T) { require.NoError(t, err) acc.AssertContainsTaggedFields(t, "dovecot", fields, tags) - } const sampleGlobal = `reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits diff --git a/plugins/inputs/ecs/client.go b/plugins/inputs/ecs/client.go index d7ce10cb2a2e0..ac7ed2e1b09ef 100644 --- a/plugins/inputs/ecs/client.go +++ b/plugins/inputs/ecs/client.go @@ -152,7 +152,6 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) { // PollSync executes Task and ContainerStats in parallel. If both succeed, both structs are returned. // If either errors, a single error is returned. func PollSync(c Client) (*Task, map[string]types.StatsJSON, error) { - var task *Task var stats map[string]types.StatsJSON var err error diff --git a/plugins/inputs/ecs/client_test.go b/plugins/inputs/ecs/client_test.go index 333aec80c2709..2f37ca0cfa456 100644 --- a/plugins/inputs/ecs/client_test.go +++ b/plugins/inputs/ecs/client_test.go @@ -27,7 +27,6 @@ func (p *pollMock) ContainerStats() (map[string]types.StatsJSON, error) { } func TestEcsClient_PollSync(t *testing.T) { - tests := []struct { name string mock *pollMock diff --git a/plugins/inputs/ecs/stats.go b/plugins/inputs/ecs/stats.go index d2a8ee5d34cfd..13d9aa3bc5326 100644 --- a/plugins/inputs/ecs/stats.go +++ b/plugins/inputs/ecs/stats.go @@ -284,7 +284,6 @@ func blkstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags m } else { totalStatMap[field] = uintV } - } } diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 33755c5ce5a28..f8064e606e57f 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -277,7 +277,6 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { e.serverInfoMutex.Lock() e.serverInfo[s] = info e.serverInfoMutex.Unlock() - }(serv, acc) } wgC.Wait() @@ -640,7 +639,6 @@ func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now if e.IndicesLevel == "shards" { for shardNumber, shards := range index.Shards { for _, shard := range shards { - // Get Shard Stats flattened := jsonparser.JSONFlattener{} err := flattened.FullFlattenJSON("", shard, true, true) diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index adb16c953ed8c..26e2ab0ba0301 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -137,7 +137,6 @@ func removeCarriageReturns(b bytes.Buffer) bytes.Buffer { b = buf } return b - } func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) { diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index 6cb254eb5b8f4..ab67af0ab84cd 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -186,5 +186,4 @@ func runCounterProgram() { } fmt.Fprint(os.Stdout, string(b)) } - } diff --git a/plugins/inputs/fibaro/fibaro.go b/plugins/inputs/fibaro/fibaro.go index 62889cc8dd6f7..20b993576b664 100644 --- a/plugins/inputs/fibaro/fibaro.go +++ b/plugins/inputs/fibaro/fibaro.go @@ -121,7 +121,6 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error { // Gather fetches all required information to output metrics func (f *Fibaro) Gather(acc telegraf.Accumulator) error { - if f.client == nil { f.client = &http.Client{ Transport: &http.Transport{ diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index c2b572c12d52f..87c98075487e1 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -292,7 +292,6 @@ func (fc *FileCount) initGlobPaths(acc telegraf.Accumulator) { fc.globPaths = append(fc.globPaths, *glob) } } - } func NewFileCount() *FileCount { diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 39bbafb36de99..2136c348d1d6c 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -229,9 +229,7 @@ func getFakeFileSystem(basePath string) fakeFileSystem { basePath + "/subdir/nested2/qux": {name: "qux", filemode: uint32(fmask), modtime: mtime, size: int64(400)}, } - fs := fakeFileSystem{files: fileList} - return fs - + return fakeFileSystem{files: fileList} } func fileCountEquals(t *testing.T, fc FileCount, expectedCount int, expectedSize int) { diff --git a/plugins/inputs/filecount/filesystem_helpers.go b/plugins/inputs/filecount/filesystem_helpers.go index 2bd6c095142cf..f43bb4ad5f394 100644 --- a/plugins/inputs/filecount/filesystem_helpers.go +++ b/plugins/inputs/filecount/filesystem_helpers.go @@ -69,5 +69,4 @@ func (f fakeFileSystem) Stat(name string) (os.FileInfo, error) { return fakeInfo, nil } return nil, &os.PathError{Op: "Stat", Path: name, Err: errors.New("No such file or directory")} - } diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go index 62e7e2f814531..2203500726ba8 100644 --- a/plugins/inputs/filecount/filesystem_helpers_test.go +++ b/plugins/inputs/filecount/filesystem_helpers_test.go @@ -89,7 +89,5 @@ func getTestFileSystem() fakeFileSystem { "/testdata/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime}, } - fs := fakeFileSystem{files: fileList} - return fs - + return fakeFileSystem{files: fileList} } diff --git a/plugins/inputs/fireboard/fireboard.go b/plugins/inputs/fireboard/fireboard.go index a92930aae9598..c9a79396ee313 100644 --- a/plugins/inputs/fireboard/fireboard.go +++ b/plugins/inputs/fireboard/fireboard.go @@ -69,7 +69,6 @@ func (r *Fireboard) Description() string { // Init the things func (r *Fireboard) Init() error { - if len(r.AuthToken) == 0 { return fmt.Errorf("You must specify an authToken") } @@ -88,7 +87,6 @@ func (r *Fireboard) Init() error { // Gather Reads stats from all configured servers. func (r *Fireboard) Gather(acc telegraf.Accumulator) error { - // Perform the GET request to the fireboard servers req, err := http.NewRequest("GET", r.URL, nil) if err != nil { diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index 7d4a0cd5eecb4..1d23259fa736b 100644 --- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -81,14 +81,12 @@ func (h *Fluentd) SampleConfig() string { return sampleConfig } // Gather - Main code responsible for gathering, processing and creating metrics func (h *Fluentd) Gather(acc telegraf.Accumulator) error { - _, err := url.Parse(h.Endpoint) if err != nil { return fmt.Errorf("Invalid URL \"%s\"", h.Endpoint) } if h.client == nil { - tr := &http.Transport{ ResponseHeaderTimeout: time.Duration(3 * time.Second), } @@ -127,7 +125,6 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { // Go through all plugins one by one for _, p := range dataPoints { - skip := false // Check if this specific type was excluded in configuration @@ -149,7 +146,6 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { if p.BufferQueueLength != nil { tmpFields["buffer_queue_length"] = *p.BufferQueueLength - } if p.RetryCount != nil { tmpFields["retry_count"] = *p.RetryCount diff --git a/plugins/inputs/fluentd/fluentd_test.go b/plugins/inputs/fluentd/fluentd_test.go index c7699c3384906..795b6dbcf546d 100644 --- a/plugins/inputs/fluentd/fluentd_test.go +++ b/plugins/inputs/fluentd/fluentd_test.go @@ -111,14 +111,12 @@ var ( ) func Test_parse(t *testing.T) { - t.Log("Testing parser function") _, err := parse([]byte(sampleJSON)) if err != nil { t.Error(err) } - } func Test_Gather(t *testing.T) { @@ -159,5 +157,4 @@ func Test_Gather(t *testing.T) { assert.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"]) assert.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"]) assert.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"]) - } diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 5e99092f82927..5d4c80edd5538 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -403,7 +403,6 @@ func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string } else { tags[key] = val } - } } } diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 73cf9d3345dc1..a3fe09072abc8 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -79,7 +79,6 @@ func (h *haproxy) Gather(acc telegraf.Accumulator) error { endpoints := make([]string, 0, len(h.Servers)) for _, endpoint := range h.Servers { - if strings.HasPrefix(endpoint, "http") { endpoints = append(endpoints, endpoint) continue diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index f299c2ac66c4b..6d7301bbcf320 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -27,7 +27,6 @@ func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) { Unit: "C", }, }, nil - } func newMockFetcher() *mockFetcher { return &mockFetcher{} @@ -79,5 +78,4 @@ func TestFetch(t *testing.T) { for _, test := range tests { acc.AssertContainsTaggedFields(t, "hddtemp", test.fields, test.tags) } - } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 41ce35df504e4..b88e1eb8c0280 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -313,7 +313,6 @@ func (h *HTTPListenerV2) authenticateIfSet(handler http.HandlerFunc, res http.Re if !ok || subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.BasicUsername)) != 1 || subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.BasicPassword)) != 1 { - http.Error(res, "Unauthorized.", http.StatusUnauthorized) return } diff --git a/plugins/inputs/infiniband/infiniband_test.go b/plugins/inputs/infiniband/infiniband_test.go index 57e8ad4da85c9..7f747eb5fd89f 100644 --- a/plugins/inputs/infiniband/infiniband_test.go +++ b/plugins/inputs/infiniband/infiniband_test.go @@ -130,5 +130,4 @@ func TestInfiniband(t *testing.T) { addStats("m1x5_0", "1", sampleRdmastatsEntries, &acc) acc.AssertContainsTaggedFields(t, "infiniband", fields, tags) - } diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go index 8d87b38f83d65..de814a19806be 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -327,7 +327,6 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { } h.acc.AddMetric(m) - } if err != influx.EOF { h.Log.Debugf("Error parsing the request body: %v", err.Error()) diff --git a/plugins/inputs/intel_powerstat/rapl.go b/plugins/inputs/intel_powerstat/rapl.go index 17d66ff3aea4b..1e4b465fd7974 100644 --- a/plugins/inputs/intel_powerstat/rapl.go +++ b/plugins/inputs/intel_powerstat/rapl.go @@ -92,7 +92,6 @@ func (r *raplServiceImpl) getConstraintMaxPowerWatts(socketID string) (float64, socketMaxPower, _, err := r.fs.readFileToFloat64(socketMaxPowerFile) return convertMicroWattToWatt(socketMaxPower), err - } func (r *raplServiceImpl) prepareData() { @@ -176,8 +175,8 @@ func (r *raplServiceImpl) findDramFolder(raplFolders []string, socketID string) } func (r *raplServiceImpl) calculateData(socketID string, socketEnergyUjFile io.Reader, dramEnergyUjFile io.Reader, - socketMaxEnergyUjFile io.Reader, dramMaxEnergyUjFile io.Reader) error { - + socketMaxEnergyUjFile io.Reader, dramMaxEnergyUjFile io.Reader, +) error { newSocketEnergy, _, err := r.readEnergyInJoules(socketEnergyUjFile) if err != nil { return err diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index 3b56d76e7f972..ba47234a751fd 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -298,11 +298,9 @@ func (r *IntelRDT) processOutput(cmdReader io.ReadCloser, processesPIDsAssociati */ toOmit := pqosInitOutputLinesNumber - // omit first measurements which are zeroes - if len(r.parsedCores) != 0 { + if len(r.parsedCores) != 0 { // omit first measurements which are zeroes toOmit = toOmit + len(r.parsedCores) - // specify how many lines should pass before stopping - } else if len(processesPIDsAssociation) != 0 { + } else if len(processesPIDsAssociation) != 0 { // specify how many lines should pass before stopping toOmit = toOmit + len(processesPIDsAssociation) } for omitCounter := 0; omitCounter < toOmit; omitCounter++ { diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index 55a1da6b124e2..422d2ab38471e 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -378,7 +378,6 @@ OS RealTime Mod | 0x00 | ok } else { fmt.Fprint(os.Stdout, "command not found") os.Exit(1) - } os.Exit(0) } @@ -573,7 +572,6 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected } else { fmt.Fprint(os.Stdout, "command not found") os.Exit(1) - } os.Exit(0) } diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index 78820da55f6ad..a4b34baaa4f1c 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -197,7 +197,6 @@ func (j *Jenkins) initialize(client *http.Client) error { } func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { - tags := map[string]string{} if n.DisplayName == "" { return fmt.Errorf("error empty node name") @@ -249,7 +248,6 @@ func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { } func (j *Jenkins) gatherNodesData(acc telegraf.Accumulator) { - nodeResp, err := j.client.getAllNodes(context.Background()) if err != nil { acc.AddError(err) diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index ffac5d8305647..f877c700da77c 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -846,7 +846,6 @@ func TestGatherJobs(t *testing.T) { } } } - } }) } diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 5c72ba7133153..ed5922ddaa063 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -217,7 +217,6 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request } jolokiaURL = proxyURL - } else { serverURL, err := url.Parse("http://" + server.Host + ":" + server.Port + context) if err != nil { diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index 88f2ab6a19068..3f05274eb11a6 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -239,9 +239,7 @@ func TestHttpJsonThreeLevelMultiValue(t *testing.T) { // Test that the proper values are ignored or collected func TestHttp404(t *testing.T) { - - jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, - []Metric{UsedHeapMetric}) + jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, []Metric{UsedHeapMetric}) var acc testutil.Accumulator acc.SetDebug(true) @@ -254,9 +252,7 @@ func TestHttp404(t *testing.T) { // Test that the proper values are ignored or collected func TestHttpInvalidJson(t *testing.T) { - - jolokia := genJolokiaClientStub(invalidJSON, 200, Servers, - []Metric{UsedHeapMetric}) + jolokia := genJolokiaClientStub(invalidJSON, 200, Servers, []Metric{UsedHeapMetric}) var acc testutil.Accumulator acc.SetDebug(true) diff --git a/plugins/inputs/jolokia2/gatherer.go b/plugins/inputs/jolokia2/gatherer.go index f24918998248e..7ee8438dd18e6 100644 --- a/plugins/inputs/jolokia2/gatherer.go +++ b/plugins/inputs/jolokia2/gatherer.go @@ -195,7 +195,6 @@ func tagSetsMatch(a, b map[string]string) bool { func makeReadRequests(metrics []Metric) []ReadRequest { var requests []ReadRequest for _, metric := range metrics { - if len(metric.Paths) == 0 { requests = append(requests, ReadRequest{ Mbean: metric.Mbean, diff --git a/plugins/inputs/jolokia2/jolokia_agent.go b/plugins/inputs/jolokia2/jolokia_agent.go index 58b67ce5a1c9a..2489ac19ddc66 100644 --- a/plugins/inputs/jolokia2/jolokia_agent.go +++ b/plugins/inputs/jolokia2/jolokia_agent.go @@ -83,9 +83,8 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error { err := ja.gatherer.Gather(client, acc) if err != nil { - acc.AddError(fmt.Errorf("Unable to gather metrics for %s: %v", client.URL, err)) + acc.AddError(fmt.Errorf("unable to gather metrics for %s: %v", client.URL, err)) } - }(client) } diff --git a/plugins/inputs/jolokia2/point_builder.go b/plugins/inputs/jolokia2/point_builder.go index f5ae1d31410ec..c1b985155b4b8 100644 --- a/plugins/inputs/jolokia2/point_builder.go +++ b/plugins/inputs/jolokia2/point_builder.go @@ -40,7 +40,6 @@ func (pb *pointBuilder) Build(mbean string, value interface{}) []point { points := make([]point, 0) for mbean, value := range valueMap { - points = append(points, point{ Tags: pb.extractTags(mbean), Fields: pb.extractFields(mbean, value), @@ -99,13 +98,11 @@ func (pb *pointBuilder) extractFields(mbean string, value interface{}) map[strin // if there were no attributes requested, // then the keys are attributes pb.fillFields("", valueMap, fieldMap) - } else if len(pb.objectAttributes) == 1 { // if there was a single attribute requested, // then the keys are the attribute's properties fieldName := pb.formatFieldName(pb.objectAttributes[0], pb.objectPath) pb.fillFields(fieldName, valueMap, fieldMap) - } else { // if there were multiple attributes requested, // then the keys are the attribute names @@ -199,7 +196,6 @@ func (pb *pointBuilder) applySubstitutions(mbean string, fieldMap map[string]int properties := makePropertyMap(mbean) for i, subKey := range pb.substitutions[1:] { - symbol := fmt.Sprintf("$%d", i+1) substitution := properties[subKey] diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index acc56b187b3e4..e423588eed41f 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -272,7 +272,6 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int { m.sensorsConfig = append(m.sensorsConfig, sensorConfig{ measurementName: measurementName, pathList: pathlist, }) - } return len(m.sensorsConfig) diff --git a/plugins/inputs/kapacitor/kapacitor.go b/plugins/inputs/kapacitor/kapacitor.go index ecb99877d2846..ac037a183b667 100644 --- a/plugins/inputs/kapacitor/kapacitor.go +++ b/plugins/inputs/kapacitor/kapacitor.go @@ -216,7 +216,6 @@ func (k *Kapacitor) gatherURL( if s.Kapacitor != nil { for _, obj := range *s.Kapacitor { - // Strip out high-cardinality or duplicative tags excludeTags := []string{"host", "cluster_id", "server_id"} for _, key := range excludeTags { diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index cd682c779e7ed..484c819cf7794 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -35,7 +35,6 @@ func (k *Kernel) Description() string { func (k *Kernel) SampleConfig() string { return "" } func (k *Kernel) Gather(acc telegraf.Accumulator) error { - data, err := k.getProcStat() if err != nil { return err diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat.go b/plugins/inputs/kernel_vmstat/kernel_vmstat.go index 7ebb9ab25153b..7b0292937b1c0 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat.go @@ -35,7 +35,6 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error { dataFields := bytes.Fields(data) for i, field := range dataFields { - // dataFields is an array of {"stat1_name", "stat1_value", "stat2_name", // "stat2_value", ...} // We only want the even number index as that contain the stat name. diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 83523eb37bf27..1626f78a25e1d 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -183,7 +183,6 @@ func (k *Kibana) createHTTPClient() (*http.Client, error) { } func (k *Kibana) gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) error { - kibanaStatus := &kibanaStatus{} url := baseURL + statusPath @@ -229,9 +228,7 @@ func (k *Kibana) gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) er fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapUsedInBytes - } - acc.AddFields("kibana", fields, tags) return nil diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index 7845417e03173..2516d084c3285 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -89,7 +89,6 @@ func (k *Kubernetes) Description() string { } func (k *Kubernetes) Init() error { - // If neither are provided, use the default service account. if k.BearerToken == "" && k.BearerTokenString == "" { k.BearerToken = defaultServiceAccountPath diff --git a/plugins/inputs/kubernetes/kubernetes_test.go b/plugins/inputs/kubernetes/kubernetes_test.go index faf40be3e1000..eb6d285525eb3 100644 --- a/plugins/inputs/kubernetes/kubernetes_test.go +++ b/plugins/inputs/kubernetes/kubernetes_test.go @@ -21,7 +21,6 @@ func TestKubernetesStats(t *testing.T) { w.WriteHeader(http.StatusOK) fmt.Fprintln(w, responsePods) } - })) defer ts.Close() @@ -155,7 +154,6 @@ func TestKubernetesStats(t *testing.T) { "pod_name": "foopod", } acc.AssertContainsTaggedFields(t, "kubernetes_pod_network", fields, tags) - } var responsePods = ` diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go index b1adcd6e77e7c..6a833175fde9f 100644 --- a/plugins/inputs/lanz/lanz.go +++ b/plugins/inputs/lanz/lanz.go @@ -48,7 +48,6 @@ func (l *Lanz) Gather(acc telegraf.Accumulator) error { } func (l *Lanz) Start(acc telegraf.Accumulator) error { - if len(l.Servers) == 0 { l.Servers = append(l.Servers, "tcp://127.0.0.1:50001") } diff --git a/plugins/inputs/lanz/lanz_test.go b/plugins/inputs/lanz/lanz_test.go index 26e1f52920398..684bfc8902bb8 100644 --- a/plugins/inputs/lanz/lanz_test.go +++ b/plugins/inputs/lanz/lanz_test.go @@ -51,7 +51,6 @@ var testProtoBufGlobalBufferUsageRecord = &pb.LanzRecord{ } func TestLanzGeneratesMetrics(t *testing.T) { - var acc testutil.Accumulator l := NewLanz() @@ -133,5 +132,4 @@ func TestLanzGeneratesMetrics(t *testing.T) { acc.AssertContainsFields(t, "lanz_global_buffer_usage_record", gburVals1) acc.AssertContainsTaggedFields(t, "lanz_global_buffer_usage_record", gburVals1, gburTags1) - } diff --git a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go index adb111836c683..5aa75f07514e7 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go @@ -91,7 +91,6 @@ func GetHostProc() string { } func init() { - inputs.Add("linux_sysctl_fs", func() telegraf.Input { return &SysctlFS{ path: path.Join(GetHostProc(), "/sys/fs"), diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 4fbd2e90d921c..fc38b467e2505 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -271,7 +271,6 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) { var line *tail.Line for line = range tailer.Lines { - if line.Err != nil { l.Log.Errorf("Error tailing file %s, Error: %s", tailer.Filename, line.Err) @@ -321,7 +320,6 @@ func (l *LogParserPlugin) parser() { } else { l.Log.Errorf("Error parsing log line: %s", err.Error()) } - } } diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index e9218278f77b8..92b392d67c36d 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -270,8 +270,8 @@ func (logstash *Logstash) gatherPluginsStats( plugins []Plugin, pluginType string, tags map[string]string, - accumulator telegraf.Accumulator) error { - + accumulator telegraf.Accumulator, +) error { for _, plugin := range plugins { pluginTags := map[string]string{ "plugin_name": plugin.Name, @@ -295,9 +295,8 @@ func (logstash *Logstash) gatherPluginsStats( func (logstash *Logstash) gatherQueueStats( queue *PipelineQueue, tags map[string]string, - accumulator telegraf.Accumulator) error { - - var err error + accumulator telegraf.Accumulator, +) error { queueTags := map[string]string{ "queue_type": queue.Type, } @@ -311,7 +310,7 @@ func (logstash *Logstash) gatherQueueStats( if queue.Type != "memory" { flattener := jsonParser.JSONFlattener{} - err = flattener.FlattenJSON("", queue.Capacity) + err := flattener.FlattenJSON("", queue.Capacity) if err != nil { return err } diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go index e849fa57db100..b0d020b487003 100644 --- a/plugins/inputs/logstash/logstash_test.go +++ b/plugins/inputs/logstash/logstash_test.go @@ -549,7 +549,6 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { "queue_type": string("persisted"), }, ) - } func Test_Logstash5GatherJVMStats(test *testing.T) { @@ -618,7 +617,6 @@ func Test_Logstash5GatherJVMStats(test *testing.T) { "node_version": string("5.3.0"), }, ) - } func Test_Logstash6GatherJVMStats(test *testing.T) { @@ -687,5 +685,4 @@ func Test_Logstash6GatherJVMStats(test *testing.T) { "node_version": string("6.4.2"), }, ) - } diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 9614eb0597f38..e89c33b5a46e3 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -133,7 +133,6 @@ const mdtJobStatsContents = `job_stats: ` func TestLustre2GeneratesMetrics(t *testing.T) { - tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" ostName := "OST0001" @@ -206,7 +205,6 @@ func TestLustre2GeneratesMetrics(t *testing.T) { } func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { - tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" ostName := "OST0001" jobNames := []string{"cluster-testjob1", "testjob2"} diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go index ba711bf745080..c35a706600742 100644 --- a/plugins/inputs/mailchimp/mailchimp_test.go +++ b/plugins/inputs/mailchimp/mailchimp_test.go @@ -137,7 +137,6 @@ func TestMailChimpGatherReport(t *testing.T) { "industry_type": "Social Networks and Online Communities", } acc.AssertContainsTaggedFields(t, "mailchimp", fields, tags) - } func TestMailChimpGatherError(t *testing.T) { diff --git a/plugins/inputs/marklogic/marklogic.go b/plugins/inputs/marklogic/marklogic.go index b79908caab618..d2ef139bfc7a3 100644 --- a/plugins/inputs/marklogic/marklogic.go +++ b/plugins/inputs/marklogic/marklogic.go @@ -108,7 +108,6 @@ var sampleConfig = ` // Init parse all source URLs and place on the Marklogic struct func (c *Marklogic) Init() error { - if len(c.URL) == 0 { c.URL = "http://localhost:8002/" } diff --git a/plugins/inputs/marklogic/marklogic_test.go b/plugins/inputs/marklogic/marklogic_test.go index 34e4bbd6bb7e9..e6057f6e088af 100644 --- a/plugins/inputs/marklogic/marklogic_test.go +++ b/plugins/inputs/marklogic/marklogic_test.go @@ -76,7 +76,6 @@ func TestMarklogic(t *testing.T) { } acc.AssertContainsTaggedFields(t, "marklogic", expectFields, expectTags) - } var response = ` diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index cdc5eada1792f..f06052c07f469 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -278,7 +278,6 @@ func generateMetrics() { for _, k := range slaveMetricNames { slaveMetrics[k] = rand.Float64() } - // slaveTaskMetrics = map[string]interface{}{ // "executor_id": fmt.Sprintf("task_name.%s", randUUID()), // "executor_name": "Some task description", diff --git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go index 3f3f54c17d1cb..bb829f6903581 100644 --- a/plugins/inputs/minecraft/client.go +++ b/plugins/inputs/minecraft/client.go @@ -157,7 +157,6 @@ func parsePlayers(input string) ([]string, error) { continue } players = append(players, name) - } return players, nil } diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index dbd952b524a85..7345aef68bebc 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -483,7 +483,6 @@ func (m *Modbus) getFields() error { register.Fields[i].value = convertDataType(register.Fields[i], valuesT) } - } } diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index ee96d5f8b3ad1..6210b782e7884 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -1131,8 +1131,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.NodeType = "ARB" } else { returnVal.NodeType = "UNK" - } - // END code modification + } // END code modification } else if returnVal.IsMongos { returnVal.NodeType = "RTR" } diff --git a/plugins/inputs/mongodb/mongostat_test.go b/plugins/inputs/mongodb/mongostat_test.go index 5506602a9e692..9f6ef04892ac9 100644 --- a/plugins/inputs/mongodb/mongostat_test.go +++ b/plugins/inputs/mongodb/mongostat_test.go @@ -9,7 +9,6 @@ import ( ) func TestLatencyStats(t *testing.T) { - sl := NewStatLine( MongoStatus{ ServerStatus: &ServerStatus{ @@ -65,7 +64,6 @@ func TestLatencyStats(t *testing.T) { } func TestLatencyStatsDiffZero(t *testing.T) { - sl := NewStatLine( MongoStatus{ ServerStatus: &ServerStatus{ @@ -135,7 +133,6 @@ func TestLatencyStatsDiffZero(t *testing.T) { } func TestLatencyStatsDiff(t *testing.T) { - sl := NewStatLine( MongoStatus{ ServerStatus: &ServerStatus{ diff --git a/plugins/inputs/monit/monit.go b/plugins/inputs/monit/monit.go index 606bf0d4cb9ec..a25efad58e723 100644 --- a/plugins/inputs/monit/monit.go +++ b/plugins/inputs/monit/monit.go @@ -229,7 +229,6 @@ func (m *Monit) Init() error { } func (m *Monit) Gather(acc telegraf.Accumulator) error { - req, err := http.NewRequest("GET", fmt.Sprintf("%s/_status?format=xml", m.Address), nil) if err != nil { return err @@ -245,7 +244,6 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error { defer resp.Body.Close() if resp.StatusCode == 200 { - var status Status decoder := xml.NewDecoder(resp.Body) decoder.CharsetReader = charset.NewReaderLabel @@ -345,10 +343,7 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error { } } } else { - return fmt.Errorf("received status code %d (%s), expected 200", - resp.StatusCode, - http.StatusText(resp.StatusCode)) - + return fmt.Errorf("received status code %d (%s), expected 200", resp.StatusCode, http.StatusText(resp.StatusCode)) } return nil } diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index d17db8d2acc6d..1a2970f1e2338 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -555,7 +555,6 @@ func checkAuth(r *http.Request, username, password string) bool { } func TestAllowHosts(t *testing.T) { - r := &Monit{ Address: "http://127.0.0.1:2812", Username: "test", @@ -591,9 +590,7 @@ func TestConnection(t *testing.T) { } func TestInvalidUsernameOrPassword(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "testing", "testing") { http.Error(w, "Unauthorized.", 401) return @@ -625,9 +622,7 @@ func TestInvalidUsernameOrPassword(t *testing.T) { } func TestNoUsernameOrPasswordConfiguration(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "testing", "testing") { http.Error(w, "Unauthorized.", 401) return @@ -657,7 +652,6 @@ func TestNoUsernameOrPasswordConfiguration(t *testing.T) { } func TestInvalidXMLAndInvalidTypes(t *testing.T) { - tests := []struct { name string filename string diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 8e09a357f47e6..30b7b68f21038 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -1439,7 +1439,6 @@ func (m *Mysql) gatherPerfSummaryPerAccountPerEvent(db *sql.DB, serv string, acc "sum_no_good_index_used": sumNoGoodIndexUsed, } acc.AddFields("mysql_perf_acc_event", sqlLWFields, sqlLWTags) - } return nil @@ -1662,8 +1661,8 @@ func (m *Mysql) gatherPerfFileEventsStatuses(db *sql.DB, serv string, acc telegr fields["file_events_seconds_total"] = sumTimerWrite / picoSeconds fields["file_events_bytes_totals"] = sumNumBytesWrite acc.AddFields("mysql_perf_schema", fields, writeTags) - } + return nil } diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index cefa5fad14662..6cca64952637f 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -51,7 +51,6 @@ func TestGather(t *testing.T) { t.Errorf("Number of servers mismatch. got=%d, want=%d", len(acc.Errors), len(test.servers)) } - }) } } diff --git a/plugins/inputs/nfsclient/nfsclient.go b/plugins/inputs/nfsclient/nfsclient.go index 37fa64fef498e..e4005b57685a9 100644 --- a/plugins/inputs/nfsclient/nfsclient.go +++ b/plugins/inputs/nfsclient/nfsclient.go @@ -239,7 +239,6 @@ func (n *NFSClient) parseStat(mountpoint string, export string, version string, acc.AddFields("nfs_ops", fields, tags) } } - } return nil @@ -304,7 +303,6 @@ func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator } func (n *NFSClient) getMountStatsPath() string { - path := "/proc/self/mountstats" if os.Getenv("MOUNT_PROC") != "" { path = os.Getenv("MOUNT_PROC") @@ -314,7 +312,6 @@ func (n *NFSClient) getMountStatsPath() string { } func (n *NFSClient) Gather(acc telegraf.Accumulator) error { - file, err := os.Open(n.mountstatsPath) if err != nil { n.Log.Errorf("Failed opening the [%s] file: %s ", file, err) @@ -334,7 +331,6 @@ func (n *NFSClient) Gather(acc telegraf.Accumulator) error { } func (n *NFSClient) Init() error { - var nfs3Fields = []string{ "NULL", "GETATTR", diff --git a/plugins/inputs/nfsclient/nfsclient_test.go b/plugins/inputs/nfsclient/nfsclient_test.go index f4f008fbce0ad..72813cc10be4e 100644 --- a/plugins/inputs/nfsclient/nfsclient_test.go +++ b/plugins/inputs/nfsclient/nfsclient_test.go @@ -9,7 +9,6 @@ import ( ) func getMountStatsPath() string { - path := "./testdata/mountstats" if os.Getenv("MOUNT_PROC") != "" { path = os.Getenv("MOUNT_PROC") diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go index 80811cc8d4d07..d41c03c801a96 100644 --- a/plugins/inputs/nginx_plus/nginx_plus.go +++ b/plugins/inputs/nginx_plus/nginx_plus.go @@ -318,7 +318,6 @@ func (s *Status) gatherProcessesMetrics(tags map[string]string, acc telegraf.Acc }, tags, ) - } func (s *Status) gatherConnectionsMetrics(tags map[string]string, acc telegraf.Accumulator) { diff --git a/plugins/inputs/nginx_plus/nginx_plus_test.go b/plugins/inputs/nginx_plus/nginx_plus_test.go index d7531de975393..caaea7dcb59d4 100644 --- a/plugins/inputs/nginx_plus/nginx_plus_test.go +++ b/plugins/inputs/nginx_plus/nginx_plus_test.go @@ -409,5 +409,4 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { "upstream_address": "1.2.3.123:80", "id": "0", }) - } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go index be155f073400f..5e05e9f0d4ac9 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -1159,7 +1159,6 @@ func TestGatherStreamUpstreams(t *testing.T) { "upstream_address": "10.0.0.1:12348", "id": "1", }) - } func TestGatherStreamServerZonesMetrics(t *testing.T) { diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index 34aff538cf983..da2f7b08f989c 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -123,7 +123,6 @@ func (check *NginxUpstreamCheck) createHTTPClient() (*http.Client, error) { // gatherJSONData query the data source and parse the response JSON func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) error { - var method string if check.Method != "" { method = check.Method @@ -187,7 +186,6 @@ func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error } return nil - } func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegraf.Accumulator) error { @@ -199,7 +197,6 @@ func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegr } for _, server := range checkData.Servers.Server { - tags := map[string]string{ "upstream": server.Upstream, "type": server.Type, diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go index 1b70770d01075..df6b08b09fb12 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go @@ -116,7 +116,6 @@ func TestNginxUpstreamCheckRequest(test *testing.T) { require.Equal(test, request.Header.Get("X-Test"), "test-value") require.Equal(test, request.Header.Get("Authorization"), "Basic dXNlcjpwYXNzd29yZA==") require.Equal(test, request.Host, "status.local") - })) defer testServer.Close() diff --git a/plugins/inputs/nsd/nsd_test.go b/plugins/inputs/nsd/nsd_test.go index ee527f7b7f0b2..fbe66ca9e3bdb 100644 --- a/plugins/inputs/nsd/nsd_test.go +++ b/plugins/inputs/nsd/nsd_test.go @@ -35,7 +35,6 @@ func TestParseFullOutput(t *testing.T) { acc.AssertContainsFields(t, "nsd", parsedFullOutput) acc.AssertContainsFields(t, "nsd_servers", parsedFullOutputServerAsTag) - } var parsedFullOutputServerAsTag = map[string]interface{}{ diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go index e07b125ccdb8f..bcc1fdf321129 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go @@ -68,7 +68,6 @@ func TestReadsMetricsFromNSQ(t *testing.T) { } else { t.Errorf("No points found in accumulator, expected 1") } - } // Waits for the metric that was sent to the kafka broker to arrive at the kafka diff --git a/plugins/inputs/opcua/opcua_util.go b/plugins/inputs/opcua/opcua_util.go index c0eac2483eb22..a115f8558aad0 100644 --- a/plugins/inputs/opcua/opcua_util.go +++ b/plugins/inputs/opcua/opcua_util.go @@ -32,7 +32,6 @@ func newTempDir() (string, error) { } func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.Duration) (string, string) { - dir, _ := newTempDir() if len(host) == 0 { @@ -310,7 +309,6 @@ func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua log.Printf("unknown auth-mode, defaulting to Anonymous") authMode = ua.UserTokenTypeAnonymous authOption = opcua.AuthAnonymous() - } return authMode, authOption diff --git a/plugins/inputs/openntpd/openntpd.go b/plugins/inputs/openntpd/openntpd.go index e7723b480a581..c3fc3e92e74fa 100644 --- a/plugins/inputs/openntpd/openntpd.go +++ b/plugins/inputs/openntpd/openntpd.go @@ -156,16 +156,13 @@ func (n *Openntpd) Gather(acc telegraf.Accumulator) error { } if key == "next" || key == "poll" { - m, err := strconv.ParseInt(strings.TrimSuffix(fields[index], "s"), 10, 64) if err != nil { acc.AddError(fmt.Errorf("integer value expected, got: %s", fields[index])) continue } mFields[key] = m - } else { - m, err := strconv.ParseInt(fields[index], 10, 64) if err != nil { acc.AddError(fmt.Errorf("integer value expected, got: %s", fields[index])) @@ -185,23 +182,19 @@ func (n *Openntpd) Gather(acc telegraf.Accumulator) error { } if key == "offset" || key == "delay" || key == "jitter" { - m, err := strconv.ParseFloat(strings.TrimSuffix(fields[index], "ms"), 64) if err != nil { acc.AddError(fmt.Errorf("float value expected, got: %s", fields[index])) continue } mFields[key] = m - } else { - m, err := strconv.ParseFloat(fields[index], 64) if err != nil { acc.AddError(fmt.Errorf("float value expected, got: %s", fields[index])) continue } mFields[key] = m - } } acc.AddFields("openntpd", mFields, tags) diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index 11660a84080db..cea266cc81991 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -92,7 +92,6 @@ func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { fields := make(map[string]interface{}) scanner := bufio.NewScanner(out) for scanner.Scan() { - cols := strings.Split(scanner.Text(), "=") // Check split correctness diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index bda43438dc5a2..688f97782b027 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -124,7 +124,6 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { gatherWeather(acc, status) }() } - } } diff --git a/plugins/inputs/pf/pf.go b/plugins/inputs/pf/pf.go index 035c44fbe1404..429169d543ab5 100644 --- a/plugins/inputs/pf/pf.go +++ b/plugins/inputs/pf/pf.go @@ -164,7 +164,6 @@ func parseCounterTable(lines []string, fields map[string]interface{}) error { } func storeFieldValues(lines []string, regex *regexp.Regexp, fields map[string]interface{}, entryTable []*Entry) error { - for _, v := range lines { entries := regex.FindStringSubmatch(v) if entries != nil { diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 645782289e008..b077f7955b037 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -328,7 +328,6 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testi err = acc.GatherError(r.Gather) require.Error(t, err) assert.Equal(t, `socket doesn't exist "/tmp/invalid.sock"`, err.Error()) - } const outputSample = ` diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 94705484555ab..6249677eab6e2 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -202,7 +202,6 @@ func (p *Ping) nativePing(destination string) (*pingStats, error) { } func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { - tags := map[string]string{"url": destination} fields := map[string]interface{}{} diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index daebeb1f55635..e42f4b97a81f4 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -487,7 +487,6 @@ func TestPingGatherNative(t *testing.T) { assert.True(t, acc.HasField("ping", "maximum_response_ms")) assert.True(t, acc.HasField("ping", "standard_deviation_ms")) } - } func TestNoPacketsSent(t *testing.T) { diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index 0fe3e31058bd0..19be4a7dfb825 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -51,7 +51,6 @@ var intOverflowMetrics = "corrupt-packets=18446744073709550195,deferred-cache-in "signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167," func (s statServer) serverSocket(l net.Listener) { - for { conn, err := l.Accept() if err != nil { diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index 799f0183854d1..d5d8b8b36fe70 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -54,7 +54,6 @@ func (pg *NativeFinder) PidFile(path string) ([]PID, error) { } pids = append(pids, PID(pid)) return pids, nil - } //FullPattern matches on the command line when the process was executed diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index a27ea2c938d69..62de739b212bd 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -111,7 +111,6 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { p.PidFinder = "pgrep" p.createPIDFinder = defaultPIDFinder } - } if p.createProcess == nil { p.createProcess = defaultProcess diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 2ed08dd3fca7b..bb599bf38a0c3 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -107,7 +107,6 @@ func (p *Prometheus) start(ctx context.Context) error { // pod, causing errors in the logs. This is only true if the pod going offline is not // directed to do so by K8s. func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error { - selectors := podSelector(p) pod := &corev1.Pod{} @@ -189,7 +188,6 @@ func (p *Prometheus) cAdvisor(ctx context.Context, client *k8s.Client) error { } func updateCadvisorPodList(ctx context.Context, p *Prometheus, client *k8s.Client, req *http.Request) error { - resp, err := client.Client.Do(req) if err != nil { return fmt.Errorf("Error when making request for pod list: %w", err) @@ -224,7 +222,6 @@ func updateCadvisorPodList(ctx context.Context, p *Prometheus, client *k8s.Clien podHasMatchingFieldSelector(pod, p.podFieldSelector) { registerPod(pod, p) } - } p.lock.Unlock() @@ -316,7 +313,6 @@ func podSelector(p *Prometheus) []k8s.Option { } return options - } func registerPod(pod *corev1.Pod, p *Prometheus) { diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 5a6860191f589..7e6e62409e34d 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -100,7 +100,6 @@ func TestDeletePods(t *testing.T) { } func TestPodSelector(t *testing.T) { - cases := []struct { expected []k8s.Option labelselector string diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index c4b3cb3406f15..d62602dc169c1 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -69,7 +69,6 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { fields = makeBuckets(m) fields["count"] = float64(m.GetHistogram().GetSampleCount()) fields["sum"] = float64(m.GetHistogram().GetSampleSum()) - } else { // standard metric fields = getNameAndValue(m) diff --git a/plugins/inputs/prometheus/parser_test.go b/plugins/inputs/prometheus/parser_test.go index 7b2bfeca2e128..a80cdbd5a48a3 100644 --- a/plugins/inputs/prometheus/parser_test.go +++ b/plugins/inputs/prometheus/parser_test.go @@ -163,5 +163,4 @@ func TestParseValidPrometheus(t *testing.T) { assert.Equal(t, map[string]string{"verb": "POST", "resource": "bindings"}, metrics[0].Tags()) - } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index f94fcf1043bfa..cdf3bc84bd7dd 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -163,7 +163,6 @@ func (p *Prometheus) Init() error { // Config proccessing for node scrape scope for monitor_kubernetes_pods p.isNodeScrapeScope = strings.EqualFold(p.PodScrapeScope, "node") if p.isNodeScrapeScope { - // Need node IP to make cAdvisor call for pod list. Check if set in config and valid IP address if p.NodeIP == "" || net.ParseIP(p.NodeIP) == nil { p.Log.Infof("The config node_ip is empty or invalid. Using NODE_IP env var as default.") diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index a727af49cab34..230934d0e5e67 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -149,7 +149,6 @@ func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum")) assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count")) assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") - } func TestSummaryMayContainNaN(t *testing.T) { diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index 1d0e30aa88ed5..741de4a0dc013 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -84,7 +84,6 @@ func (pa *PuppetAgent) Description() string { // Gather reads stats from all configured servers accumulates stats func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { - if len(pa.Location) == 0 { pa.Location = "/var/lib/puppet/state/last_run_summary.yaml" } diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index fbf9f929fd880..5973390e94a82 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -147,7 +147,6 @@ func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { "ip": listener[0], "port": listener[1], } - } else { tags = map[string]string{ "socket": listenName, diff --git a/plugins/inputs/redfish/redfish_test.go b/plugins/inputs/redfish/redfish_test.go index c31f650136cb5..568db00092e2e 100644 --- a/plugins/inputs/redfish/redfish_test.go +++ b/plugins/inputs/redfish/redfish_test.go @@ -15,9 +15,7 @@ import ( ) func TestDellApis(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { http.Error(w, "Unauthorized.", 401) return @@ -502,9 +500,7 @@ func TestDellApis(t *testing.T) { } func TestHPApis(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { http.Error(w, "Unauthorized.", 401) return @@ -672,9 +668,7 @@ func checkAuth(r *http.Request, username, password string) bool { } func TestInvalidUsernameorPassword(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "testing", "testing") { http.Error(w, "Unauthorized.", 401) return @@ -704,9 +698,7 @@ func TestInvalidUsernameorPassword(t *testing.T) { require.EqualError(t, err, "received status code 401 (Unauthorized) for address http://"+u.Host+", expected 200") } func TestNoUsernameorPasswordConfiguration(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "testing", "testing") { http.Error(w, "Unauthorized.", 401) return @@ -732,7 +724,6 @@ func TestNoUsernameorPasswordConfiguration(t *testing.T) { } func TestInvalidDellJSON(t *testing.T) { - tests := []struct { name string thermalfilename string @@ -771,7 +762,6 @@ func TestInvalidDellJSON(t *testing.T) { } for _, tt := range tests { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { http.Error(w, "Unauthorized.", 401) return @@ -809,7 +799,6 @@ func TestInvalidDellJSON(t *testing.T) { } func TestInvalidHPJSON(t *testing.T) { - tests := []struct { name string thermalfilename string @@ -842,7 +831,6 @@ func TestInvalidHPJSON(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { http.Error(w, "Unauthorized.", 401) return diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index 662e74287db51..de872e948722c 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -223,9 +223,7 @@ func (rsl *riemannListener) read(conn net.Conn) { rsl.AddMetric(singleMetric) } riemannReturnResponse(conn) - } - } func riemannReturnResponse(conn net.Conn) { @@ -358,7 +356,6 @@ func (rsl *RiemannSocketListener) Start(acc telegraf.Accumulator) error { go func() { defer rsl.wg.Done() rsl.listen(ctx) - }() default: return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, rsl.ServiceAddress) @@ -380,7 +377,6 @@ func processOsSignals(cancelFunc context.CancelFunc) { return } } - } func (rsl *RiemannSocketListener) Stop() { diff --git a/plugins/inputs/riemann_listener/riemann_listener_test.go b/plugins/inputs/riemann_listener/riemann_listener_test.go index f1ce824c6a731..6948354a85b8d 100644 --- a/plugins/inputs/riemann_listener/riemann_listener_test.go +++ b/plugins/inputs/riemann_listener/riemann_listener_test.go @@ -40,7 +40,6 @@ func testStats(t *testing.T, sl *RiemannSocketListener) { Service: "hello", }) assert.Equal(t, result.GetOk(), true) - } func testMissingService(t *testing.T, sl *RiemannSocketListener) { c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second) @@ -51,5 +50,4 @@ func testMissingService(t *testing.T, sl *RiemannSocketListener) { defer c.Close() result, err := riemanngo.SendEvent(c, &riemanngo.Event{}) assert.Equal(t, result.GetOk(), false) - } diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index 1df88466be2e9..01fd2a45af6ee 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -41,7 +41,6 @@ func (*Sensors) SampleConfig() string { ## Timeout is the maximum amount of time that the sensors command can run. # timeout = "5s" ` - } func (s *Sensors) Gather(acc telegraf.Accumulator) error { diff --git a/plugins/inputs/sensors/sensors_test.go b/plugins/inputs/sensors/sensors_test.go index 2a24fa6f9212f..75fd3e46825ed 100644 --- a/plugins/inputs/sensors/sensors_test.go +++ b/plugins/inputs/sensors/sensors_test.go @@ -377,7 +377,6 @@ Vcore Voltage: } else { fmt.Fprint(os.Stdout, "command not found") os.Exit(1) - } os.Exit(0) } diff --git a/plugins/inputs/sflow/sflow.go b/plugins/inputs/sflow/sflow.go index 2e3fbc0cf73f5..f4283cabec095 100644 --- a/plugins/inputs/sflow/sflow.go +++ b/plugins/inputs/sflow/sflow.go @@ -131,7 +131,6 @@ func (s *SFlow) read(acc telegraf.Accumulator, conn net.PacketConn) { } func (s *SFlow) process(acc telegraf.Accumulator, buf []byte) { - if err := s.decoder.Decode(bytes.NewBuffer(buf)); err != nil { acc.AddError(fmt.Errorf("unable to parse incoming packet: %s", err)) } diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index df23eeeb7300f..35d00629acc4c 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -674,7 +674,6 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { split := strings.Split(conv, ":") if split[0] == "hextoint" && len(split) == 3 { - endian := split[1] bit := split[2] diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index e14d129e84f1b..a887d53897461 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -206,7 +206,6 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { AuthenticationPassphrase: s.AuthPassword, AuthenticationProtocol: authenticationProtocol, } - } // wrap the handler, used in unit tests diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index b5f8da27aa7b3..a8c3d01c78bb5 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -1318,5 +1318,4 @@ func TestReceiveTrap(t *testing.T) { testutil.SortMetrics()) }) } - } diff --git a/plugins/inputs/solr/solr.go b/plugins/inputs/solr/solr.go index 78652c6aab618..9a850bbdb0362 100644 --- a/plugins/inputs/solr/solr.go +++ b/plugins/inputs/solr/solr.go @@ -285,7 +285,6 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa "handler": name}, time, ) - } return nil } diff --git a/plugins/inputs/solr/solr_test.go b/plugins/inputs/solr/solr_test.go index 270816909c37d..f4451ccc4c2f3 100644 --- a/plugins/inputs/solr/solr_test.go +++ b/plugins/inputs/solr/solr_test.go @@ -104,7 +104,6 @@ func TestNoCoreDataHandling(t *testing.T) { acc.AssertDoesNotContainMeasurement(t, "solr_queryhandler") acc.AssertDoesNotContainMeasurement(t, "solr_updatehandler") acc.AssertDoesNotContainMeasurement(t, "solr_handler") - } func createMockServer() *httptest.Server { diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index b271c08d69519..f462ebbf876bc 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -43,7 +43,6 @@ func TestSqlServer_QueriesInclusionExclusion(t *testing.T) { } func TestSqlServer_ParseMetrics(t *testing.T) { - var acc testutil.Accumulator queries := make(MapQuery) @@ -63,7 +62,6 @@ func TestSqlServer_ParseMetrics(t *testing.T) { var fields = make(map[string]interface{}) for _, query := range queries { - mock = strings.Split(query.Script, "\n") idx := 0 @@ -78,7 +76,6 @@ func TestSqlServer_ParseMetrics(t *testing.T) { tags[headers[2]] = row[2] // tag 'type' if query.ResultByRow { - // set value by converting to float64 value, err := strconv.ParseFloat(row[3], 64) // require @@ -90,11 +87,9 @@ func TestSqlServer_ParseMetrics(t *testing.T) { tags, time.Now()) // assert acc.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"value": value}, tags) - } else { // set fields for i := 3; i < len(row); i++ { - // set value by converting to float64 value, err := strconv.ParseFloat(row[i], 64) // require @@ -232,7 +227,6 @@ func TestSqlServer_HealthMetric(t *testing.T) { } func TestSqlServer_MultipleInit(t *testing.T) { - s := &SQLServer{} s2 := &SQLServer{ ExcludeQuery: []string{"DatabaseSize"}, diff --git a/plugins/inputs/stackdriver/stackdriver_test.go b/plugins/inputs/stackdriver/stackdriver_test.go index 8010ad4817924..ee1f7ac9b01eb 100644 --- a/plugins/inputs/stackdriver/stackdriver_test.go +++ b/plugins/inputs/stackdriver/stackdriver_test.go @@ -803,7 +803,6 @@ func TestGatherAlign(t *testing.T) { } testutil.RequireMetricsEqual(t, tt.expected, actual) - }) } } diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index 3308f28b5715f..7259e658da5a9 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -202,7 +202,6 @@ func TestSuricataTooLongLine(t *testing.T) { c.Close() acc.WaitError(1) - } func TestSuricataEmptyJSON(t *testing.T) { @@ -224,7 +223,6 @@ func TestSuricataEmptyJSON(t *testing.T) { c, err := net.Dial("unix", tmpfn) if err != nil { log.Println(err) - } c.Write([]byte("\n")) c.Close() diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 9f530024b52d8..528f4ec43d0e5 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -273,7 +273,6 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, ts time.Time) e tags[k] = v } } - } } @@ -299,7 +298,6 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, ts time.Time) e } acc.AddFields(measurement, fields, tags, ts) } - } if s.Group { for _, v := range m { diff --git a/plugins/inputs/temp/temp_test.go b/plugins/inputs/temp/temp_test.go index 080ff66ac1848..9ced8ac14a2ef 100644 --- a/plugins/inputs/temp/temp_test.go +++ b/plugins/inputs/temp/temp_test.go @@ -34,5 +34,4 @@ func TestTemperature(t *testing.T) { "sensor": "coretemp_sensor1_crit", } acc.AssertContainsTaggedFields(t, "temp", expectedFields, expectedTags) - } diff --git a/plugins/inputs/trig/trig_test.go b/plugins/inputs/trig/trig_test.go index 1471edbeaec2d..27bee81dde2e1 100644 --- a/plugins/inputs/trig/trig_test.go +++ b/plugins/inputs/trig/trig_test.go @@ -13,7 +13,6 @@ func TestTrig(t *testing.T) { } for i := 0.0; i < 10.0; i++ { - var acc testutil.Accumulator sine := math.Sin((i*math.Pi)/5.0) * s.Amplitude diff --git a/plugins/inputs/unbound/unbound.go b/plugins/inputs/unbound/unbound.go index bb4ecde5860dd..9b04bbf27f888 100644 --- a/plugins/inputs/unbound/unbound.go +++ b/plugins/inputs/unbound/unbound.go @@ -125,7 +125,6 @@ func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Serv // // All the dots in stat name will replaced by underscores. Histogram statistics will not be collected. func (s *Unbound) Gather(acc telegraf.Accumulator) error { - // Always exclude histogram statistics statExcluded := []string{"histogram.*"} filterExcluded, err := filter.Compile(statExcluded) @@ -144,7 +143,6 @@ func (s *Unbound) Gather(acc telegraf.Accumulator) error { scanner := bufio.NewScanner(out) for scanner.Scan() { - cols := strings.Split(scanner.Text(), "=") // Check split correctness @@ -191,7 +189,6 @@ func (s *Unbound) Gather(acc telegraf.Accumulator) error { field := strings.Replace(stat, ".", "_", -1) fields[field] = fieldValue } - } acc.AddFields("unbound", fields, nil) diff --git a/plugins/inputs/uwsgi/uwsgi.go b/plugins/inputs/uwsgi/uwsgi.go index b13a7b3e6c5d3..81dd3c350cf3d 100644 --- a/plugins/inputs/uwsgi/uwsgi.go +++ b/plugins/inputs/uwsgi/uwsgi.go @@ -210,7 +210,6 @@ func (u *Uwsgi) gatherCores(acc telegraf.Accumulator, s *StatsServer) { } acc.AddFields("uwsgi_cores", fields, tags) } - } } diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 7da90949e7445..0eb1b0ab935dc 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -827,7 +827,6 @@ func (e *Endpoint) Close() { // Collect runs a round of data collections as specified in the configuration. func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error { - // If we never managed to do a discovery, collection will be a no-op. Therefore, // we need to check that a connection is available, or the collection will // silently fail. @@ -896,7 +895,6 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim for _, object := range res.objects { timeBuckets := make(map[int64]*types.PerfQuerySpec, 0) for metricIdx, metric := range res.metrics { - // Determine time of last successful collection metricName := e.getMetricNameForID(metric.CounterId) if metricName == "" { diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 600780a57bee2..300261358270d 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -315,7 +315,6 @@ func (v *VSphere) Gather(acc telegraf.Accumulator) error { defer wg.Done() err := endpoint.Collect(context.Background(), acc) if err == context.Canceled { - // No need to signal errors if we were merely canceled. err = nil } diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index 20e26d293bece..60e7e1c8dbf27 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -471,7 +471,6 @@ func testCollection(t *testing.T, excludeClusters bool) { v.Username = username v.Password = password } else { - // Don't run test on 32-bit machines due to bug in simulator. // https://github.com/vmware/govmomi/issues/1330 var i int diff --git a/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go index 42453c1309d93..7f11e31e79a11 100644 --- a/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go +++ b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go @@ -41,7 +41,6 @@ func (pt *PapertrailWebhook) eventHandler(w http.ResponseWriter, r *http.Request } if payload.Events != nil { - // Handle event-based payload for _, e := range payload.Events { // Warning: Duplicate event timestamps will overwrite each other @@ -54,9 +53,7 @@ func (pt *PapertrailWebhook) eventHandler(w http.ResponseWriter, r *http.Request } pt.acc.AddFields("papertrail", fields, tags, e.ReceivedAt) } - } else if payload.Counts != nil { - // Handle count-based payload for _, c := range payload.Counts { for ts, count := range *c.TimeSeries { diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go index 5c39903dbce7f..d54951bca5956 100644 --- a/plugins/inputs/webhooks/webhooks.go +++ b/plugins/inputs/webhooks/webhooks.go @@ -112,7 +112,6 @@ func (wb *Webhooks) Start(acc telegraf.Accumulator) error { ln, err := net.Listen("tcp", fmt.Sprintf("%s", wb.ServiceAddress)) if err != nil { return fmt.Errorf("error starting server: %v", err) - } go func() { diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 0a0d0575adadc..35e41018d82a4 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -305,7 +305,6 @@ func TestGatherChain(t *testing.T) { } }) } - } func TestStrings(t *testing.T) { diff --git a/plugins/outputs/application_insights/application_insights.go b/plugins/outputs/application_insights/application_insights.go index 5e41d629a7e2f..6bcb924865780 100644 --- a/plugins/outputs/application_insights/application_insights.go +++ b/plugins/outputs/application_insights/application_insights.go @@ -222,8 +222,8 @@ func (a *ApplicationInsights) addContextTags(metric telegraf.Metric, telemetry a func getFloat64TelemetryPropertyValue( candidateFields []string, metric telegraf.Metric, - usedFields *[]string) (float64, error) { - + usedFields *[]string, +) (float64, error) { for _, fieldName := range candidateFields { fieldValue, found := metric.GetField(fieldName) if !found { @@ -248,8 +248,8 @@ func getFloat64TelemetryPropertyValue( func getIntTelemetryPropertyValue( candidateFields []string, metric telegraf.Metric, - usedFields *[]string) (int, error) { - + usedFields *[]string, +) (int, error) { for _, fieldName := range candidateFields { fieldValue, found := metric.GetField(fieldName) if !found { diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index 4553444244dae..ceaad4a4e9472 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -397,7 +397,6 @@ func verifyAggregateTelemetry( countField string, telemetry *appinsights.AggregateMetricTelemetry, ) { - verifyAggregateField := func(fieldName string, telemetryValue float64) { metricRawFieldValue, found := metric.Fields()[fieldName] if !found { @@ -426,7 +425,6 @@ func verifySimpleTelemetry( expectedTelemetryName string, telemetry *appinsights.MetricTelemetry, ) { - assert.Equal(expectedTelemetryName, telemetry.Name, "Telemetry name is not what was expected") assert.EqualValues(metric.Fields()[valueField], telemetry.Value, "Telemetry value does not match metric value field") assert.Equal(metric.Time(), telemetry.Timestamp, "Telemetry and metric timestamps do not match") diff --git a/plugins/outputs/cloud_pubsub/pubsub_test.go b/plugins/outputs/cloud_pubsub/pubsub_test.go index 6911ef139cb1e..b3948573b7163 100644 --- a/plugins/outputs/cloud_pubsub/pubsub_test.go +++ b/plugins/outputs/cloud_pubsub/pubsub_test.go @@ -14,7 +14,6 @@ import ( ) func TestPubSub_WriteSingle(t *testing.T) { - testMetrics := []testMetric{ {testutil.TestMetric("value_1", "test"), false /*return error */}, } @@ -126,7 +125,6 @@ func TestPubSub_WriteOverByteThreshold(t *testing.T) { } func TestPubSub_WriteBase64Single(t *testing.T) { - testMetrics := []testMetric{ {testutil.TestMetric("value_1", "test"), false /*return error */}, {testutil.TestMetric("value_2", "test"), false}, diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 82aebbdcca233..3042a0b89dfe3 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -64,7 +64,6 @@ func (f *statisticField) addValue(sType statisticType, value float64) { } func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { - var datums []*cloudwatch.MetricDatum if f.hasAllFields() { @@ -88,7 +87,6 @@ func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { } datums = append(datums, datum) - } else { // If we don't have all required fields, we build each field as independent datum for sType, value := range f.values { @@ -120,7 +118,6 @@ func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { } func (f *statisticField) hasAllFields() bool { - _, hasMin := f.values[statisticTypeMin] _, hasMax := f.values[statisticTypeMax] _, hasSum := f.values[statisticTypeSum] @@ -145,7 +142,6 @@ func (f *valueField) addValue(sType statisticType, value float64) { } func (f *valueField) buildDatum() []*cloudwatch.MetricDatum { - return []*cloudwatch.MetricDatum{ { MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), @@ -226,7 +222,6 @@ func (c *CloudWatch) Close() error { } func (c *CloudWatch) Write(metrics []telegraf.Metric) error { - var datums []*cloudwatch.MetricDatum for _, m := range metrics { d := BuildMetricDatum(c.WriteStatistics, c.HighResolutionMetrics, m) @@ -263,7 +258,6 @@ func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error { // Partition the MetricDatums into smaller slices of a max size so that are under the limit // for the AWS API calls. func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch.MetricDatum { - numberOfPartitions := len(datums) / size if len(datums)%size != 0 { numberOfPartitions++ @@ -288,7 +282,6 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch // cloudwatch.StatisticSet are available. If so, it would build MetricDatum from statistic values. // Otherwise, fields would still been built independently. func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point telegraf.Metric) []*cloudwatch.MetricDatum { - fields := make(map[string]cloudwatchField) tags := point.Tags() storageResolution := int64(60) @@ -297,7 +290,6 @@ func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point tel } for k, v := range point.Fields() { - val, ok := convert(v) if !ok { // Only fields with values that can be converted to float64 (and within CloudWatch boundary) are supported. @@ -412,7 +404,6 @@ func getStatisticType(name string) (sType statisticType, fieldName string) { } func convert(v interface{}) (value float64, ok bool) { - ok = true switch t := v.(type) { diff --git a/plugins/outputs/cratedb/cratedb.go b/plugins/outputs/cratedb/cratedb.go index c520ee3d86e7f..1c8aff47aa248 100644 --- a/plugins/outputs/cratedb/cratedb.go +++ b/plugins/outputs/cratedb/cratedb.go @@ -79,7 +79,6 @@ func (c *CrateDB) Write(metrics []telegraf.Metric) error { func insertSQL(table string, metrics []telegraf.Metric) (string, error) { rows := make([]string, len(metrics)) for i, m := range metrics { - cols := []interface{}{ hashID(m), m.Time().UTC(), diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go index 97ef94bbb4c34..c41495ec07df5 100644 --- a/plugins/outputs/elasticsearch/elasticsearch.go +++ b/plugins/outputs/elasticsearch/elasticsearch.go @@ -250,7 +250,6 @@ func (a *Elasticsearch) Connect() error { // GetPointID generates a unique ID for a Metric Point func GetPointID(m telegraf.Metric) string { - var buffer bytes.Buffer //Timestamp(ns),measurement name and Series Hash for compute the final SHA256 based hash ID @@ -294,7 +293,6 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { } bulkRequest.Add(br) - } ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration) @@ -315,7 +313,6 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { } return nil - } func (a *Elasticsearch) manageTemplate(ctx context.Context) error { @@ -360,17 +357,13 @@ func (a *Elasticsearch) manageTemplate(ctx context.Context) error { } log.Printf("D! Elasticsearch template %s created or updated\n", a.TemplateName) - } else { - log.Println("D! Found existing Elasticsearch template. Skipping template management") - } return nil } func (a *Elasticsearch) GetTagKeys(indexName string) (string, []string) { - tagKeys := []string{} startTag := strings.Index(indexName, "{{") @@ -379,7 +372,6 @@ func (a *Elasticsearch) GetTagKeys(indexName string) (string, []string) { if endTag < 0 { startTag = -1 - } else { tagName := indexName[startTag+2 : endTag] @@ -423,7 +415,6 @@ func (a *Elasticsearch) GetIndexName(indexName string, eventTime time.Time, tagK } return fmt.Sprintf(indexName, tagValues...) - } func getISOWeek(eventTime time.Time) string { diff --git a/plugins/outputs/elasticsearch/elasticsearch_test.go b/plugins/outputs/elasticsearch/elasticsearch_test.go index b0caf8448c6ec..d21d191cb0e52 100644 --- a/plugins/outputs/elasticsearch/elasticsearch_test.go +++ b/plugins/outputs/elasticsearch/elasticsearch_test.go @@ -35,7 +35,6 @@ func TestConnectAndWriteIntegration(t *testing.T) { // Verify that we can successfully write data to Elasticsearch err = e.Write(testutil.MockMetrics()) require.NoError(t, err) - } func TestTemplateManagementEmptyTemplateIntegration(t *testing.T) { @@ -58,7 +57,6 @@ func TestTemplateManagementEmptyTemplateIntegration(t *testing.T) { err := e.manageTemplate(ctx) require.Error(t, err) - } func TestTemplateManagementIntegration(t *testing.T) { @@ -164,7 +162,6 @@ func TestGetTagKeys(t *testing.T) { t.Errorf("Expected tagKeys %s, got %s\n", test.ExpectedTagKeys, tagKeys) } } - } func TestGetIndexName(t *testing.T) { diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index ff26b24ba422c..455c7c785e7d2 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -189,9 +189,8 @@ func (g *Graphite) send(batch []byte) error { if _, e := g.conns[n].Write(batch); e != nil { // Error g.Log.Errorf("Graphite Error: " + e.Error()) - // Close explicitly + // Close explicitly and let's try the next one g.conns[n].Close() - // Let's try the next one } else { // Success err = nil diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index 34f2ec6d93932..c6cad89c0ba51 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -63,7 +63,6 @@ func (g *Gelf) Write(message []byte) (n int, err error) { length := compressed.Len() if length > chunksize { - chunkCountInt := int(math.Ceil(float64(length) / float64(chunksize))) id := make([]byte, 8) diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go index e271b0b14c26a..c7b584076e779 100644 --- a/plugins/outputs/health/health.go +++ b/plugins/outputs/health/health.go @@ -240,7 +240,6 @@ func (h *Health) getOrigin(listener net.Listener) string { } return origin.String() } - } func (h *Health) setHealthy(healthy bool) { diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index 0add3c6c39de6..510df9463265b 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -134,7 +134,6 @@ func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { if i := bytes.IndexByte(data, '\n'); i >= 0 { // We have a full newline-terminated line. return i + 1, data[0 : i+1], nil - } return 0, nil, nil } diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index ceb2b93a6e14b..dde2c8e62e4d0 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -66,7 +66,6 @@ func (*DebugLogger) Print(v ...interface{}) { args := make([]interface{}, 0, len(v)+1) args = append(append(args, "D! [sarama] "), v...) log.Print(args...) - } func (*DebugLogger) Printf(format string, v ...interface{}) { diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 47d7aa10fb210..333214614d813 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -159,7 +159,6 @@ func (k *KinesisOutput) SetSerializer(serializer serializers.Serializer) { } func (k *KinesisOutput) writeKinesis(r []*kinesis.PutRecordsRequestEntry) time.Duration { - start := time.Now() payload := &kinesis.PutRecordsInput{ Records: r, @@ -252,7 +251,6 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { sz = 0 r = nil } - } if sz > 0 { elapsed := k.writeKinesis(r) diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 4c7063c407521..24de7413c1718 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -19,7 +19,6 @@ import ( const zero int64 = 0 func TestPartitionKey(t *testing.T) { - assert := assert.New(t) testPoint := testutil.TestMetric(1) @@ -104,7 +103,6 @@ func TestPartitionKey(t *testing.T) { } func TestWriteKinesis_WhenSuccess(t *testing.T) { - assert := assert.New(t) partitionKey := "partitionKey" @@ -150,7 +148,6 @@ func TestWriteKinesis_WhenSuccess(t *testing.T) { } func TestWriteKinesis_WhenRecordErrors(t *testing.T) { - assert := assert.New(t) errorCode := "InternalFailure" @@ -196,7 +193,6 @@ func TestWriteKinesis_WhenRecordErrors(t *testing.T) { } func TestWriteKinesis_WhenServiceError(t *testing.T) { - assert := assert.New(t) partitionKey := "partitionKey" @@ -513,7 +509,6 @@ func (m *mockKinesisPutRecords) SetupResponse( failedRecordCount int64, records []*kinesis.PutRecordsResultEntry, ) { - m.responses = append(m.responses, &mockKinesisPutRecordsResponse{ Err: nil, Output: &kinesis.PutRecordsOutput{ @@ -527,7 +522,6 @@ func (m *mockKinesisPutRecords) SetupGenericResponse( successfulRecordCount uint32, failedRecordCount uint32, ) { - errorCode := "InternalFailure" errorMessage := "Internal Service Failure" shard := "shardId-000000000003" @@ -553,7 +547,6 @@ func (m *mockKinesisPutRecords) SetupGenericResponse( } func (m *mockKinesisPutRecords) SetupErrorResponse(err error) { - m.responses = append(m.responses, &mockKinesisPutRecordsResponse{ Err: err, Output: nil, @@ -561,7 +554,6 @@ func (m *mockKinesisPutRecords) SetupErrorResponse(err error) { } func (m *mockKinesisPutRecords) PutRecords(input *kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) { - reqNum := len(m.requests) if reqNum > len(m.responses) { return nil, fmt.Errorf("Response for request %+v not setup", reqNum) @@ -577,7 +569,6 @@ func (m *mockKinesisPutRecords) AssertRequests( assert *assert.Assertions, expected []*kinesis.PutRecordsInput, ) { - assert.Equal( len(expected), len(m.requests), @@ -628,7 +619,6 @@ func createTestMetric( name string, serializer serializers.Serializer, ) (telegraf.Metric, []byte) { - metric := testutil.TestMetric(1, name) data, err := serializer.Serialize(metric) @@ -642,7 +632,6 @@ func createTestMetrics( count uint32, serializer serializers.Serializer, ) ([]telegraf.Metric, [][]byte) { - metrics := make([]telegraf.Metric, count) metricsData := make([][]byte, count) @@ -660,7 +649,6 @@ func createPutRecordsRequestEntries( metricsData [][]byte, partitionKey *string, ) []*kinesis.PutRecordsRequestEntry { - count := len(metricsData) records := make([]*kinesis.PutRecordsRequestEntry, count) diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 9f390046c74c7..c91955ced2e0d 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -110,7 +110,6 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { } else { l.Log.Infof("Unable to build Gauge for %s, skipping", m.Name()) l.Log.Debugf("Couldn't build gauge: %v", err) - } } @@ -192,7 +191,6 @@ func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) { fmt.Errorf("undeterminable Source type from Field, %s", l.Template) } for fieldName, value := range m.Fields() { - metricName := m.Name() if fieldName != "value" { metricName = fmt.Sprintf("%s.%s", m.Name(), fieldName) diff --git a/plugins/outputs/librato/librato_test.go b/plugins/outputs/librato/librato_test.go index 5e78d9645ab6d..5a425afca5f1f 100644 --- a/plugins/outputs/librato/librato_test.go +++ b/plugins/outputs/librato/librato_test.go @@ -64,7 +64,6 @@ func TestBadStatusCode(t *testing.T) { } func TestBuildGauge(t *testing.T) { - mtime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix() var gaugeTests = []struct { ptIn telegraf.Metric diff --git a/plugins/outputs/prometheus_client/v1/collector.go b/plugins/outputs/prometheus_client/v1/collector.go index 7932bbc59f44d..a77f94d9ffc44 100644 --- a/plugins/outputs/prometheus_client/v1/collector.go +++ b/plugins/outputs/prometheus_client/v1/collector.go @@ -153,7 +153,6 @@ func CreateSampleID(tags map[string]string) SampleID { } func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) { - for k := range sample.Labels { fam.LabelSet[k]++ } @@ -363,7 +362,6 @@ func (c *Collector) Add(metrics []telegraf.Metric) error { continue } c.addMetricFamily(point, sample, mname, sampleID) - } } } diff --git a/plugins/outputs/timestream/timestream_internal_test.go b/plugins/outputs/timestream/timestream_internal_test.go index 27b19487ac898..1f1194d707b69 100644 --- a/plugins/outputs/timestream/timestream_internal_test.go +++ b/plugins/outputs/timestream/timestream_internal_test.go @@ -36,7 +36,6 @@ func TestGetTimestreamTime(t *testing.T) { } func TestPartitionRecords(t *testing.T) { - assertions := assert.New(t) testDatum := timestreamwrite.Record{ diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index 1b2a31934c605..2e601fc16f29a 100644 --- a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -106,9 +106,7 @@ func (w *Warp10) Connect() error { func (w *Warp10) GenWarp10Payload(metrics []telegraf.Metric) string { collectString := make([]string, 0) for _, mm := range metrics { - for _, field := range mm.FieldList() { - metric := &MetricLine{ Metric: fmt.Sprintf("%s%s", w.Prefix, mm.Name()+"."+field.Key), Timestamp: mm.Time().UnixNano() / 1000, @@ -171,7 +169,6 @@ func (w *Warp10) Write(metrics []telegraf.Metric) error { } func buildTags(tags []*telegraf.Tag) []string { - tagsString := make([]string, len(tags)+1) indexSource := 0 for index, tag := range tags { diff --git a/plugins/outputs/warp10/warp10_test.go b/plugins/outputs/warp10/warp10_test.go index 0b32ce4f158b2..afe931182a1d1 100644 --- a/plugins/outputs/warp10/warp10_test.go +++ b/plugins/outputs/warp10/warp10_test.go @@ -117,5 +117,4 @@ func TestHandleWarp10Error(t *testing.T) { payload := w.HandleError(handledError.Message, 511) require.Exactly(t, handledError.Expected, payload) } - } diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 885bee8c0b74a..f793d62b89a80 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -125,7 +125,6 @@ type MetricPoint struct { } func (w *Wavefront) Connect() error { - if len(w.StringToNumber) > 0 { w.Log.Warn("The string_to_number option is deprecated; please use the enum processor instead") } @@ -168,7 +167,6 @@ func (w *Wavefront) Connect() error { } func (w *Wavefront) Write(metrics []telegraf.Metric) error { - for _, m := range metrics { for _, point := range w.buildMetrics(m) { err := w.sender.SendMetric(point.Metric, point.Value, point.Timestamp, point.Source, point.Tags) @@ -233,7 +231,6 @@ func (w *Wavefront) buildMetrics(m telegraf.Metric) []*MetricPoint { } func (w *Wavefront) buildTags(mTags map[string]string) (string, map[string]string) { - // Remove all empty tags. for k, v := range mTags { if v == "" { diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go index 40707e6d6c8b0..e96e05919b6ef 100644 --- a/plugins/outputs/wavefront/wavefront_test.go +++ b/plugins/outputs/wavefront/wavefront_test.go @@ -73,7 +73,6 @@ func TestBuildMetrics(t *testing.T) { } } } - } func TestBuildMetricsStrict(t *testing.T) { @@ -113,7 +112,6 @@ func TestBuildMetricsStrict(t *testing.T) { } } } - } func TestBuildMetricsWithSimpleFields(t *testing.T) { @@ -152,11 +150,9 @@ func TestBuildMetricsWithSimpleFields(t *testing.T) { } } } - } func TestBuildTags(t *testing.T) { - w := defaultWavefront() var tagtests = []struct { @@ -284,7 +280,6 @@ func TestBuildValue(t *testing.T) { t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", vt.out, value) } } - } func TestBuildValueString(t *testing.T) { @@ -315,7 +310,6 @@ func TestBuildValueString(t *testing.T) { t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", vt.out, value) } } - } func TestTagLimits(t *testing.T) { diff --git a/plugins/parsers/dropwizard/parser.go b/plugins/parsers/dropwizard/parser.go index d8dcc92040aa4..179de7dd77e37 100644 --- a/plugins/parsers/dropwizard/parser.go +++ b/plugins/parsers/dropwizard/parser.go @@ -69,7 +69,6 @@ func NewParser() *parser { // Parse parses the input bytes to an array of metrics func (p *parser) Parse(buf []byte) ([]telegraf.Metric, error) { - metrics := make([]telegraf.Metric, 0) metricTime, err := p.parseTime(buf) @@ -147,7 +146,6 @@ func (p *parser) SetDefaultTags(tags map[string]string) { } func (p *parser) readTags(buf []byte) map[string]string { - if p.TagsPath != "" { var tagsBytes []byte tagsResult := gjson.GetBytes(buf, p.TagsPath) @@ -173,7 +171,6 @@ func (p *parser) readTags(buf []byte) map[string]string { } func (p *parser) parseTime(buf []byte) (time.Time, error) { - if p.TimePath != "" { timeFormat := p.TimeFormat if timeFormat == "" { @@ -195,7 +192,6 @@ func (p *parser) parseTime(buf []byte) (time.Time, error) { } func (p *parser) unmarshalMetrics(buf []byte) (map[string]interface{}, error) { - var registryBytes []byte if p.MetricRegistryPath != "" { regResult := gjson.GetBytes(buf, p.MetricRegistryPath) diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index cdf787d8f229b..7037473e851da 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -377,7 +377,6 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { - metrics := make([]telegraf.Metric, 0) scanner := bufio.NewScanner(bytes.NewReader(buf)) diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 1c409e8a542b6..d51f30385a964 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -1013,7 +1013,6 @@ func TestSyslogTimestamp(t *testing.T) { } func TestReplaceTimestampComma(t *testing.T) { - p := &Parser{ Patterns: []string{`%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05.000"} successfulMatches=%{NUMBER:value:int}`}, } diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index e8a748e7052db..1830e2a6a4d0e 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -88,7 +88,6 @@ func (p *Parser) parseArray(data []interface{}, timestamp time.Time) ([]telegraf results = append(results, metrics...) default: return nil, ErrWrongType - } } @@ -155,7 +154,6 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ( // will delete any strings/bools that shouldn't be fields // assumes that any non-numeric values in TagKeys should be displayed as tags func (p *Parser) switchFieldToTag(tags map[string]string, fields map[string]interface{}) (map[string]string, map[string]interface{}) { - for name, value := range fields { if p.tagKeys == nil { continue diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index 525c8fd2804c2..9abe853eca0c5 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -1357,5 +1357,4 @@ func TestParseArrayWithWildcardTagKeys(t *testing.T) { testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) }) } - } diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go index 7c9487369d5b8..86fd166ffc8c3 100644 --- a/plugins/parsers/wavefront/element.go +++ b/plugins/parsers/wavefront/element.go @@ -108,7 +108,6 @@ func (ep *TimestampParser) parse(p *PointParser, pt *Point) error { } func setTimestamp(pt *Point, ts int64, numDigits int) error { - if numDigits == 19 { // nanoseconds ts = ts / 1e9 diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go index d7984fb99b5b1..5fba2d5ded74b 100644 --- a/plugins/parsers/wavefront/parser.go +++ b/plugins/parsers/wavefront/parser.go @@ -90,7 +90,6 @@ func (p *WavefrontParser) Parse(buf []byte) ([]telegraf.Metric, error) { } func (p *PointParser) Parse(buf []byte) ([]telegraf.Metric, error) { - // parse even if the buffer begins with a newline buf = bytes.TrimPrefix(buf, []byte("\n")) // add newline to end if not exists: @@ -133,7 +132,6 @@ func (p *WavefrontParser) SetDefaultTags(tags map[string]string) { } func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.Metric, error) { - metrics := make([]telegraf.Metric, 0) for _, point := range points { @@ -211,7 +209,6 @@ func (p *PointParser) unscanTokens(n int) { } func (p *PointParser) reset(buf []byte) { - // reset the scan buffer and write new byte p.scanBuf.Reset() p.scanBuf.Write(buf) diff --git a/plugins/parsers/wavefront/parser_test.go b/plugins/parsers/wavefront/parser_test.go index fed31b5f247b5..1f1801730c73b 100644 --- a/plugins/parsers/wavefront/parser_test.go +++ b/plugins/parsers/wavefront/parser_test.go @@ -86,7 +86,6 @@ func TestParse(t *testing.T) { testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.NoError(t, err) assert.EqualValues(t, parsedMetrics[0], testMetric) - } func TestParseLine(t *testing.T) { @@ -172,7 +171,6 @@ func TestParseMultiple(t *testing.T) { assert.NoError(t, err) testMetrics = []telegraf.Metric{testMetric1, testMetric2, testMetric3} assert.EqualValues(t, parsedMetrics, testMetrics) - } func TestParseSpecial(t *testing.T) { @@ -189,7 +187,6 @@ func TestParseSpecial(t *testing.T) { testMetric, err = metric.New("test.metric", map[string]string{"tag1": "val\\\"ue1"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.NoError(t, err) assert.EqualValues(t, parsedMetric, testMetric) - } func TestParseInvalid(t *testing.T) { @@ -221,7 +218,6 @@ func TestParseInvalid(t *testing.T) { _, err = parser.Parse([]byte("\"test.metric\" -1.12-34 1530939936 \"source\"=\"mysource\" tag2=value2")) assert.Error(t, err) - } func TestParseDefaultTags(t *testing.T) { @@ -244,5 +240,4 @@ func TestParseDefaultTags(t *testing.T) { testMetric, err = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.NoError(t, err) assert.EqualValues(t, parsedMetrics[0], testMetric) - } diff --git a/plugins/parsers/wavefront/scanner.go b/plugins/parsers/wavefront/scanner.go index abdbd6d1b4b8f..5d22c53896762 100644 --- a/plugins/parsers/wavefront/scanner.go +++ b/plugins/parsers/wavefront/scanner.go @@ -31,7 +31,6 @@ func (s *PointScanner) unread() { // Scan returns the next token and literal value. func (s *PointScanner) Scan() (Token, string) { - // Read the next rune ch := s.read() if isWhitespace(ch) { diff --git a/plugins/processors/date/date_test.go b/plugins/processors/date/date_test.go index 42e094c939c17..9bafc0654adff 100644 --- a/plugins/processors/date/date_test.go +++ b/plugins/processors/date/date_test.go @@ -30,7 +30,6 @@ func TestTagAndField(t *testing.T) { } err := dateFormatTagAndField.Init() require.Error(t, err) - } func TestNoOutputSpecified(t *testing.T) { diff --git a/plugins/processors/filepath/filepath.go b/plugins/processors/filepath/filepath.go index 70013de174a9a..26a0a7abdccf0 100644 --- a/plugins/processors/filepath/filepath.go +++ b/plugins/processors/filepath/filepath.go @@ -95,7 +95,6 @@ func (o *Options) applyFunc(bo BaseOpts, fn ProcessorFunc, metric telegraf.Metri if v, ok := v.(string); ok { metric.AddField(targetField, fn(v)) } - } } } diff --git a/plugins/processors/port_name/port_name.go b/plugins/processors/port_name/port_name.go index 7fc459c034ff1..60817dbdd244c 100644 --- a/plugins/processors/port_name/port_name.go +++ b/plugins/processors/port_name/port_name.go @@ -108,7 +108,6 @@ func readServices(r io.Reader) sMap { func (pn *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { for _, m := range metrics { - var portProto string var fromField bool diff --git a/plugins/processors/starlark/builtins.go b/plugins/processors/starlark/builtins.go index 66a7c9f9e0308..53e31fb3a988c 100644 --- a/plugins/processors/starlark/builtins.go +++ b/plugins/processors/starlark/builtins.go @@ -191,7 +191,6 @@ func dictUpdate(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tupl iter2 := starlark.Iterate(pair) if iter2 == nil { return nil, fmt.Errorf("dictionary update sequence element #%d is not iterable (%s)", i, pair.Type()) - } defer iter2.Done() len := starlark.Len(pair) diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index 7653283724d2c..24e1f7c9516a7 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -280,7 +280,6 @@ func (t *TopK) push() []telegraf.Metric { // Get the top K metrics for each field and add them to the return value addedKeys := make(map[string]bool) for _, field := range t.Fields { - // Sort the aggregations sortMetrics(aggregations, field, t.Bottomk) diff --git a/plugins/processors/topk/topk_test.go b/plugins/processors/topk/topk_test.go index 928111b29d7da..858859de6261b 100644 --- a/plugins/processors/topk/topk_test.go +++ b/plugins/processors/topk/topk_test.go @@ -139,7 +139,6 @@ func runAndCompare(topk *TopK, metrics []telegraf.Metric, answer []telegraf.Metr // Smoke tests func TestTopkAggregatorsSmokeTests(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -162,7 +161,6 @@ func TestTopkAggregatorsSmokeTests(t *testing.T) { // AddAggregateFields + Mean aggregator func TestTopkMeanAddAggregateFields(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -192,7 +190,6 @@ func TestTopkMeanAddAggregateFields(t *testing.T) { // AddAggregateFields + Sum aggregator func TestTopkSumAddAggregateFields(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -222,7 +219,6 @@ func TestTopkSumAddAggregateFields(t *testing.T) { // AddAggregateFields + Max aggregator func TestTopkMaxAddAggregateFields(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -252,7 +248,6 @@ func TestTopkMaxAddAggregateFields(t *testing.T) { // AddAggregateFields + Min aggregator func TestTopkMinAddAggregateFields(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -282,7 +277,6 @@ func TestTopkMinAddAggregateFields(t *testing.T) { // GroupBy func TestTopkGroupby1(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -308,7 +302,6 @@ func TestTopkGroupby1(t *testing.T) { runAndCompare(&topk, input, answer, "GroupBy test 1", t) } func TestTopkGroupby2(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -338,7 +331,6 @@ func TestTopkGroupby2(t *testing.T) { runAndCompare(&topk, input, answer, "GroupBy test 2", t) } func TestTopkGroupby3(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -365,7 +357,6 @@ func TestTopkGroupby3(t *testing.T) { // GroupBy + Fields func TestTopkGroupbyFields1(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -393,7 +384,6 @@ func TestTopkGroupbyFields1(t *testing.T) { } func TestTopkGroupbyFields2(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -422,7 +412,6 @@ func TestTopkGroupbyFields2(t *testing.T) { // GroupBy metric name func TestTopkGroupbyMetricName1(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -449,7 +438,6 @@ func TestTopkGroupbyMetricName1(t *testing.T) { } func TestTopkGroupbyMetricName2(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -478,7 +466,6 @@ func TestTopkGroupbyMetricName2(t *testing.T) { // BottomK func TestTopkBottomk(t *testing.T) { - // Build the processor var topk TopK topk = *New() @@ -505,7 +492,6 @@ func TestTopkBottomk(t *testing.T) { // GroupByKeyTag func TestTopkGroupByKeyTag(t *testing.T) { - // Build the processor var topk TopK topk = *New() diff --git a/plugins/serializers/msgpack/msgpack.go b/plugins/serializers/msgpack/msgpack.go index d850bb8b004ca..cd5f6ceb2dc87 100644 --- a/plugins/serializers/msgpack/msgpack.go +++ b/plugins/serializers/msgpack/msgpack.go @@ -38,7 +38,6 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { if err != nil { return nil, err } - } return buf, nil } diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go index d37ecaaaf2f1d..ed442e23c85fd 100644 --- a/plugins/serializers/prometheus/collection.go +++ b/plugins/serializers/prometheus/collection.go @@ -168,7 +168,6 @@ func (c *Collection) createLabels(metric telegraf.Metric) []LabelPair { labels = append(labels, LabelPair{Name: name, Value: value}) addedFieldLabel = true - } if addedFieldLabel { @@ -201,7 +200,6 @@ func (c *Collection) Add(metric telegraf.Metric, now time.Time) { Metrics: make(map[MetricKey]*Metric), } c.Entries[family] = entry - } metricKey := MakeMetricKey(labels) diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go index a2dfee71f9892..670d3d346a702 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go @@ -201,7 +201,6 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { } entries[metrickey] = promts } - } var promTS = make([]*prompb.TimeSeries, len(entries)) @@ -235,7 +234,6 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { return false }) - } data, err := proto.Marshal(&prompb.WriteRequest{Timeseries: promTS}) if err != nil { @@ -302,7 +300,6 @@ func (s *Serializer) createLabels(metric telegraf.Metric) []*prompb.Label { labels = append(labels, &prompb.Label{Name: name, Value: value}) addedFieldLabel = true - } if addedFieldLabel { diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go index f23de80df7f3b..f3a3ecebc5c1c 100644 --- a/plugins/serializers/splunkmetric/splunkmetric.go +++ b/plugins/serializers/splunkmetric/splunkmetric.go @@ -40,7 +40,6 @@ func NewSerializer(splunkmetricHecRouting bool, splunkmetricMultimetric bool) (* } func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { - m, err := s.createObject(metric) if err != nil { return nil, fmt.Errorf("D! [serializer.splunkmetric] Dropping invalid metric: %s", metric.Name()) @@ -50,7 +49,6 @@ func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { } func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { - var serialized []byte for _, metric := range metrics { @@ -121,7 +119,6 @@ func (s *serializer) createSingle(metric telegraf.Metric, dataGroup HECTimeSerie var metricJSON []byte for _, field := range metric.FieldList() { - value, valid := verifyValue(field.Value) if !valid { @@ -161,7 +158,6 @@ func (s *serializer) createSingle(metric telegraf.Metric, dataGroup HECTimeSerie } func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, err error) { - /* Splunk supports one metric json object, and does _not_ support an array of JSON objects. ** Splunk has the following required names for the metric store: ** metric_name: The name of the metric diff --git a/testutil/accumulator.go b/testutil/accumulator.go index f1b6469b79ec8..c09857d15f3e2 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -119,7 +119,6 @@ func (a *Accumulator) addFields( } else { t = a.TimeFunc() } - } if a.debug { diff --git a/testutil/testutil_test.go b/testutil/testutil_test.go index 52a807514c94d..e401b10a73930 100644 --- a/testutil/testutil_test.go +++ b/testutil/testutil_test.go @@ -6,7 +6,6 @@ import ( ) func TestDockerHost(t *testing.T) { - os.Unsetenv("DOCKER_HOST") host := GetLocalHost() @@ -30,5 +29,4 @@ func TestDockerHost(t *testing.T) { if host != "1.1.1.1" { t.Fatalf("Host should take DOCKER_HOST value when set. Current value is [%s] and DOCKER_HOST is [%s]", host, os.Getenv("DOCKER_HOST")) } - } From 4e9bc06eb2da52622c26583f5c26f7f9c72b38d1 Mon Sep 17 00:00:00 2001 From: Rene Kaufmann Date: Mon, 15 Mar 2021 17:09:10 +0100 Subject: [PATCH 299/761] nats_output: use the configured credentials file (#8986) --- plugins/outputs/nats/nats.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go index 50102b43a47dd..f4cf35b16e4f7 100644 --- a/plugins/outputs/nats/nats.go +++ b/plugins/outputs/nats/nats.go @@ -73,10 +73,14 @@ func (n *NATS) Connect() error { } // override authentication, if any was specified - if n.Username != "" { + if n.Username != "" && n.Password != "" { opts = append(opts, nats.UserInfo(n.Username, n.Password)) } + if n.Credentials != "" { + opts = append(opts, nats.UserCredentials(n.Credentials)) + } + if n.Name != "" { opts = append(opts, nats.Name(n.Name)) } From 89f5e8a9d9f5c7a1f4abf791025ed82a732e7da2 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 16 Mar 2021 14:23:44 -0500 Subject: [PATCH 300/761] Update to Go 1.16.2 (#8989) --- .circleci/config.yml | 75 +++++++++++++--------------------------- Makefile | 4 +-- scripts/alpine.docker | 2 +- scripts/buster.docker | 2 +- scripts/ci-1.16.docker | 2 +- scripts/mac_installgo.sh | 27 +++++++++++++++ 6 files changed, 56 insertions(+), 56 deletions(-) create mode 100644 scripts/mac_installgo.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 6a9162aa104cd..ce38bf07f5bb5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,7 +12,7 @@ executors: go-1_16: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.16.1' + - image: 'quay.io/influxdb/telegraf-ci:1.16.2' environment: GOFLAGS: -p=8 mac: @@ -37,14 +37,6 @@ commands: - run: 'GOARCH=<< parameters.goarch >> make check' - run: 'GOARCH=<< parameters.goarch >> make check-deps' - run: 'GOARCH=<< parameters.goarch >> make test' - test-go-mac: - steps: - - checkout - - attach_workspace: - at: '/' - - run: 'make' - - run: 'make check' - - run: 'make test' package: parameters: nightly: @@ -88,31 +80,6 @@ jobs: root: '/go' paths: - '*' - macdeps: - executor: mac - steps: - - checkout - - restore_cache: - key: mac-go-mod-v3-{{ checksum "go.sum" }} - - run: 'brew update' - - run: 'brew install go@1.16' - - run: 'make deps' - - run: 'make tidy' - - save_cache: - name: 'go module cache' - key: mac-go-mod-v3-{{ checksum "go.sum" }} - paths: - - '~/go/pkg/mod' - - '/usr/local/Cellar/go' - - '/usr/local/bin/go' - - '/usr/local/bin/gofmt' - - persist_to_workspace: - root: '/' - paths: - - 'usr/local/bin/go' - - 'usr/local/Cellar/go' - - 'usr/local/bin/gofmt' - - 'Users/distiller/go' test-go-1_15: executor: go-1_15 steps: @@ -131,17 +98,32 @@ jobs: steps: - test-go: goarch: "386" - test-go-darwin: + test-go-mac: executor: mac steps: - - test-go-mac + - checkout + - restore_cache: + key: mac-go-mod-v0-{{ checksum "go.sum" }} + - run: 'sh ./scripts/mac_installgo.sh' + - save_cache: + name: 'Saving cache' + key: mac-go-mod-v0-{{ checksum "go.sum" }} + paths: + - '/usr/local/Cellar/go' + - '/usr/local/bin/go' + - '/usr/local/bin/gofmt' + - run: 'make deps' + - run: 'make tidy' + - run: 'make' + - run: 'make check' + - run: 'make test' test-go-windows: executor: name: win/default shell: powershell.exe steps: - checkout - - run: choco upgrade golang --version=1.15.8 + - run: choco upgrade golang --version=1.16.2 - run: choco install make - run: git config --system core.longpaths true - run: make test-windows @@ -201,10 +183,6 @@ workflows: version: 2 check: jobs: - - 'macdeps': - filters: - tags: - only: /.*/ - 'deps': filters: tags: @@ -233,9 +211,7 @@ workflows: filters: tags: only: /.*/ - - 'test-go-darwin': - requires: - - 'macdeps' + - 'test-go-mac': filters: tags: # only runs on tags if you specify this filter only: /.*/ @@ -246,7 +222,7 @@ workflows: - 'package': requires: - 'test-go-windows' - - 'test-go-darwin' + - 'test-go-mac' - 'test-go-1_15' - 'test-go-1_15-386' - 'test-go-1_16' @@ -254,7 +230,7 @@ workflows: - 'release': requires: - 'test-go-windows' - - 'test-go-darwin' + - 'test-go-mac' - 'test-go-1_15' - 'test-go-1_15-386' - 'test-go-1_16' @@ -279,7 +255,6 @@ workflows: nightly: jobs: - 'deps' - - 'macdeps' - 'test-go-1_15': requires: - 'deps' @@ -292,14 +267,12 @@ workflows: - 'test-go-1_16-386': requires: - 'deps' - - 'test-go-darwin': - requires: - - 'macdeps' + - 'test-go-mac' - 'test-go-windows' - 'nightly': requires: - 'test-go-windows' - - 'test-go-darwin' + - 'test-go-mac' - 'test-go-1_15' - 'test-go-1_15-386' - 'test-go-1_16' diff --git a/Makefile b/Makefile index ca2ed70647eea..992ddb18993f1 100644 --- a/Makefile +++ b/Makefile @@ -185,8 +185,8 @@ ci-1.15: .PHONY: ci-1.16 ci-1.16: - docker build -t quay.io/influxdb/telegraf-ci:1.16.1 - < scripts/ci-1.16.docker - docker push quay.io/influxdb/telegraf-ci:1.16.1 + docker build -t quay.io/influxdb/telegraf-ci:1.16.2 - < scripts/ci-1.16.docker + docker push quay.io/influxdb/telegraf-ci:1.16.2 .PHONY: install install: $(buildbin) diff --git a/scripts/alpine.docker b/scripts/alpine.docker index ecf6c15573ea5..4bd3489bc4463 100644 --- a/scripts/alpine.docker +++ b/scripts/alpine.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.1 as builder +FROM golang:1.16.2 as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/buster.docker b/scripts/buster.docker index 465d367f5b482..65e96acb6efad 100644 --- a/scripts/buster.docker +++ b/scripts/buster.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.1-buster as builder +FROM golang:1.16.2-buster as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/ci-1.16.docker b/scripts/ci-1.16.docker index a8e05b54600f1..cc316dec00dcd 100644 --- a/scripts/ci-1.16.docker +++ b/scripts/ci-1.16.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.1 +FROM golang:1.16.2 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/mac_installgo.sh b/scripts/mac_installgo.sh new file mode 100644 index 0000000000000..93d674daf2973 --- /dev/null +++ b/scripts/mac_installgo.sh @@ -0,0 +1,27 @@ +#!/bin/sh + +version="1.16.2" +# This path is cachable, while saving directly in /usr/local/ will cause issues restoring the cache +path="/usr/local/Cellar" + +# Download Go directly from tar, the reason we aren't using brew: it is slow to update and we can't pull specific minor versions +setup_go () { + echo "installing go" + curl -OL https://golang.org/dl/go${version}.darwin-amd64.tar.gz --output go${version}.darwin-amd64.tar.gz + sudo rm -rf ${path}/go + sudo tar -C $path -xzf go${version}.darwin-amd64.tar.gz + ln -sf ${path}/go/bin/go /usr/local/bin/go + ln -sf ${path}/go/bin/gofmt /usr/local/bin/gofmt +} + +if command -v go &> /dev/null; then + echo "Go is already installed" + v=`go version | { read _ _ v _; echo ${v#go}; }` + echo "$v is installed, required version is $version" + if [ "$v" != $version ]; then + setup_go + go version + fi +else + setup_go +fi From 71757e8039696d4791edf207e980f56afbc37a00 Mon Sep 17 00:00:00 2001 From: viperstars Date: Wed, 17 Mar 2021 03:50:56 +0800 Subject: [PATCH 301/761] Close running outputs when reloading (#8769) --- agent/agent.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/agent/agent.go b/agent/agent.go index fb2c75b902f37..3d40e74a1bcdf 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -446,6 +446,13 @@ func stopServiceInputs(inputs []*models.RunningInput) { } } +// stopRunningOutputs stops all running outputs. +func stopRunningOutputs(outputs []*models.RunningOutput) { + for _, output := range outputs { + output.Close() + } +} + // gather runs an input's gather function periodically until the context is // done. func (a *Agent) gatherLoop( @@ -784,6 +791,9 @@ func (a *Agent) runOutputs( cancel() wg.Wait() + log.Println("I! [agent] Stopping running outputs") + stopRunningOutputs(unit.outputs) + return nil } From 1b09a9f3dc045cff909f07a6c6535a00116829a0 Mon Sep 17 00:00:00 2001 From: Daniel Moran Date: Tue, 16 Mar 2021 16:02:59 -0400 Subject: [PATCH 302/761] chore: remove references to outdated InfluxDB settings. (#8754) --- docs/FAQ.md | 4 ---- plugins/inputs/dcos/README.md | 3 --- plugins/inputs/kube_inventory/README.md | 4 ---- plugins/inputs/kubernetes/README.md | 4 ---- plugins/inputs/passenger/README.md | 3 --- plugins/inputs/sflow/README.md | 4 ---- 6 files changed, 22 deletions(-) diff --git a/docs/FAQ.md b/docs/FAQ.md index 4fe28db8b9cbc..3667c10ebe8c4 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -50,8 +50,6 @@ You can use the following techniques to avoid cardinality issues: - Use [metric filtering][] options to exclude unneeded measurements and tags. - Write to a database with an appropriate [retention policy][]. -- Limit series cardinality in your database using the - [max-series-per-database][] and [max-values-per-tag][] settings. - Consider using the [Time Series Index][tsi]. - Monitor your databases using the [show cardinality][] commands. - Consult the [InfluxDB documentation][influx docs] for the most up-to-date techniques. @@ -59,8 +57,6 @@ You can use the following techniques to avoid cardinality issues: [series cardinality]: https://docs.influxdata.com/influxdb/v1.7/concepts/glossary/#series-cardinality [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ -[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 -[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ [show cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx docs]: https://docs.influxdata.com/influxdb/latest/ diff --git a/plugins/inputs/dcos/README.md b/plugins/inputs/dcos/README.md index 790590aeaf94b..4c9d46a921a6b 100644 --- a/plugins/inputs/dcos/README.md +++ b/plugins/inputs/dcos/README.md @@ -13,9 +13,6 @@ your database. options to exclude unneeded tags. - Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/). -- Limit series cardinality in your database using the - [`max-series-per-database`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000) and - [`max-values-per-tag`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000) settings. - Consider using the [Time Series Index](https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/). - Monitor your databases diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index 79adb3c05a994..c9d6fb0be467d 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -27,8 +27,6 @@ avoid cardinality issues: - Use [metric filtering][] options to exclude unneeded measurements and tags. - Write to a database with an appropriate [retention policy][]. -- Limit series cardinality in your database using the - [max-series-per-database][] and [max-values-per-tag][] settings. - Consider using the [Time Series Index][tsi]. - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. @@ -309,8 +307,6 @@ kubernetes_statefulset,namespace=default,selector_select1=s1,statefulset_name=et [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ -[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 -[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx-docs]: https://docs.influxdata.com/influxdb/latest/ diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index a574bed06ffe4..8ef5ef7b1dfca 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -28,8 +28,6 @@ avoid cardinality issues: - Use [metric filtering][] options to exclude unneeded measurements and tags. - Write to a database with an appropriate [retention policy][]. -- Limit series cardinality in your database using the - [max-series-per-database][] and [max-values-per-tag][] settings. - Consider using the [Time Series Index][tsi]. - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. @@ -154,8 +152,6 @@ kubernetes_system_container [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ -[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 -[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx-docs]: https://docs.influxdata.com/influxdb/latest/ diff --git a/plugins/inputs/passenger/README.md b/plugins/inputs/passenger/README.md index 688f4e69aef0d..6821635103d78 100644 --- a/plugins/inputs/passenger/README.md +++ b/plugins/inputs/passenger/README.md @@ -15,9 +15,6 @@ manage your series cardinality: `tagexclude` to remove the `pid` and `process_group_id` tags. - Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/). -- Limit series cardinality in your database using the - [`max-series-per-database`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000) and - [`max-values-per-tag`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000) settings. - Consider using the [Time Series Index](https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/). - Monitor your databases diff --git a/plugins/inputs/sflow/README.md b/plugins/inputs/sflow/README.md index 66d556e17c694..9e5366706e5df 100644 --- a/plugins/inputs/sflow/README.md +++ b/plugins/inputs/sflow/README.md @@ -14,8 +14,6 @@ avoid cardinality issues: - Use [metric filtering][] options to exclude unneeded measurements and tags. - Write to a database with an appropriate [retention policy][]. -- Limit series cardinality in your database using the - [max-series-per-database][] and [max-values-per-tag][] settings. - Consider using the [Time Series Index][tsi]. - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. @@ -113,8 +111,6 @@ This sflow implementation was built from the reference document [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ -[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 -[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx-docs]: https://docs.influxdata.com/influxdb/latest/ From 808b7c1ba540fecb51b1825f746954a826ba2c62 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 16 Mar 2021 21:15:18 +0100 Subject: [PATCH 303/761] Improve xml parser's handling of missing values (#8971) --- plugins/parsers/xml/parser.go | 8 ++++ plugins/parsers/xml/parser_test.go | 24 +++++----- .../parsers/xml/testcases/earthquakes.conf | 44 +++++++++++++++++++ .../parsers/xml/testcases/earthquakes.quakeml | 20 +++++++++ 4 files changed, 86 insertions(+), 10 deletions(-) create mode 100644 plugins/parsers/xml/testcases/earthquakes.conf create mode 100644 plugins/parsers/xml/testcases/earthquakes.quakeml diff --git a/plugins/parsers/xml/parser.go b/plugins/parsers/xml/parser.go index bbe99286bafdd..8ee002ff3b0e7 100644 --- a/plugins/parsers/xml/parser.go +++ b/plugins/parsers/xml/parser.go @@ -58,6 +58,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { p.debugEmptyQuery("metric selection", doc, config.Selection) return nil, fmt.Errorf("cannot parse with empty selection node") } + p.Log.Debugf("Number of selected metric nodes: %d", len(selectedNodes)) for _, selected := range selectedNodes { m, err := p.parseQuery(t, doc, selected, config) @@ -165,6 +166,8 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c case float64: // Assume the value to contain a timestamp in seconds and fractions thereof. timestamp = time.Unix(0, int64(v.(float64)*1e9)) + case nil: + // No timestamp found. Just ignore the time and use "starttime" default: return nil, fmt.Errorf("unknown format '%T' for timestamp query '%v'", v, config.Timestamp) } @@ -185,6 +188,8 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c tags[name] = strconv.FormatBool(v.(bool)) case float64: tags[name] = strconv.FormatFloat(v.(float64), 'G', -1, 64) + case nil: + continue default: return nil, fmt.Errorf("unknown format '%T' for tag '%s'", v, name) } @@ -214,6 +219,8 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c } case float64: fields[name] = int64(v.(float64)) + case nil: + continue default: return nil, fmt.Errorf("unknown format '%T' for field (int) '%s'", v, name) } @@ -244,6 +251,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c if err != nil { return nil, err } + p.Log.Debugf("Number of selected field nodes: %d", len(selectedFieldNodes)) if len(selectedFieldNodes) > 0 && selectedFieldNodes[0] != nil { for _, selectedfield := range selectedFieldNodes { n, err := executeQuery(doc, selectedfield, fieldnamequery) diff --git a/plugins/parsers/xml/parser_test.go b/plugins/parsers/xml/parser_test.go index 91896172d3679..023e9d20a6090 100644 --- a/plugins/parsers/xml/parser_test.go +++ b/plugins/parsers/xml/parser_test.go @@ -126,7 +126,7 @@ func TestParseInvalidXML(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} _, err := parser.ParseLine(tt.input) require.Error(t, err) @@ -162,7 +162,7 @@ func TestInvalidTypeQueriesFail(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} _, err := parser.ParseLine(tt.input) require.Error(t, err) @@ -227,7 +227,7 @@ func TestInvalidTypeQueries(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} actual, err := parser.ParseLine(tt.input) require.NoError(t, err) @@ -356,7 +356,7 @@ func TestParseTimestamps(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} actual, err := parser.ParseLine(tt.input) require.NoError(t, err) @@ -560,7 +560,7 @@ func TestParseSingleValues(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} actual, err := parser.ParseLine(tt.input) require.NoError(t, err) @@ -771,7 +771,7 @@ func TestParseSingleAttributes(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} actual, err := parser.ParseLine(tt.input) require.NoError(t, err) @@ -857,7 +857,7 @@ func TestParseMultiValues(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} actual, err := parser.ParseLine(tt.input) require.NoError(t, err) @@ -969,7 +969,7 @@ func TestParseMultiNodes(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} actual, err := parser.Parse([]byte(tt.input)) require.NoError(t, err) @@ -1014,7 +1014,7 @@ func TestParseMetricQuery(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags} + parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} actual, err := parser.ParseLine(tt.input) require.NoError(t, err) @@ -1114,6 +1114,10 @@ func TestTestCases(t *testing.T) { name: "openweathermap forecast", filename: "testcases/openweathermap.conf", }, + { + name: "earthquakes quakeml", + filename: "testcases/earthquakes.conf", + }, } parser := influx.NewParser(influx.NewMetricHandler()) @@ -1141,7 +1145,7 @@ func TestTestCases(t *testing.T) { expectedErrors, _ := testutil.ParseRawLinesFrom(header, "Expected Error:") // Setup the parser and run it. - parser := Parser{Configs: []Config{*cfg}} + parser := &Parser{Configs: []Config{*cfg}, Log: testutil.Logger{Name: "parsers.xml"}} outputs, err := parser.Parse(content) if len(expectedErrors) == 0 { require.NoError(t, err) diff --git a/plugins/parsers/xml/testcases/earthquakes.conf b/plugins/parsers/xml/testcases/earthquakes.conf new file mode 100644 index 0000000000000..8f02f4384721f --- /dev/null +++ b/plugins/parsers/xml/testcases/earthquakes.conf @@ -0,0 +1,44 @@ +# Example for parsing QuakeML measurement data. +# +# File: +# testcases/earthquakes.quakeml +# +# Expected Output: +# earthquakes,agency=us,type=mww depth=13000,eventid="7000dg8x",lat=-37.6099,lon=179.6102,mag=6.3,station_count=33i 1614989782185000000 +# earthquakes,agency=us,type=mww depth=17000,eventid="7000dft1",lat=-28.7146,lon=-176.5582,mag=6.3,station_count=15i 1614911436571000000 +# earthquakes,agency=us,type=mww depth=26450,eventid="7000dflf",lat=-29.7347,lon=-177.2817,mag=8.1,station_count=81i 1614886112819000000 +# earthquakes,agency=us,type=mb depth=10000,eventid="7000dfku",lat=39.7886,lon=22.1189,mag=5.8,station_count=279i 1614883099415000000 +# earthquakes,agency=us,type=mww depth=53090,eventid="7000dfk3",lat=-29.6647,lon=-177.8343,mag=7.4,station_count=40i 1614879684425000000 +# earthquakes,agency=us,type=mww depth=20780,eventid="7000dffl",lat=-37.5628,lon=179.4443,mag=7.3,station_count=33i 1614864456464000000 +# earthquakes,agency=us,type=mww depth=10000,eventid="7000df40",lat=39.7641,lon=22.1756,mag=6.3,station_count=81i 1614766570197000000 +# earthquakes,type=mww depth=42100,eventid="0212o88mof",lat=61.3286,lon=-149.9991,mag=5.3 1614452365398000000 +# earthquakes,agency=us,type=mww depth=10000,eventid="6000dkmk",lat=63.9602,lon=-22.2736,mag=5.6,station_count=64i 1614161159873000000 +# earthquakes,agency=NC,type=mw depth=6220,eventid="73526151",lat=37.0456667,lon=-121.4781667,mag=3.76,station_count=3i 1613957893840000000 +# earthquakes,agency=US,type=mwr depth=7000,eventid="2021dmpg",lat=36.96366667,lon=-98.09383333,mag=4.2,station_count=39i 1613743017950000000 +# earthquakes,agency=us,type=mww depth=5590,eventid="6000dhxn",lat=-17.8192,lon=167.5901,mag=6.2,station_count=24i 1613436564078000000 +# earthquakes,agency=us,type=mww depth=49940,eventid="6000dher",lat=37.7453,lon=141.7494,mag=7.1,station_count=74i 1613225270397000000 +# earthquakes,agency=us,type=mww depth=98950,eventid="6000dh48",lat=38.1314,lon=73.545,mag=5.9,station_count=34i 1613149295308000000 +# earthquakes,agency=us,type=mww depth=10000,eventid="6000dg77",lat=-23.0508,lon=171.657,mag=7.7,station_count=54i 1612963195532000000 +# + +metric_selection = "//event" +metric_name = "string('earthquakes')" + +# Convert from milliseconds to nanoseconds as golang unfortunately +# only supports RFC3339 with second OR nanosecond precision. +timestamp = "replace(normalize-space(origin/time), 'Z', '000000Z')" +timestamp_format = "2006-01-02T15:04:05.999999999Z" + +[fields] + eventid = "@catalog:eventid" + lon = "number(origin/longitude/value)" + lat = "number(origin/latitude/value)" + depth = "number(origin/depth/value)" + mag = "number(magnitude/mag/value)" + +[fields_int] + station_count = "magnitude/stationCount" + +[tags] + agency = "magnitude/creationInfo/agencyID" + type = "magnitude/type" diff --git a/plugins/parsers/xml/testcases/earthquakes.quakeml b/plugins/parsers/xml/testcases/earthquakes.quakeml new file mode 100644 index 0000000000000..fa5f5fb7f63bd --- /dev/null +++ b/plugins/parsers/xml/testcases/earthquakes.quakeml @@ -0,0 +1,20 @@ + + + +earthquake name182 km NE of Gisborne, New Zealand179.6102-37.60991300017008100horizontal uncertainty2901.04341.036manualus2021-03-09T03:01:59.040Z6.30.054mww33quakeml:earthquake.usgs.gov/realtime/product/origin/us7000dg8x/us/1615258919040/product.xmlmanualus2021-03-09T03:01:59.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dg8x/us/1615258919040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dg8x/us/1615258919040/product.xml#magnitudeearthquakeus2021-03-09T03:05:51.084Z +earthquake nameKermadec Islands region-176.5582-28.71461700018009800horizontal uncertainty891.25419.815manualus2021-03-05T18:47:44.040Z6.30.08mww15quakeml:earthquake.usgs.gov/realtime/product/origin/us7000dft1/us/1614970064040/product.xmlmanualus2021-03-05T18:47:44.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dft1/us/1614970064040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dft1/us/1614970064040/product.xml#magnitudeearthquakeus2021-03-06T02:34:07.561Z +earthquake nameKermadec Islands, New Zealand-177.2817-29.73472645037007800horizontal uncertainty1300.67210.746manualus2021-03-05T18:08:31.040Z8.10.034mww81quakeml:earthquake.usgs.gov/realtime/product/origin/us7000dflf/us/1614967711040/product.xmlmanualus2021-03-05T18:08:31.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dflf/us/1614967711040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dflf/us/1614967711040/product.xml#magnitudeearthquakeus2021-03-09T18:52:08.298Z +earthquake nameGreece22.118939.78861000018005200horizontal uncertainty1400.9190.424manualus2021-03-05T15:03:03.040Z5.80.036mb279quakeml:earthquake.usgs.gov/realtime/product/origin/us7000dfku/us/1614956583040/product.xmlmanualus2021-03-05T15:03:03.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dfku/us/1614956583040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dfku/us/1614956583040/product.xml#magnitudeearthquakeus2021-03-07T08:43:06.987Z +earthquake nameKermadec Islands, New Zealand-177.8343-29.66475309036007800horizontal uncertainty1321.14300.426manualus2021-03-05T13:49:34.040Z7.40.049mww40quakeml:earthquake.usgs.gov/realtime/product/origin/us7000dfk3/us/1614952174040/product.xmlmanualus2021-03-05T13:49:34.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dfk3/us/1614952174040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dfk3/us/1614952174040/product.xml#magnitudeearthquakeus2021-03-09T18:42:04.756Z +earthquake name174 km NE of Gisborne, New Zealand179.4443-37.56282078032006600horizontal uncertainty1411.35230.904manualus2021-03-04T15:08:47.040Z7.30.054mww33quakeml:earthquake.usgs.gov/realtime/product/origin/us7000dffl/us/1614870527040/product.xmlmanualus2021-03-04T15:08:47.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dffl/us/1614870527040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000dffl/us/1614870527040/product.xml#magnitudeearthquakeus2021-03-10T21:54:32.975Z +earthquake name10 km WNW of Týrnavos, Greece22.175639.76411000018005400horizontal uncertainty1291.05170.415manualus2021-03-03T10:31:58.040Z6.30.034mww81quakeml:earthquake.usgs.gov/realtime/product/origin/us7000df40/us/1614767518040/product.xmlmanualus2021-03-03T10:31:58.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us7000df40/us/1614767518040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us7000df40/us/1614767518040/product.xml#magnitudeearthquakeus2021-03-08T04:19:29.249Z +earthquake name3 km SSW of Point MacKenzie, Alaska-149.999161.3286421003000horizontal uncertainty1340.86manual2021-02-27T19:20:59.442Z25.3mwwquakeml:earthquake.usgs.gov/realtime/product/origin/AK0212o88mof/ak/1614453659442/product.xmlmanual2021-02-27T19:20:59.442Zquakeml:earthquake.usgs.gov/realtime/product/origin/AK0212o88mof/ak/1614453659442/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/AK0212o88mof/ak/1614453659442/product.xml#magnitudeearthquakeak2021-03-10T19:09:33.840Z2 +earthquake name5 km ESE of Vogar, Iceland-22.273663.96021000018005600horizontal uncertainty1291.22460.891manualus2021-02-24T15:05:24.040Z5.60.039mww64quakeml:earthquake.usgs.gov/realtime/product/origin/us6000dkmk/us/1614179124040/product.xmlmanualus2021-02-24T15:05:24.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dkmk/us/1614179124040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dkmk/us/1614179124040/product.xml#magnitudeearthquakeus2021-03-07T02:32:18.760Z +earthquake name9km ENE of Gilroy, CA-121.478166737.0456667622024090horizontal uncertainty1781640.15330.02089manualNC2021-02-23T00:54:06.560Z103.76mw3quakeml:earthquake.usgs.gov/realtime/product/origin/nc73526151/nc/1614041646560/product.xmlmanualNC2021-02-23T00:54:06.560Zquakeml:earthquake.usgs.gov/realtime/product/origin/nc73526151/nc/1614041646560/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/nc73526151/nc/1614041646560/product.xml#magnitudeearthquakenc2021-03-04T06:33:36.782Z10 +earthquake name6 km SW of Manchester, Oklahoma-98.0938333336.9636666770003000horizontal uncertainty182980.15960manualOK2021-02-19T14:42:10.861Z4.2mwr39quakeml:earthquake.usgs.gov/realtime/product/origin/ogs2021dmpg/ok/1613745730861/product.xmlmanualUS2021-02-19T14:42:10.861Zquakeml:earthquake.usgs.gov/realtime/product/origin/ogs2021dmpg/ok/1613745730861/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/ogs2021dmpg/ok/1613745730861/product.xml#magnitudeearthquakeok2021-03-05T02:13:24.659Z +earthquake name77 km W of Port-Vila, Vanuatu167.5901-17.8192559033007400horizontal uncertainty3860.86323.666manualus2021-02-19T03:36:41.040Z6.20.063mww24quakeml:earthquake.usgs.gov/realtime/product/origin/us6000dhxn/us/1613705801040/product.xmlmanualus2021-02-19T03:36:41.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dhxn/us/1613705801040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dhxn/us/1613705801040/product.xml#magnitudeearthquakeus2021-03-04T11:07:03.880Z +earthquake name72 km ENE of Namie, Japan141.749437.74534994035007000horizontal uncertainty1441.12333.073manualus2021-02-14T22:04:22.040Z7.10.036mww74quakeml:earthquake.usgs.gov/realtime/product/origin/us6000dher/us/1613340262040/product.xmlmanualus2021-02-14T22:04:22.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dher/us/1613340262040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dher/us/1613340262040/product.xml#magnitudeearthquakeus2021-03-05T13:32:14.760Z +earthquake name37 km W of Murghob, Tajikistan73.54538.13149895012005400horizontal uncertainty2980.91161.915manualus2021-02-18T17:53:33.040Z5.90.053mww34quakeml:earthquake.usgs.gov/realtime/product/origin/us6000dh48/us/1613670813040/product.xmlmanualus2021-02-18T17:53:33.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dh48/us/1613670813040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dh48/us/1613670813040/product.xml#magnitudeearthquakeus2021-03-04T10:24:38.562Z +earthquake namesoutheast of the Loyalty Islands171.657-23.05081000018007800horizontal uncertainty2700.42157.988manualus2021-03-08T07:54:50.040Z7.70.042mww54quakeml:earthquake.usgs.gov/realtime/product/origin/us6000dg77/us/1615190090040/product.xmlmanualus2021-03-08T07:54:50.040Zquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dg77/us/1615190090040/product.xmlquakeml:earthquake.usgs.gov/realtime/product/origin/us6000dg77/us/1615190090040/product.xml#magnitudeearthquakeus2021-03-08T08:07:24.427Z +2021-03-11T11:55:37.000Z + \ No newline at end of file From c4a126073cbae5a4d78d42bfd85d9450f55c40e5 Mon Sep 17 00:00:00 2001 From: Josh Keegan Date: Tue, 16 Mar 2021 20:48:19 +0000 Subject: [PATCH 304/761] Teamspeak input plugin query clients (#8950) * Add query_clients_online to the teamspeak input plugin * Update readme for teamspeak input plugin following introduction of new query_clients_online field --- plugins/inputs/teamspeak/README.md | 3 ++- plugins/inputs/teamspeak/teamspeak.go | 1 + plugins/inputs/teamspeak/teamspeak_test.go | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/teamspeak/README.md b/plugins/inputs/teamspeak/README.md index 4767bb7e35171..ef3f0d8d9377b 100644 --- a/plugins/inputs/teamspeak/README.md +++ b/plugins/inputs/teamspeak/README.md @@ -31,6 +31,7 @@ the [Teamspeak 3 ServerQuery Manual](http://media.teamspeak.com/ts3_literature/T - packets_received_total - bytes_sent_total - bytes_received_total + - query_clients_online ### Tags: @@ -41,5 +42,5 @@ the [Teamspeak 3 ServerQuery Manual](http://media.teamspeak.com/ts3_literature/T ### Example output: ``` -teamspeak,virtual_server=1,name=LeopoldsServer,host=vm01 bytes_received_total=29638202639i,uptime=13567846i,total_ping=26.89,total_packet_loss=0,packets_sent_total=415821252i,packets_received_total=237069900i,bytes_sent_total=55309568252i,clients_online=11i 1507406561000000000 +teamspeak,virtual_server=1,name=LeopoldsServer,host=vm01 bytes_received_total=29638202639i,uptime=13567846i,total_ping=26.89,total_packet_loss=0,packets_sent_total=415821252i,packets_received_total=237069900i,bytes_sent_total=55309568252i,clients_online=11i,query_clients_online=1i 1507406561000000000 ``` \ No newline at end of file diff --git a/plugins/inputs/teamspeak/teamspeak.go b/plugins/inputs/teamspeak/teamspeak.go index 91fdf1135d742..ed565f086fa78 100644 --- a/plugins/inputs/teamspeak/teamspeak.go +++ b/plugins/inputs/teamspeak/teamspeak.go @@ -83,6 +83,7 @@ func (ts *Teamspeak) Gather(acc telegraf.Accumulator) error { "packets_received_total": sc.PacketsReceivedTotal, "bytes_sent_total": sc.BytesSentTotal, "bytes_received_total": sc.BytesReceivedTotal, + "query_clients_online": sm.QueryClientsOnline, } acc.AddFields("teamspeak", fields, tags) diff --git a/plugins/inputs/teamspeak/teamspeak_test.go b/plugins/inputs/teamspeak/teamspeak_test.go index b66948f289f3d..5faa5d795fe97 100644 --- a/plugins/inputs/teamspeak/teamspeak_test.go +++ b/plugins/inputs/teamspeak/teamspeak_test.go @@ -51,6 +51,7 @@ func TestGather(t *testing.T) { "packets_received_total": uint64(370), "bytes_sent_total": uint64(28058), "bytes_received_total": uint64(17468), + "query_clients_online": int(1), } acc.AssertContainsFields(t, "teamspeak", fields) From 8a47d6f10458f034b9714e5a7c6e0c74b28b5443 Mon Sep 17 00:00:00 2001 From: Denis Pershin <48222861+denispershin@users.noreply.github.com> Date: Tue, 16 Mar 2021 23:54:57 +0300 Subject: [PATCH 305/761] Add top stat info to mongodb plugin (#8861) * Add top stat info * fixes after review * fix README.md --- plugins/inputs/mongodb/README.md | 28 +++++ plugins/inputs/mongodb/mongodb.go | 8 +- plugins/inputs/mongodb/mongodb_data.go | 108 +++++++++++++----- plugins/inputs/mongodb/mongodb_data_test.go | 68 +++++++++-- plugins/inputs/mongodb/mongodb_server.go | 28 ++++- plugins/inputs/mongodb/mongodb_server_test.go | 2 +- plugins/inputs/mongodb/mongostat.go | 72 +++++++++++- 7 files changed, 268 insertions(+), 46 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index cce93dc07376a..ddcb1971f9667 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -21,6 +21,10 @@ ## When true, collect per collection stats # gather_col_stats = false + + ## When true, collect usage statistics for each collection + ## (insert, update, queries, remove, getmore, commands etc...). + # gather_top_stat = false ## List of db where collections stats are collected ## If empty, all db are concerned @@ -263,6 +267,29 @@ by running Telegraf with the `--debug` argument. - available (integer) - created (integer) - refreshing (integer) + +- mongodb_top_stats + - tags: + - collection + - fields: + - total_time (integer) + - total_count (integer) + - read_lock_time (integer) + - read_lock_count (integer) + - write_lock_time (integer) + - write_lock_count (integer) + - queries_time (integer) + - queries_count (integer) + - get_more_time (integer) + - get_more_count (integer) + - insert_time (integer) + - insert_count (integer) + - update_time (integer) + - update_count (integer) + - remove_time (integer) + - remove_count (integer) + - commands_time (integer) + - commands_count (integer) ### Example Output: ``` @@ -272,4 +299,5 @@ mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collect mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000 mongodb_col_stats,collection=foo,db_name=local,hostname=127.0.0.1:27017 size=375005928i,avg_obj_size=5494,type="col_stat",storage_size=249307136i,total_index_size=2138112i,ok=1i,count=68251i 1547159491000000000 mongodb_shard_stats,hostname=127.0.0.1:27017,in_use=3i,available=3i,created=4i,refreshing=0i 1522799074000000000 +mongodb_top_stats,collection=foo,total_time=1471,total_count=158,read_lock_time=49614,read_lock_count=657,write_lock_time=49125456,write_lock_count=9841,queries_time=174,queries_count=495,get_more_time=498,get_more_count=46,insert_time=2651,insert_count=1265,update_time=0,update_count=0,remove_time=0,remove_count=0,commands_time=498611,commands_count=4615 ``` diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 77ea2744b300f..355c12caffef6 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -23,6 +23,7 @@ type MongoDB struct { GatherClusterStatus bool GatherPerdbStats bool GatherColStats bool + GatherTopStat bool ColStatsDbs []string tlsint.ClientConfig @@ -53,6 +54,10 @@ var sampleConfig = ` ## When true, collect per collection stats # gather_col_stats = false + ## When true, collect usage statistics for each collection + ## (insert, update, queries, remove, getmore, commands etc...). + # gather_top_stat = false + ## List of db where collections stats are collected ## If empty, all db are concerned # col_stats_dbs = ["local"] @@ -183,7 +188,7 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { } server.Session = sess } - return server.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.ColStatsDbs) + return server.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.GatherTopStat, m.ColStatsDbs) } func init() { @@ -193,6 +198,7 @@ func init() { GatherClusterStatus: true, GatherPerdbStats: false, GatherColStats: false, + GatherTopStat: false, ColStatsDbs: []string{"local"}, } }) diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 6a2c0a86ebd12..e26c0e45231eb 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -15,6 +15,7 @@ type MongodbData struct { DbData []DbData ColData []ColData ShardHostData []DbData + TopStatsData []DbData } type DbData struct { @@ -37,7 +38,7 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData { } } -var DefaultStats = map[string]string{ +var defaultStats = map[string]string{ "uptime_ns": "UptimeNanos", "inserts": "InsertCnt", "inserts_per_sec": "Insert", @@ -94,7 +95,7 @@ var DefaultStats = map[string]string{ "total_docs_scanned": "TotalObjectsScanned", } -var DefaultAssertsStats = map[string]string{ +var defaultAssertsStats = map[string]string{ "assert_regular": "Regular", "assert_warning": "Warning", "assert_msg": "Msg", @@ -102,7 +103,7 @@ var DefaultAssertsStats = map[string]string{ "assert_rollovers": "Rollovers", } -var DefaultCommandsStats = map[string]string{ +var defaultCommandsStats = map[string]string{ "aggregate_command_total": "AggregateCommandTotal", "aggregate_command_failed": "AggregateCommandFailed", "count_command_total": "CountCommandTotal", @@ -123,7 +124,7 @@ var DefaultCommandsStats = map[string]string{ "update_command_failed": "UpdateCommandFailed", } -var DefaultLatencyStats = map[string]string{ +var defaultLatencyStats = map[string]string{ "latency_writes_count": "WriteOpsCnt", "latency_writes": "WriteLatency", "latency_reads_count": "ReadOpsCnt", @@ -132,7 +133,7 @@ var DefaultLatencyStats = map[string]string{ "latency_commands": "CommandLatency", } -var DefaultReplStats = map[string]string{ +var defaultReplStats = map[string]string{ "repl_inserts": "InsertRCnt", "repl_inserts_per_sec": "InsertR", "repl_queries": "QueryRCnt", @@ -164,37 +165,37 @@ var DefaultReplStats = map[string]string{ "repl_executor_unsignaled_events": "ReplExecutorUnsignaledEvents", } -var DefaultClusterStats = map[string]string{ +var defaultClusterStats = map[string]string{ "jumbo_chunks": "JumboChunksCount", } -var DefaultShardStats = map[string]string{ +var defaultShardStats = map[string]string{ "total_in_use": "TotalInUse", "total_available": "TotalAvailable", "total_created": "TotalCreated", "total_refreshing": "TotalRefreshing", } -var ShardHostStats = map[string]string{ +var shardHostStats = map[string]string{ "in_use": "InUse", "available": "Available", "created": "Created", "refreshing": "Refreshing", } -var MmapStats = map[string]string{ +var mmapStats = map[string]string{ "mapped_megabytes": "Mapped", "non-mapped_megabytes": "NonMapped", "page_faults": "FaultsCnt", "page_faults_per_sec": "Faults", } -var WiredTigerStats = map[string]string{ +var wiredTigerStats = map[string]string{ "percent_cache_dirty": "CacheDirtyPercent", "percent_cache_used": "CacheUsedPercent", } -var WiredTigerExtStats = map[string]string{ +var wiredTigerExtStats = map[string]string{ "wtcache_tracked_dirty_bytes": "TrackedDirtyBytes", "wtcache_current_bytes": "CurrentCachedBytes", "wtcache_max_bytes_configured": "MaxBytesConfigured", @@ -215,7 +216,7 @@ var WiredTigerExtStats = map[string]string{ "wtcache_unmodified_pages_evicted": "UnmodifiedPagesEvicted", } -var DefaultTCMallocStats = map[string]string{ +var defaultTCMallocStats = map[string]string{ "tcmalloc_current_allocated_bytes": "TCMallocCurrentAllocatedBytes", "tcmalloc_heap_size": "TCMallocHeapSize", "tcmalloc_central_cache_free_bytes": "TCMallocCentralCacheFreeBytes", @@ -237,13 +238,13 @@ var DefaultTCMallocStats = map[string]string{ "tcmalloc_pageheap_total_reserve_bytes": "TCMallocPageheapTotalReserveBytes", } -var DefaultStorageStats = map[string]string{ +var defaultStorageStats = map[string]string{ "storage_freelist_search_bucket_exhausted": "StorageFreelistSearchBucketExhausted", "storage_freelist_search_requests": "StorageFreelistSearchRequests", "storage_freelist_search_scanned": "StorageFreelistSearchScanned", } -var DbDataStats = map[string]string{ +var dbDataStats = map[string]string{ "collections": "Collections", "objects": "Objects", "avg_obj_size": "AvgObjSize", @@ -255,7 +256,7 @@ var DbDataStats = map[string]string{ "ok": "Ok", } -var ColDataStats = map[string]string{ +var colDataStats = map[string]string{ "count": "Count", "size": "Size", "avg_obj_size": "AvgObjSize", @@ -264,6 +265,27 @@ var ColDataStats = map[string]string{ "ok": "Ok", } +var topDataStats = map[string]string{ + "total_time": "TotalTime", + "total_count": "TotalCount", + "read_lock_time": "ReadLockTime", + "read_lock_count": "ReadLockCount", + "write_lock_time": "WriteLockTime", + "write_lock_count": "WriteLockCount", + "queries_time": "QueriesTime", + "queries_count": "QueriesCount", + "get_more_time": "GetMoreTime", + "get_more_count": "GetMoreCount", + "insert_time": "InsertTime", + "insert_count": "InsertCount", + "update_time": "UpdateTime", + "update_count": "UpdateCount", + "remove_time": "RemoveTime", + "remove_count": "RemoveCount", + "commands_time": "CommandsTime", + "commands_count": "CommandsCount", +} + func (d *MongodbData) AddDbStats() { for _, dbstat := range d.StatLine.DbStatsLines { dbStatLine := reflect.ValueOf(&dbstat).Elem() @@ -272,7 +294,7 @@ func (d *MongodbData) AddDbStats() { Fields: make(map[string]interface{}), } newDbData.Fields["type"] = "db_stat" - for key, value := range DbDataStats { + for key, value := range dbDataStats { val := dbStatLine.FieldByName(value).Interface() newDbData.Fields[key] = val } @@ -289,7 +311,7 @@ func (d *MongodbData) AddColStats() { Fields: make(map[string]interface{}), } newColData.Fields["type"] = "col_stat" - for key, value := range ColDataStats { + for key, value := range colDataStats { val := colStatLine.FieldByName(value).Interface() newColData.Fields[key] = val } @@ -305,7 +327,7 @@ func (d *MongodbData) AddShardHostStats() { Fields: make(map[string]interface{}), } newDbData.Fields["type"] = "shard_host_stat" - for k, v := range ShardHostStats { + for k, v := range shardHostStats { val := hostStatLine.FieldByName(v).Interface() newDbData.Fields[k] = val } @@ -313,16 +335,32 @@ func (d *MongodbData) AddShardHostStats() { } } +func (d *MongodbData) AddTopStats() { + for _, topStat := range d.StatLine.TopStatLines { + topStatLine := reflect.ValueOf(&topStat).Elem() + newTopStatData := &DbData{ + Name: topStat.CollectionName, + Fields: make(map[string]interface{}), + } + newTopStatData.Fields["type"] = "top_stat" + for key, value := range topDataStats { + val := topStatLine.FieldByName(value).Interface() + newTopStatData.Fields[key] = val + } + d.TopStatsData = append(d.TopStatsData, *newTopStatData) + } +} + func (d *MongodbData) AddDefaultStats() { statLine := reflect.ValueOf(d.StatLine).Elem() - d.addStat(statLine, DefaultStats) + d.addStat(statLine, defaultStats) if d.StatLine.NodeType != "" { - d.addStat(statLine, DefaultReplStats) + d.addStat(statLine, defaultReplStats) d.Tags["node_type"] = d.StatLine.NodeType } if d.StatLine.ReadLatency > 0 { - d.addStat(statLine, DefaultLatencyStats) + d.addStat(statLine, defaultLatencyStats) } if d.StatLine.ReplSetName != "" { @@ -337,23 +375,23 @@ func (d *MongodbData) AddDefaultStats() { d.add("version", d.StatLine.Version) } - d.addStat(statLine, DefaultAssertsStats) - d.addStat(statLine, DefaultClusterStats) - d.addStat(statLine, DefaultCommandsStats) - d.addStat(statLine, DefaultShardStats) - d.addStat(statLine, DefaultStorageStats) - d.addStat(statLine, DefaultTCMallocStats) + d.addStat(statLine, defaultAssertsStats) + d.addStat(statLine, defaultClusterStats) + d.addStat(statLine, defaultCommandsStats) + d.addStat(statLine, defaultShardStats) + d.addStat(statLine, defaultStorageStats) + d.addStat(statLine, defaultTCMallocStats) if d.StatLine.StorageEngine == "mmapv1" || d.StatLine.StorageEngine == "rocksdb" { - d.addStat(statLine, MmapStats) + d.addStat(statLine, mmapStats) } else if d.StatLine.StorageEngine == "wiredTiger" { - for key, value := range WiredTigerStats { + for key, value := range wiredTigerStats { val := statLine.FieldByName(value).Interface() percentVal := fmt.Sprintf("%.1f", val.(float64)*100) floatVal, _ := strconv.ParseFloat(percentVal, 64) d.add(key, floatVal) } - d.addStat(statLine, WiredTigerExtStats) + d.addStat(statLine, wiredTigerExtStats) d.add("page_faults", d.StatLine.FaultsCnt) } } @@ -409,4 +447,14 @@ func (d *MongodbData) flush(acc telegraf.Accumulator) { ) host.Fields = make(map[string]interface{}) } + for _, col := range d.TopStatsData { + d.Tags["collection"] = col.Name + acc.AddFields( + "mongodb_top_stats", + col.Fields, + d.Tags, + d.StatLine.Time, + ) + col.Fields = make(map[string]interface{}) + } } diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 4a1730211b594..378268916054d 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -64,7 +64,7 @@ func TestAddNonReplStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultStats { + for key := range defaultStats { assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } } @@ -85,7 +85,7 @@ func TestAddReplStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range MmapStats { + for key := range mmapStats { assert.True(t, acc.HasInt64Field("mongodb", key), key) } } @@ -119,11 +119,11 @@ func TestAddWiredTigerStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range WiredTigerStats { + for key := range wiredTigerStats { assert.True(t, acc.HasFloatField("mongodb", key), key) } - for key := range WiredTigerExtStats { + for key := range wiredTigerExtStats { assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } @@ -146,7 +146,7 @@ func TestAddShardStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultShardStats { + for key := range defaultShardStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -169,7 +169,7 @@ func TestAddLatencyStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultLatencyStats { + for key := range defaultLatencyStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -191,7 +191,7 @@ func TestAddAssertsStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultAssertsStats { + for key := range defaultAssertsStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -226,7 +226,7 @@ func TestAddCommandsStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultCommandsStats { + for key := range defaultCommandsStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -262,7 +262,7 @@ func TestAddTCMallocStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultTCMallocStats { + for key := range defaultTCMallocStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -282,7 +282,7 @@ func TestAddStorageStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultStorageStats { + for key := range defaultStorageStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -312,7 +312,7 @@ func TestAddShardHostStats(t *testing.T) { var hostsFound []string for host := range hostStatLines { - for key := range ShardHostStats { + for key := range shardHostStats { assert.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) } @@ -485,3 +485,49 @@ func TestStateTag(t *testing.T) { } acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags) } + +func TestAddTopStats(t *testing.T) { + collections := []string{"collectionOne", "collectionTwo"} + var topStatLines []TopStatLine + for _, collection := range collections { + topStatLine := TopStatLine{ + CollectionName: collection, + TotalTime: 0, + TotalCount: 0, + ReadLockTime: 0, + ReadLockCount: 0, + WriteLockTime: 0, + WriteLockCount: 0, + QueriesTime: 0, + QueriesCount: 0, + GetMoreTime: 0, + GetMoreCount: 0, + InsertTime: 0, + InsertCount: 0, + UpdateTime: 0, + UpdateCount: 0, + RemoveTime: 0, + RemoveCount: 0, + CommandsTime: 0, + CommandsCount: 0, + } + topStatLines = append(topStatLines, topStatLine) + } + + d := NewMongodbData( + &StatLine{ + TopStatLines: topStatLines, + }, + tags, + ) + + var acc testutil.Accumulator + d.AddTopStats() + d.flush(&acc) + + for range topStatLines { + for key := range topDataStats { + assert.True(t, acc.HasInt64Field("mongodb_top_stats", key)) + } + } +} diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 9553a578c04da..e362a0bd7f008 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -73,6 +73,20 @@ func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) { return replSetStatus, nil } +func (s *Server) gatherTopStatData() (*TopStats, error) { + topStats := &TopStats{} + err := s.Session.DB("admin").Run(bson.D{ + { + Name: "top", + Value: 1, + }, + }, topStats) + if err != nil { + return nil, err + } + return topStats, nil +} + func (s *Server) gatherClusterStatus() (*ClusterStatus, error) { chunkCount, err := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count() if err != nil { @@ -192,7 +206,7 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) return results, nil } -func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, gatherDbStats bool, gatherColStats bool, colStatsDbs []string) error { +func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, gatherDbStats bool, gatherColStats bool, gatherTopStat bool, colStatsDbs []string) error { s.Session.SetMode(mgo.Eventual, true) s.Session.SetSocketTimeout(0) @@ -257,6 +271,16 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, } } + topStatData := &TopStats{} + if gatherTopStat { + topStats, err := s.gatherTopStatData() + if err != nil { + s.Log.Debugf("Unable to gather top stat data: %s", err.Error()) + return err + } + topStatData = topStats + } + result := &MongoStatus{ ServerStatus: serverStatus, ReplSetStatus: replSetStatus, @@ -265,6 +289,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, ColStats: collectionStats, ShardStats: shardStats, OplogStats: oplogStats, + TopStats: topStatData, } result.SampleTime = time.Now() @@ -282,6 +307,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, data.AddDbStats() data.AddColStats() data.AddShardHostStats() + data.AddTopStats() data.flush(acc) } diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index 91a3c0709f0d4..463d7af1b1f65 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -35,7 +35,7 @@ func TestAddDefaultStats(t *testing.T) { err = server.gatherData(&acc, false) require.NoError(t, err) - for key := range DefaultStats { + for key := range defaultStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 6210b782e7884..3cfa9e9747bc9 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -37,6 +37,7 @@ type MongoStatus struct { ColStats *ColStats ShardStats *ShardStats OplogStats *OplogStats + TopStats *TopStats } type ServerStatus struct { @@ -169,6 +170,31 @@ type ShardHostStatsData struct { Refreshing int64 `bson:"refreshing"` } +type TopStats struct { + Totals map[string]TopStatCollections `bson:"totals"` +} + +type TopStatCollections struct { + TSCollection TopStatCollection `bson:",inline"` +} + +type TopStatCollection struct { + Total TopStatCollectionData `bson:"total"` + ReadLock TopStatCollectionData `bson:"readLock"` + WriteLock TopStatCollectionData `bson:"writeLock"` + Queries TopStatCollectionData `bson:"queries"` + GetMore TopStatCollectionData `bson:"getmore"` + Insert TopStatCollectionData `bson:"insert"` + Update TopStatCollectionData `bson:"update"` + Remove TopStatCollectionData `bson:"remove"` + Commands TopStatCollectionData `bson:"commands"` +} + +type TopStatCollectionData struct { + Time int64 `bson:"time"` + Count int64 `bson:"count"` +} + type ConcurrentTransactions struct { Write ConcurrentTransStats `bson:"write"` Read ConcurrentTransStats `bson:"read"` @@ -768,6 +794,8 @@ type StatLine struct { // Shard Hosts stats field ShardHostStatsLines map[string]ShardHostStatLine + TopStatLines []TopStatLine + // TCMalloc stats field TCMallocCurrentAllocatedBytes int64 TCMallocHeapSize int64 @@ -825,6 +853,19 @@ type ShardHostStatLine struct { Refreshing int64 } +type TopStatLine struct { + CollectionName string + TotalTime, TotalCount int64 + ReadLockTime, ReadLockCount int64 + WriteLockTime, WriteLockCount int64 + QueriesTime, QueriesCount int64 + GetMoreTime, GetMoreCount int64 + InsertTime, InsertCount int64 + UpdateTime, UpdateCount int64 + RemoveTime, RemoveCount int64 + CommandsTime, CommandsCount int64 +} + func parseLocks(stat ServerStatus) map[string]LockUsage { returnVal := map[string]LockUsage{} for namespace, lockInfo := range stat.Locks { @@ -1101,7 +1142,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.Time = newMongo.SampleTime returnVal.IsMongos = - (newStat.ShardCursorType != nil || strings.HasPrefix(newStat.Process, MongosProcess)) + newStat.ShardCursorType != nil || strings.HasPrefix(newStat.Process, MongosProcess) // BEGIN code modification if oldStat.Mem.Supported.(bool) { @@ -1209,7 +1250,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.GlobalLock != nil { - hasWT := (newStat.WiredTiger != nil && oldStat.WiredTiger != nil) + hasWT := newStat.WiredTiger != nil && oldStat.WiredTiger != nil //If we have wiredtiger stats, use those instead if newStat.GlobalLock.CurrentQueue != nil { if hasWT { @@ -1364,5 +1405,32 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } } + if newMongo.TopStats != nil { + for collection, data := range newMongo.TopStats.Totals { + topStatDataLine := &TopStatLine{ + CollectionName: collection, + TotalTime: data.TSCollection.Total.Time, + TotalCount: data.TSCollection.Total.Count, + ReadLockTime: data.TSCollection.ReadLock.Time, + ReadLockCount: data.TSCollection.ReadLock.Count, + WriteLockTime: data.TSCollection.WriteLock.Time, + WriteLockCount: data.TSCollection.WriteLock.Count, + QueriesTime: data.TSCollection.Queries.Time, + QueriesCount: data.TSCollection.Queries.Count, + GetMoreTime: data.TSCollection.GetMore.Time, + GetMoreCount: data.TSCollection.GetMore.Count, + InsertTime: data.TSCollection.Insert.Time, + InsertCount: data.TSCollection.Insert.Count, + UpdateTime: data.TSCollection.Update.Time, + UpdateCount: data.TSCollection.Update.Count, + RemoveTime: data.TSCollection.Remove.Time, + RemoveCount: data.TSCollection.Remove.Count, + CommandsTime: data.TSCollection.Commands.Time, + CommandsCount: data.TSCollection.Commands.Count, + } + returnVal.TopStatLines = append(returnVal.TopStatLines, *topStatDataLine) + } + } + return returnVal } From f5552944d153a01d8c54be838e28c1e310371025 Mon Sep 17 00:00:00 2001 From: Shangxin Du Date: Tue, 16 Mar 2021 14:49:07 -0700 Subject: [PATCH 306/761] cisco_telemetry_mdt enhancement (#8661) --- plugins/inputs/cisco_telemetry_mdt/README.md | 64 +- .../cisco_telemetry_mdt.go | 159 +++- .../cisco_telemetry_mdt_test.go | 161 +++- .../cisco_telemetry_util.go | 892 ++++++++++++++++++ 4 files changed, 1269 insertions(+), 7 deletions(-) create mode 100644 plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go diff --git a/plugins/inputs/cisco_telemetry_mdt/README.md b/plugins/inputs/cisco_telemetry_mdt/README.md index 9c4eb3645d491..d15f122081d05 100644 --- a/plugins/inputs/cisco_telemetry_mdt/README.md +++ b/plugins/inputs/cisco_telemetry_mdt/README.md @@ -1,8 +1,8 @@ -# Cisco Model-Driven Telemetry (MDT) Input Plugin +# Cisco model-driven telemetry (MDT) Cisco model-driven telemetry (MDT) is an input plugin that consumes telemetry data from Cisco IOS XR, IOS XE and NX-OS platforms. It supports TCP & GRPC dialout transports. -GRPC-based transport can utilize TLS for authentication and encryption. +RPC-based transport can utilize TLS for authentication and encryption. Telemetry data is expected to be GPB-KV (self-describing-gpb) encoded. The GRPC dialout transport is supported on various IOS XR (64-bit) 6.1.x and later, IOS XE 16.10 and later, as well as NX-OS 7.x and later platforms. @@ -21,6 +21,9 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l ## Address and port to host telemetry listener service_address = ":57000" + ## Grpc Maximum Message Size, default is 4MB, increase the size. + max_msg_size = 20000000 + ## Enable TLS; grpc transport only. # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" @@ -35,6 +38,19 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l ## Define aliases to map telemetry encoding paths to simple measurement names [inputs.cisco_telemetry_mdt.aliases] ifstats = "ietf-interfaces:interfaces-state/interface/statistics" + [inputs.cisco_telemetry_mdt.dmes] +# Global Property Xformation. +# prop1 = "uint64 to int" +# prop2 = "uint64 to string" +# prop3 = "string to uint64" +# prop4 = "string to int64" +# prop5 = "string to float64" +# auto-prop-xfrom = "auto-float-xfrom" #Xform any property which is string, and has float number to type float64 +# Per Path property xformation, Name is telemetry configuration under sensor-group, path configuration "WORD Distinguished Name" +# Per Path configuration is better as it avoid property collision issue of types. +# dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}' +# dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}' +# dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' ``` ### Example Output: @@ -42,3 +58,47 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet2,source=csr1kv,subscription=101 in-unicast-pkts=27i,in-multicast-pkts=0i,discontinuity-time="2019-05-23T07:40:23.000362+00:00",in-octets=5233i,in-errors=0i,out-multicast-pkts=0i,out-discards=0i,in-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,out-unicast-pkts=0i,out-broadcast-pkts=0i,out-octets=0i,out-errors=0i 1559150462624000000 ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet1,source=csr1kv,subscription=101 in-octets=3394770806i,in-broadcast-pkts=0i,in-multicast-pkts=0i,out-broadcast-pkts=0i,in-unknown-protos=0i,out-octets=350212i,in-unicast-pkts=9477273i,in-discards=0i,out-unicast-pkts=2726i,out-discards=0i,discontinuity-time="2019-05-23T07:40:23.000363+00:00",in-errors=30i,out-multicast-pkts=0i,out-errors=0i 1559150462624000000 ``` + +### NX-OS Configuration Example: +``` +Requirement DATA-SOURCE Configuration +----------------------------------------- +Environment DME path sys/ch query-condition query-target=subtree&target-subtree-class=eqptPsuSlot,eqptFtSlot,eqptSupCSlot,eqptPsu,eqptFt,eqptSensor,eqptLCSlot + DME path sys/ch depth 5 (Another configuration option) +Environment NXAPI show environment power + NXAPI show environment fan + NXAPI show environment temperature +Interface Stats DME path sys/intf query-condition query-target=subtree&target-subtree-class=rmonIfIn,rmonIfOut,rmonIfHCIn,rmonIfHCOut,rmonEtherStats +Interface State DME path sys/intf depth unbounded query-condition query-target=subtree&target-subtree-class=l1PhysIf,pcAggrIf,l3EncRtdIf,l3LbRtdIf,ethpmPhysIf +VPC DME path sys/vpc query-condition query-target=subtree&target-subtree-class=vpcDom,vpcIf +Resources cpu DME path sys/procsys query-condition query-target=subtree&target-subtree-class=procSystem,procSysCore,procSysCpuSummary,procSysCpu,procIdle,procIrq,procKernel,procNice,procSoftirq,procTotal,procUser,procWait,procSysCpuHistory,procSysLoad +Resources Mem DME path sys/procsys/sysmem/sysmemused + path sys/procsys/sysmem/sysmemusage + path sys/procsys/sysmem/sysmemfree +Per Process cpu DME path sys/proc depth unbounded query-condition rsp-foreign-subtree=ephemeral +vxlan(svi stats) DME path sys/bd query-condition query-target=subtree&target-subtree-class=l2VlanStats +BGP DME path sys/bgp query-condition query-target=subtree&target-subtree-class=bgpDom,bgpPeer,bgpPeerAf,bgpDomAf,bgpPeerAfEntry,bgpOperRtctrlL3,bgpOperRttP,bgpOperRttEntry,bgpOperAfCtrl +mac dynamic DME path sys/mac query-condition query-target=subtree&target-subtree-class=l2MacAddressTable +bfd DME path sys/bfd/inst depth unbounded +lldp DME path sys/lldp depth unbounded +urib DME path sys/urib depth unbounded query-condition rsp-foreign-subtree=ephemeral +u6rib DME path sys/u6rib depth unbounded query-condition rsp-foreign-subtree=ephemeral +multicast flow DME path sys/mca/show/flows depth unbounded +multicast stats DME path sys/mca/show/stats depth unbounded +multicast igmp NXAPI show ip igmp groups vrf all +multicast igmp NXAPI show ip igmp interface vrf all +multicast igmp NXAPI show ip igmp snooping +multicast igmp NXAPI show ip igmp snooping groups +multicast igmp NXAPI show ip igmp snooping groups detail +multicast igmp NXAPI show ip igmp snooping groups summary +multicast igmp NXAPI show ip igmp snooping mrouter +multicast igmp NXAPI show ip igmp snooping statistics +multicast pim NXAPI show ip pim interface vrf all +multicast pim NXAPI show ip pim neighbor vrf all +multicast pim NXAPI show ip pim route vrf all +multicast pim NXAPI show ip pim rp vrf all +multicast pim NXAPI show ip pim statistics vrf all +multicast pim NXAPI show ip pim vrf all + + +``` diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index db34ba94d5f0a..e0cbb87d4371c 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -3,6 +3,7 @@ package cisco_telemetry_mdt import ( "bytes" "encoding/binary" + "encoding/json" "fmt" "io" "net" @@ -37,6 +38,7 @@ type CiscoTelemetryMDT struct { ServiceAddress string `toml:"service_address"` MaxMsgSize int `toml:"max_msg_size"` Aliases map[string]string `toml:"aliases"` + Dmes map[string]string `toml:"dmes"` EmbeddedTags []string `toml:"embedded_tags"` Log telegraf.Logger @@ -50,13 +52,24 @@ type CiscoTelemetryMDT struct { // Internal state aliases map[string]string + dmesFuncs map[string]string warned map[string]struct{} extraTags map[string]map[string]struct{} + nxpathMap map[string]map[string]string //per path map + propMap map[string]func(field *telemetry.TelemetryField, value interface{}) interface{} mutex sync.Mutex acc telegraf.Accumulator wg sync.WaitGroup } +type NxPayloadXfromStructure struct { + Name string `json:"Name"` + Prop []struct { + Key string `json:"Key"` + Value string `json:"Value"` + } `json:"prop"` +} + // Start the Cisco MDT service func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { var err error @@ -66,12 +79,55 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { return err } + c.propMap = make(map[string]func(field *telemetry.TelemetryField, value interface{}) interface{}, 100) + c.propMap["test"] = nxosValueXformUint64Toint64 + c.propMap["asn"] = nxosValueXformUint64ToString //uint64 to string. + c.propMap["subscriptionId"] = nxosValueXformUint64ToString //uint64 to string. + c.propMap["operState"] = nxosValueXformUint64ToString //uint64 to string. + // Invert aliases list c.warned = make(map[string]struct{}) c.aliases = make(map[string]string, len(c.Aliases)) for alias, path := range c.Aliases { c.aliases[path] = alias } + c.initDb() + + c.dmesFuncs = make(map[string]string, len(c.Dmes)) + for dme, path := range c.Dmes { + c.dmesFuncs[path] = dme + switch path { + case "uint64 to int": + c.propMap[dme] = nxosValueXformUint64Toint64 + case "uint64 to string": + c.propMap[dme] = nxosValueXformUint64ToString + case "string to float64": + c.propMap[dme] = nxosValueXformStringTofloat + case "string to uint64": + c.propMap[dme] = nxosValueXformStringToUint64 + case "string to int64": + c.propMap[dme] = nxosValueXformStringToInt64 + case "auto-float-xfrom": + c.propMap[dme] = nxosValueAutoXformFloatProp + default: + if !strings.HasPrefix(dme, "dnpath") { // not path based property map + continue + } + + var jsStruct NxPayloadXfromStructure + err := json.Unmarshal([]byte(path), &jsStruct) + if err != nil { + continue + } + + // Build 2 level Hash nxpathMap Key = jsStruct.Name, Value = map of jsStruct.Prop + // It will override the default of code if same path is provided in configuration. + c.nxpathMap[jsStruct.Name] = make(map[string]string, len(jsStruct.Prop)) + for _, prop := range jsStruct.Prop { + c.nxpathMap[jsStruct.Name][prop.Key] = prop.Value + } + } + } // Fill extra tags c.extraTags = make(map[string]map[string]struct{}) @@ -296,7 +352,9 @@ func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) { // Parse keys tags = make(map[string]string, len(keys.Fields)+3) tags["source"] = msg.GetNodeIdStr() - tags["subscription"] = msg.GetSubscriptionIdStr() + if msgID := msg.GetSubscriptionIdStr(); msgID != "" { + tags["subscription"] = msgID + } tags["path"] = msg.GetEncodingPath() for _, subfield := range keys.Fields { @@ -391,9 +449,72 @@ func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemet } } +func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string, path string, tags map[string]string, timestamp time.Time) { + // RIB + measurement := path + for _, subfield := range field.Fields { + //For Every table fill the keys which are vrfName, address and masklen + switch subfield.Name { + case "vrfName", "address", "maskLen": + tags[subfield.Name] = decodeTag(subfield) + } + if value := decodeValue(subfield); value != nil { + grouper.Add(measurement, tags, timestamp, subfield.Name, value) + } + if subfield.Name != "nextHop" { + continue + } + //For next hop table fill the keys in the tag - which is address and vrfname + for _, subf := range subfield.Fields { + for _, ff := range subf.Fields { + switch ff.Name { + case "address", "vrfName": + key := "nextHop/" + ff.Name + tags[key] = decodeTag(ff) + } + if value := decodeValue(ff); value != nil { + name := "nextHop/" + ff.Name + grouper.Add(measurement, tags, timestamp, name, value) + } + } + } + } +} + +func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string, path string, tags map[string]string, timestamp time.Time) { + // DME structure: https://developer.cisco.com/site/nxapi-dme-model-reference-api/ + var nxAttributes *telemetry.TelemetryField + isDme := strings.Contains(path, "sys/") + if path == "rib" { + //handle native data path rib + c.parseRib(grouper, field, prefix, path, tags, timestamp) + return + } + if field == nil || !isDme || len(field.Fields) == 0 || len(field.Fields[0].Fields) == 0 || len(field.Fields[0].Fields[0].Fields) == 0 { + return + } + + if field.Fields[0] != nil && field.Fields[0].Fields != nil && field.Fields[0].Fields[0] != nil && field.Fields[0].Fields[0].Fields[0].Name != "attributes" { + return + } + nxAttributes = field.Fields[0].Fields[0].Fields[0].Fields[0] + + for _, subfield := range nxAttributes.Fields { + if subfield.Name == "dn" { + tags["dn"] = decodeTag(subfield) + } else { + c.parseContentField(grouper, subfield, "", path, tags, timestamp) + } + } +} + func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string, path string, tags map[string]string, timestamp time.Time) { name := strings.Replace(field.Name, "-", "_", -1) + + if (name == "modTs" || name == "createTs") && decodeValue(field) == "never" { + return + } if len(name) == 0 { name = prefix } else if len(prefix) > 0 { @@ -416,7 +537,11 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie c.mutex.Unlock() } - grouper.Add(measurement, tags, timestamp, name, value) + if val := c.nxosValueXform(field, value, path); val != nil { + grouper.Add(measurement, tags, timestamp, name, val) + } else { + grouper.Add(measurement, tags, timestamp, name, value) + } return } @@ -430,11 +555,28 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie var nxAttributes, nxChildren, nxRows *telemetry.TelemetryField isNXOS := !strings.ContainsRune(path, ':') // IOS-XR and IOS-XE have a colon in their encoding path, NX-OS does not + isEVENT := isNXOS && strings.Contains(path, "EVENT-LIST") + nxChildren = nil + nxAttributes = nil for _, subfield := range field.Fields { if isNXOS && subfield.Name == "attributes" && len(subfield.Fields) > 0 { nxAttributes = subfield.Fields[0] } else if isNXOS && subfield.Name == "children" && len(subfield.Fields) > 0 { - nxChildren = subfield + if !isEVENT { + nxChildren = subfield + } else { + sub := subfield.Fields + if len(sub) > 0 && sub[0] != nil && sub[0].Fields[0].Name == "subscriptionId" && len(sub[0].Fields) >= 2 { + nxAttributes = sub[0].Fields[1].Fields[0].Fields[0].Fields[0].Fields[0].Fields[0] + } + } + //if nxAttributes == NULL then class based query. + if nxAttributes == nil { + //call function walking over walking list. + for _, sub := range subfield.Fields { + c.parseClassAttributeField(grouper, sub, name, path, tags, timestamp) + } + } } else if isNXOS && strings.HasPrefix(subfield.Name, "ROW_") { nxRows = subfield } else if _, isExtraTag := extraTags[subfield.Name]; !isExtraTag { // Regular telemetry decoding @@ -450,9 +592,16 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie for i, subfield := range row.Fields { if i == 0 { // First subfield contains the index, promote it from value to tag tags[prefix] = decodeTag(subfield) + //We can have subfield so recursively handle it. + if len(row.Fields) == 1 { + tags["row_number"] = strconv.FormatInt(int64(i), 10) + c.parseContentField(grouper, subfield, "", path, tags, timestamp) + } } else { c.parseContentField(grouper, subfield, "", path, tags, timestamp) } + // Nxapi we can't identify keys always from prefix + tags["row_number"] = strconv.FormatInt(int64(i), 10) } delete(tags, prefix) } @@ -531,6 +680,10 @@ const sampleConfig = ` ## Define aliases to map telemetry encoding paths to simple measurement names [inputs.cisco_telemetry_mdt.aliases] ifstats = "ietf-interfaces:interfaces-state/interface/statistics" + ##Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. + [inputs.cisco_telemetry_mdt.dmes] + ModTs = "ignore" + CreateTs = "ignore" ` // SampleConfig of plugin diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index f967044da881b..0a9bde81acaeb 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -311,14 +311,171 @@ func TestHandleNXAPI(t *testing.T) { c.handleTelemetry(data) require.Empty(t, acc.Errors) - tags1 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i1", "source": "hostname", "subscription": "subscription"} + tags1 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i1", "row_number": "0", "source": "hostname", "subscription": "subscription"} fields1 := map[string]interface{}{"value": "foo"} - tags2 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i2", "source": "hostname", "subscription": "subscription"} + tags2 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i2", "row_number": "0", "source": "hostname", "subscription": "subscription"} fields2 := map[string]interface{}{"value": "bar"} acc.AssertContainsTaggedFields(t, "nxapi", fields1, tags1) acc.AssertContainsTaggedFields(t, "nxapi", fields2, tags2) } +func TestHandleNXAPIXformNXAPI(t *testing.T) { + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"nxapi": "show nxapi"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) + + telemetry := &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "show processes cpu", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "TABLE_process_cpu", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "ROW_process_cpu", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "index", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i1"}, + }, + { + Name: "value", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, _ := proto.Marshal(telemetry) + + c.handleTelemetry(data) + require.Empty(t, acc.Errors) + + tags1 := map[string]string{"path": "show processes cpu", "foo": "bar", "TABLE_process_cpu": "i1", "row_number": "0", "source": "hostname", "subscription": "subscription"} + fields1 := map[string]interface{}{"value": "foo"} + acc.AssertContainsTaggedFields(t, "show processes cpu", fields1, tags1) +} + +func TestHandleNXXformMulti(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"dme": "sys/lldp"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) + + telemetry := &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "sys/lldp", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "fooEntity", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "attributes", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "rn", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "some-rn"}, + }, + { + Name: "portIdV", + ValueByType: &telemetry.TelemetryField_Uint32Value{Uint32Value: 12}, + }, + { + Name: "portDesc", + ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 100}, + }, + { + Name: "test", + ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 281474976710655}, + }, + { + Name: "subscriptionId", + ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 2814749767106551}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, _ := proto.Marshal(telemetry) + + c.handleTelemetry(data) + require.Empty(t, acc.Errors) + //validate various transformation scenaarios newly added in the code. + fields := map[string]interface{}{"portIdV": "12", "portDesc": "100", "test": int64(281474976710655), "subscriptionId": "2814749767106551"} + acc.AssertContainsFields(t, "dme", fields) +} + func TestHandleNXDME(t *testing.T) { c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"dme": "sys/dme"}} acc := &testutil.Accumulator{} diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go new file mode 100644 index 0000000000000..60140c030fed2 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go @@ -0,0 +1,892 @@ +package cisco_telemetry_mdt + +import ( + telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" + "strconv" + "strings" +) + +//xform Field to string +func xformValueString(field *telemetry.TelemetryField) string { + var str string + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if len(val.StringValue) > 0 { + return val.StringValue + } + case *telemetry.TelemetryField_Uint32Value: + str = strconv.FormatUint(uint64(val.Uint32Value), 10) + return str + case *telemetry.TelemetryField_Uint64Value: + str = strconv.FormatUint(val.Uint64Value, 10) + return str + case *telemetry.TelemetryField_Sint32Value: + str = strconv.FormatInt(int64(val.Sint32Value), 10) + return str + case *telemetry.TelemetryField_Sint64Value: + str = strconv.FormatInt(val.Sint64Value, 10) + return str + } + return "" +} + +//xform Uint64 to int64 +func nxosValueXformUint64Toint64(field *telemetry.TelemetryField, value interface{}) interface{} { + if field.GetUint64Value() != 0 { + return int64(value.(uint64)) + } + return nil +} + +//xform string to float +func nxosValueXformStringTofloat(field *telemetry.TelemetryField, value interface{}) interface{} { + //convert property to float from string. + vals := field.GetStringValue() + if vals != "" { + if valf, err := strconv.ParseFloat(vals, 64); err == nil { + return valf + } + } + return nil +} + +//xform string to uint64 +func nxosValueXformStringToUint64(field *telemetry.TelemetryField, value interface{}) interface{} { + //string to uint64 + vals := field.GetStringValue() + if vals != "" { + if val64, err := strconv.ParseUint(vals, 10, 64); err == nil { + return val64 + } + } + return nil +} + +//xform string to int64 +func nxosValueXformStringToInt64(field *telemetry.TelemetryField, value interface{}) interface{} { + //string to int64 + vals := field.GetStringValue() + if vals != "" { + if val64, err := strconv.ParseInt(vals, 10, 64); err == nil { + return val64 + } + } + return nil +} + +//auto-xform +func nxosValueAutoXform(field *telemetry.TelemetryField, value interface{}) interface{} { + //check if we want auto xformation + vals := field.GetStringValue() + if vals != "" { + if val64, err := strconv.ParseUint(vals, 10, 64); err == nil { + return val64 + } + if valf, err := strconv.ParseFloat(vals, 64); err == nil { + return valf + } + if val64, err := strconv.ParseInt(vals, 10, 64); err == nil { + return val64 + } + } // switch + return nil +} + +//auto-xform float properties +func nxosValueAutoXformFloatProp(field *telemetry.TelemetryField, value interface{}) interface{} { + //check if we want auto xformation + vals := field.GetStringValue() + if vals != "" { + if valf, err := strconv.ParseFloat(vals, 64); err == nil { + return valf + } + } // switch + return nil +} + +//xform uint64 to string +func nxosValueXformUint64ToString(field *telemetry.TelemetryField, value interface{}) interface{} { + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if len(val.StringValue) > 0 { + return val.StringValue + } + case *telemetry.TelemetryField_Uint64Value: + return strconv.FormatUint(val.Uint64Value, 10) + } + return nil +} + +//Xform value field. +func (c *CiscoTelemetryMDT) nxosValueXform(field *telemetry.TelemetryField, value interface{}, path string) interface{} { + if strings.ContainsRune(path, ':') { + // not NXOS + return nil + } + if _, ok := c.propMap[field.Name]; ok { + return c.propMap[field.Name](field, value) + } + //check if we want auto xformation + if _, ok := c.propMap["auto-prop-xfromi"]; ok { + return c.propMap["auto-prop-xfrom"](field, value) + } + //Now check path based conversion. + //If mapping is found then do the required transformation. + if c.nxpathMap[path] == nil { + return nil + } + switch c.nxpathMap[path][field.Name] { + //Xformation supported is only from String, Uint32 and Uint64 + case "integer": + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if vali, err := strconv.ParseInt(val.StringValue, 10, 32); err == nil { + return vali + } + case *telemetry.TelemetryField_Uint32Value: + vali, ok := value.(uint32) + if ok == true { + return vali + } + case *telemetry.TelemetryField_Uint64Value: + vali, ok := value.(uint64) + if ok == true { + return vali + } + } //switch + return nil + //Xformation supported is only from String + case "float": + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if valf, err := strconv.ParseFloat(val.StringValue, 64); err == nil { + return valf + } + } //switch + return nil + case "string": + return xformValueString(field) + case "int64": + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if vali, err := strconv.ParseInt(val.StringValue, 10, 64); err == nil { + return vali + } + case *telemetry.TelemetryField_Uint64Value: + return int64(value.(uint64)) + } //switch + } //switch + return nil +} + +func (c *CiscoTelemetryMDT) initMemPhys() { + c.nxpathMap["show processes memory physical"] = map[string]string{"processname": "string"} +} + +func (c *CiscoTelemetryMDT) initBgpV4() { + key := "show bgp ipv4 unicast" + c.nxpathMap[key] = make(map[string]string, 1) + c.nxpathMap[key]["aspath"] = "string" +} + +func (c *CiscoTelemetryMDT) initCpu() { + key := "show processes cpu" + c.nxpathMap[key] = make(map[string]string, 5) + c.nxpathMap[key]["kernel_percent"] = "float" + c.nxpathMap[key]["idle_percent"] = "float" + c.nxpathMap[key]["process"] = "string" + c.nxpathMap[key]["user_percent"] = "float" + c.nxpathMap[key]["onesec"] = "float" +} + +func (c *CiscoTelemetryMDT) initResources() { + key := "show system resources" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["cpu_state_user"] = "float" + c.nxpathMap[key]["kernel"] = "float" + c.nxpathMap[key]["current_memory_status"] = "string" + c.nxpathMap[key]["load_avg_15min"] = "float" + c.nxpathMap[key]["idle"] = "float" + c.nxpathMap[key]["load_avg_1min"] = "float" + c.nxpathMap[key]["user"] = "float" + c.nxpathMap[key]["cpu_state_idle"] = "float" + c.nxpathMap[key]["load_avg_5min"] = "float" + c.nxpathMap[key]["cpu_state_kernel"] = "float" +} + +func (c *CiscoTelemetryMDT) initPower() { + key := "show environment power" + c.nxpathMap[key] = make(map[string]string, 100) + c.nxpathMap[key]["reserve_sup"] = "string" + c.nxpathMap[key]["det_volt"] = "string" + c.nxpathMap[key]["heatsink_temp"] = "string" + c.nxpathMap[key]["det_pintot"] = "string" + c.nxpathMap[key]["det_iinb"] = "string" + c.nxpathMap[key]["ps_input_current"] = "string" + c.nxpathMap[key]["modnum"] = "string" + c.nxpathMap[key]["trayfannum"] = "string" + c.nxpathMap[key]["modstatus_3k"] = "string" + c.nxpathMap[key]["fan2rpm"] = "string" + c.nxpathMap[key]["amps_alloced"] = "string" + c.nxpathMap[key]["all_inlets_connected"] = "string" + c.nxpathMap[key]["tot_pow_out_actual_draw"] = "string" + c.nxpathMap[key]["ps_redun_op_mode"] = "string" + c.nxpathMap[key]["curtemp"] = "string" + c.nxpathMap[key]["mod_model"] = "string" + c.nxpathMap[key]["fanmodel"] = "string" + c.nxpathMap[key]["ps_output_current"] = "string" + c.nxpathMap[key]["majthres"] = "string" + c.nxpathMap[key]["input_type"] = "string" + c.nxpathMap[key]["allocated"] = "string" + c.nxpathMap[key]["fanhwver"] = "string" + c.nxpathMap[key]["clkhwver"] = "string" + c.nxpathMap[key]["fannum"] = "string" + c.nxpathMap[key]["watts_requested"] = "string" + c.nxpathMap[key]["cumulative_power"] = "string" + c.nxpathMap[key]["tot_gridB_capacity"] = "string" + c.nxpathMap[key]["pow_used_by_mods"] = "string" + c.nxpathMap[key]["tot_pow_alloc_budgeted"] = "string" + c.nxpathMap[key]["psumod"] = "string" + c.nxpathMap[key]["ps_status_3k"] = "string" + c.nxpathMap[key]["temptype"] = "string" + c.nxpathMap[key]["regval"] = "string" + c.nxpathMap[key]["inlet_temp"] = "string" + c.nxpathMap[key]["det_cord"] = "string" + c.nxpathMap[key]["reserve_fan"] = "string" + c.nxpathMap[key]["det_pina"] = "string" + c.nxpathMap[key]["minthres"] = "string" + c.nxpathMap[key]["actual_draw"] = "string" + c.nxpathMap[key]["sensor"] = "string" + c.nxpathMap[key]["zone"] = "string" + c.nxpathMap[key]["det_iin"] = "string" + c.nxpathMap[key]["det_iout"] = "string" + c.nxpathMap[key]["det_vin"] = "string" + c.nxpathMap[key]["fan1rpm"] = "string" + c.nxpathMap[key]["tot_gridA_capacity"] = "string" + c.nxpathMap[key]["fanperc"] = "string" + c.nxpathMap[key]["det_pout"] = "string" + c.nxpathMap[key]["alarm_str"] = "string" + c.nxpathMap[key]["zonespeed"] = "string" + c.nxpathMap[key]["det_total_cap"] = "string" + c.nxpathMap[key]["reserve_xbar"] = "string" + c.nxpathMap[key]["det_vout"] = "string" + c.nxpathMap[key]["watts_alloced"] = "string" + c.nxpathMap[key]["ps_in_power"] = "string" + c.nxpathMap[key]["tot_pow_input_actual_draw"] = "string" + c.nxpathMap[key]["ps_output_voltage"] = "string" + c.nxpathMap[key]["det_name"] = "string" + c.nxpathMap[key]["tempmod"] = "string" + c.nxpathMap[key]["clockname"] = "string" + c.nxpathMap[key]["fanname"] = "string" + c.nxpathMap[key]["regnumstr"] = "string" + c.nxpathMap[key]["bitnumstr"] = "string" + c.nxpathMap[key]["ps_slot"] = "string" + c.nxpathMap[key]["actual_out"] = "string" + c.nxpathMap[key]["ps_input_voltage"] = "string" + c.nxpathMap[key]["psmodel"] = "string" + c.nxpathMap[key]["speed"] = "string" + c.nxpathMap[key]["clkmodel"] = "string" + c.nxpathMap[key]["ps_redun_mode_3k"] = "string" + c.nxpathMap[key]["tot_pow_capacity"] = "string" + c.nxpathMap[key]["amps"] = "string" + c.nxpathMap[key]["available_pow"] = "string" + c.nxpathMap[key]["reserve_supxbarfan"] = "string" + c.nxpathMap[key]["watts"] = "string" + c.nxpathMap[key]["det_pinb"] = "string" + c.nxpathMap[key]["det_vinb"] = "string" + c.nxpathMap[key]["ps_state"] = "string" + c.nxpathMap[key]["det_sw_alarm"] = "string" + c.nxpathMap[key]["regnum"] = "string" + c.nxpathMap[key]["amps_requested"] = "string" + c.nxpathMap[key]["fanrpm"] = "string" + c.nxpathMap[key]["actual_input"] = "string" + c.nxpathMap[key]["outlet_temp"] = "string" + c.nxpathMap[key]["tot_capa"] = "string" +} + +func (c *CiscoTelemetryMDT) initPtpCorrection() { + key := "show ptp corrections" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["sup-time"] = "string" + c.nxpathMap[key]["correction-val"] = "int64" + c.nxpathMap[key]["ptp-header"] = "string" + c.nxpathMap[key]["intf-name"] = "string" + c.nxpathMap[key]["ptp-end"] = "string" +} + +func (c *CiscoTelemetryMDT) initTrans() { + key := "show interface transceiver details" + c.nxpathMap[key] = make(map[string]string, 100) + c.nxpathMap[key]["uncorrect_ber_alrm_hi"] = "string" + c.nxpathMap[key]["uncorrect_ber_cur_warn_lo"] = "string" + c.nxpathMap[key]["current_warn_lo"] = "float" + c.nxpathMap[key]["pre_fec_ber_max_alrm_hi"] = "string" + c.nxpathMap[key]["serialnum"] = "string" + c.nxpathMap[key]["pre_fec_ber_acc_warn_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_max_warn_lo"] = "string" + c.nxpathMap[key]["laser_temp_warn_hi"] = "float" + c.nxpathMap[key]["type"] = "string" + c.nxpathMap[key]["rx_pwr_0"] = "float" + c.nxpathMap[key]["rx_pwr_warn_hi"] = "float" + c.nxpathMap[key]["uncorrect_ber_warn_hi"] = "string" + c.nxpathMap[key]["qsfp_or_cfp"] = "string" + c.nxpathMap[key]["protocol_type"] = "string" + c.nxpathMap[key]["uncorrect_ber"] = "string" + c.nxpathMap[key]["uncorrect_ber_cur_alrm_hi"] = "string" + c.nxpathMap[key]["tec_current"] = "float" + c.nxpathMap[key]["pre_fec_ber"] = "string" + c.nxpathMap[key]["uncorrect_ber_max_warn_lo"] = "string" + c.nxpathMap[key]["uncorrect_ber_min"] = "string" + c.nxpathMap[key]["current_alrm_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_acc_warn_lo"] = "string" + c.nxpathMap[key]["snr_warn_lo"] = "float" + c.nxpathMap[key]["rev"] = "string" + c.nxpathMap[key]["laser_temp_alrm_lo"] = "float" + c.nxpathMap[key]["current"] = "float" + c.nxpathMap[key]["rx_pwr_1"] = "float" + c.nxpathMap[key]["tec_current_warn_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur_warn_lo"] = "string" + c.nxpathMap[key]["cisco_part_number"] = "string" + c.nxpathMap[key]["uncorrect_ber_acc_warn_hi"] = "string" + c.nxpathMap[key]["temp_warn_hi"] = "float" + c.nxpathMap[key]["laser_freq_warn_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_max_alrm_lo"] = "string" + c.nxpathMap[key]["snr_alrm_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur_alrm_lo"] = "string" + c.nxpathMap[key]["tx_pwr_alrm_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_min_warn_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_min_warn_hi"] = "string" + c.nxpathMap[key]["rx_pwr_alrm_hi"] = "float" + c.nxpathMap[key]["tec_current_warn_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_acc_alrm_hi"] = "string" + c.nxpathMap[key]["rx_pwr_4"] = "float" + c.nxpathMap[key]["uncorrect_ber_cur"] = "string" + c.nxpathMap[key]["pre_fec_ber_alrm_hi"] = "string" + c.nxpathMap[key]["rx_pwr_warn_lo"] = "float" + c.nxpathMap[key]["bit_encoding"] = "string" + c.nxpathMap[key]["pre_fec_ber_acc"] = "string" + c.nxpathMap[key]["sfp"] = "string" + c.nxpathMap[key]["pre_fec_ber_acc_alrm_hi"] = "string" + c.nxpathMap[key]["pre_fec_ber_min"] = "string" + c.nxpathMap[key]["current_warn_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_max_alrm_lo"] = "string" + c.nxpathMap[key]["uncorrect_ber_cur_warn_hi"] = "string" + c.nxpathMap[key]["current_alrm_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_acc_alrm_lo"] = "string" + c.nxpathMap[key]["snr_alrm_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_acc"] = "string" + c.nxpathMap[key]["tx_len"] = "string" + c.nxpathMap[key]["uncorrect_ber_alrm_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_alrm_lo"] = "string" + c.nxpathMap[key]["txcvr_type"] = "string" + c.nxpathMap[key]["tec_current_alrm_lo"] = "float" + c.nxpathMap[key]["volt_alrm_lo"] = "float" + c.nxpathMap[key]["temp_alrm_hi"] = "float" + c.nxpathMap[key]["uncorrect_ber_min_warn_lo"] = "string" + c.nxpathMap[key]["laser_freq"] = "float" + c.nxpathMap[key]["uncorrect_ber_min_warn_hi"] = "string" + c.nxpathMap[key]["uncorrect_ber_cur_alrm_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_max_warn_hi"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["fiber_type_byte0"] = "string" + c.nxpathMap[key]["laser_freq_alrm_lo"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur_warn_hi"] = "string" + c.nxpathMap[key]["partnum"] = "string" + c.nxpathMap[key]["snr"] = "float" + c.nxpathMap[key]["volt_alrm_hi"] = "float" + c.nxpathMap[key]["connector_type"] = "string" + c.nxpathMap[key]["tx_medium"] = "string" + c.nxpathMap[key]["tx_pwr_warn_hi"] = "float" + c.nxpathMap[key]["cisco_vendor_id"] = "string" + c.nxpathMap[key]["cisco_ext_id"] = "string" + c.nxpathMap[key]["uncorrect_ber_max_warn_hi"] = "string" + c.nxpathMap[key]["pre_fec_ber_max"] = "string" + c.nxpathMap[key]["uncorrect_ber_min_alrm_hi"] = "string" + c.nxpathMap[key]["pre_fec_ber_warn_hi"] = "string" + c.nxpathMap[key]["tx_pwr_alrm_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_warn_lo"] = "string" + c.nxpathMap[key]["10gbe_code"] = "string" + c.nxpathMap[key]["cable_type"] = "string" + c.nxpathMap[key]["laser_freq_alrm_hi"] = "float" + c.nxpathMap[key]["rx_pwr_3"] = "float" + c.nxpathMap[key]["rx_pwr"] = "float" + c.nxpathMap[key]["volt_warn_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur_alrm_hi"] = "string" + c.nxpathMap[key]["temperature"] = "float" + c.nxpathMap[key]["voltage"] = "float" + c.nxpathMap[key]["tx_pwr"] = "float" + c.nxpathMap[key]["laser_temp_alrm_hi"] = "float" + c.nxpathMap[key]["tx_speeds"] = "string" + c.nxpathMap[key]["uncorrect_ber_min_alrm_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_min_alrm_hi"] = "string" + c.nxpathMap[key]["ciscoid"] = "string" + c.nxpathMap[key]["tx_pwr_warn_lo"] = "float" + c.nxpathMap[key]["cisco_product_id"] = "string" + c.nxpathMap[key]["info_not_available"] = "string" + c.nxpathMap[key]["laser_temp"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur"] = "string" + c.nxpathMap[key]["fiber_type_byte1"] = "string" + c.nxpathMap[key]["tx_type"] = "string" + c.nxpathMap[key]["pre_fec_ber_min_alrm_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_warn_lo"] = "string" + c.nxpathMap[key]["temp_alrm_lo"] = "float" + c.nxpathMap[key]["volt_warn_lo"] = "float" + c.nxpathMap[key]["rx_pwr_alrm_lo"] = "float" + c.nxpathMap[key]["rx_pwr_2"] = "float" + c.nxpathMap[key]["tec_current_alrm_hi"] = "float" + c.nxpathMap[key]["uncorrect_ber_acc_alrm_lo"] = "string" + c.nxpathMap[key]["uncorrect_ber_max_alrm_hi"] = "string" + c.nxpathMap[key]["temp_warn_lo"] = "float" + c.nxpathMap[key]["snr_warn_hi"] = "float" + c.nxpathMap[key]["laser_temp_warn_lo"] = "float" + c.nxpathMap[key]["pre_fec_ber_acc_warn_hi"] = "string" + c.nxpathMap[key]["laser_freq_warn_hi"] = "float" + c.nxpathMap[key]["uncorrect_ber_max"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmp() { + key := "show ip igmp groups vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["group-type"] = "string" + c.nxpathMap[key]["translate"] = "string" + c.nxpathMap[key]["sourceaddress"] = "string" + c.nxpathMap[key]["vrf-cntxt"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["group-addr"] = "string" + c.nxpathMap[key]["uptime"] = "string" +} + +func (c *CiscoTelemetryMDT) initVrfAll() { + key := "show ip igmp interface vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["if-name"] = "string" + c.nxpathMap[key]["static-group-map"] = "string" + c.nxpathMap[key]["rll"] = "string" + c.nxpathMap[key]["host-proxy"] = "string" + c.nxpathMap[key]["il"] = "string" + c.nxpathMap[key]["join-group-map"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["host-proxy-group-map"] = "string" + c.nxpathMap[key]["next-query"] = "string" + c.nxpathMap[key]["q-ver"] = "string" + c.nxpathMap[key]["if-status"] = "string" + c.nxpathMap[key]["un-solicited"] = "string" + c.nxpathMap[key]["ip-sum"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmpSnoop() { + key := "show ip igmp snooping" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["repsup"] = "string" + c.nxpathMap[key]["omf_enabled"] = "string" + c.nxpathMap[key]["v3repsup"] = "string" + c.nxpathMap[key]["grepsup"] = "string" + c.nxpathMap[key]["lkupmode"] = "string" + c.nxpathMap[key]["description"] = "string" + c.nxpathMap[key]["vlinklocalgrpsup"] = "string" + c.nxpathMap[key]["gv3repsup"] = "string" + c.nxpathMap[key]["reportfloodall"] = "string" + c.nxpathMap[key]["leavegroupaddress"] = "string" + c.nxpathMap[key]["enabled"] = "string" + c.nxpathMap[key]["omf"] = "string" + c.nxpathMap[key]["sq"] = "string" + c.nxpathMap[key]["sqr"] = "string" + c.nxpathMap[key]["eht"] = "string" + c.nxpathMap[key]["fl"] = "string" + c.nxpathMap[key]["reportfloodenable"] = "string" + c.nxpathMap[key]["snoop-on"] = "string" + c.nxpathMap[key]["glinklocalgrpsup"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmpSnoopGroups() { + key := "show ip igmp snooping groups" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["src-uptime"] = "string" + c.nxpathMap[key]["source"] = "string" + c.nxpathMap[key]["dyn-if-name"] = "string" + c.nxpathMap[key]["raddr"] = "string" + c.nxpathMap[key]["old-host"] = "string" + c.nxpathMap[key]["snoop-enabled"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["omf-enabled"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["src-expires"] = "string" + c.nxpathMap[key]["addr"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmpSnoopGroupDetails() { + key := "show ip igmp snooping groups detail" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["src-uptime"] = "string" + c.nxpathMap[key]["source"] = "string" + c.nxpathMap[key]["dyn-if-name"] = "string" + c.nxpathMap[key]["raddr"] = "string" + c.nxpathMap[key]["old-host"] = "string" + c.nxpathMap[key]["snoop-enabled"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["omf-enabled"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["src-expires"] = "string" + c.nxpathMap[key]["addr"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmpSnoopGroupsSumm() { + key := "show ip igmp snooping groups summary" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["src-uptime"] = "string" + c.nxpathMap[key]["source"] = "string" + c.nxpathMap[key]["dyn-if-name"] = "string" + c.nxpathMap[key]["raddr"] = "string" + c.nxpathMap[key]["old-host"] = "string" + c.nxpathMap[key]["snoop-enabled"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["omf-enabled"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["src-expires"] = "string" + c.nxpathMap[key]["addr"] = "string" +} + +func (c *CiscoTelemetryMDT) initMrouter() { + key := "show ip igmp snooping mrouter" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["expires"] = "string" +} + +func (c *CiscoTelemetryMDT) initSnoopStats() { + key := "show ip igmp snooping statistics" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["ut"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimInterface() { + key := "show ip pim interface vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["if-is-border"] = "string" + c.nxpathMap[key]["cached_if_status"] = "string" + c.nxpathMap[key]["genid"] = "string" + c.nxpathMap[key]["if-name"] = "string" + c.nxpathMap[key]["last-cleared"] = "string" + c.nxpathMap[key]["is-pim-vpc-svi"] = "string" + c.nxpathMap[key]["if-addr"] = "string" + c.nxpathMap[key]["is-pim-enabled"] = "string" + c.nxpathMap[key]["pim-dr-address"] = "string" + c.nxpathMap[key]["hello-timer"] = "string" + c.nxpathMap[key]["pim-bfd-enabled"] = "string" + c.nxpathMap[key]["vpc-peer-nbr"] = "string" + c.nxpathMap[key]["nbr-policy-name"] = "string" + c.nxpathMap[key]["is-auto-enabled"] = "string" + c.nxpathMap[key]["if-status"] = "string" + c.nxpathMap[key]["jp-out-policy-name"] = "string" + c.nxpathMap[key]["if-addr-summary"] = "string" + c.nxpathMap[key]["if-dr"] = "string" + c.nxpathMap[key]["jp-in-policy-name"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimNeigh() { + key := "show ip pim neighbor vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["longest-hello-intvl"] = "string" + c.nxpathMap[key]["if-name"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["bfd-state"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimRoute() { + key := "show ip pim route vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["rpf-nbr-1"] = "string" + c.nxpathMap[key]["rpf-nbr-addr"] = "string" + c.nxpathMap[key]["register"] = "string" + c.nxpathMap[key]["sgexpire"] = "string" + c.nxpathMap[key]["oif-bf-str"] = "string" + c.nxpathMap[key]["mcast-addrs"] = "string" + c.nxpathMap[key]["rp-addr"] = "string" + c.nxpathMap[key]["immediate-bf-str"] = "string" + c.nxpathMap[key]["sgr-prune-list-bf-str"] = "string" + c.nxpathMap[key]["context-name"] = "string" + c.nxpathMap[key]["intf-name"] = "string" + c.nxpathMap[key]["immediate-timeout-bf-str"] = "string" + c.nxpathMap[key]["rp-local"] = "string" + c.nxpathMap[key]["sgrexpire"] = "string" + c.nxpathMap[key]["timeout-bf-str"] = "string" + c.nxpathMap[key]["timeleft"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimRp() { + key := "show ip pim rp vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["is-bsr-forward-only"] = "string" + c.nxpathMap[key]["is-rpaddr-local"] = "string" + c.nxpathMap[key]["bsr-expires"] = "string" + c.nxpathMap[key]["autorp-expire-time"] = "string" + c.nxpathMap[key]["rp-announce-policy-name"] = "string" + c.nxpathMap[key]["rp-cand-policy-name"] = "string" + c.nxpathMap[key]["is-autorp-forward-only"] = "string" + c.nxpathMap[key]["rp-uptime"] = "string" + c.nxpathMap[key]["rp-owner-flags"] = "string" + c.nxpathMap[key]["df-bits-recovered"] = "string" + c.nxpathMap[key]["bs-timer"] = "string" + c.nxpathMap[key]["rp-discovery-policy-name"] = "string" + c.nxpathMap[key]["arp-rp-addr"] = "string" + c.nxpathMap[key]["auto-rp-addr"] = "string" + c.nxpathMap[key]["autorp-expires"] = "string" + c.nxpathMap[key]["is-autorp-enabled"] = "string" + c.nxpathMap[key]["is-bsr-local"] = "string" + c.nxpathMap[key]["is-autorp-listen-only"] = "string" + c.nxpathMap[key]["autorp-dis-timer"] = "string" + c.nxpathMap[key]["bsr-rp-expires"] = "string" + c.nxpathMap[key]["static-rp-group-map"] = "string" + c.nxpathMap[key]["rp-source"] = "string" + c.nxpathMap[key]["autorp-cand-address"] = "string" + c.nxpathMap[key]["autorp-up-time"] = "string" + c.nxpathMap[key]["is-bsr-enabled"] = "string" + c.nxpathMap[key]["bsr-uptime"] = "string" + c.nxpathMap[key]["is-bsr-listen-only"] = "string" + c.nxpathMap[key]["rpf-nbr-address"] = "string" + c.nxpathMap[key]["is-rp-local"] = "string" + c.nxpathMap[key]["is-autorp-local"] = "string" + c.nxpathMap[key]["bsr-policy-name"] = "string" + c.nxpathMap[key]["grange-grp"] = "string" + c.nxpathMap[key]["rp-addr"] = "string" + c.nxpathMap[key]["anycast-rp-addr"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimStats() { + key := "show ip pim statistics vrf all" + c.nxpathMap[key] = make(map[string]string, 1) + c.nxpathMap[key]["vrf-name"] = "string" +} + +func (c *CiscoTelemetryMDT) initIntfBrief() { + key := "show interface brief" + c.nxpathMap[key] = make(map[string]string, 2) + c.nxpathMap[key]["speed"] = "string" + c.nxpathMap[key]["vlan"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimVrf() { + key := "show ip pim vrf all" + c.nxpathMap[key] = make(map[string]string, 1) + c.nxpathMap[key]["table-id"] = "string" +} + +func (c *CiscoTelemetryMDT) initIpMroute() { + key := "show ip mroute summary vrf all" + c.nxpathMap[key] = make(map[string]string, 40) + c.nxpathMap[key]["nat-mode"] = "string" + c.nxpathMap[key]["oif-name"] = "string" + c.nxpathMap[key]["nat-route-type"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["mofrr-nbr"] = "string" + c.nxpathMap[key]["extranet_addr"] = "string" + c.nxpathMap[key]["stale-route"] = "string" + c.nxpathMap[key]["pending"] = "string" + c.nxpathMap[key]["bidir"] = "string" + c.nxpathMap[key]["expry_timer"] = "string" + c.nxpathMap[key]["mofrr-iif"] = "string" + c.nxpathMap[key]["group_addrs"] = "string" + c.nxpathMap[key]["mpib-name"] = "string" + c.nxpathMap[key]["rpf"] = "string" + c.nxpathMap[key]["mcast-addrs"] = "string" + c.nxpathMap[key]["route-mdt-iod"] = "string" + c.nxpathMap[key]["sr-oif"] = "string" + c.nxpathMap[key]["stats-rate-buf"] = "string" + c.nxpathMap[key]["source_addr"] = "string" + c.nxpathMap[key]["route-iif"] = "string" + c.nxpathMap[key]["rpf-nbr"] = "string" + c.nxpathMap[key]["translated-route-src"] = "string" + c.nxpathMap[key]["group_addr"] = "string" + c.nxpathMap[key]["lisp-src-rloc"] = "string" + c.nxpathMap[key]["stats-pndg"] = "string" + c.nxpathMap[key]["rate_buf"] = "string" + c.nxpathMap[key]["extranet_vrf_name"] = "string" + c.nxpathMap[key]["fabric-interest"] = "string" + c.nxpathMap[key]["translated-route-grp"] = "string" + c.nxpathMap[key]["internal"] = "string" + c.nxpathMap[key]["oif-mpib-name"] = "string" + c.nxpathMap[key]["oif-uptime"] = "string" + c.nxpathMap[key]["omd-vpc-svi"] = "string" + c.nxpathMap[key]["source_addrs"] = "string" + c.nxpathMap[key]["stale-oif"] = "string" + c.nxpathMap[key]["core-interest"] = "string" + c.nxpathMap[key]["oif-list-bitfield"] = "string" +} + +func (c *CiscoTelemetryMDT) initIpv6Mroute() { + key := "show ipv6 mroute summary vrf all" + c.nxpathMap[key] = make(map[string]string, 40) + c.nxpathMap[key]["nat-mode"] = "string" + c.nxpathMap[key]["oif-name"] = "string" + c.nxpathMap[key]["nat-route-type"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["mofrr-nbr"] = "string" + c.nxpathMap[key]["extranet_addr"] = "string" + c.nxpathMap[key]["stale-route"] = "string" + c.nxpathMap[key]["pending"] = "string" + c.nxpathMap[key]["bidir"] = "string" + c.nxpathMap[key]["expry_timer"] = "string" + c.nxpathMap[key]["mofrr-iif"] = "string" + c.nxpathMap[key]["group_addrs"] = "string" + c.nxpathMap[key]["mpib-name"] = "string" + c.nxpathMap[key]["rpf"] = "string" + c.nxpathMap[key]["mcast-addrs"] = "string" + c.nxpathMap[key]["route-mdt-iod"] = "string" + c.nxpathMap[key]["sr-oif"] = "string" + c.nxpathMap[key]["stats-rate-buf"] = "string" + c.nxpathMap[key]["source_addr"] = "string" + c.nxpathMap[key]["route-iif"] = "string" + c.nxpathMap[key]["rpf-nbr"] = "string" + c.nxpathMap[key]["translated-route-src"] = "string" + c.nxpathMap[key]["group_addr"] = "string" + c.nxpathMap[key]["lisp-src-rloc"] = "string" + c.nxpathMap[key]["stats-pndg"] = "string" + c.nxpathMap[key]["rate_buf"] = "string" + c.nxpathMap[key]["extranet_vrf_name"] = "string" + c.nxpathMap[key]["fabric-interest"] = "string" + c.nxpathMap[key]["translated-route-grp"] = "string" + c.nxpathMap[key]["internal"] = "string" + c.nxpathMap[key]["oif-mpib-name"] = "string" + c.nxpathMap[key]["oif-uptime"] = "string" + c.nxpathMap[key]["omd-vpc-svi"] = "string" + c.nxpathMap[key]["source_addrs"] = "string" + c.nxpathMap[key]["stale-oif"] = "string" + c.nxpathMap[key]["core-interest"] = "string" + c.nxpathMap[key]["oif-list-bitfield"] = "string" +} + +func (c *CiscoTelemetryMDT) initVpc() { + key := "sys/vpc" + c.nxpathMap[key] = make(map[string]string, 5) + c.nxpathMap[key]["type2CompatQualStr"] = "string" + c.nxpathMap[key]["compatQualStr"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["issuFromVer"] = "string" + c.nxpathMap[key]["issuToVer"] = "string" +} + +func (c *CiscoTelemetryMDT) initBgp() { + key := "sys/bgp" + c.nxpathMap[key] = make(map[string]string, 18) + c.nxpathMap[key]["dynRtMap"] = "string" + c.nxpathMap[key]["nhRtMap"] = "string" + c.nxpathMap[key]["epePeerSet"] = "string" + c.nxpathMap[key]["asn"] = "string" + c.nxpathMap[key]["peerImp"] = "string" + c.nxpathMap[key]["wght"] = "string" + c.nxpathMap[key]["assocDom"] = "string" + c.nxpathMap[key]["tblMap"] = "string" + c.nxpathMap[key]["unSupprMap"] = "string" + c.nxpathMap[key]["sessionContImp"] = "string" + c.nxpathMap[key]["allocLblRtMap"] = "string" + c.nxpathMap[key]["defMetric"] = "string" + c.nxpathMap[key]["password"] = "string" + c.nxpathMap[key]["retainRttRtMap"] = "string" + c.nxpathMap[key]["clusterId"] = "string" + c.nxpathMap[key]["localAsn"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["defOrgRtMap"] = "string" +} + +func (c *CiscoTelemetryMDT) initCh() { + key := "sys/ch" + c.nxpathMap[key] = make(map[string]string, 10) + c.nxpathMap[key]["fanName"] = "string" + c.nxpathMap[key]["typeCordConnected"] = "string" + c.nxpathMap[key]["vendor"] = "string" + c.nxpathMap[key]["model"] = "string" + c.nxpathMap[key]["rev"] = "string" + c.nxpathMap[key]["vdrId"] = "string" + c.nxpathMap[key]["hardwareAlarm"] = "string" + c.nxpathMap[key]["unit"] = "string" + c.nxpathMap[key]["hwVer"] = "string" +} + +func (c *CiscoTelemetryMDT) initIntf() { + key := "sys/intf" + c.nxpathMap[key] = make(map[string]string, 10) + c.nxpathMap[key]["descr"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["lastStCause"] = "string" + c.nxpathMap[key]["description"] = "string" + c.nxpathMap[key]["unit"] = "string" + c.nxpathMap[key]["operFECMode"] = "string" + c.nxpathMap[key]["operBitset"] = "string" + c.nxpathMap[key]["mdix"] = "string" +} + +func (c *CiscoTelemetryMDT) initProcsys() { + key := "sys/procsys" + c.nxpathMap[key] = make(map[string]string, 10) + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["id"] = "string" + c.nxpathMap[key]["upTs"] = "string" + c.nxpathMap[key]["interval"] = "string" + c.nxpathMap[key]["memstatus"] = "string" +} + +func (c *CiscoTelemetryMDT) initProc() { + key := "sys/proc" + c.nxpathMap[key] = make(map[string]string, 2) + c.nxpathMap[key]["processName"] = "string" + c.nxpathMap[key]["procArg"] = "string" +} + +func (c *CiscoTelemetryMDT) initBfd() { + key := "sys/bfd/inst" + c.nxpathMap[key] = make(map[string]string, 4) + c.nxpathMap[key]["descr"] = "string" + c.nxpathMap[key]["vrfName"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["name"] = "string" +} + +func (c *CiscoTelemetryMDT) initLldp() { + key := "sys/lldp" + c.nxpathMap[key] = make(map[string]string, 7) + c.nxpathMap[key]["sysDesc"] = "string" + c.nxpathMap[key]["portDesc"] = "string" + c.nxpathMap[key]["portIdV"] = "string" + c.nxpathMap[key]["chassisIdV"] = "string" + c.nxpathMap[key]["sysName"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["id"] = "string" +} + +func (c *CiscoTelemetryMDT) initDb() { + c.nxpathMap = make(map[string]map[string]string, 200) + + c.initPower() + c.initMemPhys() + c.initBgpV4() + c.initCpu() + c.initResources() + c.initPtpCorrection() + c.initTrans() + c.initIgmp() + c.initVrfAll() + c.initIgmpSnoop() + c.initIgmpSnoopGroups() + c.initIgmpSnoopGroupDetails() + c.initIgmpSnoopGroupsSumm() + c.initMrouter() + c.initSnoopStats() + c.initPimInterface() + c.initPimNeigh() + c.initPimRoute() + c.initPimRp() + c.initPimStats() + c.initIntfBrief() + c.initPimVrf() + c.initIpMroute() + c.initIpv6Mroute() + c.initVpc() + c.initBgp() + c.initCh() + c.initIntf() + c.initProcsys() + c.initProc() + c.initBfd() + c.initLldp() +} From 13a4657005daf915b9c41b50c6e7a6f64bba75fb Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Wed, 17 Mar 2021 14:52:26 -0400 Subject: [PATCH 307/761] ci config changes (#9001) --- .circleci/config.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index ce38bf07f5bb5..8ce079a379ed7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,7 @@ version: 2.1 orbs: win: circleci/windows@2.4.0 + aws-cli: circleci/aws-cli@1.4.0 executors: go-1_15: @@ -179,6 +180,20 @@ jobs: - store_artifacts: path: './dist' destination: 'build/dist' + share-artifacts: + executor: aws-cli/default + steps: + - aws-cli/setup: + profile-name: TIGER + aws-access-key-id: TIGER_AWS_ACCESS_KEY_ID + aws-secret-access-key: TIGER_AWS_SECRET_ACCESS_KEY + aws-region: TIGER_AWS_DEFAULT_REGION + - run: + command: | + PR=${CIRCLE_PULL_REQUEST##*/} + printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" + aws lambda invoke --function-name telegraf-tiger-prod-share_artifacts --profile TIGER --cli-binary-format raw-in-base64-out --payload "$payload" output.json + workflows: version: 2 check: @@ -227,6 +242,13 @@ workflows: - 'test-go-1_15-386' - 'test-go-1_16' - 'test-go-1_16-386' + - 'share-artifacts': + requires: + - 'package' + filters: + branches: + ignore: + - master - 'release': requires: - 'test-go-windows' From 927612a0a73c93c3adf7cd785032807f86b94151 Mon Sep 17 00:00:00 2001 From: Felix Moessbauer Date: Wed, 17 Mar 2021 20:42:35 +0100 Subject: [PATCH 308/761] fix segfaults in sflow plugin by checking if protocol headers are set (#8995) --- plugins/inputs/sflow/types.go | 26 +++++++++++++++--- plugins/inputs/sflow/types_test.go | 43 ++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 4 deletions(-) create mode 100644 plugins/inputs/sflow/types_test.go diff --git a/plugins/inputs/sflow/types.go b/plugins/inputs/sflow/types.go index a48857803b40d..c415db26f784a 100644 --- a/plugins/inputs/sflow/types.go +++ b/plugins/inputs/sflow/types.go @@ -118,12 +118,22 @@ type RawPacketHeaderFlowData struct { } func (h RawPacketHeaderFlowData) GetTags() map[string]string { - t := h.Header.GetTags() + var t map[string]string + if h.Header != nil { + t = h.Header.GetTags() + } else { + t = map[string]string{} + } t["header_protocol"] = HeaderProtocolMap[h.HeaderProtocol] return t } func (h RawPacketHeaderFlowData) GetFields() map[string]interface{} { - f := h.Header.GetFields() + var f map[string]interface{} + if h.Header != nil { + f = h.Header.GetFields() + } else { + f = map[string]interface{}{} + } f["bytes"] = h.Bytes f["frame_length"] = h.FrameLength f["header_length"] = h.HeaderLength @@ -143,14 +153,22 @@ type EthHeader struct { } func (h EthHeader) GetTags() map[string]string { - t := h.IPHeader.GetTags() + var t map[string]string + if h.IPHeader != nil { + t = h.IPHeader.GetTags() + } else { + t = map[string]string{} + } t["src_mac"] = net.HardwareAddr(h.SourceMAC[:]).String() t["dst_mac"] = net.HardwareAddr(h.DestinationMAC[:]).String() t["ether_type"] = h.EtherType return t } func (h EthHeader) GetFields() map[string]interface{} { - return h.IPHeader.GetFields() + if h.IPHeader != nil { + return h.IPHeader.GetFields() + } + return map[string]interface{}{} } type ProtocolHeader ContainsMetricData diff --git a/plugins/inputs/sflow/types_test.go b/plugins/inputs/sflow/types_test.go new file mode 100644 index 0000000000000..d59ac0ae23941 --- /dev/null +++ b/plugins/inputs/sflow/types_test.go @@ -0,0 +1,43 @@ +package sflow + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRawPacketHeaderFlowData(t *testing.T) { + h := RawPacketHeaderFlowData{ + HeaderProtocol: HeaderProtocolTypeEthernetISO88023, + FrameLength: 64, + Bytes: 64, + StrippedOctets: 0, + HeaderLength: 0, + Header: nil, + } + tags := h.GetTags() + fields := h.GetFields() + + require.NotNil(t, fields) + require.NotNil(t, tags) + require.Contains(t, tags, "header_protocol") + require.Equal(t, 1, len(tags)) +} + +// process a raw ethernet packet without any encapsulated protocol +func TestEthHeader(t *testing.T) { + h := EthHeader{ + DestinationMAC: [6]byte{0xca, 0xff, 0xee, 0xff, 0xe, 0x0}, + SourceMAC: [6]byte{0xde, 0xad, 0xbe, 0xef, 0x0, 0x0}, + TagProtocolIdentifier: 0x88B5, // IEEE Std 802 - Local Experimental Ethertype + TagControlInformation: 0, + EtherTypeCode: 0, + EtherType: "", + IPHeader: nil, + } + tags := h.GetTags() + fields := h.GetFields() + + require.NotNil(t, fields) + require.NotNil(t, tags) +} From 52528067537887d1996039bf9ee354feac2b178a Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 17 Mar 2021 16:45:15 -0400 Subject: [PATCH 309/761] Update changelog (cherry picked from commit c5a95ded035cd828c219bd01badc47574a49f2bc) --- CHANGELOG.md | 15 +++++++++++---- etc/telegraf.conf | 23 ++++++++++++++++++----- 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 88534431b60c4..523e1d7ed4795 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,9 @@ -## v1.18.0-rc1 [2021-03-10] +## v1.18.0 [2021-03-17] #### Release Notes - - Support Go version 1.16 + - Support Go version 1.16.2 + - Added support for code signing in Windows #### Bugfixes @@ -22,6 +23,8 @@ - [#8953](https://github.com/influxdata/telegraf/pull/8953) `agent` Reset the flush interval timer when flush is requested or batch is ready. - [#8954](https://github.com/influxdata/telegraf/pull/8954) `common.kafka` Fix max open requests to one if idempotent writes is set to true - [#8721](https://github.com/influxdata/telegraf/pull/8721) `inputs.kube_inventory` Set $HOSTIP in default URL + - [#8995](https://github.com/influxdata/telegraf/pull/8995) `inputs.sflow` fix segfaults in sflow plugin by checking if protocol headers are set + - [#8986](https://github.com/influxdata/telegraf/pull/8986) `outputs.nats` nats_output: use the configured credentials file #### Features @@ -29,19 +32,21 @@ - [#8852](https://github.com/influxdata/telegraf/pull/8852) `processors.starlark` Add Starlark script for estimating Line Protocol cardinality - [#8828](https://github.com/influxdata/telegraf/pull/8828) `serializers.msgpack` Add MessagePack output data format - [#8915](https://github.com/influxdata/telegraf/pull/8915) `inputs.cloudwatch` add proxy - - [#8910](https://github.com/influxdata/telegraf/pull/8910) Display error message on badly formatted config string array (eg. namepass) + - [#8910](https://github.com/influxdata/telegraf/pull/8910) `agent` Display error message on badly formatted config string array (eg. namepass) - [#8785](https://github.com/influxdata/telegraf/pull/8785) `inputs.diskio` Non systemd support with unittest - [#8850](https://github.com/influxdata/telegraf/pull/8850) `inputs.snmp` Support more snmpv3 authentication protocols - [#8813](https://github.com/influxdata/telegraf/pull/8813) `inputs.redfish` added member_id as tag(as it is a unique value) for redfish plugin and added address of the server when the status is other than 200 for better debugging - [#8613](https://github.com/influxdata/telegraf/pull/8613) `inputs.phpfpm` Support exclamation mark to create non-matching list in tail plugin - [#8179](https://github.com/influxdata/telegraf/pull/8179) `inputs.statsd` Add support for datadog distributions metric - - [#8803](https://github.com/influxdata/telegraf/pull/8803) Add default retry for load config via url + - [#8803](https://github.com/influxdata/telegraf/pull/8803) `agent` Add default retry for load config via url - [#8816](https://github.com/influxdata/telegraf/pull/8816) Code Signing for Windows - [#8772](https://github.com/influxdata/telegraf/pull/8772) `processors.starlark` Allow to provide constants to a starlark script - [#8749](https://github.com/influxdata/telegraf/pull/8749) `outputs.newrelic` Add HTTP proxy setting to New Relic output plugin - [#8543](https://github.com/influxdata/telegraf/pull/8543) `inputs.elasticsearch` Add configurable number of 'most recent' date-stamped indices to gather in Elasticsearch input - [#8675](https://github.com/influxdata/telegraf/pull/8675) `processors.starlark` Add Starlark parsing example of nested JSON - [#8762](https://github.com/influxdata/telegraf/pull/8762) `inputs.prometheus` Optimize for bigger kubernetes clusters (500+ pods) + - [#8950](https://github.com/influxdata/telegraf/pull/8950) `inputs.teamspeak` Teamspeak input plugin query clients + - [#8849](https://github.com/influxdata/telegraf/pull/8849) `inputs.sqlserver` Filter data out from system databases for Azure SQL DB only #### New Input Plugins @@ -57,6 +62,7 @@ - [#8398](https://github.com/influxdata/telegraf/pull/8398) Sensu Go Output Plugin for Telegraf - [#8450](https://github.com/influxdata/telegraf/pull/8450) plugin: output loki - [#6714](https://github.com/influxdata/telegraf/pull/6714) SignalFx Output + - [#8634](https://github.com/influxdata/telegraf/pull/8634) Bigquery output #### New Aggregator Plugins @@ -70,6 +76,7 @@ #### New External Plugins - [#8897](https://github.com/influxdata/telegraf/pull/8897) add SMCIPMITool input to external plugin list + - [#8898](https://github.com/influxdata/telegraf/pull/8898) Add Plex Webhooks external plugin ## v1.17.3 [2021-02-17] diff --git a/etc/telegraf.conf b/etc/telegraf.conf index c6774d5a30ef7..1389597e07ab6 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -343,6 +343,20 @@ # # endpoint_url = "https://monitoring.core.usgovcloudapi.net" +# [[outputs.bigquery]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## The BigQuery dataset +# dataset = "telegraf" +# +# ## Timeout for BigQuery operations. +# # timeout = "5s" +# +# ## Character to replace hyphens on Metric name +# # replace_hyphen_to = "_" + + # # Publish Telegraf metrics to a Google Cloud PubSub topic # [[outputs.cloud_pubsub]] # ## Required. Name of Google Cloud Platform (GCP) Project that owns @@ -5137,7 +5151,7 @@ # # timeout = "5ms" -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false @@ -6970,7 +6984,7 @@ # ## This value is propagated to pqos tool. Interval format is defined by pqos itself. # ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. # # sampling_interval = "10" -# +# # ## Optionally specify the path to pqos executable. # ## If not provided, auto discovery will be performed. # # pqos_path = "/usr/local/bin/pqos" @@ -6978,12 +6992,12 @@ # ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. # ## If not provided, default value is false. # # shortened_metrics = false -# +# # ## Specify the list of groups of CPU core(s) to be provided as pqos input. # ## Mandatory if processes aren't set and forbidden if processes are specified. # ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] # # cores = ["0-3"] -# +# # ## Specify the list of processes for which Metrics will be collected. # ## Mandatory if cores aren't set and forbidden if cores are specified. # ## e.g. ["qemu", "pmd"] @@ -8180,4 +8194,3 @@ # [[inputs.zipkin]] # # path = "/api/v1/spans" # URL path for span data # # port = 9411 # Port on which Telegraf listens - From 79f5803444e0bbe74c7d07234a36472b25a58533 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 17 Mar 2021 16:35:25 -0500 Subject: [PATCH 310/761] Migrate from github.com/ericchiang/k8s to github.com/kubernetes/client-go (#8937) * new k8 client * Make all tests pass * Update licenses * add timeout back * Resolve merge conflicts * Fix tests and linter * Fix linter errors * Linting issues * Extra empty line Co-authored-by: Bas <3441183+BattleBas@users.noreply.github.com> --- docs/LICENSE_OF_DEPENDENCIES.md | 16 +- go.mod | 25 +- go.sum | 200 +++++--- plugins/inputs/kube_inventory/client.go | 91 ++-- plugins/inputs/kube_inventory/client_test.go | 6 +- plugins/inputs/kube_inventory/daemonset.go | 31 +- .../inputs/kube_inventory/daemonset_test.go | 68 +-- plugins/inputs/kube_inventory/deployment.go | 17 +- .../inputs/kube_inventory/deployment_test.go | 75 +-- plugins/inputs/kube_inventory/endpoint.go | 59 +-- .../inputs/kube_inventory/endpoint_test.go | 69 ++- plugins/inputs/kube_inventory/ingress.go | 44 +- plugins/inputs/kube_inventory/ingress_test.go | 63 +-- plugins/inputs/kube_inventory/node.go | 29 +- plugins/inputs/kube_inventory/node_test.go | 97 ++-- .../inputs/kube_inventory/persistentvolume.go | 19 +- .../kube_inventory/persistentvolume_test.go | 31 +- .../kube_inventory/persistentvolumeclaim.go | 23 +- .../persistentvolumeclaim_test.go | 60 ++- plugins/inputs/kube_inventory/pod.go | 81 ++-- plugins/inputs/kube_inventory/pod_test.go | 449 +++++++++--------- plugins/inputs/kube_inventory/service.go | 44 +- plugins/inputs/kube_inventory/service_test.go | 81 ++-- plugins/inputs/kube_inventory/statefulset.go | 25 +- .../inputs/kube_inventory/statefulset_test.go | 68 ++- plugins/inputs/prometheus/kubernetes.go | 166 +++---- plugins/inputs/prometheus/kubernetes_test.go | 127 ++--- 27 files changed, 993 insertions(+), 1071 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 6b811a5a9bcb5..0fafa339fcc56 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -62,9 +62,10 @@ following works: - github.com/eapache/go-xerial-snappy [MIT License](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE) - github.com/eapache/queue [MIT License](https://github.com/eapache/queue/blob/master/LICENSE) - github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 1.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE) -- github.com/ericchiang/k8s [Apache License 2.0](https://github.com/ericchiang/k8s/blob/master/LICENSE) +- github.com/form3tech-oss/jwt-go [MIT License](https://github.com/form3tech-oss/jwt-go/blob/master/LICENSE) - github.com/ghodss/yaml [MIT License](https://github.com/ghodss/yaml/blob/master/LICENSE) - github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) +- github.com/go-logr/logr [Apache License 2.0](https://github.com/go-logr/logr/blob/master/LICENSE) - github.com/go-ole/go-ole [MIT License](https://github.com/go-ole/go-ole/blob/master/LICENSE) - github.com/go-ping/ping [MIT License](https://github.com/go-ping/ping/blob/master/LICENSE) - github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE) @@ -83,7 +84,9 @@ following works: - github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-cmp/blob/master/LICENSE) - github.com/google/go-github [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-github/blob/master/LICENSE) - github.com/google/go-querystring [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-querystring/blob/master/LICENSE) +- github.com/google/gofuzz [Apache License 2.0](https://github.com/google/gofuzz/blob/master/LICENSE) - github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) +- github.com/googleapis/gnostic [Apache License 2.0](https://github.com/google/gnostic/blob/master/LICENSE) - github.com/gopcua/opcua [MIT License](https://github.com/gopcua/opcua/blob/master/LICENSE) - github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE) - github.com/gorilla/websocket [BSD 2-Clause "Simplified" License](https://github.com/gorilla/websocket/blob/master/LICENSE) @@ -191,6 +194,7 @@ following works: - google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE) - google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE) - google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) +- google.golang.org/protobuf [BSD 3-Clause "New" or "Revised" License](https://pkg.go.dev/google.golang.org/protobuf?tab=licenses) - gopkg.in/asn1-ber.v1 [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE) - gopkg.in/djherbis/times.v1 [MIT License](https://github.com/djherbis/times/blob/master/LICENSE) - gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) @@ -209,11 +213,15 @@ following works: - gopkg.in/tomb.v2 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v2/LICENSE) - gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE) - gopkg.in/yaml.v3 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v3/LICENSE) -- k8s.io/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) -- k8s.io/klog [Apache License 2.0](https://github.com/kubernetes/klog/blob/master/LICENSE) +- k8s.io/api [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/apimachinery [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/client-go [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/klog [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/utils [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - modernc.org/libc [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/libc/-/blob/master/LICENSE) - modernc.org/memory [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/memory/-/blob/master/LICENSE) - modernc.org/sqlite [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/sqlite/-/blob/master/LICENSE) - +- sigs.k8s.io/structured-merge-diff [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- sigs.k8s.io/yaml [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) ## telegraf used and modified code from these projects - github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) diff --git a/go.mod b/go.mod index edb407d8ed09c..1754c0fe77425 100644 --- a/go.mod +++ b/go.mod @@ -3,15 +3,15 @@ module github.com/influxdata/telegraf go 1.16 require ( - cloud.google.com/go v0.53.0 - cloud.google.com/go/bigquery v1.3.0 - cloud.google.com/go/pubsub v1.1.0 + cloud.google.com/go v0.54.0 + cloud.google.com/go/bigquery v1.4.0 + cloud.google.com/go/pubsub v1.2.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.3.0 github.com/Azure/azure-event-hubs-go/v3 v3.2.0 github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect - github.com/Azure/go-autorest/autorest v0.9.3 + github.com/Azure/go-autorest/autorest v0.11.1 github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 github.com/BurntSushi/toml v0.3.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee @@ -55,7 +55,6 @@ require ( github.com/docker/go-units v0.3.3 // indirect github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 github.com/eclipse/paho.mqtt.golang v1.3.0 - github.com/ericchiang/k8s v1.2.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.4.0 github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c @@ -67,7 +66,7 @@ require ( github.com/gofrs/uuid v2.1.0+incompatible github.com/gogo/protobuf v1.3.1 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec - github.com/golang/protobuf v1.3.5 + github.com/golang/protobuf v1.4.3 github.com/golang/snappy v0.0.1 github.com/google/go-cmp v0.5.4 github.com/google/go-github/v32 v32.1.0 @@ -142,16 +141,14 @@ require ( github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect go.starlark.net v0.0.0-20200901195727-6e684ef5eeee - golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect - golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 + golang.org/x/net v0.0.0-20201110031124-69a78807bb2b golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 - golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 + golang.org/x/sys v0.0.0-20201112073958-5cba982894dd golang.org/x/text v0.3.4 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.20.0 - google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 google.golang.org/grpc v1.33.1 gopkg.in/djherbis/times.v1 v1.2.0 gopkg.in/fatih/pool.v2 v2.0.0 // indirect @@ -161,8 +158,10 @@ require ( gopkg.in/olivere/elastic.v5 v5.0.70 gopkg.in/yaml.v2 v2.3.0 gotest.tools v2.2.0+incompatible - honnef.co/go/tools v0.0.1-2020.1.3 // indirect - k8s.io/apimachinery v0.17.1 // indirect + k8s.io/api v0.20.4 + k8s.io/apimachinery v0.20.4 + k8s.io/client-go v0.20.4 + k8s.io/klog v1.0.0 // indirect modernc.org/sqlite v1.7.4 ) diff --git a/go.sum b/go.sum index 369efcde64d88..93c77eda17477 100644 --- a/go.sum +++ b/go.sum @@ -7,19 +7,25 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= +cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00= @@ -41,32 +47,42 @@ github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY= github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.11.1 h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.1 h1:pZdL8o72rK+avFWl+p9nE8RWi1JInZrWJYlnpfXJwHk= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -78,8 +94,8 @@ github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcV github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ= github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.27.2 h1:1EyY1dsxNDUQEv0O/4TsjosHI2CgB1uo9H/v56xzTxc= github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= @@ -114,6 +130,7 @@ github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1: github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYKI= github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go-v2 v1.1.0 h1:sKP6QWxdN1oRYjl+k6S3bpgBI+XUx/0mqVOLIw4lR/Q= @@ -178,7 +195,6 @@ github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:sr github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8= github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -203,6 +219,7 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 h1:KgEcrKF0NWi9GT/OvDp9ioXZIrHRbP8S5o+sot9gznQ= github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -218,26 +235,27 @@ github.com/echlebek/timeproxy v1.0.0 h1:V41/v8tmmMDNMA2GrBPI45nlXb3F7+OY+nJz1BqK github.com/echlebek/timeproxy v1.0.0/go.mod h1:0dg2Lnb8no/jFwoMQKMTU6iAivgoMptGqSTprhnrRtk= github.com/eclipse/paho.mqtt.golang v1.3.0 h1:MU79lqr3FKNKbSrGN7d7bNYqh8MwWW7Zcx0iG+VIw9I= github.com/eclipse/paho.mqtt.golang v1.3.0/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ericchiang/k8s v1.2.0 h1:vxrMwEzY43oxu8aZyD/7b1s8tsBM+xoUoxjWECWFbPI= -github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk= github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk9aNQtiw4R4bEuzHjVmZikkUKCnO1v3lPQ21HZGk= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk= github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= @@ -253,12 +271,17 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c h1:fWdhUpCuoeNIPiQ+pkAmmERYEjhVx5/cbVGK7T99OkI= github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= @@ -281,7 +304,6 @@ github.com/gogo/googleapis v1.3.1 h1:CzMaKrvF6Qa7XtRii064vKBQiyvmY8H8vG1xa1/W1JA github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -290,7 +312,6 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -300,15 +321,22 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -324,6 +352,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -331,14 +360,16 @@ github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoP github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -347,7 +378,8 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopcua/opcua v0.1.13 h1:UP746MKRFNbv+CQGfrPwgH7rGxOlSGzVu9ieZdcox4E= github.com/gopcua/opcua v0.1.13/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= @@ -360,6 +392,7 @@ github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0U github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosnmp/gosnmp v1.30.0 h1:P6uUvPaoZCZh2EXvSUIgsxYZ1vdD/Sonl2BSVCGieG8= github.com/gosnmp/gosnmp v1.30.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= @@ -397,6 +430,7 @@ github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5I github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s= github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo= github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 h1:K3A5vHPs/p8OjI4SL3l1+hs/98mhxTVDcV1Ap0c265E= @@ -427,12 +461,11 @@ github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgb github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -460,9 +493,11 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -476,7 +511,7 @@ github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRC github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= @@ -504,7 +539,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -538,8 +572,8 @@ github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQT github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= @@ -561,6 +595,7 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -570,7 +605,6 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -647,14 +681,16 @@ github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff h1:JcVn27 github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 h1:l6epF6yBwuejBfhGkM5m8VSNM/QAm7ApGyH35ehA7eQ= github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -713,14 +749,16 @@ golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -731,8 +769,8 @@ golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm0 golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= @@ -760,7 +798,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -782,10 +819,14 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -795,8 +836,8 @@ golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 h1:lwlPPsmjDKK0J6eG6xDWd5XPehI0R024zxjDnw3esPA= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -811,7 +852,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -828,18 +868,23 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -847,10 +892,8 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -859,12 +902,13 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -878,6 +922,7 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -892,9 +937,14 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9 h1:sEvmEcJVKBNUvgCUClbUQeHOAa9U0I2Ce1BooMvVCY4= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -922,6 +972,7 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -944,9 +995,15 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -959,6 +1016,16 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1 h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -1029,13 +1096,21 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= -k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/apimachinery v0.20.4 h1:vhxQ0PPUUU2Ns1b9r4/UFp13UPs8cw2iOoTjnY9faa0= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/client-go v0.20.4 h1:85crgh1IotNkLpKYKZHVNI1JT86nr/iDCvq2iWKsql4= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/httpfs v1.0.0 h1:LtuKNg6JMiaBKVQHKd6Phhvk+2GFp+pUcmDQgRjrds0= modernc.org/httpfs v1.0.0/go.mod h1:BSkfoMUcahSijQD5J/Vu4UMOxzmEf5SNRwyXC4PJBEw= modernc.org/libc v1.3.1 h1:ZAAaxQZtb94hXvlPMEQybXBLLxEtJlQtVfvLkKOPZ5w= @@ -1052,5 +1127,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/plugins/inputs/kube_inventory/client.go b/plugins/inputs/kube_inventory/client.go index d9b24ba5c0a95..bc26d1a700ec3 100644 --- a/plugins/inputs/kube_inventory/client.go +++ b/plugins/inputs/kube_inventory/client.go @@ -4,10 +4,12 @@ import ( "context" "time" - "github.com/ericchiang/k8s" - v1APPS "github.com/ericchiang/k8s/apis/apps/v1" - v1 "github.com/ericchiang/k8s/apis/core/v1" - v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + netv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "github.com/influxdata/telegraf/plugins/common/tls" ) @@ -15,104 +17,89 @@ import ( type client struct { namespace string timeout time.Duration - *k8s.Client + *kubernetes.Clientset } func newClient(baseURL, namespace, bearerToken string, timeout time.Duration, tlsConfig tls.ClientConfig) (*client, error) { - c, err := k8s.NewClient(&k8s.Config{ - Clusters: []k8s.NamedCluster{{Name: "cluster", Cluster: k8s.Cluster{ - Server: baseURL, - InsecureSkipTLSVerify: tlsConfig.InsecureSkipVerify, - CertificateAuthority: tlsConfig.TLSCA, - }}}, - Contexts: []k8s.NamedContext{{Name: "context", Context: k8s.Context{ - Cluster: "cluster", - AuthInfo: "auth", - Namespace: namespace, - }}}, - AuthInfos: []k8s.NamedAuthInfo{{Name: "auth", AuthInfo: k8s.AuthInfo{ - Token: bearerToken, - ClientCertificate: tlsConfig.TLSCert, - ClientKey: tlsConfig.TLSKey, - }}}, + + c, err := kubernetes.NewForConfig(&rest.Config{ + TLSClientConfig: rest.TLSClientConfig{ + ServerName: baseURL, + Insecure: tlsConfig.InsecureSkipVerify, + CAFile: tlsConfig.TLSCA, + CertFile: tlsConfig.TLSCert, + KeyFile: tlsConfig.TLSKey, + }, + BearerToken: bearerToken, + ContentConfig: rest.ContentConfig{}, }) if err != nil { return nil, err } return &client{ - Client: c, + Clientset: c, timeout: timeout, namespace: namespace, }, nil } -func (c *client) getDaemonSets(ctx context.Context) (*v1APPS.DaemonSetList, error) { - list := new(v1APPS.DaemonSetList) +func (c *client) getDaemonSets(ctx context.Context) (*appsv1.DaemonSetList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.AppsV1().DaemonSets(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getDeployments(ctx context.Context) (*v1APPS.DeploymentList, error) { - list := &v1APPS.DeploymentList{} +func (c *client) getDeployments(ctx context.Context) (*appsv1.DeploymentList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.AppsV1().Deployments(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getEndpoints(ctx context.Context) (*v1.EndpointsList, error) { - list := new(v1.EndpointsList) +func (c *client) getEndpoints(ctx context.Context) (*corev1.EndpointsList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.CoreV1().Endpoints(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getIngress(ctx context.Context) (*v1beta1EXT.IngressList, error) { - list := new(v1beta1EXT.IngressList) +func (c *client) getIngress(ctx context.Context) (*netv1.IngressList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.NetworkingV1().Ingresses(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getNodes(ctx context.Context) (*v1.NodeList, error) { - list := new(v1.NodeList) +func (c *client) getNodes(ctx context.Context) (*corev1.NodeList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, "", list) + return c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) } -func (c *client) getPersistentVolumes(ctx context.Context) (*v1.PersistentVolumeList, error) { - list := new(v1.PersistentVolumeList) +func (c *client) getPersistentVolumes(ctx context.Context) (*corev1.PersistentVolumeList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, "", list) + return c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) } -func (c *client) getPersistentVolumeClaims(ctx context.Context) (*v1.PersistentVolumeClaimList, error) { - list := new(v1.PersistentVolumeClaimList) +func (c *client) getPersistentVolumeClaims(ctx context.Context) (*corev1.PersistentVolumeClaimList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.CoreV1().PersistentVolumeClaims(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getPods(ctx context.Context) (*v1.PodList, error) { - list := new(v1.PodList) +func (c *client) getPods(ctx context.Context) (*corev1.PodList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.CoreV1().Pods(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getServices(ctx context.Context) (*v1.ServiceList, error) { - list := new(v1.ServiceList) +func (c *client) getServices(ctx context.Context) (*corev1.ServiceList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.CoreV1().Services(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getStatefulSets(ctx context.Context) (*v1APPS.StatefulSetList, error) { - list := new(v1APPS.StatefulSetList) +func (c *client) getStatefulSets(ctx context.Context) (*appsv1.StatefulSetList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.AppsV1().StatefulSets(c.namespace).List(ctx, metav1.ListOptions{}) } diff --git a/plugins/inputs/kube_inventory/client_test.go b/plugins/inputs/kube_inventory/client_test.go index 88411ea367ccf..48874dca55209 100644 --- a/plugins/inputs/kube_inventory/client_test.go +++ b/plugins/inputs/kube_inventory/client_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/util/intstr" "github.com/influxdata/telegraf/plugins/common/tls" + "k8s.io/apimachinery/pkg/util/intstr" ) type mockHandler struct { @@ -29,11 +29,11 @@ func toBoolPtr(b bool) *bool { } func toIntStrPtrS(s string) *intstr.IntOrString { - return &intstr.IntOrString{StrVal: &s} + return &intstr.IntOrString{StrVal: s} } func toIntStrPtrI(i int32) *intstr.IntOrString { - return &intstr.IntOrString{IntVal: &i} + return &intstr.IntOrString{IntVal: i} } func TestNewClient(t *testing.T) { _, err := newClient("https://127.0.0.1:443/", "default", "abc123", time.Second, tls.ClientConfig{}) diff --git a/plugins/inputs/kube_inventory/daemonset.go b/plugins/inputs/kube_inventory/daemonset.go index db612a5e33b2a..b169ea16dbac6 100644 --- a/plugins/inputs/kube_inventory/daemonset.go +++ b/plugins/inputs/kube_inventory/daemonset.go @@ -2,9 +2,8 @@ package kube_inventory import ( "context" - "time" - "github.com/ericchiang/k8s/apis/apps/v1" + v1 "k8s.io/api/apps/v1" "github.com/influxdata/telegraf" ) @@ -16,7 +15,7 @@ func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *Kubern return } for _, d := range list.Items { - if err = ki.gatherDaemonSet(*d, acc); err != nil { + if err = ki.gatherDaemonSet(d, acc); err != nil { acc.AddError(err) return } @@ -25,27 +24,27 @@ func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *Kubern func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) error { fields := map[string]interface{}{ - "generation": d.Metadata.GetGeneration(), - "current_number_scheduled": d.Status.GetCurrentNumberScheduled(), - "desired_number_scheduled": d.Status.GetDesiredNumberScheduled(), - "number_available": d.Status.GetNumberAvailable(), - "number_misscheduled": d.Status.GetNumberMisscheduled(), - "number_ready": d.Status.GetNumberReady(), - "number_unavailable": d.Status.GetNumberUnavailable(), - "updated_number_scheduled": d.Status.GetUpdatedNumberScheduled(), + "generation": d.Generation, + "current_number_scheduled": d.Status.CurrentNumberScheduled, + "desired_number_scheduled": d.Status.DesiredNumberScheduled, + "number_available": d.Status.NumberAvailable, + "number_misscheduled": d.Status.NumberMisscheduled, + "number_ready": d.Status.NumberReady, + "number_unavailable": d.Status.NumberUnavailable, + "updated_number_scheduled": d.Status.UpdatedNumberScheduled, } tags := map[string]string{ - "daemonset_name": d.Metadata.GetName(), - "namespace": d.Metadata.GetNamespace(), + "daemonset_name": d.Name, + "namespace": d.Namespace, } - for key, val := range d.GetSpec().GetSelector().GetMatchLabels() { + for key, val := range d.Spec.Selector.MatchLabels { if ki.selectorFilter.Match(key) { tags["selector_"+key] = val } } - if d.Metadata.CreationTimestamp.GetSeconds() != 0 { - fields["created"] = time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano() + if d.GetCreationTimestamp().Second() != 0 { + fields["created"] = d.GetCreationTimestamp().UnixNano() } acc.AddFields(daemonSetMeasurement, fields, tags) diff --git a/plugins/inputs/kube_inventory/daemonset_test.go b/plugins/inputs/kube_inventory/daemonset_test.go index 0a13f1e42cb3d..dede3d9a534f1 100644 --- a/plugins/inputs/kube_inventory/daemonset_test.go +++ b/plugins/inputs/kube_inventory/daemonset_test.go @@ -6,8 +6,8 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/apps/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/influxdata/telegraf/testutil" ) @@ -38,28 +38,28 @@ func TestDaemonSet(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/daemonsets/": &v1.DaemonSetList{ - Items: []*v1.DaemonSet{ + Items: []v1.DaemonSet{ { - Status: &v1.DaemonSetStatus{ - CurrentNumberScheduled: toInt32Ptr(3), - DesiredNumberScheduled: toInt32Ptr(5), - NumberAvailable: toInt32Ptr(2), - NumberMisscheduled: toInt32Ptr(2), - NumberReady: toInt32Ptr(1), - NumberUnavailable: toInt32Ptr(1), - UpdatedNumberScheduled: toInt32Ptr(2), + Status: v1.DaemonSetStatus{ + CurrentNumberScheduled: 3, + DesiredNumberScheduled: 5, + NumberAvailable: 2, + NumberMisscheduled: 2, + NumberReady: 1, + NumberUnavailable: 1, + UpdatedNumberScheduled: 2, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(11221), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("daemon1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11221, + Namespace: "ns1", + Name: "daemon1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, - Spec: &v1.DaemonSetSpec{ + Spec: v1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "select1": "s1", @@ -108,7 +108,7 @@ func TestDaemonSet(t *testing.T) { ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { - err := ks.gatherDaemonSet(*dset, acc) + err := ks.gatherDaemonSet(dset, acc) if err != nil { t.Errorf("Failed to gather daemonset - %s", err.Error()) } @@ -146,28 +146,28 @@ func TestDaemonSetSelectorFilter(t *testing.T) { responseMap := map[string]interface{}{ "/daemonsets/": &v1.DaemonSetList{ - Items: []*v1.DaemonSet{ + Items: []v1.DaemonSet{ { - Status: &v1.DaemonSetStatus{ - CurrentNumberScheduled: toInt32Ptr(3), - DesiredNumberScheduled: toInt32Ptr(5), - NumberAvailable: toInt32Ptr(2), - NumberMisscheduled: toInt32Ptr(2), - NumberReady: toInt32Ptr(1), - NumberUnavailable: toInt32Ptr(1), - UpdatedNumberScheduled: toInt32Ptr(2), + Status: v1.DaemonSetStatus{ + CurrentNumberScheduled: 3, + DesiredNumberScheduled: 5, + NumberAvailable: 2, + NumberMisscheduled: 2, + NumberReady: 1, + NumberUnavailable: 1, + UpdatedNumberScheduled: 2, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(11221), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("daemon1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11221, + Namespace: "ns1", + Name: "daemon1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: time.Now()}, }, - Spec: &v1.DaemonSetSpec{ + Spec: v1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "select1": "s1", @@ -284,7 +284,7 @@ func TestDaemonSetSelectorFilter(t *testing.T) { ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { - err := ks.gatherDaemonSet(*dset, acc) + err := ks.gatherDaemonSet(dset, acc) if err != nil { t.Errorf("Failed to gather daemonset - %s", err.Error()) } diff --git a/plugins/inputs/kube_inventory/deployment.go b/plugins/inputs/kube_inventory/deployment.go index b91216765e9a6..613f9dff82b72 100644 --- a/plugins/inputs/kube_inventory/deployment.go +++ b/plugins/inputs/kube_inventory/deployment.go @@ -2,10 +2,9 @@ package kube_inventory import ( "context" - "time" - v1 "github.com/ericchiang/k8s/apis/apps/v1" "github.com/influxdata/telegraf" + v1 "k8s.io/api/apps/v1" ) func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { @@ -15,7 +14,7 @@ func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *Kuber return } for _, d := range list.Items { - if err = ki.gatherDeployment(*d, acc); err != nil { + if err = ki.gatherDeployment(d, acc); err != nil { acc.AddError(err) return } @@ -24,15 +23,15 @@ func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *Kuber func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) error { fields := map[string]interface{}{ - "replicas_available": d.Status.GetAvailableReplicas(), - "replicas_unavailable": d.Status.GetUnavailableReplicas(), - "created": time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + "replicas_available": d.Status.AvailableReplicas, + "replicas_unavailable": d.Status.UnavailableReplicas, + "created": d.GetCreationTimestamp().UnixNano(), } tags := map[string]string{ - "deployment_name": d.Metadata.GetName(), - "namespace": d.Metadata.GetNamespace(), + "deployment_name": d.Name, + "namespace": d.Namespace, } - for key, val := range d.GetSpec().GetSelector().GetMatchLabels() { + for key, val := range d.Spec.Selector.MatchLabels { if ki.selectorFilter.Match(key) { tags["selector_"+key] = val } diff --git a/plugins/inputs/kube_inventory/deployment_test.go b/plugins/inputs/kube_inventory/deployment_test.go index 9407c84d91322..bb5e9101eb42c 100644 --- a/plugins/inputs/kube_inventory/deployment_test.go +++ b/plugins/inputs/kube_inventory/deployment_test.go @@ -6,9 +6,10 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/apps/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" - "github.com/ericchiang/k8s/util/intstr" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "github.com/influxdata/telegraf/testutil" ) @@ -52,23 +53,23 @@ func TestDeployment(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/deployments/": &v1.DeploymentList{ - Items: []*v1.Deployment{ + Items: []v1.Deployment{ { - Status: &v1.DeploymentStatus{ - Replicas: toInt32Ptr(3), - AvailableReplicas: toInt32Ptr(1), - UnavailableReplicas: toInt32Ptr(4), - UpdatedReplicas: toInt32Ptr(2), - ObservedGeneration: toInt64Ptr(9121), + Status: v1.DeploymentStatus{ + Replicas: 3, + AvailableReplicas: 1, + UnavailableReplicas: 4, + UpdatedReplicas: 2, + ObservedGeneration: 9121, }, - Spec: &v1.DeploymentSpec{ - Strategy: &v1.DeploymentStrategy{ + Spec: v1.DeploymentSpec{ + Strategy: v1.DeploymentStrategy{ RollingUpdate: &v1.RollingUpdateDeployment{ MaxUnavailable: &intstr.IntOrString{ - IntVal: toInt32Ptr(30), + IntVal: 30, }, MaxSurge: &intstr.IntOrString{ - IntVal: toInt32Ptr(20), + IntVal: 20, }, }, }, @@ -80,15 +81,15 @@ func TestDeployment(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(11221), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("deploy1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11221, + Namespace: "ns1", + Name: "deploy1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -113,7 +114,7 @@ func TestDeployment(t *testing.T) { ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { - err := ks.gatherDeployment(*deployment, acc) + err := ks.gatherDeployment(deployment, acc) if err != nil { t.Errorf("Failed to gather deployment - %s", err.Error()) } @@ -151,23 +152,23 @@ func TestDeploymentSelectorFilter(t *testing.T) { responseMap := map[string]interface{}{ "/deployments/": &v1.DeploymentList{ - Items: []*v1.Deployment{ + Items: []v1.Deployment{ { - Status: &v1.DeploymentStatus{ - Replicas: toInt32Ptr(3), - AvailableReplicas: toInt32Ptr(1), - UnavailableReplicas: toInt32Ptr(4), - UpdatedReplicas: toInt32Ptr(2), - ObservedGeneration: toInt64Ptr(9121), + Status: v1.DeploymentStatus{ + Replicas: 3, + AvailableReplicas: 1, + UnavailableReplicas: 4, + UpdatedReplicas: 2, + ObservedGeneration: 9121, }, - Spec: &v1.DeploymentSpec{ - Strategy: &v1.DeploymentStrategy{ + Spec: v1.DeploymentSpec{ + Strategy: v1.DeploymentStrategy{ RollingUpdate: &v1.RollingUpdateDeployment{ MaxUnavailable: &intstr.IntOrString{ - IntVal: toInt32Ptr(30), + IntVal: 30, }, MaxSurge: &intstr.IntOrString{ - IntVal: toInt32Ptr(20), + IntVal: 20, }, }, }, @@ -179,15 +180,15 @@ func TestDeploymentSelectorFilter(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(11221), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("deploy1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11221, + Namespace: "ns1", + Name: "deploy1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -298,7 +299,7 @@ func TestDeploymentSelectorFilter(t *testing.T) { ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { - err := ks.gatherDeployment(*deployment, acc) + err := ks.gatherDeployment(deployment, acc) if err != nil { t.Errorf("Failed to gather deployment - %s", err.Error()) } diff --git a/plugins/inputs/kube_inventory/endpoint.go b/plugins/inputs/kube_inventory/endpoint.go index 7298789da8e08..4b3cffa59fad3 100644 --- a/plugins/inputs/kube_inventory/endpoint.go +++ b/plugins/inputs/kube_inventory/endpoint.go @@ -3,11 +3,9 @@ package kube_inventory import ( "context" "strings" - "time" - - "github.com/ericchiang/k8s/apis/core/v1" "github.com/influxdata/telegraf" + corev1 "k8s.io/api/core/v1" ) func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { @@ -17,66 +15,61 @@ func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *Kuberne return } for _, i := range list.Items { - if err = ki.gatherEndpoint(*i, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherEndpoint(i, acc) } } -func (ki *KubernetesInventory) gatherEndpoint(e v1.Endpoints, acc telegraf.Accumulator) error { - if e.Metadata.CreationTimestamp.GetSeconds() == 0 && e.Metadata.CreationTimestamp.GetNanos() == 0 { - return nil +func (ki *KubernetesInventory) gatherEndpoint(e corev1.Endpoints, acc telegraf.Accumulator) { + if e.GetCreationTimestamp().Second() == 0 && e.GetCreationTimestamp().Nanosecond() == 0 { + return } fields := map[string]interface{}{ - "created": time.Unix(e.Metadata.CreationTimestamp.GetSeconds(), int64(e.Metadata.CreationTimestamp.GetNanos())).UnixNano(), - "generation": e.Metadata.GetGeneration(), + "created": e.GetCreationTimestamp().UnixNano(), + "generation": e.Generation, } tags := map[string]string{ - "endpoint_name": e.Metadata.GetName(), - "namespace": e.Metadata.GetNamespace(), + "endpoint_name": e.Name, + "namespace": e.Namespace, } - for _, endpoint := range e.GetSubsets() { - for _, readyAddr := range endpoint.GetAddresses() { + for _, endpoint := range e.Subsets { + for _, readyAddr := range endpoint.Addresses { fields["ready"] = true - tags["hostname"] = readyAddr.GetHostname() - tags["node_name"] = readyAddr.GetNodeName() + tags["hostname"] = readyAddr.Hostname + tags["node_name"] = *readyAddr.NodeName if readyAddr.TargetRef != nil { - tags[strings.ToLower(readyAddr.GetTargetRef().GetKind())] = readyAddr.GetTargetRef().GetName() + tags[strings.ToLower(readyAddr.TargetRef.Kind)] = readyAddr.TargetRef.Name } - for _, port := range endpoint.GetPorts() { - fields["port"] = port.GetPort() + for _, port := range endpoint.Ports { + fields["port"] = port.Port - tags["port_name"] = port.GetName() - tags["port_protocol"] = port.GetProtocol() + tags["port_name"] = port.Name + tags["port_protocol"] = string(port.Protocol) acc.AddFields(endpointMeasurement, fields, tags) } } - for _, notReadyAddr := range endpoint.GetNotReadyAddresses() { + for _, notReadyAddr := range endpoint.NotReadyAddresses { fields["ready"] = false - tags["hostname"] = notReadyAddr.GetHostname() - tags["node_name"] = notReadyAddr.GetNodeName() + tags["hostname"] = notReadyAddr.Hostname + tags["node_name"] = *notReadyAddr.NodeName if notReadyAddr.TargetRef != nil { - tags[strings.ToLower(notReadyAddr.GetTargetRef().GetKind())] = notReadyAddr.GetTargetRef().GetName() + tags[strings.ToLower(notReadyAddr.TargetRef.Kind)] = notReadyAddr.TargetRef.Name } - for _, port := range endpoint.GetPorts() { - fields["port"] = port.GetPort() + for _, port := range endpoint.Ports { + fields["port"] = port.Port - tags["port_name"] = port.GetName() - tags["port_protocol"] = port.GetProtocol() + tags["port_name"] = port.Name + tags["port_protocol"] = string(port.Protocol) acc.AddFields(endpointMeasurement, fields, tags) } } } - - return nil } diff --git a/plugins/inputs/kube_inventory/endpoint_test.go b/plugins/inputs/kube_inventory/endpoint_test.go index b88c388162bd2..0e3203912c1f1 100644 --- a/plugins/inputs/kube_inventory/endpoint_test.go +++ b/plugins/inputs/kube_inventory/endpoint_test.go @@ -4,9 +4,9 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" "github.com/influxdata/telegraf/testutil" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestEndpoint(t *testing.T) { @@ -35,34 +35,34 @@ func TestEndpoint(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/endpoints/": &v1.EndpointsList{ - Items: []*v1.Endpoints{ + Items: []v1.Endpoints{ { - Subsets: []*v1.EndpointSubset{ + Subsets: []v1.EndpointSubset{ { - Addresses: []*v1.EndpointAddress{ + Addresses: []v1.EndpointAddress{ { - Hostname: toStrPtr("storage-6"), + Hostname: "storage-6", NodeName: toStrPtr("b.storage.internal"), TargetRef: &v1.ObjectReference{ - Kind: toStrPtr("pod"), - Name: toStrPtr("storage-6"), + Kind: "pod", + Name: "storage-6", }, }, }, - Ports: []*v1.EndpointPort{ + Ports: []v1.EndpointPort{ { - Name: toStrPtr("server"), - Protocol: toStrPtr("TCP"), - Port: toInt32Ptr(8080), + Name: "server", + Protocol: "TCP", + Port: 8080, }, }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("storage"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "storage", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -97,34 +97,34 @@ func TestEndpoint(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/endpoints/": &v1.EndpointsList{ - Items: []*v1.Endpoints{ + Items: []v1.Endpoints{ { - Subsets: []*v1.EndpointSubset{ + Subsets: []v1.EndpointSubset{ { - NotReadyAddresses: []*v1.EndpointAddress{ + NotReadyAddresses: []v1.EndpointAddress{ { - Hostname: toStrPtr("storage-6"), + Hostname: "storage-6", NodeName: toStrPtr("b.storage.internal"), TargetRef: &v1.ObjectReference{ - Kind: toStrPtr("pod"), - Name: toStrPtr("storage-6"), + Kind: "pod", + Name: "storage-6", }, }, }, - Ports: []*v1.EndpointPort{ + Ports: []v1.EndpointPort{ { - Name: toStrPtr("server"), - Protocol: toStrPtr("TCP"), - Port: toInt32Ptr(8080), + Name: "server", + Protocol: "TCP", + Port: 8080, }, }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("storage"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "storage", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -162,10 +162,7 @@ func TestEndpoint(t *testing.T) { } acc := new(testutil.Accumulator) for _, endpoint := range ((v.handler.responseMap["/endpoints/"]).(*v1.EndpointsList)).Items { - err := ks.gatherEndpoint(*endpoint, acc) - if err != nil { - t.Errorf("Failed to gather endpoint - %s", err.Error()) - } + ks.gatherEndpoint(endpoint, acc) } err := acc.FirstError() diff --git a/plugins/inputs/kube_inventory/ingress.go b/plugins/inputs/kube_inventory/ingress.go index 6d5c8019927cf..69765b4dd3257 100644 --- a/plugins/inputs/kube_inventory/ingress.go +++ b/plugins/inputs/kube_inventory/ingress.go @@ -2,9 +2,8 @@ package kube_inventory import ( "context" - "time" - v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" + netv1 "k8s.io/api/networking/v1" "github.com/influxdata/telegraf" ) @@ -16,45 +15,40 @@ func collectIngress(ctx context.Context, acc telegraf.Accumulator, ki *Kubernete return } for _, i := range list.Items { - if err = ki.gatherIngress(*i, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherIngress(i, acc) } } -func (ki *KubernetesInventory) gatherIngress(i v1beta1EXT.Ingress, acc telegraf.Accumulator) error { - if i.Metadata.CreationTimestamp.GetSeconds() == 0 && i.Metadata.CreationTimestamp.GetNanos() == 0 { - return nil +func (ki *KubernetesInventory) gatherIngress(i netv1.Ingress, acc telegraf.Accumulator) { + if i.GetCreationTimestamp().Second() == 0 && i.GetCreationTimestamp().Nanosecond() == 0 { + return } fields := map[string]interface{}{ - "created": time.Unix(i.Metadata.CreationTimestamp.GetSeconds(), int64(i.Metadata.CreationTimestamp.GetNanos())).UnixNano(), - "generation": i.Metadata.GetGeneration(), + "created": i.GetCreationTimestamp().UnixNano(), + "generation": i.Generation, } tags := map[string]string{ - "ingress_name": i.Metadata.GetName(), - "namespace": i.Metadata.GetNamespace(), + "ingress_name": i.Name, + "namespace": i.Namespace, } - for _, ingress := range i.GetStatus().GetLoadBalancer().GetIngress() { - tags["hostname"] = ingress.GetHostname() - tags["ip"] = ingress.GetIp() + for _, ingress := range i.Status.LoadBalancer.Ingress { + tags["hostname"] = ingress.Hostname + tags["ip"] = ingress.IP - for _, rule := range i.GetSpec().GetRules() { - for _, path := range rule.GetIngressRuleValue().GetHttp().GetPaths() { - fields["backend_service_port"] = path.GetBackend().GetServicePort().GetIntVal() - fields["tls"] = i.GetSpec().GetTls() != nil + for _, rule := range i.Spec.Rules { + for _, path := range rule.IngressRuleValue.HTTP.Paths { + fields["backend_service_port"] = path.Backend.Service.Port.Number + fields["tls"] = i.Spec.TLS != nil - tags["backend_service_name"] = path.GetBackend().GetServiceName() - tags["path"] = path.GetPath() - tags["host"] = rule.GetHost() + tags["backend_service_name"] = path.Backend.Service.Name + tags["path"] = path.Path + tags["host"] = rule.Host acc.AddFields(ingressMeasurement, fields, tags) } } } - - return nil } diff --git a/plugins/inputs/kube_inventory/ingress_test.go b/plugins/inputs/kube_inventory/ingress_test.go index 2d111801a96f3..0d8fefcd93144 100644 --- a/plugins/inputs/kube_inventory/ingress_test.go +++ b/plugins/inputs/kube_inventory/ingress_test.go @@ -4,10 +4,10 @@ import ( "testing" "time" - v1 "github.com/ericchiang/k8s/apis/core/v1" - v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" "github.com/influxdata/telegraf/testutil" + v1 "k8s.io/api/core/v1" + netv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestIngress(t *testing.T) { @@ -26,7 +26,7 @@ func TestIngress(t *testing.T) { name: "no ingress", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/ingress/": &v1beta1EXT.IngressList{}, + "/ingress/": netv1.IngressList{}, }, }, hasError: false, @@ -35,31 +35,35 @@ func TestIngress(t *testing.T) { name: "collect ingress", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/ingress/": &v1beta1EXT.IngressList{ - Items: []*v1beta1EXT.Ingress{ + "/ingress/": netv1.IngressList{ + Items: []netv1.Ingress{ { - Status: &v1beta1EXT.IngressStatus{ - LoadBalancer: &v1.LoadBalancerStatus{ - Ingress: []*v1.LoadBalancerIngress{ + Status: netv1.IngressStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ { - Hostname: toStrPtr("chron-1"), - Ip: toStrPtr("1.0.0.127"), + Hostname: "chron-1", + IP: "1.0.0.127", }, }, }, }, - Spec: &v1beta1EXT.IngressSpec{ - Rules: []*v1beta1EXT.IngressRule{ + Spec: netv1.IngressSpec{ + Rules: []netv1.IngressRule{ { - Host: toStrPtr("ui.internal"), - IngressRuleValue: &v1beta1EXT.IngressRuleValue{ - Http: &v1beta1EXT.HTTPIngressRuleValue{ - Paths: []*v1beta1EXT.HTTPIngressPath{ + Host: "ui.internal", + IngressRuleValue: netv1.IngressRuleValue{ + HTTP: &netv1.HTTPIngressRuleValue{ + Paths: []netv1.HTTPIngressPath{ { - Path: toStrPtr("/"), - Backend: &v1beta1EXT.IngressBackend{ - ServiceName: toStrPtr("chronografd"), - ServicePort: toIntStrPtrI(8080), + Path: "/", + Backend: netv1.IngressBackend{ + Service: &netv1.IngressServiceBackend{ + Name: "chronografd", + Port: netv1.ServiceBackendPort{ + Number: 8080, + }, + }, }, }, }, @@ -68,11 +72,11 @@ func TestIngress(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("ui-lb"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "ui-lb", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -109,11 +113,8 @@ func TestIngress(t *testing.T) { client: cli, } acc := new(testutil.Accumulator) - for _, ingress := range ((v.handler.responseMap["/ingress/"]).(*v1beta1EXT.IngressList)).Items { - err := ks.gatherIngress(*ingress, acc) - if err != nil { - t.Errorf("Failed to gather ingress - %s", err.Error()) - } + for _, ingress := range ((v.handler.responseMap["/ingress/"]).(netv1.IngressList)).Items { + ks.gatherIngress(ingress, acc) } err := acc.FirstError() diff --git a/plugins/inputs/kube_inventory/node.go b/plugins/inputs/kube_inventory/node.go index cb123c458c592..16e8d5b2a017f 100644 --- a/plugins/inputs/kube_inventory/node.go +++ b/plugins/inputs/kube_inventory/node.go @@ -3,7 +3,7 @@ package kube_inventory import ( "context" - "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -15,44 +15,39 @@ func collectNodes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesI return } for _, n := range list.Items { - if err = ki.gatherNode(*n, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherNode(n, acc) } } -func (ki *KubernetesInventory) gatherNode(n v1.Node, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherNode(n corev1.Node, acc telegraf.Accumulator) { fields := map[string]interface{}{} tags := map[string]string{ - "node_name": *n.Metadata.Name, + "node_name": n.Name, } for resourceName, val := range n.Status.Capacity { switch resourceName { case "cpu": - fields["capacity_cpu_cores"] = convertQuantity(val.GetString_(), 1) - fields["capacity_millicpu_cores"] = convertQuantity(val.GetString_(), 1000) + fields["capacity_cpu_cores"] = convertQuantity(string(val.Format), 1) + fields["capacity_millicpu_cores"] = convertQuantity(string(val.Format), 1000) case "memory": - fields["capacity_memory_bytes"] = convertQuantity(val.GetString_(), 1) + fields["capacity_memory_bytes"] = convertQuantity(string(val.Format), 1) case "pods": - fields["capacity_pods"] = atoi(val.GetString_()) + fields["capacity_pods"] = atoi(string(val.Format)) } } for resourceName, val := range n.Status.Allocatable { switch resourceName { case "cpu": - fields["allocatable_cpu_cores"] = convertQuantity(val.GetString_(), 1) - fields["allocatable_millicpu_cores"] = convertQuantity(val.GetString_(), 1000) + fields["allocatable_cpu_cores"] = convertQuantity(string(val.Format), 1) + fields["allocatable_millicpu_cores"] = convertQuantity(string(val.Format), 1000) case "memory": - fields["allocatable_memory_bytes"] = convertQuantity(val.GetString_(), 1) + fields["allocatable_memory_bytes"] = convertQuantity(string(val.Format), 1) case "pods": - fields["allocatable_pods"] = atoi(val.GetString_()) + fields["allocatable_pods"] = atoi(string(val.Format)) } } acc.AddFields(nodeMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/node_test.go b/plugins/inputs/kube_inventory/node_test.go index 68cf463b07e43..d2bf07aeb3c65 100644 --- a/plugins/inputs/kube_inventory/node_test.go +++ b/plugins/inputs/kube_inventory/node_test.go @@ -4,9 +4,9 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" - "github.com/ericchiang/k8s/apis/resource" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/influxdata/telegraf/testutil" ) @@ -26,7 +26,7 @@ func TestNode(t *testing.T) { name: "no nodes", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/nodes/": &v1.NodeList{}, + "/nodes/": corev1.NodeList{}, }, }, hasError: false, @@ -35,63 +35,63 @@ func TestNode(t *testing.T) { name: "collect nodes", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/nodes/": &v1.NodeList{ - Items: []*v1.Node{ + "/nodes/": corev1.NodeList{ + Items: []corev1.Node{ { - Status: &v1.NodeStatus{ - NodeInfo: &v1.NodeSystemInfo{ - KernelVersion: toStrPtr("4.14.48-coreos-r2"), - OsImage: toStrPtr("Container Linux by CoreOS 1745.7.0 (Rhyolite)"), - ContainerRuntimeVersion: toStrPtr("docker://18.3.1"), - KubeletVersion: toStrPtr("v1.10.3"), - KubeProxyVersion: toStrPtr("v1.10.3"), + Status: corev1.NodeStatus{ + NodeInfo: corev1.NodeSystemInfo{ + KernelVersion: "4.14.48-coreos-r2", + OSImage: "Container Linux by CoreOS 1745.7.0 (Rhyolite)", + ContainerRuntimeVersion: "docker://18.3.1", + KubeletVersion: "v1.10.3", + KubeProxyVersion: "v1.10.3", }, - Phase: toStrPtr("Running"), - Capacity: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("16")}, - "ephemeral_storage_bytes": {String_: toStrPtr("49536401408")}, - "hugepages_1Gi_bytes": {String_: toStrPtr("0")}, - "hugepages_2Mi_bytes": {String_: toStrPtr("0")}, - "memory": {String_: toStrPtr("125817904Ki")}, - "pods": {String_: toStrPtr("110")}, + Phase: "Running", + Capacity: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "16"}, + "ephemeral_storage_bytes": resource.Quantity{Format: "49536401408"}, + "hugepages_1Gi_bytes": resource.Quantity{Format: "0"}, + "hugepages_2Mi_bytes": resource.Quantity{Format: "0"}, + "memory": resource.Quantity{Format: "125817904Ki"}, + "pods": resource.Quantity{Format: "110"}, }, - Allocatable: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("1000m")}, - "ephemeral_storage_bytes": {String_: toStrPtr("44582761194")}, - "hugepages_1Gi_bytes": {String_: toStrPtr("0")}, - "hugepages_2Mi_bytes": {String_: toStrPtr("0")}, - "memory": {String_: toStrPtr("125715504Ki")}, - "pods": {String_: toStrPtr("110")}, + Allocatable: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "1000m"}, + "ephemeral_storage_bytes": resource.Quantity{Format: "44582761194"}, + "hugepages_1Gi_bytes": resource.Quantity{Format: "0"}, + "hugepages_2Mi_bytes": resource.Quantity{Format: "0"}, + "memory": resource.Quantity{Format: "125715504Ki"}, + "pods": resource.Quantity{Format: "110"}, }, - Conditions: []*v1.NodeCondition{ - {Type: toStrPtr("Ready"), Status: toStrPtr("true"), LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}}, - {Type: toStrPtr("OutOfDisk"), Status: toStrPtr("false"), LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}}, + Conditions: []corev1.NodeCondition{ + {Type: "Ready", Status: "true", LastTransitionTime: metav1.Time{Time: now}}, + {Type: "OutOfDisk", Status: "false", LastTransitionTime: metav1.Time{Time: created}}, }, }, - Spec: &v1.NodeSpec{ - ProviderID: toStrPtr("aws:///us-east-1c/i-0c00"), - Taints: []*v1.Taint{ + Spec: corev1.NodeSpec{ + ProviderID: "aws:///us-east-1c/i-0c00", + Taints: []corev1.Taint{ { - Key: toStrPtr("k1"), - Value: toStrPtr("v1"), - Effect: toStrPtr("NoExecute"), + Key: "k1", + Value: "v1", + Effect: "NoExecute", }, { - Key: toStrPtr("k2"), - Value: toStrPtr("v2"), - Effect: toStrPtr("NoSchedule"), + Key: "k2", + Value: "v2", + Effect: "NoSchedule", }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(int64(11232)), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("node1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11232, + Namespace: "ns1", + Name: "node1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -127,11 +127,8 @@ func TestNode(t *testing.T) { client: cli, } acc := new(testutil.Accumulator) - for _, node := range ((v.handler.responseMap["/nodes/"]).(*v1.NodeList)).Items { - err := ks.gatherNode(*node, acc) - if err != nil { - t.Errorf("Failed to gather node - %s", err.Error()) - } + for _, node := range ((v.handler.responseMap["/nodes/"]).(corev1.NodeList)).Items { + ks.gatherNode(node, acc) } err := acc.FirstError() diff --git a/plugins/inputs/kube_inventory/persistentvolume.go b/plugins/inputs/kube_inventory/persistentvolume.go index 05600522b7ea8..4199dfed9e4c3 100644 --- a/plugins/inputs/kube_inventory/persistentvolume.go +++ b/plugins/inputs/kube_inventory/persistentvolume.go @@ -4,7 +4,7 @@ import ( "context" "strings" - "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -16,16 +16,13 @@ func collectPersistentVolumes(ctx context.Context, acc telegraf.Accumulator, ki return } for _, pv := range list.Items { - if err = ki.gatherPersistentVolume(*pv, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherPersistentVolume(pv, acc) } } -func (ki *KubernetesInventory) gatherPersistentVolume(pv v1.PersistentVolume, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherPersistentVolume(pv corev1.PersistentVolume, acc telegraf.Accumulator) { phaseType := 5 - switch strings.ToLower(pv.Status.GetPhase()) { + switch strings.ToLower(string(pv.Status.Phase)) { case "bound": phaseType = 0 case "failed": @@ -41,12 +38,10 @@ func (ki *KubernetesInventory) gatherPersistentVolume(pv v1.PersistentVolume, ac "phase_type": phaseType, } tags := map[string]string{ - "pv_name": pv.Metadata.GetName(), - "phase": pv.Status.GetPhase(), - "storageclass": pv.Spec.GetStorageClassName(), + "pv_name": pv.Name, + "phase": string(pv.Status.Phase), + "storageclass": pv.Spec.StorageClassName, } acc.AddFields(persistentVolumeMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/persistentvolume_test.go b/plugins/inputs/kube_inventory/persistentvolume_test.go index a5d20d047331a..80e68605a60a1 100644 --- a/plugins/inputs/kube_inventory/persistentvolume_test.go +++ b/plugins/inputs/kube_inventory/persistentvolume_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/influxdata/telegraf/testutil" ) @@ -25,7 +25,7 @@ func TestPersistentVolume(t *testing.T) { name: "no pv", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/persistentvolumes/": &v1.PersistentVolumeList{}, + "/persistentvolumes/": &corev1.PersistentVolumeList{}, }, }, hasError: false, @@ -34,22 +34,22 @@ func TestPersistentVolume(t *testing.T) { name: "collect pvs", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/persistentvolumes/": &v1.PersistentVolumeList{ - Items: []*v1.PersistentVolume{ + "/persistentvolumes/": &corev1.PersistentVolumeList{ + Items: []corev1.PersistentVolume{ { - Status: &v1.PersistentVolumeStatus{ - Phase: toStrPtr("pending"), + Status: corev1.PersistentVolumeStatus{ + Phase: "pending", }, - Spec: &v1.PersistentVolumeSpec{ - StorageClassName: toStrPtr("ebs-1"), + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "ebs-1", }, - Metadata: &metav1.ObjectMeta{ - Name: toStrPtr("pv1"), + ObjectMeta: metav1.ObjectMeta{ + Name: "pv1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -79,11 +79,8 @@ func TestPersistentVolume(t *testing.T) { client: cli, } acc := new(testutil.Accumulator) - for _, pv := range ((v.handler.responseMap["/persistentvolumes/"]).(*v1.PersistentVolumeList)).Items { - err := ks.gatherPersistentVolume(*pv, acc) - if err != nil { - t.Errorf("Failed to gather pv - %s", err.Error()) - } + for _, pv := range ((v.handler.responseMap["/persistentvolumes/"]).(*corev1.PersistentVolumeList)).Items { + ks.gatherPersistentVolume(pv, acc) } err := acc.FirstError() diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim.go b/plugins/inputs/kube_inventory/persistentvolumeclaim.go index ac8c9f85a931c..10a6abbf72e39 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim.go +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim.go @@ -4,7 +4,7 @@ import ( "context" "strings" - "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -16,16 +16,13 @@ func collectPersistentVolumeClaims(ctx context.Context, acc telegraf.Accumulator return } for _, pvc := range list.Items { - if err = ki.gatherPersistentVolumeClaim(*pvc, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherPersistentVolumeClaim(pvc, acc) } } -func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc v1.PersistentVolumeClaim, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc corev1.PersistentVolumeClaim, acc telegraf.Accumulator) { phaseType := 3 - switch strings.ToLower(pvc.Status.GetPhase()) { + switch strings.ToLower(string(pvc.Status.Phase)) { case "bound": phaseType = 0 case "lost": @@ -37,18 +34,16 @@ func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc v1.PersistentVolu "phase_type": phaseType, } tags := map[string]string{ - "pvc_name": pvc.Metadata.GetName(), - "namespace": pvc.Metadata.GetNamespace(), - "phase": pvc.Status.GetPhase(), - "storageclass": pvc.Spec.GetStorageClassName(), + "pvc_name": pvc.Name, + "namespace": pvc.Namespace, + "phase": string(pvc.Status.Phase), + "storageclass": *pvc.Spec.StorageClassName, } - for key, val := range pvc.GetSpec().GetSelector().GetMatchLabels() { + for key, val := range pvc.Spec.Selector.MatchLabels { if ki.selectorFilter.Match(key) { tags["selector_"+key] = val } } acc.AddFields(persistentVolumeClaimMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go index 5155a5d3ba698..42aec57a76368 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go @@ -6,8 +6,8 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/influxdata/telegraf/testutil" ) @@ -29,7 +29,7 @@ func TestPersistentVolumeClaim(t *testing.T) { name: "no pv claims", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{}, + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{}, }, }, hasError: false, @@ -38,14 +38,14 @@ func TestPersistentVolumeClaim(t *testing.T) { name: "collect pv claims", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{ - Items: []*v1.PersistentVolumeClaim{ + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{ + Items: []corev1.PersistentVolumeClaim{ { - Status: &v1.PersistentVolumeClaimStatus{ - Phase: toStrPtr("bound"), + Status: corev1.PersistentVolumeClaimStatus{ + Phase: "bound", }, - Spec: &v1.PersistentVolumeClaimSpec{ - VolumeName: toStrPtr("pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8"), + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8", StorageClassName: toStrPtr("ebs-1"), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -54,14 +54,14 @@ func TestPersistentVolumeClaim(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Namespace: toStrPtr("ns1"), - Name: toStrPtr("pc1"), + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "pc1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -97,11 +97,8 @@ func TestPersistentVolumeClaim(t *testing.T) { } ks.createSelectorFilters() acc := new(testutil.Accumulator) - for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*v1.PersistentVolumeClaimList)).Items { - err := ks.gatherPersistentVolumeClaim(*pvc, acc) - if err != nil { - t.Errorf("Failed to gather pvc - %s", err.Error()) - } + for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*corev1.PersistentVolumeClaimList)).Items { + ks.gatherPersistentVolumeClaim(pvc, acc) } err := acc.FirstError() @@ -135,14 +132,14 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) responseMap := map[string]interface{}{ - "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{ - Items: []*v1.PersistentVolumeClaim{ + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{ + Items: []corev1.PersistentVolumeClaim{ { - Status: &v1.PersistentVolumeClaimStatus{ - Phase: toStrPtr("bound"), + Status: corev1.PersistentVolumeClaimStatus{ + Phase: "bound", }, - Spec: &v1.PersistentVolumeClaimSpec{ - VolumeName: toStrPtr("pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8"), + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8", StorageClassName: toStrPtr("ebs-1"), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -151,14 +148,14 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Namespace: toStrPtr("ns1"), - Name: toStrPtr("pc1"), + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "pc1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -268,11 +265,8 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { ks.SelectorExclude = v.exclude ks.createSelectorFilters() acc := new(testutil.Accumulator) - for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*v1.PersistentVolumeClaimList)).Items { - err := ks.gatherPersistentVolumeClaim(*pvc, acc) - if err != nil { - t.Errorf("Failed to gather pvc - %s", err.Error()) - } + for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*corev1.PersistentVolumeClaimList)).Items { + ks.gatherPersistentVolumeClaim(pvc, acc) } // Grab selector tags diff --git a/plugins/inputs/kube_inventory/pod.go b/plugins/inputs/kube_inventory/pod.go index c75f133ba1ffe..bbdfb3a699b5e 100644 --- a/plugins/inputs/kube_inventory/pod.go +++ b/plugins/inputs/kube_inventory/pod.go @@ -3,7 +3,7 @@ package kube_inventory import ( "context" - v1 "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -15,62 +15,55 @@ func collectPods(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesIn return } for _, p := range list.Items { - if err = ki.gatherPod(*p, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherPod(p, acc) } } -func (ki *KubernetesInventory) gatherPod(p v1.Pod, acc telegraf.Accumulator) error { - if p.Metadata.CreationTimestamp.GetSeconds() == 0 && p.Metadata.CreationTimestamp.GetNanos() == 0 { - return nil +func (ki *KubernetesInventory) gatherPod(p corev1.Pod, acc telegraf.Accumulator) { + if p.GetCreationTimestamp().Second() == 0 && p.GetCreationTimestamp().Nanosecond() == 0 { + return } - containerList := map[string]*v1.ContainerStatus{} - for _, v := range p.Status.ContainerStatuses { - containerList[*v.Name] = v + containerList := map[string]*corev1.ContainerStatus{} + for i := range p.Status.ContainerStatuses { + containerList[p.Status.ContainerStatuses[i].Name] = &p.Status.ContainerStatuses[i] } for _, c := range p.Spec.Containers { - cs, ok := containerList[*c.Name] + cs, ok := containerList[c.Name] if !ok { - cs = &v1.ContainerStatus{} + cs = &corev1.ContainerStatus{} } - gatherPodContainer(*p.Spec.NodeName, ki, p, *cs, *c, acc) + gatherPodContainer(ki, p, *cs, c, acc) } - - return nil } -func gatherPodContainer(nodeName string, ki *KubernetesInventory, p v1.Pod, cs v1.ContainerStatus, c v1.Container, acc telegraf.Accumulator) { +func gatherPodContainer(ki *KubernetesInventory, p corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) { stateCode := 3 stateReason := "" state := "unknown" readiness := "unready" - if cs.State != nil { - switch { - case cs.State.Running != nil: - stateCode = 0 - state = "running" - case cs.State.Terminated != nil: - stateCode = 1 - state = "terminated" - stateReason = cs.State.Terminated.GetReason() - case cs.State.Waiting != nil: - stateCode = 2 - state = "waiting" - stateReason = cs.State.Waiting.GetReason() - } + switch { + case cs.State.Running != nil: + stateCode = 0 + state = "running" + case cs.State.Terminated != nil: + stateCode = 1 + state = "terminated" + stateReason = cs.State.Terminated.Reason + case cs.State.Waiting != nil: + stateCode = 2 + state = "waiting" + stateReason = cs.State.Waiting.Reason } - if cs.GetReady() { + if cs.Ready { readiness = "ready" } fields := map[string]interface{}{ - "restarts_total": cs.GetRestartCount(), + "restarts_total": cs.RestartCount, "state_code": stateCode, } @@ -83,21 +76,21 @@ func gatherPodContainer(nodeName string, ki *KubernetesInventory, p v1.Pod, cs v fields["state_reason"] = stateReason } - phaseReason := p.Status.GetReason() + phaseReason := p.Status.Reason if phaseReason != "" { fields["phase_reason"] = phaseReason } tags := map[string]string{ - "container_name": *c.Name, - "namespace": *p.Metadata.Namespace, - "node_name": *p.Spec.NodeName, - "pod_name": *p.Metadata.Name, - "phase": *p.Status.Phase, + "container_name": c.Name, + "namespace": p.Namespace, + "node_name": p.Spec.NodeName, + "pod_name": p.Name, + "phase": string(p.Status.Phase), "state": state, "readiness": readiness, } - for key, val := range p.GetSpec().GetNodeSelector() { + for key, val := range p.Spec.NodeSelector { if ki.selectorFilter.Match(key) { tags["node_selector_"+key] = val } @@ -109,17 +102,17 @@ func gatherPodContainer(nodeName string, ki *KubernetesInventory, p v1.Pod, cs v for resourceName, val := range req { switch resourceName { case "cpu": - fields["resource_requests_millicpu_units"] = convertQuantity(val.GetString_(), 1000) + fields["resource_requests_millicpu_units"] = convertQuantity(string(val.Format), 1000) case "memory": - fields["resource_requests_memory_bytes"] = convertQuantity(val.GetString_(), 1) + fields["resource_requests_memory_bytes"] = convertQuantity(string(val.Format), 1) } } for resourceName, val := range lim { switch resourceName { case "cpu": - fields["resource_limits_millicpu_units"] = convertQuantity(val.GetString_(), 1000) + fields["resource_limits_millicpu_units"] = convertQuantity(string(val.Format), 1000) case "memory": - fields["resource_limits_memory_bytes"] = convertQuantity(val.GetString_(), 1) + fields["resource_limits_memory_bytes"] = convertQuantity(string(val.Format), 1) } } diff --git a/plugins/inputs/kube_inventory/pod_test.go b/plugins/inputs/kube_inventory/pod_test.go index 230fbbef99dab..482331aaff026 100644 --- a/plugins/inputs/kube_inventory/pod_test.go +++ b/plugins/inputs/kube_inventory/pod_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" - v1 "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" - "github.com/ericchiang/k8s/apis/resource" "github.com/influxdata/telegraf/testutil" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestPod(t *testing.T) { @@ -32,7 +32,7 @@ func TestPod(t *testing.T) { name: "no pods", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/pods/": &v1.PodList{}, + "/pods/": &corev1.PodList{}, }, }, hasError: false, @@ -41,79 +41,79 @@ func TestPod(t *testing.T) { name: "collect pods", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/pods/": &v1.PodList{ - Items: []*v1.Pod{ + "/pods/": &corev1.PodList{ + Items: []corev1.Pod{ { - Spec: &v1.PodSpec{ - NodeName: toStrPtr("node1"), - Containers: []*v1.Container{ + Spec: corev1.PodSpec{ + NodeName: "node1", + Containers: []corev1.Container{ { - Name: toStrPtr("running"), - Image: toStrPtr("image1"), - Ports: []*v1.ContainerPort{ + Name: "running", + Image: "image1", + Ports: []corev1.ContainerPort{ { - ContainerPort: toInt32Ptr(8080), - Protocol: toStrPtr("TCP"), + ContainerPort: 8080, + Protocol: "TCP", }, }, - Resources: &v1.ResourceRequirements{ - Limits: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "100m"}, }, - Requests: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Requests: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "100m"}, }, }, }, { - Name: toStrPtr("completed"), - Image: toStrPtr("image1"), - Ports: []*v1.ContainerPort{ + Name: "completed", + Image: "image1", + Ports: []corev1.ContainerPort{ { - ContainerPort: toInt32Ptr(8080), - Protocol: toStrPtr("TCP"), + ContainerPort: 8080, + Protocol: "TCP", }, }, - Resources: &v1.ResourceRequirements{ - Limits: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "100m"}, }, - Requests: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Requests: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "100m"}, }, }, }, { - Name: toStrPtr("waiting"), - Image: toStrPtr("image1"), - Ports: []*v1.ContainerPort{ + Name: "waiting", + Image: "image1", + Ports: []corev1.ContainerPort{ { - ContainerPort: toInt32Ptr(8080), - Protocol: toStrPtr("TCP"), + ContainerPort: 8080, + Protocol: "TCP", }, }, - Resources: &v1.ResourceRequirements{ - Limits: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "100m"}, }, - Requests: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Requests: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "100m"}, }, }, }, }, - Volumes: []*v1.Volume{ + Volumes: []corev1.Volume{ { - Name: toStrPtr("vol1"), - VolumeSource: &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: toStrPtr("pc1"), - ReadOnly: toBoolPtr(true), + Name: "vol1", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pc1", + ReadOnly: true, }, }, }, { - Name: toStrPtr("vol2"), + Name: "vol2", }, }, NodeSelector: map[string]string{ @@ -121,89 +121,89 @@ func TestPod(t *testing.T) { "select2": "s2", }, }, - Status: &v1.PodStatus{ - Phase: toStrPtr("Running"), - HostIP: toStrPtr("180.12.10.18"), - PodIP: toStrPtr("10.244.2.15"), - StartTime: &metav1.Time{Seconds: toInt64Ptr(started.Unix())}, - Conditions: []*v1.PodCondition{ + Status: corev1.PodStatus{ + Phase: "Running", + HostIP: "180.12.10.18", + PodIP: "10.244.2.15", + StartTime: &metav1.Time{Time: started}, + Conditions: []corev1.PodCondition{ { - Type: toStrPtr("Initialized"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + Type: "Initialized", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond1}, }, { - Type: toStrPtr("Ready"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + Type: "Ready", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond2}, }, { - Type: toStrPtr("Scheduled"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + Type: "Scheduled", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond1}, }, }, - ContainerStatuses: []*v1.ContainerStatus{ + ContainerStatuses: []corev1.ContainerStatus{ { - Name: toStrPtr("running"), - State: &v1.ContainerState{ - Running: &v1.ContainerStateRunning{ - StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + Name: "running", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.Time{Time: started}, }, }, - Ready: toBoolPtr(true), - RestartCount: toInt32Ptr(3), - Image: toStrPtr("image1"), - ImageID: toStrPtr("image_id1"), - ContainerID: toStrPtr("docker://54abe32d0094479d3d"), + Ready: true, + RestartCount: 3, + Image: "image1", + ImageID: "image_id1", + ContainerID: "docker://54abe32d0094479d3d", }, { - Name: toStrPtr("completed"), - State: &v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ - StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, - ExitCode: toInt32Ptr(0), - Reason: toStrPtr("Completed"), + Name: "completed", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + StartedAt: metav1.Time{Time: now}, + ExitCode: 0, + Reason: "Completed", }, }, - Ready: toBoolPtr(false), - RestartCount: toInt32Ptr(3), - Image: toStrPtr("image1"), - ImageID: toStrPtr("image_id1"), - ContainerID: toStrPtr("docker://54abe32d0094479d3d"), + Ready: false, + RestartCount: 3, + Image: "image1", + ImageID: "image_id1", + ContainerID: "docker://54abe32d0094479d3d", }, { - Name: toStrPtr("waiting"), - State: &v1.ContainerState{ - Waiting: &v1.ContainerStateWaiting{ - Reason: toStrPtr("PodUninitialized"), + Name: "waiting", + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: "PodUninitialized", }, }, - Ready: toBoolPtr(false), - RestartCount: toInt32Ptr(3), - Image: toStrPtr("image1"), - ImageID: toStrPtr("image_id1"), - ContainerID: toStrPtr("docker://54abe32d0094479d3d"), + Ready: false, + RestartCount: 3, + Image: "image1", + ImageID: "image_id1", + ContainerID: "docker://54abe32d0094479d3d", }, }, }, - Metadata: &metav1.ObjectMeta{ - OwnerReferences: []*metav1.OwnerReference{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ { - ApiVersion: toStrPtr("apps/v1"), - Kind: toStrPtr("DaemonSet"), - Name: toStrPtr("forwarder"), + APIVersion: "apps/v1", + Kind: "DaemonSet", + Name: "forwarder", Controller: toBoolPtr(true), }, }, - Generation: toInt64Ptr(11232), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("pod1"), + Generation: 11232, + Namespace: "ns1", + Name: "pod1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + CreationTimestamp: metav1.Time{Time: created}, }, }, }, @@ -283,11 +283,8 @@ func TestPod(t *testing.T) { } ks.createSelectorFilters() acc := new(testutil.Accumulator) - for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items { - err := ks.gatherPod(*pod, acc) - if err != nil { - t.Errorf("Failed to gather pod - %s", err.Error()) - } + for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items { + ks.gatherPod(pod, acc) } err := acc.FirstError() @@ -324,43 +321,43 @@ func TestPodSelectorFilter(t *testing.T) { cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location()) responseMap := map[string]interface{}{ - "/pods/": &v1.PodList{ - Items: []*v1.Pod{ + "/pods/": &corev1.PodList{ + Items: []corev1.Pod{ { - Spec: &v1.PodSpec{ - NodeName: toStrPtr("node1"), - Containers: []*v1.Container{ + Spec: corev1.PodSpec{ + NodeName: "node1", + Containers: []corev1.Container{ { - Name: toStrPtr("forwarder"), - Image: toStrPtr("image1"), - Ports: []*v1.ContainerPort{ + Name: "forwarder", + Image: "image1", + Ports: []corev1.ContainerPort{ { - ContainerPort: toInt32Ptr(8080), - Protocol: toStrPtr("TCP"), + ContainerPort: 8080, + Protocol: "TCP", }, }, - Resources: &v1.ResourceRequirements{ - Limits: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "100m"}, }, - Requests: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Requests: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "100m"}, }, }, }, }, - Volumes: []*v1.Volume{ + Volumes: []corev1.Volume{ { - Name: toStrPtr("vol1"), - VolumeSource: &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: toStrPtr("pc1"), - ReadOnly: toBoolPtr(true), + Name: "vol1", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pc1", + ReadOnly: true, }, }, }, { - Name: toStrPtr("vol2"), + Name: "vol2", }, }, NodeSelector: map[string]string{ @@ -368,61 +365,61 @@ func TestPodSelectorFilter(t *testing.T) { "select2": "s2", }, }, - Status: &v1.PodStatus{ - Phase: toStrPtr("Running"), - HostIP: toStrPtr("180.12.10.18"), - PodIP: toStrPtr("10.244.2.15"), - StartTime: &metav1.Time{Seconds: toInt64Ptr(started.Unix())}, - Conditions: []*v1.PodCondition{ + Status: corev1.PodStatus{ + Phase: "Running", + HostIP: "180.12.10.18", + PodIP: "10.244.2.15", + StartTime: &metav1.Time{Time: started}, + Conditions: []corev1.PodCondition{ { - Type: toStrPtr("Initialized"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + Type: "Initialized", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond1}, }, { - Type: toStrPtr("Ready"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + Type: "Ready", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond2}, }, { - Type: toStrPtr("Scheduled"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + Type: "Scheduled", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond1}, }, }, - ContainerStatuses: []*v1.ContainerStatus{ + ContainerStatuses: []corev1.ContainerStatus{ { - Name: toStrPtr("forwarder"), - State: &v1.ContainerState{ - Running: &v1.ContainerStateRunning{ - StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + Name: "forwarder", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.Time{Time: now}, }, }, - Ready: toBoolPtr(true), - RestartCount: toInt32Ptr(3), - Image: toStrPtr("image1"), - ImageID: toStrPtr("image_id1"), - ContainerID: toStrPtr("docker://54abe32d0094479d3d"), + Ready: true, + RestartCount: 3, + Image: "image1", + ImageID: "image_id1", + ContainerID: "docker://54abe32d0094479d3d", }, }, }, - Metadata: &metav1.ObjectMeta{ - OwnerReferences: []*metav1.OwnerReference{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ { - ApiVersion: toStrPtr("apps/v1"), - Kind: toStrPtr("DaemonSet"), - Name: toStrPtr("forwarder"), + APIVersion: "apps/v1", + Kind: "DaemonSet", + Name: "forwarder", Controller: toBoolPtr(true), }, }, - Generation: toInt64Ptr(11232), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("pod1"), + Generation: 11232, + Namespace: "ns1", + Name: "pod1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + CreationTimestamp: metav1.Time{Time: created}, }, }, }, @@ -532,11 +529,8 @@ func TestPodSelectorFilter(t *testing.T) { ks.SelectorExclude = v.exclude ks.createSelectorFilters() acc := new(testutil.Accumulator) - for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items { - err := ks.gatherPod(*pod, acc) - if err != nil { - t.Errorf("Failed to gather pod - %s", err.Error()) - } + for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items { + ks.gatherPod(pod, acc) } // Grab selector tags @@ -575,61 +569,61 @@ func TestPodPendingContainers(t *testing.T) { name: "collect pods", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/pods/": &v1.PodList{ - Items: []*v1.Pod{ + "/pods/": &corev1.PodList{ + Items: []corev1.Pod{ { - Spec: &v1.PodSpec{ - NodeName: toStrPtr("node1"), - Containers: []*v1.Container{ + Spec: corev1.PodSpec{ + NodeName: "node1", + Containers: []corev1.Container{ { - Name: toStrPtr("waiting"), - Image: toStrPtr("image1"), - Ports: []*v1.ContainerPort{ + Name: "waiting", + Image: "image1", + Ports: []corev1.ContainerPort{ { - ContainerPort: toInt32Ptr(8080), - Protocol: toStrPtr("TCP"), + ContainerPort: 8080, + Protocol: "TCP", }, }, - Resources: &v1.ResourceRequirements{ - Limits: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "100m"}, }, - Requests: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Requests: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "100m"}, }, }, }, { - Name: toStrPtr("terminated"), - Image: toStrPtr("image1"), - Ports: []*v1.ContainerPort{ + Name: "terminated", + Image: "image1", + Ports: []corev1.ContainerPort{ { - ContainerPort: toInt32Ptr(8080), - Protocol: toStrPtr("TCP"), + ContainerPort: 8080, + Protocol: "TCP", }, }, - Resources: &v1.ResourceRequirements{ - Limits: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "100m"}, }, - Requests: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("100m")}, + Requests: corev1.ResourceList{ + "cpu": resource.Quantity{Format: "100m"}, }, }, }, }, - Volumes: []*v1.Volume{ + Volumes: []corev1.Volume{ { - Name: toStrPtr("vol1"), - VolumeSource: &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: toStrPtr("pc1"), - ReadOnly: toBoolPtr(true), + Name: "vol1", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pc1", + ReadOnly: true, }, }, }, { - Name: toStrPtr("vol2"), + Name: "vol2", }, }, NodeSelector: map[string]string{ @@ -637,48 +631,48 @@ func TestPodPendingContainers(t *testing.T) { "select2": "s2", }, }, - Status: &v1.PodStatus{ - Phase: toStrPtr("Pending"), - Reason: toStrPtr("NetworkNotReady"), - HostIP: toStrPtr("180.12.10.18"), - PodIP: toStrPtr("10.244.2.15"), - StartTime: &metav1.Time{Seconds: toInt64Ptr(started.Unix())}, - Conditions: []*v1.PodCondition{ + Status: corev1.PodStatus{ + Phase: "Pending", + Reason: "NetworkNotReady", + HostIP: "180.12.10.18", + PodIP: "10.244.2.15", + StartTime: &metav1.Time{Time: started}, + Conditions: []corev1.PodCondition{ { - Type: toStrPtr("Initialized"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + Type: "Initialized", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond1}, }, { - Type: toStrPtr("Ready"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + Type: "Ready", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond2}, }, { - Type: toStrPtr("Scheduled"), - Status: toStrPtr("True"), - LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + Type: "Scheduled", + Status: "True", + LastTransitionTime: metav1.Time{Time: cond1}, }, }, - ContainerStatuses: []*v1.ContainerStatus{}, + ContainerStatuses: []corev1.ContainerStatus{}, }, - Metadata: &metav1.ObjectMeta{ - OwnerReferences: []*metav1.OwnerReference{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ { - ApiVersion: toStrPtr("apps/v1"), - Kind: toStrPtr("DaemonSet"), - Name: toStrPtr("forwarder"), + APIVersion: "apps/v1", + Kind: "DaemonSet", + Name: "forwarder", Controller: toBoolPtr(true), }, }, - Generation: toInt64Ptr(11232), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("pod1"), + Generation: 11232, + Namespace: "ns1", + Name: "pod1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + CreationTimestamp: metav1.Time{Time: created}, }, }, }, @@ -740,11 +734,8 @@ func TestPodPendingContainers(t *testing.T) { } ks.createSelectorFilters() acc := new(testutil.Accumulator) - for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items { - err := ks.gatherPod(*pod, acc) - if err != nil { - t.Errorf("Failed to gather pod - %s", err.Error()) - } + for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items { + ks.gatherPod(pod, acc) } err := acc.FirstError() diff --git a/plugins/inputs/kube_inventory/service.go b/plugins/inputs/kube_inventory/service.go index 0c749ea8ac3fc..c2a7b7077e498 100644 --- a/plugins/inputs/kube_inventory/service.go +++ b/plugins/inputs/kube_inventory/service.go @@ -2,9 +2,8 @@ package kube_inventory import ( "context" - "time" - "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -16,53 +15,50 @@ func collectServices(ctx context.Context, acc telegraf.Accumulator, ki *Kubernet return } for _, i := range list.Items { - if err = ki.gatherService(*i, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherService(i, acc) } } -func (ki *KubernetesInventory) gatherService(s v1.Service, acc telegraf.Accumulator) error { - if s.Metadata.CreationTimestamp.GetSeconds() == 0 && s.Metadata.CreationTimestamp.GetNanos() == 0 { - return nil +func (ki *KubernetesInventory) gatherService(s corev1.Service, acc telegraf.Accumulator) { + if s.GetCreationTimestamp().Second() == 0 && s.GetCreationTimestamp().Nanosecond() == 0 { + return } fields := map[string]interface{}{ - "created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(), - "generation": s.Metadata.GetGeneration(), + "created": s.GetCreationTimestamp().UnixNano(), + "generation": s.Generation, } tags := map[string]string{ - "service_name": s.Metadata.GetName(), - "namespace": s.Metadata.GetNamespace(), + "service_name": s.Name, + "namespace": s.Namespace, } - for key, val := range s.GetSpec().GetSelector() { + for key, val := range s.Spec.Selector { if ki.selectorFilter.Match(key) { tags["selector_"+key] = val } } var getPorts = func() { - for _, port := range s.GetSpec().GetPorts() { - fields["port"] = port.GetPort() - fields["target_port"] = port.GetTargetPort().GetIntVal() + for _, port := range s.Spec.Ports { + fields["port"] = port.Port + fields["target_port"] = port.TargetPort.IntVal - tags["port_name"] = port.GetName() - tags["port_protocol"] = port.GetProtocol() + tags["port_name"] = port.Name + tags["port_protocol"] = string(port.Protocol) - if s.GetSpec().GetType() == "ExternalName" { - tags["external_name"] = s.GetSpec().GetExternalName() + if s.Spec.Type == "ExternalName" { + tags["external_name"] = s.Spec.ExternalName } else { - tags["cluster_ip"] = s.GetSpec().GetClusterIP() + tags["cluster_ip"] = s.Spec.ClusterIP } acc.AddFields(serviceMeasurement, fields, tags) } } - if externIPs := s.GetSpec().GetExternalIPs(); externIPs != nil { + if externIPs := s.Spec.ExternalIPs; externIPs != nil { for _, ip := range externIPs { tags["ip"] = ip @@ -71,6 +67,4 @@ func (ki *KubernetesInventory) gatherService(s v1.Service, acc telegraf.Accumula } else { getPorts() } - - return nil } diff --git a/plugins/inputs/kube_inventory/service_test.go b/plugins/inputs/kube_inventory/service_test.go index 3b1089130fbf7..293152074789a 100644 --- a/plugins/inputs/kube_inventory/service_test.go +++ b/plugins/inputs/kube_inventory/service_test.go @@ -6,9 +6,10 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" "github.com/influxdata/telegraf/testutil" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "strings" ) @@ -30,7 +31,7 @@ func TestService(t *testing.T) { name: "no service", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/service/": &v1.ServiceList{}, + "/service/": &corev1.ServiceList{}, }, }, hasError: false, @@ -39,30 +40,32 @@ func TestService(t *testing.T) { name: "collect service", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/service/": &v1.ServiceList{ - Items: []*v1.Service{ + "/service/": &corev1.ServiceList{ + Items: []corev1.Service{ { - Spec: &v1.ServiceSpec{ - Ports: []*v1.ServicePort{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ { - Port: toInt32Ptr(8080), - TargetPort: toIntStrPtrI(1234), - Name: toStrPtr("diagnostic"), - Protocol: toStrPtr("TCP"), + Port: 8080, + TargetPort: intstr.IntOrString{ + IntVal: 1234, + }, + Name: "diagnostic", + Protocol: "TCP", }, }, ExternalIPs: []string{"1.0.0.127"}, - ClusterIP: toStrPtr("127.0.0.1"), + ClusterIP: "127.0.0.1", Selector: map[string]string{ "select1": "s1", "select2": "s2", }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("checker"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "checker", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -104,11 +107,8 @@ func TestService(t *testing.T) { ks.SelectorExclude = v.exclude ks.createSelectorFilters() acc := new(testutil.Accumulator) - for _, service := range ((v.handler.responseMap["/service/"]).(*v1.ServiceList)).Items { - err := ks.gatherService(*service, acc) - if err != nil { - t.Errorf("Failed to gather service - %s", err.Error()) - } + for _, service := range ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items { + ks.gatherService(service, acc) } err := acc.FirstError() @@ -142,30 +142,32 @@ func TestServiceSelectorFilter(t *testing.T) { now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) responseMap := map[string]interface{}{ - "/service/": &v1.ServiceList{ - Items: []*v1.Service{ + "/service/": &corev1.ServiceList{ + Items: []corev1.Service{ { - Spec: &v1.ServiceSpec{ - Ports: []*v1.ServicePort{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ { - Port: toInt32Ptr(8080), - TargetPort: toIntStrPtrI(1234), - Name: toStrPtr("diagnostic"), - Protocol: toStrPtr("TCP"), + Port: 8080, + TargetPort: intstr.IntOrString{ + IntVal: 1234, + }, + Name: "diagnostic", + Protocol: "TCP", }, }, ExternalIPs: []string{"1.0.0.127"}, - ClusterIP: toStrPtr("127.0.0.1"), + ClusterIP: "127.0.0.1", Selector: map[string]string{ "select1": "s1", "select2": "s2", }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("checker"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "checker", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -275,11 +277,8 @@ func TestServiceSelectorFilter(t *testing.T) { ks.SelectorExclude = v.exclude ks.createSelectorFilters() acc := new(testutil.Accumulator) - for _, service := range ((v.handler.responseMap["/service/"]).(*v1.ServiceList)).Items { - err := ks.gatherService(*service, acc) - if err != nil { - t.Errorf("Failed to gather service - %s", err.Error()) - } + for _, service := range ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items { + ks.gatherService(service, acc) } // Grab selector tags diff --git a/plugins/inputs/kube_inventory/statefulset.go b/plugins/inputs/kube_inventory/statefulset.go index fe25f19f08440..22bc7c8bc0c75 100644 --- a/plugins/inputs/kube_inventory/statefulset.go +++ b/plugins/inputs/kube_inventory/statefulset.go @@ -2,9 +2,8 @@ package kube_inventory import ( "context" - "time" - "github.com/ericchiang/k8s/apis/apps/v1" + v1 "k8s.io/api/apps/v1" "github.com/influxdata/telegraf" ) @@ -16,7 +15,7 @@ func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *Kube return } for _, s := range list.Items { - if err = ki.gatherStatefulSet(*s, acc); err != nil { + if err = ki.gatherStatefulSet(s, acc); err != nil { acc.AddError(err) return } @@ -26,20 +25,20 @@ func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *Kube func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) error { status := s.Status fields := map[string]interface{}{ - "created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(), - "generation": *s.Metadata.Generation, - "replicas": *status.Replicas, - "replicas_current": *status.CurrentReplicas, - "replicas_ready": *status.ReadyReplicas, - "replicas_updated": *status.UpdatedReplicas, + "created": s.GetCreationTimestamp().UnixNano(), + "generation": s.Generation, + "replicas": status.Replicas, + "replicas_current": status.CurrentReplicas, + "replicas_ready": status.ReadyReplicas, + "replicas_updated": status.UpdatedReplicas, "spec_replicas": *s.Spec.Replicas, - "observed_generation": *s.Status.ObservedGeneration, + "observed_generation": s.Status.ObservedGeneration, } tags := map[string]string{ - "statefulset_name": *s.Metadata.Name, - "namespace": *s.Metadata.Namespace, + "statefulset_name": s.Name, + "namespace": s.Namespace, } - for key, val := range s.GetSpec().GetSelector().GetMatchLabels() { + for key, val := range s.Spec.Selector.MatchLabels { if ki.selectorFilter.Match(key) { tags["selector_"+key] = val } diff --git a/plugins/inputs/kube_inventory/statefulset_test.go b/plugins/inputs/kube_inventory/statefulset_test.go index 689cbadbc4b8d..dee8b08b5b887 100644 --- a/plugins/inputs/kube_inventory/statefulset_test.go +++ b/plugins/inputs/kube_inventory/statefulset_test.go @@ -6,8 +6,8 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/apps/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/influxdata/telegraf/testutil" ) @@ -38,16 +38,16 @@ func TestStatefulSet(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/statefulsets/": &v1.StatefulSetList{ - Items: []*v1.StatefulSet{ + Items: []v1.StatefulSet{ { - Status: &v1.StatefulSetStatus{ - Replicas: toInt32Ptr(2), - CurrentReplicas: toInt32Ptr(4), - ReadyReplicas: toInt32Ptr(1), - UpdatedReplicas: toInt32Ptr(3), - ObservedGeneration: toInt64Ptr(119), + Status: v1.StatefulSetStatus{ + Replicas: 2, + CurrentReplicas: 4, + ReadyReplicas: 1, + UpdatedReplicas: 3, + ObservedGeneration: 119, }, - Spec: &v1.StatefulSetSpec{ + Spec: v1.StatefulSetSpec{ Replicas: toInt32Ptr(3), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -56,15 +56,11 @@ func TestStatefulSet(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(332), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("sts1"), - Labels: map[string]string{ - "lab1": "v1", - "lab2": "v2", - }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 332, + Namespace: "ns1", + Name: "sts1", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -106,7 +102,7 @@ func TestStatefulSet(t *testing.T) { ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { - err := ks.gatherStatefulSet(*ss, acc) + err := ks.gatherStatefulSet(ss, acc) if err != nil { t.Errorf("Failed to gather ss - %s", err.Error()) } @@ -144,16 +140,16 @@ func TestStatefulSetSelectorFilter(t *testing.T) { responseMap := map[string]interface{}{ "/statefulsets/": &v1.StatefulSetList{ - Items: []*v1.StatefulSet{ + Items: []v1.StatefulSet{ { - Status: &v1.StatefulSetStatus{ - Replicas: toInt32Ptr(2), - CurrentReplicas: toInt32Ptr(4), - ReadyReplicas: toInt32Ptr(1), - UpdatedReplicas: toInt32Ptr(3), - ObservedGeneration: toInt64Ptr(119), + Status: v1.StatefulSetStatus{ + Replicas: 2, + CurrentReplicas: 4, + ReadyReplicas: 1, + UpdatedReplicas: 3, + ObservedGeneration: 119, }, - Spec: &v1.StatefulSetSpec{ + Spec: v1.StatefulSetSpec{ Replicas: toInt32Ptr(3), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -162,15 +158,11 @@ func TestStatefulSetSelectorFilter(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(332), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("sts1"), - Labels: map[string]string{ - "lab1": "v1", - "lab2": "v2", - }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 332, + Namespace: "ns1", + Name: "sts1", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -281,7 +273,7 @@ func TestStatefulSetSelectorFilter(t *testing.T) { ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { - err := ks.gatherStatefulSet(*ss, acc) + err := ks.gatherStatefulSet(ss, acc) if err != nil { t.Errorf("Failed to gather ss - %s", err.Error()) } diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index bb599bf38a0c3..97473ef96c7e3 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -2,6 +2,7 @@ package prometheus import ( "context" + "crypto/tls" "encoding/json" "fmt" "io/ioutil" @@ -14,11 +15,14 @@ import ( "sync" "time" - "github.com/ericchiang/k8s" - corev1 "github.com/ericchiang/k8s/apis/core/v1" "github.com/ghodss/yaml" "github.com/kubernetes/apimachinery/pkg/fields" "github.com/kubernetes/apimachinery/pkg/labels" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" ) type payload struct { @@ -42,22 +46,26 @@ const cAdvisorPodListDefaultInterval = 60 // loadClient parses a kubeconfig from a file and returns a Kubernetes // client. It does not support extensions or client auth providers. -func loadClient(kubeconfigPath string) (*k8s.Client, error) { +func loadClient(kubeconfigPath string) (*kubernetes.Clientset, error) { data, err := ioutil.ReadFile(kubeconfigPath) if err != nil { return nil, fmt.Errorf("failed reading '%s': %v", kubeconfigPath, err) } // Unmarshal YAML into a Kubernetes config object. - var config k8s.Config + var config rest.Config if err := yaml.Unmarshal(data, &config); err != nil { return nil, err } - return k8s.NewClient(&config) + return kubernetes.NewForConfig(&config) } func (p *Prometheus) start(ctx context.Context) error { - client, err := k8s.NewInClusterClient() + config, err := rest.InClusterConfig() + if err != nil { + return fmt.Errorf("Failed to get InClusterConfig - %v", err) + } + client, err := kubernetes.NewForConfig(config) if err != nil { u, err := user.Current() if err != nil { @@ -85,12 +93,12 @@ func (p *Prometheus) start(ctx context.Context) error { return case <-time.After(time.Second): if p.isNodeScrapeScope { - err = p.cAdvisor(ctx, client) + err = p.cAdvisor(ctx) if err != nil { p.Log.Errorf("Unable to monitor pods with node scrape scope: %s", err.Error()) } } else { - err = p.watch(ctx, client) + err = p.watchPod(ctx, client) if err != nil { p.Log.Errorf("Unable to watch resources: %s", err.Error()) } @@ -106,65 +114,52 @@ func (p *Prometheus) start(ctx context.Context) error { // (without the scrape annotations). K8s may re-assign the old pod ip to the non-scrape // pod, causing errors in the logs. This is only true if the pod going offline is not // directed to do so by K8s. -func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error { - selectors := podSelector(p) - - pod := &corev1.Pod{} - watcher, err := client.Watch(ctx, p.PodNamespace, &corev1.Pod{}, selectors...) +func (p *Prometheus) watchPod(ctx context.Context, client *kubernetes.Clientset) error { + watcher, err := client.CoreV1().Pods(p.PodNamespace).Watch(ctx, metav1.ListOptions{ + LabelSelector: p.KubernetesLabelSelector, + FieldSelector: p.KubernetesFieldSelector, + }) if err != nil { return err } - defer watcher.Close() - - for { - select { - case <-ctx.Done(): - return nil - default: + pod := &corev1.Pod{} + go func() { + for event := range watcher.ResultChan() { pod = &corev1.Pod{} - // An error here means we need to reconnect the watcher. - eventType, err := watcher.Next(pod) - if err != nil { - return err - } - // If the pod is not "ready", there will be no ip associated with it. - if pod.GetMetadata().GetAnnotations()["prometheus.io/scrape"] != "true" || - !podReady(pod.Status.GetContainerStatuses()) { + if pod.Annotations["prometheus.io/scrape"] != "true" || + !podReady(pod.Status.ContainerStatuses) { continue } - switch eventType { - case k8s.EventAdded: + switch event.Type { + case watch.Added: registerPod(pod, p) - case k8s.EventModified: + case watch.Modified: // To avoid multiple actions for each event, unregister on the first event // in the delete sequence, when the containers are still "ready". - if pod.Metadata.GetDeletionTimestamp() != nil { + if pod.GetDeletionTimestamp() != nil { unregisterPod(pod, p) } else { registerPod(pod, p) } } } - } -} + }() -func (p *Prometheus) cAdvisor(ctx context.Context, client *k8s.Client) error { - // Set InsecureSkipVerify for cAdvisor client since Node IP will not be a SAN for the CA cert - tlsConfig := client.Client.Transport.(*http.Transport).TLSClientConfig - tlsConfig.InsecureSkipVerify = true + return nil +} +func (p *Prometheus) cAdvisor(ctx context.Context) error { // The request will be the same each time - podsUrl := fmt.Sprintf("https://%s:10250/pods", p.NodeIP) - req, err := http.NewRequest("GET", podsUrl, nil) + podsURL := fmt.Sprintf("https://%s:10250/pods", p.NodeIP) + req, err := http.NewRequest("GET", podsURL, nil) if err != nil { - return fmt.Errorf("Error when creating request to %s to get pod list: %w", podsUrl, err) + return fmt.Errorf("Error when creating request to %s to get pod list: %w", podsURL, err) } - client.SetHeaders(req.Header) // Update right away so code is not waiting the length of the specified scrape interval initially - err = updateCadvisorPodList(ctx, p, client, req) + err = updateCadvisorPodList(p, req) if err != nil { return fmt.Errorf("Error initially updating pod list: %w", err) } @@ -179,7 +174,7 @@ func (p *Prometheus) cAdvisor(ctx context.Context, client *k8s.Client) error { case <-ctx.Done(): return nil case <-time.After(time.Duration(scrapeInterval) * time.Second): - err := updateCadvisorPodList(ctx, p, client, req) + err := updateCadvisorPodList(p, req) if err != nil { return fmt.Errorf("Error updating pod list: %w", err) } @@ -187,8 +182,11 @@ func (p *Prometheus) cAdvisor(ctx context.Context, client *k8s.Client) error { } } -func updateCadvisorPodList(ctx context.Context, p *Prometheus, client *k8s.Client, req *http.Request) error { - resp, err := client.Client.Do(req) +func updateCadvisorPodList(p *Prometheus, req *http.Request) error { + http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + httpClient := http.Client{} + + resp, err := httpClient.Do(req) if err != nil { return fmt.Errorf("Error when making request for pod list: %w", err) } @@ -215,8 +213,8 @@ func updateCadvisorPodList(ctx context.Context, p *Prometheus, client *k8s.Clien // and if namespace and selectors are specified and match for _, pod := range pods { if necessaryPodFieldsArePresent(pod) && - pod.GetMetadata().GetAnnotations()["prometheus.io/scrape"] == "true" && - podReady(pod.GetStatus().GetContainerStatuses()) && + pod.Annotations["prometheus.io/scrape"] == "true" && + podReady(pod.Status.ContainerStatuses) && podHasMatchingNamespace(pod, p) && podHasMatchingLabelSelector(pod, p.podLabelSelector) && podHasMatchingFieldSelector(pod, p.podFieldSelector) { @@ -230,12 +228,9 @@ func updateCadvisorPodList(ctx context.Context, p *Prometheus, client *k8s.Clien } func necessaryPodFieldsArePresent(pod *corev1.Pod) bool { - return pod.GetMetadata() != nil && - pod.GetMetadata().GetAnnotations() != nil && - pod.GetMetadata().GetLabels() != nil && - pod.GetSpec() != nil && - pod.GetStatus() != nil && - pod.GetStatus().GetContainerStatuses() != nil + return pod.Annotations != nil && + pod.Labels != nil && + pod.Status.ContainerStatuses != nil } /* See the docs on kubernetes label selectors: @@ -246,7 +241,7 @@ func podHasMatchingLabelSelector(pod *corev1.Pod, labelSelector labels.Selector) return true } - var labelsSet labels.Set = pod.GetMetadata().GetLabels() + var labelsSet labels.Set = pod.Labels return labelSelector.Matches(labelsSet) } @@ -260,23 +255,14 @@ func podHasMatchingFieldSelector(pod *corev1.Pod, fieldSelector fields.Selector) return true } - podSpec := pod.GetSpec() - podStatus := pod.GetStatus() - - // Spec and Status shouldn't be nil. - // Error handling just in case something goes wrong but won't crash telegraf - if podSpec == nil || podStatus == nil { - return false - } - fieldsSet := make(fields.Set) - fieldsSet["spec.nodeName"] = podSpec.GetNodeName() - fieldsSet["spec.restartPolicy"] = podSpec.GetRestartPolicy() - fieldsSet["spec.schedulerName"] = podSpec.GetSchedulerName() - fieldsSet["spec.serviceAccountName"] = podSpec.GetServiceAccountName() - fieldsSet["status.phase"] = podStatus.GetPhase() - fieldsSet["status.podIP"] = podStatus.GetPodIP() - fieldsSet["status.nominatedNodeName"] = podStatus.GetNominatedNodeName() + fieldsSet["spec.nodeName"] = pod.Spec.NodeName + fieldsSet["spec.restartPolicy"] = string(pod.Spec.RestartPolicy) + fieldsSet["spec.schedulerName"] = pod.Spec.SchedulerName + fieldsSet["spec.serviceAccountName"] = pod.Spec.ServiceAccountName + fieldsSet["status.phase"] = string(pod.Status.Phase) + fieldsSet["status.podIP"] = pod.Status.PodIP + fieldsSet["status.nominatedNodeName"] = pod.Status.NominatedNodeName return fieldSelector.Matches(fieldsSet) } @@ -286,35 +272,21 @@ func podHasMatchingFieldSelector(pod *corev1.Pod, fieldSelector fields.Selector) * Else return true */ func podHasMatchingNamespace(pod *corev1.Pod, p *Prometheus) bool { - return !(p.PodNamespace != "" && pod.GetMetadata().GetNamespace() != p.PodNamespace) + return !(p.PodNamespace != "" && pod.Namespace != p.PodNamespace) } -func podReady(statuss []*corev1.ContainerStatus) bool { +func podReady(statuss []corev1.ContainerStatus) bool { if len(statuss) == 0 { return false } for _, cs := range statuss { - if !cs.GetReady() { + if !cs.Ready { return false } } return true } -func podSelector(p *Prometheus) []k8s.Option { - options := []k8s.Option{} - - if len(p.KubernetesLabelSelector) > 0 { - options = append(options, k8s.QueryParam("labelSelector", p.KubernetesLabelSelector)) - } - - if len(p.KubernetesFieldSelector) > 0 { - options = append(options, k8s.QueryParam("fieldSelector", p.KubernetesFieldSelector)) - } - - return options -} - func registerPod(pod *corev1.Pod, p *Prometheus) { if p.kubernetesPods == nil { p.kubernetesPods = map[string]URLAndAddress{} @@ -326,14 +298,14 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { log.Printf("D! [inputs.prometheus] will scrape metrics from %q", *targetURL) // add annotation as metrics tags - tags := pod.GetMetadata().GetAnnotations() + tags := pod.Annotations if tags == nil { tags = map[string]string{} } - tags["pod_name"] = pod.GetMetadata().GetName() - tags["namespace"] = pod.GetMetadata().GetNamespace() + tags["pod_name"] = pod.Name + tags["namespace"] = pod.Namespace // add labels as metrics tags - for k, v := range pod.GetMetadata().GetLabels() { + for k, v := range pod.Labels { tags[k] = v } URL, err := url.Parse(*targetURL) @@ -358,16 +330,16 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { } func getScrapeURL(pod *corev1.Pod) *string { - ip := pod.Status.GetPodIP() + ip := pod.Status.PodIP if ip == "" { // return as if scrape was disabled, we will be notified again once the pod // has an IP return nil } - scheme := pod.GetMetadata().GetAnnotations()["prometheus.io/scheme"] - path := pod.GetMetadata().GetAnnotations()["prometheus.io/path"] - port := pod.GetMetadata().GetAnnotations()["prometheus.io/port"] + scheme := pod.Annotations["prometheus.io/scheme"] + path := pod.Annotations["prometheus.io/path"] + port := pod.Annotations["prometheus.io/port"] if scheme == "" { scheme = "http" @@ -397,7 +369,7 @@ func unregisterPod(pod *corev1.Pod, p *Prometheus) { } log.Printf("D! [inputs.prometheus] registered a delete request for %q in namespace %q", - pod.GetMetadata().GetName(), pod.GetMetadata().GetNamespace()) + pod.Name, pod.Namespace) p.lock.Lock() defer p.lock.Unlock() diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 7e6e62409e34d..18d9aa603a24c 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -1,58 +1,56 @@ package prometheus import ( - "github.com/ericchiang/k8s" "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - - v1 "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" - "github.com/kubernetes/apimachinery/pkg/fields" "github.com/kubernetes/apimachinery/pkg/labels" + "github.com/stretchr/testify/assert" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestScrapeURLNoAnnotations(t *testing.T) { - p := &v1.Pod{Metadata: &metav1.ObjectMeta{}} - p.GetMetadata().Annotations = map[string]string{} + p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}} + p.Annotations = map[string]string{} url := getScrapeURL(p) assert.Nil(t, url) } func TestScrapeURLAnnotationsNoScrape(t *testing.T) { - p := &v1.Pod{Metadata: &metav1.ObjectMeta{}} - p.Metadata.Name = str("myPod") - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "false"} + p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}} + p.Name = "myPod" + p.Annotations = map[string]string{"prometheus.io/scrape": "false"} url := getScrapeURL(p) assert.Nil(t, url) } func TestScrapeURLAnnotations(t *testing.T) { p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} url := getScrapeURL(p) assert.Equal(t, "http://127.0.0.1:9102/metrics", *url) } func TestScrapeURLAnnotationsCustomPort(t *testing.T) { p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} url := getScrapeURL(p) assert.Equal(t, "http://127.0.0.1:9000/metrics", *url) } func TestScrapeURLAnnotationsCustomPath(t *testing.T) { p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} url := getScrapeURL(p) assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) } func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) { p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"} url := getScrapeURL(p) assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) } @@ -61,7 +59,7 @@ func TestAddPod(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}} p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) assert.Equal(t, 1, len(prom.kubernetesPods)) } @@ -70,9 +68,9 @@ func TestAddMultipleDuplicatePods(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}} p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - p.Metadata.Name = str("Pod2") + p.Name = "Pod2" registerPod(p, prom) assert.Equal(t, 1, len(prom.kubernetesPods)) } @@ -81,10 +79,10 @@ func TestAddMultiplePods(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}} p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - p.Metadata.Name = str("Pod2") - p.Status.PodIP = str("127.0.0.2") + p.Name = "Pod2" + p.Status.PodIP = "127.0.0.2" registerPod(p, prom) assert.Equal(t, 2, len(prom.kubernetesPods)) } @@ -93,68 +91,23 @@ func TestDeletePods(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}} p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) unregisterPod(p, prom) assert.Equal(t, 0, len(prom.kubernetesPods)) } -func TestPodSelector(t *testing.T) { - cases := []struct { - expected []k8s.Option - labelselector string - fieldselector string - }{ - { - expected: []k8s.Option{ - k8s.QueryParam("labelSelector", "key1=val1,key2=val2,key3"), - k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"), - }, - labelselector: "key1=val1,key2=val2,key3", - fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com", - }, - { - expected: []k8s.Option{ - k8s.QueryParam("labelSelector", "key1"), - k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"), - }, - labelselector: "key1", - fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com", - }, - { - expected: []k8s.Option{ - k8s.QueryParam("labelSelector", "key1"), - k8s.QueryParam("fieldSelector", "somefield"), - }, - labelselector: "key1", - fieldselector: "somefield", - }, - } - - for _, c := range cases { - prom := &Prometheus{ - Log: testutil.Logger{}, - KubernetesLabelSelector: c.labelselector, - KubernetesFieldSelector: c.fieldselector, - } - - output := podSelector(prom) - - assert.Equal(t, len(output), len(c.expected)) - } -} - func TestPodHasMatchingNamespace(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}, PodNamespace: "default"} pod := pod() - pod.Metadata.Name = str("Pod1") - pod.Metadata.Namespace = str("default") + pod.Name = "Pod1" + pod.Namespace = "default" shouldMatch := podHasMatchingNamespace(pod, prom) assert.Equal(t, true, shouldMatch) - pod.Metadata.Name = str("Pod2") - pod.Metadata.Namespace = str("namespace") + pod.Name = "Pod2" + pod.Namespace = "namespace" shouldNotMatch := podHasMatchingNamespace(pod, prom) assert.Equal(t, false, shouldNotMatch) } @@ -164,13 +117,13 @@ func TestPodHasMatchingLabelSelector(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}, KubernetesLabelSelector: labelSelectorString} pod := pod() - pod.Metadata.Labels = make(map[string]string) - pod.Metadata.Labels["label0"] = "label0" - pod.Metadata.Labels["label1"] = "label1" - pod.Metadata.Labels["label2"] = "label2" - pod.Metadata.Labels["label3"] = "label3" - pod.Metadata.Labels["label4"] = "label4" - pod.Metadata.Labels["label5"] = "label5" + pod.Labels = make(map[string]string) + pod.Labels["label0"] = "label0" + pod.Labels["label1"] = "label1" + pod.Labels["label2"] = "label2" + pod.Labels["label3"] = "label3" + pod.Labels["label4"] = "label4" + pod.Labels["label5"] = "label5" labelSelector, err := labels.Parse(prom.KubernetesLabelSelector) assert.Equal(t, err, nil) @@ -181,8 +134,8 @@ func TestPodHasMatchingFieldSelector(t *testing.T) { fieldSelectorString := "status.podIP=127.0.0.1,spec.restartPolicy=Always,spec.NodeName!=nodeName" prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} pod := pod() - pod.Spec.RestartPolicy = str("Always") - pod.Spec.NodeName = str("node1000") + pod.Spec.RestartPolicy = "Always" + pod.Spec.NodeName = "node1000" fieldSelector, err := fields.ParseSelector(prom.KubernetesFieldSelector) assert.Equal(t, err, nil) @@ -193,18 +146,18 @@ func TestInvalidFieldSelector(t *testing.T) { fieldSelectorString := "status.podIP=127.0.0.1,spec.restartPolicy=Always,spec.NodeName!=nodeName,spec.nodeName" prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} pod := pod() - pod.Spec.RestartPolicy = str("Always") - pod.Spec.NodeName = str("node1000") + pod.Spec.RestartPolicy = "Always" + pod.Spec.NodeName = "node1000" _, err := fields.ParseSelector(prom.KubernetesFieldSelector) assert.NotEqual(t, err, nil) } -func pod() *v1.Pod { - p := &v1.Pod{Metadata: &metav1.ObjectMeta{}, Status: &v1.PodStatus{}, Spec: &v1.PodSpec{}} - p.Status.PodIP = str("127.0.0.1") - p.Metadata.Name = str("myPod") - p.Metadata.Namespace = str("default") +func pod() *corev1.Pod { + p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}, Status: corev1.PodStatus{}, Spec: corev1.PodSpec{}} + p.Status.PodIP = "127.0.0.1" + p.Name = "myPod" + p.Namespace = "default" return p } From 1746f96f15e9c43749329a748ecf9b8eec197698 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 17 Mar 2021 18:34:17 -0400 Subject: [PATCH 311/761] Update build version to 1.19.0 --- build_version.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_version.txt b/build_version.txt index 84cc529467b05..815d5ca06d530 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.18.0 +1.19.0 From 1eb47e245c0c22270aaf9a42938f5f5f6697a959 Mon Sep 17 00:00:00 2001 From: Sven Rebhan Date: Thu, 18 Mar 2021 15:39:44 +0100 Subject: [PATCH 312/761] Add input plugin for KNX home automation bus (#7048) --- README.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 1 + go.sum | 2 + plugins/inputs/all/all.go | 1 + plugins/inputs/knx_listener/README.md | 66 ++++++ .../knx_listener/knx_dummy_interface.go | 28 +++ plugins/inputs/knx_listener/knx_listener.go | 197 ++++++++++++++++++ .../inputs/knx_listener/knx_listener_test.go | 135 ++++++++++++ 9 files changed, 432 insertions(+) create mode 100644 plugins/inputs/knx_listener/README.md create mode 100644 plugins/inputs/knx_listener/knx_dummy_interface.go create mode 100644 plugins/inputs/knx_listener/knx_listener.go create mode 100644 plugins/inputs/knx_listener/knx_listener_test.go diff --git a/README.md b/README.md index 5535b9527fe40..45bdc43baadc2 100644 --- a/README.md +++ b/README.md @@ -232,6 +232,7 @@ For documentation on the latest development code see the [documentation index][d * [kernel](./plugins/inputs/kernel) * [kernel_vmstat](./plugins/inputs/kernel_vmstat) * [kibana](./plugins/inputs/kibana) +* [knx_listener](./plugins/inputs/knx_listener) * [kubernetes](./plugins/inputs/kubernetes) * [kube_inventory](./plugins/inputs/kube_inventory) * [lanz](./plugins/inputs/lanz) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 0fafa339fcc56..4ca2e10c52575 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -169,6 +169,7 @@ following works: - github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE) - github.com/tidwall/pretty [MIT License](https://github.com/tidwall/pretty/blob/master/LICENSE) - github.com/tinylib/msgp [MIT License](https://github.com/tinylib/msgp/blob/master/LICENSE) +- github.com/vapourismo/knx-go [MIT License](https://github.com/vapourismo/knx-go/blob/master/LICENSE) - github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE) - github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE) - github.com/vjeantet/grok [Apache License 2.0](https://github.com/vjeantet/grok/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 1754c0fe77425..1dfe1d9a2d712 100644 --- a/go.mod +++ b/go.mod @@ -131,6 +131,7 @@ require ( github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/tidwall/gjson v1.6.0 github.com/tinylib/msgp v1.1.5 + github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect github.com/vjeantet/grok v1.0.1 diff --git a/go.sum b/go.sum index 93c77eda17477..26aea881ce100 100644 --- a/go.sum +++ b/go.sum @@ -711,6 +711,8 @@ github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 h1:iBlTJosRsR70amr0zsmSPvaKNH8K/p3YlX/5SdPmSl8= +github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330/go.mod h1:7+aWBsUJCo9OQRCgTypRmIQW9KKKcPMjtrdnYIBsS70= github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0= github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 9b22cd442a04c..5f7e816487f62 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -87,6 +87,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/kernel_vmstat" _ "github.com/influxdata/telegraf/plugins/inputs/kibana" _ "github.com/influxdata/telegraf/plugins/inputs/kinesis_consumer" + _ "github.com/influxdata/telegraf/plugins/inputs/knx_listener" _ "github.com/influxdata/telegraf/plugins/inputs/kube_inventory" _ "github.com/influxdata/telegraf/plugins/inputs/kubernetes" _ "github.com/influxdata/telegraf/plugins/inputs/lanz" diff --git a/plugins/inputs/knx_listener/README.md b/plugins/inputs/knx_listener/README.md new file mode 100644 index 0000000000000..de015ddc2793b --- /dev/null +++ b/plugins/inputs/knx_listener/README.md @@ -0,0 +1,66 @@ +# KNX input plugin + +The KNX input plugin that listens for messages on the KNX home-automation bus. +This plugin connects to the KNX bus via a KNX-IP interface. +Information about supported KNX message datapoint types can be found at the +underlying "knx-go" project site (https://github.com/vapourismo/knx-go). + +### Configuration + +This is a sample config for the plugin. + +```toml +# Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +[[inputs.KNXListener]] + ## Type of KNX-IP interface. + ## Can be either "tunnel" or "router". + # service_type = "tunnel" + + ## Address of the KNX-IP interface. + service_address = "localhost:3671" + + ## Measurement definition(s) + # [[inputs.KNXListener.measurement]] + # ## Name of the measurement + # name = "temperature" + # ## Datapoint-Type (DPT) of the KNX messages + # dpt = "9.001" + # ## List of Group-Addresses (GAs) assigned to the measurement + # addresses = ["5/5/1"] + + # [[inputs.KNXListener.measurement]] + # name = "illumination" + # dpt = "9.004" + # addresses = ["5/5/3"] +``` + +#### Measurement configurations + +Each measurement contains only one datapoint-type (DPT) and assigns a list of +addresses to this measurement. You can, for example group all temperature sensor +messages within a "temperature" measurement. However, you are free to split +messages of one datapoint-type to multiple measurements. + +**NOTE: You should not assign a group-address (GA) to multiple measurements!** + +### Metrics + +Received KNX data is stored in the named measurement as configured above using +the "value" field. Additional to the value, there are the following tags added +to the datapoint: + - "groupaddress": KNX group-address corresponding to the value + - "unit": unit of the value + - "source": KNX physical address sending the value + +To find out about the datatype of the datapoint please check your KNX project, +the KNX-specification or the "knx-go" project for the corresponding DPT. + +### Example Output + +This section shows example output in Line Protocol format. + +``` +illumination,groupaddress=5/5/4,host=Hugin,source=1.1.12,unit=lux value=17.889999389648438 1582132674999013274 +temperature,groupaddress=5/5/1,host=Hugin,source=1.1.8,unit=°C value=17.799999237060547 1582132663427587361 +windowopen,groupaddress=1/0/1,host=Hugin,source=1.1.3 value=true 1582132630425581320 +``` diff --git a/plugins/inputs/knx_listener/knx_dummy_interface.go b/plugins/inputs/knx_listener/knx_dummy_interface.go new file mode 100644 index 0000000000000..1f897c4d99baa --- /dev/null +++ b/plugins/inputs/knx_listener/knx_dummy_interface.go @@ -0,0 +1,28 @@ +package knx_listener + +import ( + "github.com/vapourismo/knx-go/knx" +) + +type KNXDummyInterface struct { + inbound chan knx.GroupEvent +} + +func NewDummyInterface() (di KNXDummyInterface, err error) { + di, err = KNXDummyInterface{}, nil + di.inbound = make(chan knx.GroupEvent) + + return di, err +} + +func (di *KNXDummyInterface) Send(event knx.GroupEvent) { + di.inbound <- event +} + +func (di *KNXDummyInterface) Inbound() <-chan knx.GroupEvent { + return di.inbound +} + +func (di *KNXDummyInterface) Close() { + close(di.inbound) +} diff --git a/plugins/inputs/knx_listener/knx_listener.go b/plugins/inputs/knx_listener/knx_listener.go new file mode 100644 index 0000000000000..3bb93fbb2dde3 --- /dev/null +++ b/plugins/inputs/knx_listener/knx_listener.go @@ -0,0 +1,197 @@ +package knx_listener + +import ( + "fmt" + "reflect" + "sync" + + "github.com/vapourismo/knx-go/knx" + "github.com/vapourismo/knx-go/knx/dpt" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type KNXInterface interface { + Inbound() <-chan knx.GroupEvent + Close() +} + +type addressTarget struct { + measurement string + datapoint dpt.DatapointValue +} + +type Measurement struct { + Name string + Dpt string + Addresses []string +} + +type KNXListener struct { + ServiceType string `toml:"service_type"` + ServiceAddress string `toml:"service_address"` + Measurements []Measurement `toml:"measurement"` + Log telegraf.Logger `toml:"-"` + + client KNXInterface + gaTargetMap map[string]addressTarget + gaLogbook map[string]bool + + acc telegraf.Accumulator + wg sync.WaitGroup +} + +func (kl *KNXListener) Description() string { + return "Listener capable of handling KNX bus messages provided through a KNX-IP Interface." +} + +func (kl *KNXListener) SampleConfig() string { + return ` + ## Type of KNX-IP interface. + ## Can be either "tunnel" or "router". + # service_type = "tunnel" + + ## Address of the KNX-IP interface. + service_address = "localhost:3671" + + ## Measurement definition(s) + # [[inputs.KNXListener.measurement]] + # ## Name of the measurement + # name = "temperature" + # ## Datapoint-Type (DPT) of the KNX messages + # dpt = "9.001" + # ## List of Group-Addresses (GAs) assigned to the measurement + # addresses = ["5/5/1"] + + # [[inputs.KNXListener.measurement]] + # name = "illumination" + # dpt = "9.004" + # addresses = ["5/5/3"] +` +} + +func (kl *KNXListener) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (kl *KNXListener) Start(acc telegraf.Accumulator) error { + // Store the accumulator for later use + kl.acc = acc + + // Setup a logbook to track unknown GAs to avoid log-spamming + kl.gaLogbook = make(map[string]bool) + + // Construct the mapping of Group-addresses (GAs) to DPTs and the name + // of the measurement + kl.gaTargetMap = make(map[string]addressTarget) + for _, m := range kl.Measurements { + kl.Log.Debugf("Group-address mapping for measurement %q:", m.Name) + for _, ga := range m.Addresses { + kl.Log.Debugf(" %s --> %s", ga, m.Dpt) + if _, ok := kl.gaTargetMap[ga]; ok { + return fmt.Errorf("duplicate specification of address %q", ga) + } + d, ok := dpt.Produce(m.Dpt) + if !ok { + return fmt.Errorf("cannot create datapoint-type %q for address %q", m.Dpt, ga) + } + kl.gaTargetMap[ga] = addressTarget{m.Name, d} + } + } + + // Connect to the KNX-IP interface + kl.Log.Infof("Trying to connect to %q at %q", kl.ServiceType, kl.ServiceAddress) + switch kl.ServiceType { + case "tunnel": + c, err := knx.NewGroupTunnel(kl.ServiceAddress, knx.DefaultTunnelConfig) + if err != nil { + return err + } + kl.client = &c + case "router": + c, err := knx.NewGroupRouter(kl.ServiceAddress, knx.DefaultRouterConfig) + if err != nil { + return err + } + kl.client = &c + case "dummy": + c, err := NewDummyInterface() + if err != nil { + return err + } + kl.client = &c + default: + return fmt.Errorf("invalid interface type: %s", kl.ServiceAddress) + } + kl.Log.Infof("Connected!") + + // Listen to the KNX bus + kl.wg.Add(1) + go func() { + kl.wg.Done() + kl.listen() + }() + + return nil +} + +func (kl *KNXListener) Stop() { + if kl.client != nil { + kl.client.Close() + kl.wg.Wait() + } +} + +func (kl *KNXListener) listen() { + for msg := range kl.client.Inbound() { + // Match GA to DataPointType and measurement name + ga := msg.Destination.String() + target, ok := kl.gaTargetMap[ga] + if !ok && !kl.gaLogbook[ga] { + kl.Log.Infof("Ignoring message %+v for unknown GA %q", msg, ga) + kl.gaLogbook[ga] = true + continue + } + + // Extract the value from the data-frame + err := target.datapoint.Unpack(msg.Data) + if err != nil { + kl.Log.Errorf("Unpacking data failed: %v", err) + continue + } + kl.Log.Debugf("Matched GA %q to measurement %q with value %v", ga, target.measurement, target.datapoint) + + // Convert the DatapointValue interface back to its basic type again + // as otherwise telegraf will not push out the metrics and eat it + // silently. + var value interface{} + vi := reflect.Indirect(reflect.ValueOf(target.datapoint)) + switch vi.Kind() { + case reflect.Bool: + value = vi.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + value = vi.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + value = vi.Uint() + case reflect.Float32, reflect.Float64: + value = vi.Float() + default: + kl.Log.Errorf("Type conversion %v failed for address %q", vi.Kind(), ga) + continue + } + + // Compose the actual data to be pushed out + fields := map[string]interface{}{"value": value} + tags := map[string]string{ + "groupaddress": ga, + "unit": target.datapoint.(dpt.DatapointMeta).Unit(), + "source": msg.Source.String(), + } + kl.acc.AddFields(target.measurement, fields, tags) + } +} + +func init() { + inputs.Add("KNXListener", func() telegraf.Input { return &KNXListener{ServiceType: "tunnel"} }) +} diff --git a/plugins/inputs/knx_listener/knx_listener_test.go b/plugins/inputs/knx_listener/knx_listener_test.go new file mode 100644 index 0000000000000..973605886e3b6 --- /dev/null +++ b/plugins/inputs/knx_listener/knx_listener_test.go @@ -0,0 +1,135 @@ +package knx_listener + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/vapourismo/knx-go/knx" + "github.com/vapourismo/knx-go/knx/cemi" + "github.com/vapourismo/knx-go/knx/dpt" +) + +const epsilon = 1e-3 + +func setValue(data dpt.DatapointValue, value interface{}) error { + d := reflect.Indirect(reflect.ValueOf(data)) + if !d.CanSet() { + return fmt.Errorf("cannot set datapoint %v", data) + } + switch v := value.(type) { + case bool: + d.SetBool(v) + case float64: + d.SetFloat(v) + case int64: + d.SetInt(v) + case uint64: + d.SetUint(v) + default: + return fmt.Errorf("unknown type '%T' when setting value for DPT", value) + } + return nil +} + +func TestRegularReceives_DPT(t *testing.T) { + // Define the test-cases + var testcases = []struct { + address string + dpt string + value interface{} + }{ + {"1/0/1", "1.001", true}, + {"1/0/2", "1.002", false}, + {"1/0/3", "1.003", true}, + {"1/0/9", "1.009", false}, + {"1/1/0", "1.010", true}, + {"5/0/1", "5.001", 12.157}, + {"5/0/3", "5.003", 121.412}, + {"5/0/4", "5.004", uint64(25)}, + {"9/0/1", "9.001", 18.56}, + {"9/0/4", "9.004", 243.84}, + {"9/0/5", "9.005", 12.01}, + {"9/0/7", "9.007", 59.32}, + {"13/0/1", "13.001", int64(-15)}, + {"13/0/2", "13.002", int64(183)}, + {"13/1/0", "13.010", int64(-141)}, + {"13/1/1", "13.011", int64(277)}, + {"13/1/2", "13.012", int64(-4096)}, + {"13/1/3", "13.013", int64(8192)}, + {"13/1/4", "13.014", int64(-65536)}, + {"13/1/5", "13.015", int64(2147483647)}, + {"14/0/0", "14.000", -1.31}, + {"14/0/1", "14.001", 0.44}, + {"14/0/2", "14.002", 32.08}, + // {"14/0/3", "14.003", 92.69}, + // {"14/0/4", "14.004", 1.00794}, + {"14/1/0", "14.010", 5963.78}, + {"14/1/1", "14.011", 150.95}, + } + acc := &testutil.Accumulator{} + + // Setup the unit-under-test + measurements := make([]Measurement, 0, len(testcases)) + for _, testcase := range testcases { + measurements = append(measurements, Measurement{"test", testcase.dpt, []string{testcase.address}}) + } + listener := KNXListener{ + ServiceType: "dummy", + Measurements: measurements, + Log: testutil.Logger{Name: "knx_listener"}, + } + + // Setup the listener to test + err := listener.Start(acc) + require.NoError(t, err) + client := listener.client.(*KNXDummyInterface) + + tstart := time.Now() + + // Send the defined test data + for _, testcase := range testcases { + addr, err := cemi.NewGroupAddrString(testcase.address) + require.NoError(t, err) + + data, ok := dpt.Produce(testcase.dpt) + require.True(t, ok) + err = setValue(data, testcase.value) + require.NoError(t, err) + + client.Send(knx.GroupEvent{ + Command: knx.GroupWrite, + Destination: addr, + Data: data.Pack(), + }) + } + + // Give the accumulator some time to collect the data + acc.Wait(len(testcases)) + + // Stop the listener + listener.Stop() + tstop := time.Now() + + // Check if we got what we expected + require.Len(t, acc.Metrics, len(testcases)) + for i, m := range acc.Metrics { + assert.Equal(t, "test", m.Measurement) + assert.Equal(t, testcases[i].address, m.Tags["groupaddress"]) + assert.Len(t, m.Fields, 1) + switch v := testcases[i].value.(type) { + case bool, int64, uint64: + assert.Equal(t, v, m.Fields["value"]) + case float64: + assert.InDelta(t, v, m.Fields["value"], epsilon) + } + assert.True(t, !tstop.Before(m.Time)) + assert.True(t, !tstart.After(m.Time)) + } +} From cc6c51cf16f2ea90f124aa9797421f3bf66e3c3e Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Thu, 18 Mar 2021 11:33:14 -0400 Subject: [PATCH 313/761] Adds snappy support for http_listener_v2 (#8966) --- .../http_listener_v2/http_listener_v2.go | 53 +++++++++++++------ .../http_listener_v2/http_listener_v2_test.go | 39 ++++++++++++++ 2 files changed, 77 insertions(+), 15 deletions(-) diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index b88e1eb8c0280..cd41f303e7feb 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -12,6 +12,7 @@ import ( "sync" "time" + "github.com/golang/snappy" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" @@ -247,28 +248,50 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) } func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) ([]byte, bool) { - body := req.Body + encoding := req.Header.Get("Content-Encoding") - // Handle gzip request bodies - if req.Header.Get("Content-Encoding") == "gzip" { - var err error - body, err = gzip.NewReader(req.Body) + switch encoding { + case "gzip": + r, err := gzip.NewReader(req.Body) if err != nil { h.Log.Debug(err.Error()) badRequest(res) return nil, false } - defer body.Close() - } - - body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) - bytes, err := ioutil.ReadAll(body) - if err != nil { - tooLarge(res) - return nil, false + defer r.Close() + maxReader := http.MaxBytesReader(res, r, h.MaxBodySize.Size) + bytes, err := ioutil.ReadAll(maxReader) + if err != nil { + tooLarge(res) + return nil, false + } + return bytes, true + case "snappy": + defer req.Body.Close() + bytes, err := ioutil.ReadAll(req.Body) + if err != nil { + h.Log.Debug(err.Error()) + badRequest(res) + return nil, false + } + // snappy block format is only supported by decode/encode not snappy reader/writer + bytes, err = snappy.Decode(nil, bytes) + if err != nil { + h.Log.Debug(err.Error()) + badRequest(res) + return nil, false + } + return bytes, true + default: + defer req.Body.Close() + bytes, err := ioutil.ReadAll(req.Body) + if err != nil { + h.Log.Debug(err.Error()) + badRequest(res) + return nil, false + } + return bytes, true } - - return bytes, true } func (h *HTTPListenerV2) collectQuery(res http.ResponseWriter, req *http.Request) ([]byte, bool) { diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index 1f3b629d09e4c..05eb437429248 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/golang/snappy" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -327,6 +328,44 @@ func TestWriteHTTPGzippedData(t *testing.T) { } } +// test that writing snappy data works +func TestWriteHTTPSnappyData(t *testing.T) { + listener := newTestHTTPListenerV2() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + testData := "cpu_load_short,host=server01 value=12.0 1422568543702900257\n" + encodedData := snappy.Encode(nil, []byte(testData)) + + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(encodedData)) + require.NoError(t, err) + req.Header.Set("Content-Encoding", "snappy") + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + t.Log("Test client request failed. Error: ", err) + } + err = resp.Body.Close() + if err != nil { + t.Log("Test client close failed. Error: ", err) + } + require.NoError(t, err) + require.EqualValues(t, 204, resp.StatusCode) + + hostTags := []string{"server01"} + acc.Wait(1) + + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag}, + ) + } +} + // writes 25,000 metrics to the listener with 10 different writers func TestWriteHTTPHighTraffic(t *testing.T) { if runtime.GOOS == "darwin" { From 67f588cbce90b04bb98518033a61fbc6238db6b6 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Thu, 18 Mar 2021 11:33:58 -0400 Subject: [PATCH 314/761] New prometheus remote write parser (#8967) --- docs/DATA_FORMATS_INPUT.md | 1 + .../parsers/prometheusremotewrite/README.md | 44 +++++ .../parsers/prometheusremotewrite/parser.go | 88 ++++++++++ .../prometheusremotewrite/parser_test.go | 157 ++++++++++++++++++ plugins/parsers/registry.go | 9 + .../prometheusremotewrite_test.go | 7 +- 6 files changed, 303 insertions(+), 3 deletions(-) create mode 100644 plugins/parsers/prometheusremotewrite/README.md create mode 100644 plugins/parsers/prometheusremotewrite/parser.go create mode 100644 plugins/parsers/prometheusremotewrite/parser_test.go diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 3e7dd107becf5..2550e7e1044cc 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -15,6 +15,7 @@ Protocol or in JSON format. - [Logfmt](/plugins/parsers/logfmt) - [Nagios](/plugins/parsers/nagios) - [Prometheus](/plugins/parsers/prometheus) +- [PrometheusRemoteWrite](/plugins/parsers/prometheusremotewrite) - [Value](/plugins/parsers/value), ie: 45 or "booyah" - [Wavefront](/plugins/parsers/wavefront) - [XML](/plugins/parsers/xml) diff --git a/plugins/parsers/prometheusremotewrite/README.md b/plugins/parsers/prometheusremotewrite/README.md new file mode 100644 index 0000000000000..1bad5bd6004ea --- /dev/null +++ b/plugins/parsers/prometheusremotewrite/README.md @@ -0,0 +1,44 @@ +# Prometheus remote write + +Converts prometheus remote write samples directly into Telegraf metrics. It can be used with [http_listener_v2](/plugins/inputs/http_listener_v2). There are no additional configuration options for Prometheus Remote Write Samples. + +### Configuration + +```toml +[[inputs.http_listener_v2]] + ## Address and port to host HTTP listener on + service_address = ":1234" + + ## Path to listen to. + path = "/recieve" + + ## Data format to consume. + data_format = "prometheusremotewrite" +``` + +### Example + +**Example Input** +``` +prompb.WriteRequest{ + Timeseries: []*prompb.TimeSeries{ + { + Labels: []*prompb.Label{ + {Name: "__name__", Value: "go_gc_duration_seconds"}, + {Name: "instance", Value: "localhost:9090"}, + {Name: "job", Value: "prometheus"}, + {Name: "quantile", Value: "0.99"}, + }, + Samples: []prompb.Sample{ + {Value: 4.63, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, + }, + }, + }, + } + +``` + +**Example Output** +``` +prometheus_remote_write,instance=localhost:9090,job=prometheus,quantile=0.99 go_gc_duration_seconds=4.63 1614889298859000000 +``` diff --git a/plugins/parsers/prometheusremotewrite/parser.go b/plugins/parsers/prometheusremotewrite/parser.go new file mode 100644 index 0000000000000..90921dfb14e7a --- /dev/null +++ b/plugins/parsers/prometheusremotewrite/parser.go @@ -0,0 +1,88 @@ +package prometheusremotewrite + +import ( + "fmt" + "math" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + + "github.com/gogo/protobuf/proto" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/prompb" +) + +type Parser struct { + DefaultTags map[string]string +} + +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { + var err error + var metrics []telegraf.Metric + var req prompb.WriteRequest + + if err := proto.Unmarshal(buf, &req); err != nil { + return nil, fmt.Errorf("unable to unmarshal request body: %s", err) + } + + now := time.Now() + + for _, ts := range req.Timeseries { + tags := map[string]string{} + for key, value := range p.DefaultTags { + tags[key] = value + } + + for _, l := range ts.Labels { + tags[l.Name] = l.Value + } + + metricName := tags[model.MetricNameLabel] + if metricName == "" { + return nil, fmt.Errorf("metric name %q not found in tag-set or empty", model.MetricNameLabel) + } + delete(tags, model.MetricNameLabel) + + for _, s := range ts.Samples { + fields := make(map[string]interface{}) + if !math.IsNaN(s.Value) { + fields[metricName] = s.Value + } + // converting to telegraf metric + if len(fields) > 0 { + t := now + if s.Timestamp > 0 { + t = time.Unix(0, s.Timestamp*1000000) + } + m, err := metric.New("prometheus_remote_write", tags, fields, t) + if err != nil { + return nil, fmt.Errorf("unable to convert to telegraf metric: %s", err) + } + metrics = append(metrics, m) + } + } + } + return metrics, err +} + +func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { + metrics, err := p.Parse([]byte(line)) + if err != nil { + return nil, err + } + + if len(metrics) < 1 { + return nil, fmt.Errorf("No metrics in line") + } + + if len(metrics) > 1 { + return nil, fmt.Errorf("More than one metric in line") + } + + return metrics[0], nil +} + +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} diff --git a/plugins/parsers/prometheusremotewrite/parser_test.go b/plugins/parsers/prometheusremotewrite/parser_test.go new file mode 100644 index 0000000000000..d32b90673fdb3 --- /dev/null +++ b/plugins/parsers/prometheusremotewrite/parser_test.go @@ -0,0 +1,157 @@ +package prometheusremotewrite + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/prometheus/prometheus/prompb" + "github.com/stretchr/testify/assert" +) + +func TestParse(t *testing.T) { + prompbInput := prompb.WriteRequest{ + Timeseries: []*prompb.TimeSeries{ + { + Labels: []*prompb.Label{ + {Name: "__name__", Value: "go_gc_duration_seconds"}, + {Name: "quantile", Value: "0.99"}, + }, + Samples: []prompb.Sample{ + {Value: 4.63, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, + }, + }, + { + Labels: []*prompb.Label{ + {Name: "__name__", Value: "prometheus_target_interval_length_seconds"}, + {Name: "job", Value: "prometheus"}, + }, + Samples: []prompb.Sample{ + {Value: 14.99, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, + }, + }, + }, + } + + inoutBytes, err := prompbInput.Marshal() + assert.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus_remote_write", + map[string]string{ + "quantile": "0.99", + }, + map[string]interface{}{ + "go_gc_duration_seconds": float64(4.63), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "prometheus_remote_write", + map[string]string{ + "job": "prometheus", + }, + map[string]interface{}{ + "prometheus_target_interval_length_seconds": float64(14.99), + }, + time.Unix(0, 0), + ), + } + + parser := Parser{ + DefaultTags: map[string]string{}, + } + + metrics, err := parser.Parse(inoutBytes) + assert.NoError(t, err) + assert.Len(t, metrics, 2) + testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func TestDefaultTags(t *testing.T) { + prompbInput := prompb.WriteRequest{ + Timeseries: []*prompb.TimeSeries{ + { + Labels: []*prompb.Label{ + {Name: "__name__", Value: "foo"}, + {Name: "__eg__", Value: "bar"}, + }, + Samples: []prompb.Sample{ + {Value: 1, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, + }, + }, + }, + } + + inoutBytes, err := prompbInput.Marshal() + assert.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus_remote_write", + map[string]string{ + "defaultTag": "defaultTagValue", + "__eg__": "bar", + }, + map[string]interface{}{ + "foo": float64(1), + }, + time.Unix(0, 0), + ), + } + + parser := Parser{ + DefaultTags: map[string]string{ + "defaultTag": "defaultTagValue", + }, + } + + metrics, err := parser.Parse(inoutBytes) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func TestMetricsWithTimestamp(t *testing.T) { + testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC) + testTimeUnix := testTime.UnixNano() / int64(time.Millisecond) + prompbInput := prompb.WriteRequest{ + Timeseries: []*prompb.TimeSeries{ + { + Labels: []*prompb.Label{ + {Name: "__name__", Value: "foo"}, + {Name: "__eg__", Value: "bar"}, + }, + Samples: []prompb.Sample{ + {Value: 1, Timestamp: testTimeUnix}, + }, + }, + }, + } + + inoutBytes, err := prompbInput.Marshal() + assert.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus_remote_write", + map[string]string{ + "__eg__": "bar", + }, + map[string]interface{}{ + "foo": float64(1), + }, + testTime, + ), + } + parser := Parser{ + DefaultTags: map[string]string{}, + } + + metrics, err := parser.Parse(inoutBytes) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics()) +} diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index d01b0ee676565..b2e66636cb1b8 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -15,6 +15,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/logfmt" "github.com/influxdata/telegraf/plugins/parsers/nagios" "github.com/influxdata/telegraf/plugins/parsers/prometheus" + "github.com/influxdata/telegraf/plugins/parsers/prometheusremotewrite" "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/plugins/parsers/wavefront" "github.com/influxdata/telegraf/plugins/parsers/xml" @@ -248,6 +249,8 @@ func NewParser(config *Config) (Parser, error) { ) case "prometheus": parser, err = NewPrometheusParser(config.DefaultTags) + case "prometheusremotewrite": + parser, err = NewPrometheusRemoteWriteParser(config.DefaultTags) case "xml": parser, err = NewXMLParser(config.MetricName, config.DefaultTags, config.XMLConfig) default: @@ -361,6 +364,12 @@ func NewPrometheusParser(defaultTags map[string]string) (Parser, error) { }, nil } +func NewPrometheusRemoteWriteParser(defaultTags map[string]string) (Parser, error) { + return &prometheusremotewrite.Parser{ + DefaultTags: defaultTags, + }, nil +} + func NewXMLParser(metricName string, defaultTags map[string]string, xmlConfigs []XMLConfig) (Parser, error) { // Convert the config formats which is a one-to-one copy configs := make([]xml.Config, len(xmlConfigs)) diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go index 8aecd8ebca9bf..32aba632082b6 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go @@ -3,13 +3,14 @@ package prometheusremotewrite import ( "bytes" "fmt" + "strings" + "testing" + "time" + "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/prompb" - "strings" - "testing" - "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" From 67e8d766c5c836a444999a669ae400092912430f Mon Sep 17 00:00:00 2001 From: "Peter (Stig) Edwards" Date: Thu, 18 Mar 2021 15:39:29 +0000 Subject: [PATCH 315/761] Add a starlark example showing how to obtain IOPS (#8996) --- plugins/processors/starlark/README.md | 1 + .../processors/starlark/testdata/iops.star | 54 +++++++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 plugins/processors/starlark/testdata/iops.star diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index 2922fc42ecb5a..7e1015674df7c 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -220,6 +220,7 @@ def apply(metric): - [drop string fields](/plugins/processors/starlark/testdata/drop_string_fields.star) - Drop fields containing string values. - [drop fields with unexpected type](/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star) - Drop fields containing unexpected value types. +- [iops](/plugins/processors/starlark/testdata/iops.star) - obtain IOPS (to aggregate, to produce max_iops) - [json](/plugins/processors/starlark/testdata/json.star) - an example of processing JSON from a field in a metric - [number logic](/plugins/processors/starlark/testdata/number_logic.star) - transform a numerical value to another numerical value - [pivot](/plugins/processors/starlark/testdata/pivot.star) - Pivots a key's value to be the key for another key. diff --git a/plugins/processors/starlark/testdata/iops.star b/plugins/processors/starlark/testdata/iops.star new file mode 100644 index 0000000000000..e92b79e0ab782 --- /dev/null +++ b/plugins/processors/starlark/testdata/iops.star @@ -0,0 +1,54 @@ +# Example showing how to obtain IOPS (to aggregate, to produce max_iops). Input can be produced by: +# +#[[inputs.diskio]] +# alias = "diskio1s" +# interval = "1s" +# fieldpass = ["reads", "writes"] +# name_suffix = "1s" +# +# Example Input: +# diskio1s,host=hostname,name=diska reads=0i,writes=0i 1554079521000000000 +# diskio1s,host=hostname,name=diska reads=0i,writes=0i 1554079522000000000 +# diskio1s,host=hostname,name=diska reads=110i,writes=0i 1554079523000000000 +# diskio1s,host=hostname,name=diska reads=110i,writes=30i 1554079524000000000 +# diskio1s,host=hostname,name=diska reads=160i,writes=70i 1554079525000000000 +# +# Example Output: +# diskiops,host=hostname,name=diska readsps=0,writesps=0,iops=0 1554079522000000000 +# diskiops,host=hostname,name=diska readsps=110,writesps=0,iops=110 1554079523000000000 +# diskiops,host=hostname,name=diska readsps=0,writesps=30,iops=30 1554079524000000000 +# diskiops,host=hostname,name=diska readsps=50,writesps=40,iops=90 1554079525000000000 + +state = { } + +def apply(metric): + disk_name = metric.tags["name"] + # Load from the shared last_state the metric for the disk name + last = state.get(disk_name) + # Store the deepcopy of the new metric into the shared last_state and assign it to the key "last" + # NB: To store a metric into the shared last_state you have to deep copy it + state[disk_name] = deepcopy(metric) + if last != None: + # Create the new metrics + diskiops = Metric("diskiops") + # Calculate reads/writes per second + reads = metric.fields["reads"] - last.fields["reads"] + writes = metric.fields["writes"] - last.fields["writes"] + io = reads + writes + interval_seconds = ( metric.time - last.time ) / 1000000000 + diskiops.fields["readsps"] = ( reads / interval_seconds ) + diskiops.fields["writesps"] = ( writes / interval_seconds ) + diskiops.fields["iops"] = ( io / interval_seconds ) + diskiops.tags["name"] = disk_name + diskiops.tags["host"] = metric.tags["host"] + return [diskiops] + +# This could be aggregated to obtain max IOPS using: +# +# [[aggregators.basicstats]] +# namepass = ["diskiops"] +# period = "60s" +# drop_original = true +# stats = ["max"] +# +# diskiops,host=hostname,name=diska readsps_max=110,writesps_max=40,iops_max=110 1554079525000000000 From d5f79093f41b9da1793259aa3ba9d61aebf4f7ec Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 18 Mar 2021 12:30:24 -0700 Subject: [PATCH 316/761] update bigquery readme (#9017) --- plugins/outputs/bigquery/README.md | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/plugins/outputs/bigquery/README.md b/plugins/outputs/bigquery/README.md index 96e659956c815..9515711d50a75 100644 --- a/plugins/outputs/bigquery/README.md +++ b/plugins/outputs/bigquery/README.md @@ -1,20 +1,9 @@ -# BigQuery Google Cloud Output Plugin +# Google BigQuery Output Plugin -This plugin writes to the [Google Cloud BigQuery][bigquery] and requires [authentication][] -with Google Cloud using either a service account or user credentials +This plugin writes to the [Google Cloud BigQuery](https://cloud.google.com/bigquery) and requires [authentication](https://cloud.google.com/bigquery/docs/authentication) +with Google Cloud using either a service account or user credentials. -This plugin accesses APIs which are [chargeable][pricing]; you might incur -costs. - -Requires `project` to specify where BigQuery entries will be persisted. - -Requires `dataset` to specify under which BigQuery dataset the corresponding metrics tables reside. - -Each metric should have a corresponding table to BigQuery. -The schema of the table on BigQuery: -* Should contain the field `timestamp` which is the timestamp of a telegraph metrics -* Should contain the metric's tags with the same name and the column type should be set to string. -* Should contain the metric's fields with the same name and the column type should match the field type. +Be aware that this plugin accesses APIs that are [chargeable](https://cloud.google.com/bigquery/pricing) and might incur costs. ### Configuration @@ -32,6 +21,15 @@ The schema of the table on BigQuery: ## Character to replace hyphens on Metric name # replace_hyphen_to = "_" ``` +Requires `project` to specify where BigQuery entries will be persisted. + +Requires `dataset` to specify under which BigQuery dataset the corresponding metrics tables reside. + +Each metric should have a corresponding table to BigQuery. +The schema of the table on BigQuery: +* Should contain the field `timestamp` which is the timestamp of a telegraph metrics +* Should contain the metric's tags with the same name and the column type should be set to string. +* Should contain the metric's fields with the same name and the column type should match the field type. ### Restrictions From 30830c2ec254fb29a2751c7abeaa8fa321faead1 Mon Sep 17 00:00:00 2001 From: Dominic Tootell Date: Thu, 18 Mar 2021 20:43:39 +0000 Subject: [PATCH 317/761] Add content_encoding option to kinesis_consumer input (#8891) --- plugins/inputs/kinesis_consumer/README.md | 9 + .../kinesis_consumer/kinesis_consumer.go | 65 ++++++- .../kinesis_consumer/kinesis_consumer_test.go | 177 ++++++++++++++++++ 3 files changed, 250 insertions(+), 1 deletion(-) create mode 100644 plugins/inputs/kinesis_consumer/kinesis_consumer_test.go diff --git a/plugins/inputs/kinesis_consumer/README.md b/plugins/inputs/kinesis_consumer/README.md index 7896557ac6cf5..ad25940d58541 100644 --- a/plugins/inputs/kinesis_consumer/README.md +++ b/plugins/inputs/kinesis_consumer/README.md @@ -54,6 +54,15 @@ and creates metrics using one of the supported [input data formats][]. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## + ## The content encoding of the data from kinesis + ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip" + ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws + ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding + ## is done automatically by the golang sdk, as data is read from kinesis) + ## + # content_encoding = "identity" + ## Optional ## Configuration for a dynamodb checkpoint [inputs.kinesis_consumer.checkpoint_dynamodb] diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 6a3b1c8301a48..0a57955ce7f7b 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -1,8 +1,12 @@ package kinesis_consumer import ( + "bytes" + "compress/gzip" + "compress/zlib" "context" "fmt" + "io/ioutil" "math/big" "strings" "sync" @@ -38,6 +42,7 @@ type ( ShardIteratorType string `toml:"shard_iterator_type"` DynamoDB *DynamoDB `toml:"checkpoint_dynamodb"` MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + ContentEncoding string `toml:"content_encoding"` Log telegraf.Logger @@ -55,6 +60,8 @@ type ( recordsTex sync.Mutex wg sync.WaitGroup + processContentEncodingFunc processContent + lastSeqNum *big.Int } @@ -68,6 +75,8 @@ const ( defaultMaxUndeliveredMessages = 1000 ) +type processContent func([]byte) ([]byte, error) + // this is the largest sequence number allowed - https://docs.aws.amazon.com/kinesis/latest/APIReference/API_SequenceNumberRange.html var maxSeq = strToBint(strings.Repeat("9", 129)) @@ -118,6 +127,15 @@ var sampleConfig = ` ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## + ## The content encoding of the data from kinesis + ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip" + ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws + ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding + ## is done automatically by the golang sdk, as data is read from kinesis) + ## + # content_encoding = "identity" + ## Optional ## Configuration for a dynamodb checkpoint [inputs.kinesis_consumer.checkpoint_dynamodb] @@ -239,7 +257,11 @@ func (k *KinesisConsumer) Start(ac telegraf.Accumulator) error { } func (k *KinesisConsumer) onMessage(acc telegraf.TrackingAccumulator, r *consumer.Record) error { - metrics, err := k.parser.Parse(r.Data) + data, err := k.processContentEncodingFunc(r.Data) + if err != nil { + return err + } + metrics, err := k.parser.Parse(data) if err != nil { return err } @@ -334,6 +356,46 @@ func (k *KinesisConsumer) Set(streamName, shardID, sequenceNumber string) error return nil } +func processGzip(data []byte) ([]byte, error) { + zipData, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + defer zipData.Close() + return ioutil.ReadAll(zipData) +} + +func processZlib(data []byte) ([]byte, error) { + zlibData, err := zlib.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + defer zlibData.Close() + return ioutil.ReadAll(zlibData) +} + +func processNoOp(data []byte) ([]byte, error) { + return data, nil +} + +func (k *KinesisConsumer) configureProcessContentEncodingFunc() error { + switch k.ContentEncoding { + case "gzip": + k.processContentEncodingFunc = processGzip + case "zlib": + k.processContentEncodingFunc = processZlib + case "none", "identity", "": + k.processContentEncodingFunc = processNoOp + default: + return fmt.Errorf("unknown content encoding %q", k.ContentEncoding) + } + return nil +} + +func (k *KinesisConsumer) Init() error { + return k.configureProcessContentEncodingFunc() +} + type noopCheckpoint struct{} func (n noopCheckpoint) Set(string, string, string) error { return nil } @@ -347,6 +409,7 @@ func init() { ShardIteratorType: "TRIM_HORIZON", MaxUndeliveredMessages: defaultMaxUndeliveredMessages, lastSeqNum: maxSeq, + ContentEncoding: "identity", } }) } diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go new file mode 100644 index 0000000000000..b8becece054fc --- /dev/null +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go @@ -0,0 +1,177 @@ +package kinesis_consumer + +import ( + "encoding/base64" + "github.com/aws/aws-sdk-go/aws" + consumer "github.com/harlow/kinesis-consumer" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestKinesisConsumer_onMessage(t *testing.T) { + zlibBytpes, _ := base64.StdEncoding.DecodeString("eF5FjlFrgzAUhf9KuM+2aNB2zdsQ2xe3whQGW8qIeqdhaiSJK0P874u1Y4+Hc/jON0GHxoga858BgUF8fs5fzunHU5Jlj6cEPFDXHvXStGqsrsKWTapq44pW1SetxsF1a8qsRtGt0YyFKbUcrFT9UbYWtQH2frntkm/s7RInkNU6t9JpWNE5WBAFPo3CcHeg+9D703OziUOhCg6MQ/yakrspuZsyEjdYfsm+Jg2K1jZEfZLKQWUvFglylBobZXDLwSP8//EGpD4NNj7dUJpT6hQY3W33h/AhCt84zDBf5l/MDl08") + gzippedBytes, _ := base64.StdEncoding.DecodeString("H4sIAAFXNGAAA0WOUWuDMBSF/0q4z7Zo0HbN2xDbF7fCFAZbyoh6p2FqJIkrQ/zvi7Vjj4dz+M43QYfGiBrznwGBQXx+zl/O6cdTkmWPpwQ8UNce9dK0aqyuwpZNqmrjilbVJ63GwXVryqxG0a3RjIUptRysVP1Rtha1AfZ+ue2Sb+ztEieQ1Tq30mlY0TlYEAU+jcJwd6D70PvTc7OJQ6EKDoxD/JqSuym5mzISN1h+yb4mDYrWNkR9kspBZS8WCXKUGhtlcMvBI/z/8QakPg02Pt1QmlPqFBjdbfeH8CEK3zjMMF/mX0TaxZUpAQAA") + notZippedBytes := []byte(`{"messageType":"CONTROL_MESSAGE","owner":"CloudwatchLogs","logGroup":"","logStream":"", +"subscriptionFilters":[],"logEvents":[ + {"id":"","timestamp":1510254469274,"message":"{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"},"}, + {"id":"","timestamp":1510254469274,"message":"{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"}"} +]}`) + parser, _ := json.New(&json.Config{ + MetricName: "json_test", + Query: "logEvents", + StringFields: []string{"message"}, + }) + + type fields struct { + ContentEncoding string + parser parsers.Parser + records map[telegraf.TrackingID]string + } + type args struct { + r *consumer.Record + } + type expected struct { + numberOfMetrics int + messageContains string + } + tests := []struct { + name string + fields fields + args args + wantErr bool + expected expected + }{ + { + name: "test no compression", + fields: fields{ + ContentEncoding: "none", + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{Data: notZippedBytes, SequenceNumber: aws.String("anything")}, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 2, + }, + }, + { + name: "test no compression via empty string for ContentEncoding", + fields: fields{ + ContentEncoding: "", + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{Data: notZippedBytes, SequenceNumber: aws.String("anything")}, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 2, + }, + }, + { + name: "test no compression via identity ContentEncoding", + fields: fields{ + ContentEncoding: "identity", + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{Data: notZippedBytes, SequenceNumber: aws.String("anything")}, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 2, + }, + }, + { + name: "test no compression via no ContentEncoding", + fields: fields{ + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{Data: notZippedBytes, SequenceNumber: aws.String("anything")}, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 2, + }, + }, + { + name: "test gzip compression", + fields: fields{ + ContentEncoding: "gzip", + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{Data: gzippedBytes, SequenceNumber: aws.String("anything")}, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 1, + }, + }, + { + name: "test zlib compression", + fields: fields{ + ContentEncoding: "zlib", + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{Data: zlibBytpes, SequenceNumber: aws.String("anything")}, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 1, + }, + }, + } + + k := &KinesisConsumer{ + ContentEncoding: "notsupported", + } + err := k.Init() + assert.NotNil(t, err) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + k := &KinesisConsumer{ + ContentEncoding: tt.fields.ContentEncoding, + parser: tt.fields.parser, + records: tt.fields.records, + } + err := k.Init() + assert.Nil(t, err) + + acc := testutil.Accumulator{} + if err := k.onMessage(acc.WithTracking(tt.expected.numberOfMetrics), tt.args.r); (err != nil) != tt.wantErr { + t.Errorf("onMessage() error = %v, wantErr %v", err, tt.wantErr) + } + + assert.Equal(t, tt.expected.numberOfMetrics, len(acc.Metrics)) + + for _, metric := range acc.Metrics { + if logEventMessage, ok := metric.Fields["message"]; ok { + assert.Contains(t, logEventMessage.(string), tt.expected.messageContains) + } else { + t.Errorf("Expect logEvents to be present") + } + } + }) + } +} From 4dcc3c0ad73eccba2c48e68c7eccf5d7d5a784e4 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 18 Mar 2021 17:21:30 -0400 Subject: [PATCH 318/761] exec plugins should not truncate messages in debug mode (#8333) --- cmd/telegraf/telegraf.go | 4 ++- plugin.go | 4 ++- plugins/inputs/exec/exec.go | 41 ++++++++++++++----------------- plugins/inputs/exec/exec_test.go | 7 +++--- plugins/outputs/exec/README.md | 2 ++ plugins/outputs/exec/exec.go | 33 +++++++++++++++++++++++-- plugins/outputs/exec/exec_test.go | 3 ++- 7 files changed, 63 insertions(+), 31 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 7e0b4ec1ca67a..c6b2cc0eef41b 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -15,6 +15,7 @@ import ( "syscall" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" @@ -158,8 +159,9 @@ func runAgent(ctx context.Context, } // Setup logging as configured. + telegraf.Debug = ag.Config.Agent.Debug || *fDebug logConfig := logger.LogConfig{ - Debug: ag.Config.Agent.Debug || *fDebug, + Debug: telegraf.Debug, Quiet: ag.Config.Agent.Quiet || *fQuiet, LogTarget: ag.Config.Agent.LogTarget, Logfile: ag.Config.Agent.Logfile, diff --git a/plugin.go b/plugin.go index 0793fbb061115..f9dcaeac0344c 100644 --- a/plugin.go +++ b/plugin.go @@ -1,5 +1,7 @@ package telegraf +var Debug bool + // Initializer is an interface that all plugin types: Inputs, Outputs, // Processors, and Aggregators can optionally implement to initialize the // plugin. @@ -21,7 +23,7 @@ type PluginDescriber interface { Description() string } -// Logger defines an interface for logging. +// Logger defines an plugin-related interface for logging. type Logger interface { // Errorf logs an error message, patterned after log.Printf. Errorf(format string, args ...interface{}) diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 26e2ab0ba0301..fc498c799c966 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -3,6 +3,7 @@ package exec import ( "bytes" "fmt" + "io" "os/exec" "path/filepath" "runtime" @@ -39,12 +40,12 @@ const sampleConfig = ` data_format = "influx" ` -const MaxStderrBytes = 512 +const MaxStderrBytes int = 512 type Exec struct { - Commands []string - Command string - Timeout internal.Duration + Commands []string `toml:"commands"` + Command string `toml:"command"` + Timeout internal.Duration `toml:"timeout"` parser parsers.Parser @@ -85,16 +86,16 @@ func (c CommandRunner) Run( runErr := internal.RunTimeout(cmd, timeout) - out = removeCarriageReturns(out) - if stderr.Len() > 0 { - stderr = removeCarriageReturns(stderr) - stderr = truncate(stderr) + out = removeWindowsCarriageReturns(out) + if stderr.Len() > 0 && !telegraf.Debug { + stderr = removeWindowsCarriageReturns(stderr) + stderr = c.truncate(stderr) } return out.Bytes(), stderr.Bytes(), runErr } -func truncate(buf bytes.Buffer) bytes.Buffer { +func (c CommandRunner) truncate(buf bytes.Buffer) bytes.Buffer { // Limit the number of bytes. didTruncate := false if buf.Len() > MaxStderrBytes { @@ -114,27 +115,21 @@ func truncate(buf bytes.Buffer) bytes.Buffer { return buf } -// removeCarriageReturns removes all carriage returns from the input if the +// removeWindowsCarriageReturns removes all carriage returns from the input if the // OS is Windows. It does not return any errors. -func removeCarriageReturns(b bytes.Buffer) bytes.Buffer { +func removeWindowsCarriageReturns(b bytes.Buffer) bytes.Buffer { if runtime.GOOS == "windows" { var buf bytes.Buffer for { - byt, er := b.ReadBytes(0x0D) - end := len(byt) - if nil == er { - end-- + byt, err := b.ReadBytes(0x0D) + byt = bytes.TrimRight(byt, "\x0d") + if len(byt) > 0 { + _, _ = buf.Write(byt) } - if nil != byt { - buf.Write(byt[:end]) - } else { - break - } - if nil != er { - break + if err == io.EOF { + return buf } } - b = buf } return b } diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index ba1bc2078c9f7..8d77f0cef4757 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -259,9 +259,10 @@ func TestTruncate(t *testing.T) { }, } + c := CommandRunner{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - res := truncate(*tt.bufF()) + res := c.truncate(*tt.bufF()) require.Equal(t, tt.expF().Bytes(), res.Bytes()) }) } @@ -272,14 +273,14 @@ func TestRemoveCarriageReturns(t *testing.T) { // Test that all carriage returns are removed for _, test := range crTests { b := bytes.NewBuffer(test.input) - out := removeCarriageReturns(*b) + out := removeWindowsCarriageReturns(*b) assert.True(t, bytes.Equal(test.output, out.Bytes())) } } else { // Test that the buffer is returned unaltered for _, test := range crTests { b := bytes.NewBuffer(test.input) - out := removeCarriageReturns(*b) + out := removeWindowsCarriageReturns(*b) assert.True(t, bytes.Equal(test.input, out.Bytes())) } } diff --git a/plugins/outputs/exec/README.md b/plugins/outputs/exec/README.md index d82676a251e4e..7e19b9a8475c6 100644 --- a/plugins/outputs/exec/README.md +++ b/plugins/outputs/exec/README.md @@ -8,6 +8,8 @@ The command should be defined similar to docker's `exec` form: On non-zero exit stderr will be logged at error level. +For better performance, consider execd, which runs continuously. + ### Configuration ```toml diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index 813b6bb9f54a7..25637bd1984c0 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -6,6 +6,7 @@ import ( "io" "log" "os/exec" + "runtime" "time" "github.com/influxdata/telegraf" @@ -39,6 +40,10 @@ var sampleConfig = ` # data_format = "influx" ` +func (e *Exec) Init() error { + return nil +} + // SetSerializer sets the serializer for the output. func (e *Exec) SetSerializer(serializer serializers.Serializer) { e.serializer = serializer @@ -105,8 +110,13 @@ func (c *CommandRunner) Run(timeout time.Duration, command []string, buffer io.R return fmt.Errorf("%q timed out and was killed", command) } + s = removeWindowsCarriageReturns(s) if s.Len() > 0 { - log.Printf("E! [outputs.exec] Command error: %q", truncate(s)) + if !telegraf.Debug { + log.Printf("E! [outputs.exec] Command error: %q", c.truncate(s)) + } else { + log.Printf("D! [outputs.exec] Command error: %q", s) + } } if status, ok := internal.ExitStatus(err); ok { @@ -121,7 +131,7 @@ func (c *CommandRunner) Run(timeout time.Duration, command []string, buffer io.R return nil } -func truncate(buf bytes.Buffer) string { +func (c *CommandRunner) truncate(buf bytes.Buffer) string { // Limit the number of bytes. didTruncate := false if buf.Len() > maxStderrBytes { @@ -149,3 +159,22 @@ func init() { } }) } + +// removeWindowsCarriageReturns removes all carriage returns from the input if the +// OS is Windows. It does not return any errors. +func removeWindowsCarriageReturns(b bytes.Buffer) bytes.Buffer { + if runtime.GOOS == "windows" { + var buf bytes.Buffer + for { + byt, err := b.ReadBytes(0x0D) + byt = bytes.TrimRight(byt, "\x0d") + if len(byt) > 0 { + _, _ = buf.Write(byt) + } + if err == io.EOF { + return buf + } + } + } + return b +} diff --git a/plugins/outputs/exec/exec_test.go b/plugins/outputs/exec/exec_test.go index 5758c307b56e7..f57bf50a1b571 100644 --- a/plugins/outputs/exec/exec_test.go +++ b/plugins/outputs/exec/exec_test.go @@ -83,9 +83,10 @@ func TestTruncate(t *testing.T) { len: len("hola") + len("..."), }, } + c := CommandRunner{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s := truncate(*tt.buf) + s := c.truncate(*tt.buf) require.Equal(t, tt.len, len(s)) }) } From 56c92d99e0b477734ee66ec1cc6555284c504a2b Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Thu, 18 Mar 2021 17:31:31 -0400 Subject: [PATCH 319/761] Use endpoint to share artifacts (Tiger bot) (#9012) * Diagnose issue * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml --- .circleci/config.yml | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8ce079a379ed7..f14fa48e0a074 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -183,17 +183,12 @@ jobs: share-artifacts: executor: aws-cli/default steps: - - aws-cli/setup: - profile-name: TIGER - aws-access-key-id: TIGER_AWS_ACCESS_KEY_ID - aws-secret-access-key: TIGER_AWS_SECRET_ACCESS_KEY - aws-region: TIGER_AWS_DEFAULT_REGION - run: command: | PR=${CIRCLE_PULL_REQUEST##*/} printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" - aws lambda invoke --function-name telegraf-tiger-prod-share_artifacts --profile TIGER --cli-binary-format raw-in-base64-out --payload "$payload" output.json - + curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" + workflows: version: 2 check: From 30c933cd682921f035e53794791137e8de27095e Mon Sep 17 00:00:00 2001 From: Kodai Sakabe Date: Fri, 19 Mar 2021 06:42:06 +0900 Subject: [PATCH 320/761] Update README.md (#9016) --- plugins/outputs/azure_monitor/README.md | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/plugins/outputs/azure_monitor/README.md b/plugins/outputs/azure_monitor/README.md index fbb49358665a5..6f2abb97ec3ed 100644 --- a/plugins/outputs/azure_monitor/README.md +++ b/plugins/outputs/azure_monitor/README.md @@ -54,15 +54,7 @@ written as a dimension on each Azure Monitor metric. [enable system-assigned managed identity][enable msi]. 2. Use a region that supports Azure Monitor Custom Metrics, For regions with Custom Metrics support, an endpoint will be available with - the format `https://.monitoring.azure.com`. The following regions - are currently known to be supported: - - East US (eastus) - - West US 2 (westus2) - - South Central US (southcentralus) - - West Central US (westcentralus) - - North Europe (northeurope) - - West Europe (westeurope) - - Southeast Asia (southeastasia) + the format `https://.monitoring.azure.com`. [resource provider]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services [enable msi]: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/qs-configure-portal-windows-vm From aa88896829729785ed6a9fade0ab9982cc6d1086 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Fri, 19 Mar 2021 10:56:39 -0700 Subject: [PATCH 321/761] update new plugins in changelog (#8991) --- CHANGELOG.md | 51 +++++++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 523e1d7ed4795..ce018f3fbec94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,7 +30,6 @@ - [#8887](https://github.com/influxdata/telegraf/pull/8887) `inputs.procstat` Add PPID field to procstat input plugin - [#8852](https://github.com/influxdata/telegraf/pull/8852) `processors.starlark` Add Starlark script for estimating Line Protocol cardinality - - [#8828](https://github.com/influxdata/telegraf/pull/8828) `serializers.msgpack` Add MessagePack output data format - [#8915](https://github.com/influxdata/telegraf/pull/8915) `inputs.cloudwatch` add proxy - [#8910](https://github.com/influxdata/telegraf/pull/8910) `agent` Display error message on badly formatted config string array (eg. namepass) - [#8785](https://github.com/influxdata/telegraf/pull/8785) `inputs.diskio` Non systemd support with unittest @@ -48,37 +47,37 @@ - [#8950](https://github.com/influxdata/telegraf/pull/8950) `inputs.teamspeak` Teamspeak input plugin query clients - [#8849](https://github.com/influxdata/telegraf/pull/8849) `inputs.sqlserver` Filter data out from system databases for Azure SQL DB only -#### New Input Plugins - - - [#8834](https://github.com/influxdata/telegraf/pull/8834) Input plugin for RavenDB - - [#8525](https://github.com/influxdata/telegraf/pull/8525) Add CSGO SRCDS input plugin - - [#8751](https://github.com/influxdata/telegraf/pull/8751) Adding a new directory monitor input plugin. - - [#6653](https://github.com/influxdata/telegraf/pull/6653) Add Beat input plugin - - [#4615](https://github.com/influxdata/telegraf/pull/4615) Add NFS client input - - [#8931](https://github.com/influxdata/telegraf/pull/8931) Add XML parser using XPath queries - -#### New Output Plugins - - - [#8398](https://github.com/influxdata/telegraf/pull/8398) Sensu Go Output Plugin for Telegraf - - [#8450](https://github.com/influxdata/telegraf/pull/8450) plugin: output loki - - [#6714](https://github.com/influxdata/telegraf/pull/6714) SignalFx Output - - [#8634](https://github.com/influxdata/telegraf/pull/8634) Bigquery output +#### New Inputs + - [Beat Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/beat) - Contributed by @nferch + - [CS:GO Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/csgo) - Contributed by @oofdog + - [Directory Monitoring Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/directory_monitor) - Contributed by @InfluxData + - [RavenDB Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ravendb) - Contributed by @ml054 and @bartoncasey + - [NFS Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nfsclient) - Contributed by @pmoranga -#### New Aggregator Plugins +#### New Outputs + - [Grafana Loki Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) - Contributed by @Eraac + - [Google BigQuery Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) - Contributed by @gkatzioura + - [Sensu Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/sensu) - Contributed by @calebhailey + - [SignalFX Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/signalfx) - Contributed by @keitwb - - [#3762](https://github.com/influxdata/telegraf/pull/3762) Add Derivative Aggregator Plugin - - [#8594](https://github.com/influxdata/telegraf/pull/8594) Add quantile aggregator plugin +#### New Aggregators + - [Derivative Aggregator Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/derivative)- Contributed by @KarstenSchnitter + - [Quantile Aggregator Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/quantile) - Contributed by @srebhan -#### New Processor Plugins +#### New Processors + - [AWS EC2 Metadata Processor Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/processors/aws/ec2) - Contributed by @pmalek-sumo - - [#8707](https://github.com/influxdata/telegraf/pull/8707) AWS EC2 metadata processor Using StreamingProcessor +#### New Parsers + - [XML Parser Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/xml) - Contributed by @srebhan + +#### New Serializers + - [MessagePack Serializer Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/serializers/msgpack) - Contributed by @dialogbox #### New External Plugins - - - [#8897](https://github.com/influxdata/telegraf/pull/8897) add SMCIPMITool input to external plugin list - - [#8898](https://github.com/influxdata/telegraf/pull/8898) Add Plex Webhooks external plugin - - + - [GeoIP Processor Plugin ](https://github.com/a-bali/telegraf-geoip) - Contributed by @a-bali + - [Plex Webhook Input Plugin](https://github.com/russorat/telegraf-webhooks-plex) - Contributed by @russorat + - [SMCIPMITool Input Plugin](https://github.com/jhpope/smc_ipmi) - Contributed by @jhpope + ## v1.17.3 [2021-02-17] #### Bugfixes From 24c8fb20dc83f3139be47ef4210782c1563d6f76 Mon Sep 17 00:00:00 2001 From: reimda Date: Fri, 19 Mar 2021 12:04:20 -0600 Subject: [PATCH 322/761] Update PR template to encourage linking to issues (#9023) --- .github/PULL_REQUEST_TEMPLATE.md | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 4b2eaad4fbab5..822d809c46255 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,4 +1,25 @@ ### Required for all PRs: -- [ ] Associated README.md updated. -- [ ] Has appropriate unit tests. + + +- [ ] Updated associated README.md. +- [ ] Wrote appropriate unit tests. + + + +resolves # + + From 74a1acd8144b6929171efffdf44a1135442b1588 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Mon, 22 Mar 2021 18:21:36 +0100 Subject: [PATCH 323/761] Linter fixes - revive:unused-parameter, unparam, varcheck and unused (#8984) * Linter fixes - revive:unused-parameter and unparam * Linter fixes - revive:unused-parameter and unparam * Linter fixes - revive:unused-parameter and unparam * "nolint"'s removed * Fixes for "varcheck" and "unused" added. * Fixes for "varcheck" and "unused" added. * Fixes for "varcheck" and "unused" added. * Fixes for "varcheck" and "unused" added. * Fixes for "varcheck" and "unused" added. * Fixes for "varcheck" and "unused" added. * Fixes for "varcheck" and "unused" added. Co-authored-by: Pawel Zak --- .golangci.yml | 58 +----- agent/agent.go | 123 +++-------- agent/tick.go | 2 - cmd/telegraf/telegraf.go | 22 +- cmd/telegraf/telegraf_posix.go | 4 +- cmd/telegraf/telegraf_windows.go | 22 +- config/config.go | 16 +- internal/goplugin/noplugin.go | 2 +- internal/internal_test.go | 2 +- metric/metric.go | 4 +- metric/tracking_test.go | 15 +- models/buffer.go | 19 -- models/running_input_test.go | 6 +- models/running_output.go | 1 - models/running_output_test.go | 45 ++-- plugins/aggregators/merge/merge.go | 1 - plugins/aggregators/quantile/algorithms.go | 4 +- plugins/common/shim/config_test.go | 2 +- plugins/common/shim/goshim_test.go | 2 +- plugins/common/shim/input_test.go | 4 +- plugins/inputs/aerospike/aerospike.go | 8 - plugins/inputs/aliyuncms/aliyuncms_test.go | 2 +- plugins/inputs/apcupsd/apcupsd_test.go | 2 +- plugins/inputs/cassandra/cassandra_test.go | 17 +- plugins/inputs/ceph/ceph_test.go | 2 +- plugins/inputs/chrony/chrony_test.go | 2 +- .../cisco_telemetry_mdt.go | 60 +++--- .../cisco_telemetry_util.go | 28 +-- plugins/inputs/clickhouse/clickhouse_test.go | 2 +- plugins/inputs/cloud_pubsub/pubsub.go | 2 +- plugins/inputs/cloud_pubsub/pubsub_test.go | 2 +- .../inputs/cloud_pubsub/subscription_stub.go | 2 +- .../inputs/cloud_pubsub_push/pubsub_push.go | 14 +- .../cloud_pubsub_push/pubsub_push_test.go | 8 +- plugins/inputs/cloudwatch/cloudwatch.go | 14 +- plugins/inputs/cloudwatch/cloudwatch_test.go | 8 +- plugins/inputs/cpu/cpu_test.go | 101 ++++----- plugins/inputs/dcos/client.go | 9 +- plugins/inputs/dcos/creds.go | 8 +- plugins/inputs/dcos/dcos.go | 5 +- .../directory_monitor/directory_monitor.go | 10 +- .../directory_monitor_test.go | 7 - plugins/inputs/disque/disque.go | 7 +- plugins/inputs/docker/docker.go | 11 - plugins/inputs/docker/docker_test.go | 3 +- plugins/inputs/docker_log/docker_log.go | 14 +- plugins/inputs/dovecot/dovecot.go | 2 - plugins/inputs/elasticsearch/elasticsearch.go | 12 +- .../elasticsearch/elasticsearch_test.go | 32 +-- .../eventhub_consumer/eventhub_consumer.go | 12 +- plugins/inputs/exec/exec_test.go | 19 +- plugins/inputs/execd/execd_posix.go | 2 +- plugins/inputs/execd/shim/goshim.go | 1 - plugins/inputs/execd/shim/shim_test.go | 4 +- plugins/inputs/fail2ban/fail2ban_test.go | 2 +- plugins/inputs/fluentd/fluentd_test.go | 2 - plugins/inputs/graylog/graylog.go | 1 - plugins/inputs/hddtemp/hddtemp_test.go | 2 +- .../http_listener_v2/http_listener_v2.go | 5 - .../http_response/http_response_test.go | 6 +- .../intel_powerstat/intel_powerstat_test.go | 6 +- plugins/inputs/intel_rdt/publisher.go | 1 - plugins/inputs/ipmi_sensor/connection_test.go | 5 - plugins/inputs/ipmi_sensor/ipmi_test.go | 4 +- plugins/inputs/jenkins/jenkins.go | 13 -- plugins/inputs/jolokia/jolokia_test.go | 19 +- plugins/inputs/jolokia2/jolokia_test.go | 18 +- .../openconfig_telemetry.go | 13 +- .../openconfig_telemetry_test.go | 8 +- .../inputs/kafka_consumer/kafka_consumer.go | 7 +- .../kafka_consumer/kafka_consumer_test.go | 8 +- .../kafka_consumer_legacy.go | 2 +- .../kafka_consumer_legacy_integration_test.go | 4 +- .../kinesis_consumer/kinesis_consumer.go | 1 - plugins/inputs/kube_inventory/client_test.go | 12 -- plugins/inputs/kube_inventory/daemonset.go | 9 +- .../inputs/kube_inventory/daemonset_test.go | 10 +- plugins/inputs/kube_inventory/deployment.go | 9 +- .../inputs/kube_inventory/deployment_test.go | 10 +- plugins/inputs/kube_inventory/statefulset.go | 9 +- .../inputs/kube_inventory/statefulset_test.go | 10 +- plugins/inputs/kubernetes/kubernetes.go | 15 +- plugins/inputs/lanz/lanz.go | 2 +- plugins/inputs/logparser/logparser.go | 2 +- plugins/inputs/lustre2/lustre2.go | 1 - plugins/inputs/mesos/mesos.go | 43 ---- plugins/inputs/minecraft/client.go | 37 ++-- plugins/inputs/minecraft/client_test.go | 8 +- plugins/inputs/minecraft/minecraft.go | 13 +- plugins/inputs/monit/monit_test.go | 2 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 6 +- .../mqtt_consumer/mqtt_consumer_test.go | 6 +- plugins/inputs/nats_consumer/nats_consumer.go | 2 +- .../inputs/net_response/net_response_test.go | 10 +- plugins/inputs/nfsclient/nfsclient.go | 22 +- plugins/inputs/nfsclient/nfsclient_test.go | 8 +- .../nginx_plus_api_metrics_test.go | 28 +-- plugins/inputs/nsd/nsd.go | 4 +- plugins/inputs/nsd/nsd_test.go | 10 +- plugins/inputs/nsq_consumer/nsq_consumer.go | 4 +- plugins/inputs/nstat/nstat.go | 51 ++--- plugins/inputs/nstat/nstat_test.go | 13 +- plugins/inputs/openntpd/openntpd.go | 9 +- plugins/inputs/openntpd/openntpd_test.go | 20 +- plugins/inputs/opensmtpd/opensmtpd.go | 3 +- plugins/inputs/opensmtpd/opensmtpd_test.go | 10 +- .../inputs/openweathermap/openweathermap.go | 9 +- plugins/inputs/pgbouncer/pgbouncer.go | 6 +- plugins/inputs/phpfpm/fcgi.go | 6 +- plugins/inputs/phpfpm/fcgi_test.go | 20 +- plugins/inputs/phpfpm/phpfpm.go | 4 +- plugins/inputs/phpfpm/phpfpm_test.go | 2 +- plugins/inputs/ping/ping_test.go | 16 +- .../powerdns_recursor_test.go | 2 - plugins/inputs/procstat/process.go | 2 +- plugins/inputs/procstat/procstat.go | 15 +- plugins/inputs/procstat/procstat_test.go | 40 ++-- .../inputs/procstat/win_service_notwindows.go | 2 +- plugins/inputs/prometheus/kubernetes.go | 5 - plugins/inputs/prometheus/kubernetes_test.go | 4 - plugins/inputs/prometheus/parser_test.go | 61 ------ plugins/inputs/prometheus/prometheus.go | 3 +- plugins/inputs/proxmox/proxmox.go | 11 +- plugins/inputs/proxmox/proxmox_test.go | 2 +- plugins/inputs/redis/redis_test.go | 2 +- plugins/inputs/rethinkdb/rethinkdb.go | 4 +- .../riemann_listener/riemann_listener_test.go | 8 +- plugins/inputs/sensors/sensors_test.go | 2 +- plugins/inputs/sflow/sflow.go | 2 - plugins/inputs/sflow/types.go | 10 - plugins/inputs/snmp/snmp_mocks_test.go | 2 +- plugins/inputs/snmp_legacy/snmp_legacy.go | 3 +- plugins/inputs/snmp_trap/snmp_trap.go | 4 +- plugins/inputs/snmp_trap/snmp_trap_test.go | 194 ++++++++++-------- .../inputs/stackdriver/stackdriver_test.go | 4 +- plugins/inputs/statsd/statsd.go | 6 +- plugins/inputs/suricata/suricata.go | 2 +- plugins/inputs/suricata/suricata_testutil.go | 38 ---- plugins/inputs/syslog/nontransparent_test.go | 15 +- plugins/inputs/syslog/octetcounting_test.go | 11 +- plugins/inputs/sysstat/sysstat_test.go | 2 +- plugins/inputs/system/mock_PS.go | 4 +- plugins/inputs/system/ps.go | 8 - plugins/inputs/tail/tail.go | 5 +- plugins/inputs/tcp_listener/tcp_listener.go | 4 +- .../inputs/tcp_listener/tcp_listener_test.go | 2 +- plugins/inputs/udp_listener/udp_listener.go | 4 +- .../inputs/udp_listener/udp_listener_test.go | 2 +- plugins/inputs/unbound/unbound.go | 49 +++-- plugins/inputs/unbound/unbound_test.go | 15 +- plugins/inputs/varnish/varnish_test.go | 14 +- plugins/inputs/vsphere/client.go | 37 ++-- plugins/inputs/vsphere/endpoint.go | 12 +- plugins/inputs/vsphere/vsphere.go | 2 +- plugins/inputs/vsphere/vsphere_test.go | 6 +- .../webhooks/mandrill/mandrill_webhooks.go | 2 +- plugins/inputs/zfs/zfs.go | 6 +- plugins/inputs/zfs/zfs_linux_test.go | 61 ------ .../stress_test_write/stress_test_write.go | 2 - plugins/inputs/zipkin/codec/codec_test.go | 3 - plugins/inputs/zipkin/zipkin.go | 2 +- plugins/outputs/amqp/amqp_test.go | 6 - .../application_insights_test.go | 9 - plugins/outputs/cloud_pubsub/topic_gcp.go | 2 - plugins/outputs/cratedb/cratedb_test.go | 1 + plugins/outputs/discard/discard.go | 2 +- plugins/outputs/graylog/graylog.go | 14 +- plugins/outputs/health/health.go | 2 +- plugins/outputs/influxdb/udp.go | 2 +- plugins/outputs/influxdb/udp_test.go | 2 +- plugins/outputs/influxdb_v2/influxdb_test.go | 2 +- plugins/outputs/kafka/kafka.go | 3 - plugins/outputs/kafka/kafka_test.go | 2 +- plugins/outputs/newrelic/newrelic_test.go | 3 - .../outputs/prometheus_client/v2/collector.go | 2 +- plugins/outputs/signalfx/signalfx.go | 3 - plugins/outputs/signalfx/signalfx_test.go | 8 +- plugins/outputs/sumologic/sumologic.go | 12 +- plugins/outputs/sumologic/sumologic_test.go | 21 +- plugins/outputs/warp10/warp10.go | 4 - .../yandex_cloud_monitoring_test.go | 7 +- plugins/parsers/csv/parser.go | 15 +- plugins/parsers/dropwizard/parser.go | 5 - plugins/parsers/influx/handler.go | 1 - plugins/parsers/influx/machine_test.go | 18 +- plugins/parsers/nagios/parser_test.go | 13 -- plugins/parsers/prometheus/parser_test.go | 3 - plugins/parsers/wavefront/element.go | 17 +- plugins/parsers/wavefront/scanner.go | 5 - plugins/processors/aws/ec2/ec2.go | 2 +- plugins/processors/dedup/dedup_test.go | 34 +-- plugins/processors/execd/execd.go | 2 +- plugins/processors/ifname/ifname.go | 2 +- .../processors/reverse_dns/rdnscache_test.go | 4 +- plugins/processors/reverse_dns/reversedns.go | 2 +- plugins/processors/starlark/builtins.go | 10 +- plugins/processors/starlark/logging.go | 4 +- plugins/processors/starlark/starlark.go | 6 +- plugins/processors/topk/topk_test.go | 34 ++- .../serializers/splunkmetric/splunkmetric.go | 18 +- plugins/serializers/wavefront/wavefront.go | 6 +- selfstat/stat.go | 1 - selfstat/timingStat.go | 1 - testutil/accumulator.go | 22 +- 204 files changed, 802 insertions(+), 1600 deletions(-) delete mode 100644 plugins/inputs/suricata/suricata_testutil.go diff --git a/.golangci.yml b/.golangci.yml index 8eeb577c52ff0..9f01ba6881010 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,4 +1,5 @@ linters: + disable-all: true enable: - bodyclose - dogsled @@ -18,55 +19,6 @@ linters: - unparam - unused - varcheck - disable: - - asciicheck - - deadcode - - depguard - - dupl - - exhaustive - - funlen - - gci - - gochecknoglobals - - gochecknoinits - - gocognit - - goconst - - gocritic - - gocyclo - - godot - - godox - - goerr113 - - gofmt - - gofumpt - - goheader - - goimports - - golint - - gomnd - - gomodguard - - gosec - - ifshort - - interfacer - - lll - - makezero - - maligned - - megacheck - - misspell - - nestif - - nlreturn - - noctx - - nolintlint - - paralleltest - - prealloc - - rowserrcheck - - scopelint - - structcheck - - stylecheck - - testpackage - - thelper - - tparallel - - wastedassign - - whitespace - - wrapcheck - - wsl linters-settings: revive: @@ -131,7 +83,7 @@ linters-settings: run: # timeout for analysis, e.g. 30s, 5m, default is 1m - timeout: 5m + timeout: 10m # which dirs to skip: issues from them won't be reported; # can use regexp here: generated.*, regexp is applied on full path; @@ -169,10 +121,8 @@ issues: linters: - govet - # Show only new issues created after git revision `HEAD~` - # Great for CI setups - # It's not practical to fix all existing issues at the moment of integration: much better to not allow issues in new code. - # new-from-rev: "HEAD~" + - path: _test\.go + text: "parameter.*seems to be a control flag, avoid control coupling" output: format: tab diff --git a/agent/agent.go b/agent/agent.go index 3d40e74a1bcdf..96e8596b851b2 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -126,10 +126,7 @@ func (a *Agent) Run(ctx context.Context) error { } } - next, au, err = a.startAggregators(aggC, next, a.Config.Aggregators) - if err != nil { - return err - } + next, au = a.startAggregators(aggC, next, a.Config.Aggregators) } var pu []*processorUnit @@ -149,29 +146,20 @@ func (a *Agent) Run(ctx context.Context) error { wg.Add(1) go func() { defer wg.Done() - err := a.runOutputs(ou) - if err != nil { - log.Printf("E! [agent] Error running outputs: %v", err) - } + a.runOutputs(ou) }() if au != nil { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(apu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(apu) }() wg.Add(1) go func() { defer wg.Done() - err := a.runAggregators(startTime, au) - if err != nil { - log.Printf("E! [agent] Error running aggregators: %v", err) - } + a.runAggregators(startTime, au) }() } @@ -179,20 +167,14 @@ func (a *Agent) Run(ctx context.Context) error { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(pu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(pu) }() } wg.Add(1) go func() { defer wg.Done() - err := a.runInputs(ctx, startTime, iu) - if err != nil { - log.Printf("E! [agent] Error running inputs: %v", err) - } + a.runInputs(ctx, startTime, iu) }() wg.Wait() @@ -288,7 +270,7 @@ func (a *Agent) runInputs( ctx context.Context, startTime time.Time, unit *inputUnit, -) error { +) { var wg sync.WaitGroup for _, input := range unit.inputs { // Overwrite agent interval if this plugin has its own. @@ -334,8 +316,6 @@ func (a *Agent) runInputs( close(unit.dst) log.Printf("D! [agent] Input channel closed") - - return nil } // testStartInputs is a variation of startInputs for use in --test and --once @@ -344,7 +324,7 @@ func (a *Agent) runInputs( func (a *Agent) testStartInputs( dst chan<- telegraf.Metric, inputs []*models.RunningInput, -) (*inputUnit, error) { +) *inputUnit { log.Printf("D! [agent] Starting service inputs") unit := &inputUnit{ @@ -369,7 +349,7 @@ func (a *Agent) testStartInputs( unit.inputs = append(unit.inputs, input) } - return unit, nil + return unit } // testRunInputs is a variation of runInputs for use in --test and --once mode. @@ -378,7 +358,7 @@ func (a *Agent) testRunInputs( ctx context.Context, wait time.Duration, unit *inputUnit, -) error { +) { var wg sync.WaitGroup nul := make(chan telegraf.Metric) @@ -434,7 +414,6 @@ func (a *Agent) testRunInputs( close(unit.dst) log.Printf("D! [agent] Input channel closed") - return nil } // stopServiceInputs stops all service inputs. @@ -553,7 +532,7 @@ func (a *Agent) startProcessors( // closed and all metrics have been written. func (a *Agent) runProcessors( units []*processorUnit, -) error { +) { var wg sync.WaitGroup for _, unit := range units { wg.Add(1) @@ -573,8 +552,6 @@ func (a *Agent) runProcessors( }(unit) } wg.Wait() - - return nil } // startAggregators sets up the aggregator unit and returns the source channel. @@ -582,7 +559,7 @@ func (a *Agent) startAggregators( aggC chan<- telegraf.Metric, outputC chan<- telegraf.Metric, aggregators []*models.RunningAggregator, -) (chan<- telegraf.Metric, *aggregatorUnit, error) { +) (chan<- telegraf.Metric, *aggregatorUnit) { src := make(chan telegraf.Metric, 100) unit := &aggregatorUnit{ src: src, @@ -590,7 +567,7 @@ func (a *Agent) startAggregators( outputC: outputC, aggregators: aggregators, } - return src, unit, nil + return src, unit } // runAggregators beings aggregating metrics and runs until the source channel @@ -598,7 +575,7 @@ func (a *Agent) startAggregators( func (a *Agent) runAggregators( startTime time.Time, unit *aggregatorUnit, -) error { +) { ctx, cancel := context.WithCancel(context.Background()) // Before calling Add, initialize the aggregation window. This ensures @@ -650,8 +627,6 @@ func (a *Agent) runAggregators( // processor chain will close the outputC when it finishes processing. close(unit.aggC) log.Printf("D! [agent] Aggregator channel closed") - - return nil } func updateWindow(start time.Time, roundInterval bool, period time.Duration) (time.Time, time.Time) { @@ -744,7 +719,7 @@ func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) // written one last time and dropped if unsuccessful. func (a *Agent) runOutputs( unit *outputUnit, -) error { +) { var wg sync.WaitGroup // Start flush loop @@ -793,8 +768,6 @@ func (a *Agent) runOutputs( log.Println("I! [agent] Stopping running outputs") stopRunningOutputs(unit.outputs) - - return nil } // flushLoop runs an output's flush function periodically until the context is @@ -924,10 +897,7 @@ func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- tel } } - next, au, err = a.startAggregators(procC, next, a.Config.Aggregators) - if err != nil { - return err - } + next, au = a.startAggregators(procC, next, a.Config.Aggregators) } var pu []*processorUnit @@ -938,30 +908,20 @@ func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- tel } } - iu, err := a.testStartInputs(next, a.Config.Inputs) - if err != nil { - return err - } + iu := a.testStartInputs(next, a.Config.Inputs) var wg sync.WaitGroup - if au != nil { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(apu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(apu) }() wg.Add(1) go func() { defer wg.Done() - err := a.runAggregators(startTime, au) - if err != nil { - log.Printf("E! [agent] Error running aggregators: %v", err) - } + a.runAggregators(startTime, au) }() } @@ -969,20 +929,14 @@ func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- tel wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(pu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(pu) }() } wg.Add(1) go func() { defer wg.Done() - err := a.testRunInputs(ctx, wait, iu) - if err != nil { - log.Printf("E! [agent] Error running inputs: %v", err) - } + a.testRunInputs(ctx, wait, iu) }() wg.Wait() @@ -1042,10 +996,7 @@ func (a *Agent) once(ctx context.Context, wait time.Duration) error { } } - next, au, err = a.startAggregators(procC, next, a.Config.Aggregators) - if err != nil { - return err - } + next, au = a.startAggregators(procC, next, a.Config.Aggregators) } var pu []*processorUnit @@ -1056,38 +1007,26 @@ func (a *Agent) once(ctx context.Context, wait time.Duration) error { } } - iu, err := a.testStartInputs(next, a.Config.Inputs) - if err != nil { - return err - } + iu := a.testStartInputs(next, a.Config.Inputs) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() - err := a.runOutputs(ou) - if err != nil { - log.Printf("E! [agent] Error running outputs: %v", err) - } + a.runOutputs(ou) }() if au != nil { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(apu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(apu) }() wg.Add(1) go func() { defer wg.Done() - err := a.runAggregators(startTime, au) - if err != nil { - log.Printf("E! [agent] Error running aggregators: %v", err) - } + a.runAggregators(startTime, au) }() } @@ -1095,20 +1034,14 @@ func (a *Agent) once(ctx context.Context, wait time.Duration) error { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(pu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(pu) }() } wg.Add(1) go func() { defer wg.Done() - err := a.testRunInputs(ctx, wait, iu) - if err != nil { - log.Printf("E! [agent] Error running inputs: %v", err) - } + a.testRunInputs(ctx, wait, iu) }() wg.Wait() diff --git a/agent/tick.go b/agent/tick.go index 6afef2fa70edd..9696cd2c18c16 100644 --- a/agent/tick.go +++ b/agent/tick.go @@ -9,8 +9,6 @@ import ( "github.com/influxdata/telegraf/internal" ) -type empty struct{} - type Ticker interface { Elapsed() <-chan time.Time Stop() diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index c6b2cc0eef41b..459f81a90dc7c 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -61,11 +61,22 @@ var fProcessorFilters = flag.String("processor-filter", "", "filter the processors to enable, separator is :") var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf --usage mysql'") + +//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows var fService = flag.String("service", "", "operate on the service (windows only)") -var fServiceName = flag.String("service-name", "telegraf", "service name (windows only)") -var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", "service display name (windows only)") -var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)") + +//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows +var fServiceName = flag.String("service-name", "telegraf", + "service name (windows only)") + +//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows +var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", + "service display name (windows only)") + +//nolint:varcheck,unused // False positive - this var is used for non-default build tag: windows +var fRunAsConsole = flag.Bool("console", false, + "run as console application (windows only)") var fPlugins = flag.String("plugin-directory", "", "path to directory containing external plugins") var fRunOnce = flag.Bool("once", false, "run one gather and exit") @@ -81,14 +92,11 @@ var stop chan struct{} func reloadLoop( inputFilters []string, outputFilters []string, - aggregatorFilters []string, - processorFilters []string, ) { reload := make(chan bool, 1) reload <- true for <-reload { reload <- false - ctx, cancel := context.WithCancel(context.Background()) signals := make(chan os.Signal, 1) @@ -363,7 +371,5 @@ func main() { run( inputFilters, outputFilters, - aggregatorFilters, - processorFilters, ) } diff --git a/cmd/telegraf/telegraf_posix.go b/cmd/telegraf/telegraf_posix.go index ca28622f16752..a2d6b1e4e365c 100644 --- a/cmd/telegraf/telegraf_posix.go +++ b/cmd/telegraf/telegraf_posix.go @@ -2,12 +2,10 @@ package main -func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { +func run(inputFilters, outputFilters []string) { stop = make(chan struct{}) reloadLoop( inputFilters, outputFilters, - aggregatorFilters, - processorFilters, ) } diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go index 52b9c43b99a2f..d04bfc34c7555 100644 --- a/cmd/telegraf/telegraf_windows.go +++ b/cmd/telegraf/telegraf_windows.go @@ -11,7 +11,7 @@ import ( "github.com/kardianos/service" ) -func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { +func run(inputFilters, outputFilters []string) { // Register the eventlog logging target for windows. logger.RegisterEventLogger(*fServiceName) @@ -19,25 +19,19 @@ func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []stri runAsWindowsService( inputFilters, outputFilters, - aggregatorFilters, - processorFilters, ) } else { stop = make(chan struct{}) reloadLoop( inputFilters, outputFilters, - aggregatorFilters, - processorFilters, ) } } type program struct { - inputFilters []string - outputFilters []string - aggregatorFilters []string - processorFilters []string + inputFilters []string + outputFilters []string } func (p *program) Start(s service.Service) error { @@ -49,8 +43,6 @@ func (p *program) run() { reloadLoop( p.inputFilters, p.outputFilters, - p.aggregatorFilters, - p.processorFilters, ) } func (p *program) Stop(s service.Service) error { @@ -58,7 +50,7 @@ func (p *program) Stop(s service.Service) error { return nil } -func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { +func runAsWindowsService(inputFilters, outputFilters []string) { programFiles := os.Getenv("ProgramFiles") if programFiles == "" { // Should never happen programFiles = "C:\\Program Files" @@ -72,10 +64,8 @@ func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, process } prg := &program{ - inputFilters: inputFilters, - outputFilters: outputFilters, - aggregatorFilters: aggregatorFilters, - processorFilters: processorFilters, + inputFilters: inputFilters, + outputFilters: outputFilters, } s, err := service.New(prg, svcConfig) if err != nil { diff --git a/config/config.go b/config/config.go index 55d101cbf8754..b7c11a95ff8c4 100644 --- a/config/config.go +++ b/config/config.go @@ -1006,14 +1006,14 @@ func (c *Config) addProcessor(name string, table *ast.Table) error { return err } - rf, err := c.newRunningProcessor(creator, processorConfig, name, table) + rf, err := c.newRunningProcessor(creator, processorConfig, table) if err != nil { return err } c.Processors = append(c.Processors, rf) // save a copy for the aggregator - rf, err = c.newRunningProcessor(creator, processorConfig, name, table) + rf, err = c.newRunningProcessor(creator, processorConfig, table) if err != nil { return err } @@ -1025,7 +1025,6 @@ func (c *Config) addProcessor(name string, table *ast.Table) error { func (c *Config) newRunningProcessor( creator processors.StreamingCreator, processorConfig *models.ProcessorConfig, - name string, table *ast.Table, ) (*models.RunningProcessor, error) { processor := creator() @@ -1058,7 +1057,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error { // arbitrary types of output, so build the serializer and set it. switch t := output.(type) { case serializers.SerializerOutput: - serializer, err := c.buildSerializer(name, table) + serializer, err := c.buildSerializer(table) if err != nil { return err } @@ -1074,8 +1073,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error { return err } - ro := models.NewRunningOutput(name, output, outputConfig, - c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) + ro := models.NewRunningOutput(output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) c.Outputs = append(c.Outputs, ro) return nil } @@ -1377,8 +1375,8 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, // buildSerializer grabs the necessary entries from the ast.Table for creating // a serializers.Serializer object, and creates it, which can then be added onto // an Output object. -func (c *Config) buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) { - sc := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)} +func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) { + sc := &serializers.Config{TimestampUnits: 1 * time.Second} c.getFieldString(tbl, "data_format", &sc.DataFormat) @@ -1449,7 +1447,7 @@ func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, return oc, nil } -func (c *Config) missingTomlField(typ reflect.Type, key string) error { +func (c *Config) missingTomlField(_ reflect.Type, key string) error { switch key { case "alias", "carbon2_format", "collectd_auth_file", "collectd_parse_multivalue", "collectd_security_level", "collectd_typesdb", "collection_jitter", "csv_column_names", diff --git a/internal/goplugin/noplugin.go b/internal/goplugin/noplugin.go index 23d8634c46520..089972d465196 100644 --- a/internal/goplugin/noplugin.go +++ b/internal/goplugin/noplugin.go @@ -4,6 +4,6 @@ package goplugin import "errors" -func LoadExternalPlugins(rootDir string) error { +func LoadExternalPlugins(_ string) error { return errors.New("go plugin support is not enabled") } diff --git a/internal/internal_test.go b/internal/internal_test.go index 890a787bf258c..16980ecef93af 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -46,7 +46,7 @@ func TestSnakeCase(t *testing.T) { } var ( - sleepbin, _ = exec.LookPath("sleep") + sleepbin, _ = exec.LookPath("sleep") //nolint:unused // Used in skipped tests echobin, _ = exec.LookPath("echo") shell, _ = exec.LookPath("sh") ) diff --git a/metric/metric.go b/metric/metric.go index 517645a831280..b1a6edcfe91c7 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -251,8 +251,8 @@ func (m *metric) Copy() telegraf.Metric { return m2 } -func (m *metric) SetAggregate(b bool) { - m.aggregate = true +func (m *metric) SetAggregate(aggregate bool) { + m.aggregate = aggregate } func (m *metric) IsAggregate() bool { diff --git a/metric/tracking_test.go b/metric/tracking_test.go index 0ca1ca4daa4bc..3464ea15f2ecb 100644 --- a/metric/tracking_test.go +++ b/metric/tracking_test.go @@ -78,12 +78,13 @@ func TestTracking(t *testing.T) { { name: "accept", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m.Accept() @@ -93,12 +94,13 @@ func TestTracking(t *testing.T) { { name: "reject", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m.Reject() @@ -108,12 +110,13 @@ func TestTracking(t *testing.T) { { name: "accept copy", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m2 := m.Copy() @@ -125,12 +128,13 @@ func TestTracking(t *testing.T) { { name: "copy with accept and done", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m2 := m.Copy() @@ -142,12 +146,13 @@ func TestTracking(t *testing.T) { { name: "copy with mixed delivery", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m2 := m.Copy() diff --git a/models/buffer.go b/models/buffer.go index 6cd1a6c71ae26..5f721dc98081b 100644 --- a/models/buffer.go +++ b/models/buffer.go @@ -220,16 +220,6 @@ func (b *Buffer) Reject(batch []telegraf.Metric) { b.BufferSize.Set(int64(b.length())) } -// dist returns the distance between two indexes. Because this data structure -// uses a half open range the arguments must both either left side or right -// side pairs. -func (b *Buffer) dist(begin, end int) int { - if begin <= end { - return end - begin - } - return b.cap - begin + end -} - // next returns the next index with wrapping. func (b *Buffer) next(index int) int { index++ @@ -246,15 +236,6 @@ func (b *Buffer) nextby(index, count int) int { return index } -// next returns the prev index with wrapping. -func (b *Buffer) prev(index int) int { - index-- - if index < 0 { - return b.cap - 1 - } - return index -} - // prevby returns the index that is count older with wrapping. func (b *Buffer) prevby(index, count int) int { index -= count diff --git a/models/running_input_test.go b/models/running_input_test.go index ff3747116f6ca..5c639e6929ef3 100644 --- a/models/running_input_test.go +++ b/models/running_input_test.go @@ -289,6 +289,6 @@ func TestMetricErrorCounters(t *testing.T) { type testInput struct{} -func (t *testInput) Description() string { return "" } -func (t *testInput) SampleConfig() string { return "" } -func (t *testInput) Gather(acc telegraf.Accumulator) error { return nil } +func (t *testInput) Description() string { return "" } +func (t *testInput) SampleConfig() string { return "" } +func (t *testInput) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/models/running_output.go b/models/running_output.go index b7f3fe03f9000..6f5f8c0a84bad 100644 --- a/models/running_output.go +++ b/models/running_output.go @@ -56,7 +56,6 @@ type RunningOutput struct { } func NewRunningOutput( - name string, output telegraf.Output, config *OutputConfig, batchSize int, diff --git a/models/running_output_test.go b/models/running_output_test.go index abde752bc15e2..feea970336817 100644 --- a/models/running_output_test.go +++ b/models/running_output_test.go @@ -29,14 +29,6 @@ var next5 = []telegraf.Metric{ testutil.TestMetric(101, "metric10"), } -func reverse(metrics []telegraf.Metric) []telegraf.Metric { - result := make([]telegraf.Metric, 0, len(metrics)) - for i := len(metrics) - 1; i >= 0; i-- { - result = append(result, metrics[i]) - } - return result -} - // Benchmark adding metrics. func BenchmarkRunningOutputAddWrite(b *testing.B) { conf := &OutputConfig{ @@ -44,7 +36,7 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) { } m := &perfOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for n := 0; n < b.N; n++ { ro.AddMetric(testutil.TestMetric(101, "metric1")) @@ -59,7 +51,7 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) { } m := &perfOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for n := 0; n < b.N; n++ { ro.AddMetric(testutil.TestMetric(101, "metric1")) @@ -77,7 +69,7 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) { m := &perfOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for n := 0; n < b.N; n++ { ro.AddMetric(testutil.TestMetric(101, "metric1")) @@ -94,7 +86,7 @@ func TestRunningOutput_DropFilter(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for _, metric := range first5 { ro.AddMetric(metric) @@ -119,7 +111,7 @@ func TestRunningOutput_PassFilter(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for _, metric := range first5 { ro.AddMetric(metric) @@ -144,7 +136,7 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -165,7 +157,7 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -186,7 +178,7 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -207,7 +199,7 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -225,7 +217,7 @@ func TestRunningOutput_NameOverride(t *testing.T) { } m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -243,7 +235,7 @@ func TestRunningOutput_NamePrefix(t *testing.T) { } m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -261,7 +253,7 @@ func TestRunningOutput_NameSuffix(t *testing.T) { } m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -279,7 +271,7 @@ func TestRunningOutputDefault(t *testing.T) { } m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for _, metric := range first5 { ro.AddMetric(metric) @@ -301,7 +293,7 @@ func TestRunningOutputWriteFail(t *testing.T) { m := &mockOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 4, 12) + ro := NewRunningOutput(m, conf, 4, 12) // Fill buffer to limit twice for _, metric := range first5 { @@ -334,7 +326,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) { m := &mockOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 100, 1000) + ro := NewRunningOutput(m, conf, 100, 1000) // add 5 metrics for _, metric := range first5 { @@ -372,7 +364,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) { m := &mockOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 5, 100) + ro := NewRunningOutput(m, conf, 5, 100) // add 5 metrics for _, metric := range first5 { @@ -436,7 +428,7 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { m := &mockOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 5, 1000) + ro := NewRunningOutput(m, conf, 5, 1000) // add 5 metrics for _, metric := range first5 { @@ -470,7 +462,6 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { func TestInternalMetrics(t *testing.T) { _ = NewRunningOutput( - "test_internal", &mockOutput{}, &OutputConfig{ Filter: Filter{}, @@ -581,7 +572,7 @@ func (m *perfOutput) SampleConfig() string { return "" } -func (m *perfOutput) Write(metrics []telegraf.Metric) error { +func (m *perfOutput) Write(_ []telegraf.Metric) error { if m.failWrite { return fmt.Errorf("failed write") } diff --git a/plugins/aggregators/merge/merge.go b/plugins/aggregators/merge/merge.go index 35be286d3bc01..e11aad07a4b7d 100644 --- a/plugins/aggregators/merge/merge.go +++ b/plugins/aggregators/merge/merge.go @@ -19,7 +19,6 @@ const ( type Merge struct { grouper *metric.SeriesGrouper - log telegraf.Logger } func (a *Merge) Init() error { diff --git a/plugins/aggregators/quantile/algorithms.go b/plugins/aggregators/quantile/algorithms.go index d2a5ac685397a..641844f3f4e77 100644 --- a/plugins/aggregators/quantile/algorithms.go +++ b/plugins/aggregators/quantile/algorithms.go @@ -21,7 +21,7 @@ type exactAlgorithmR7 struct { sorted bool } -func newExactR7(compression float64) (algorithm, error) { +func newExactR7(_ float64) (algorithm, error) { return &exactAlgorithmR7{xs: make([]float64, 0, 100), sorted: false}, nil } @@ -68,7 +68,7 @@ type exactAlgorithmR8 struct { sorted bool } -func newExactR8(compression float64) (algorithm, error) { +func newExactR8(_ float64) (algorithm, error) { return &exactAlgorithmR8{xs: make([]float64, 0, 100), sorted: false}, nil } diff --git a/plugins/common/shim/config_test.go b/plugins/common/shim/config_test.go index 97d2004200b44..75ad18239fbb0 100644 --- a/plugins/common/shim/config_test.go +++ b/plugins/common/shim/config_test.go @@ -81,7 +81,7 @@ func (i *testDurationInput) SampleConfig() string { func (i *testDurationInput) Description() string { return "" } -func (i *testDurationInput) Gather(acc telegraf.Accumulator) error { +func (i *testDurationInput) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/common/shim/goshim_test.go b/plugins/common/shim/goshim_test.go index 080a513ade250..bbd1a0b703cc5 100644 --- a/plugins/common/shim/goshim_test.go +++ b/plugins/common/shim/goshim_test.go @@ -71,7 +71,7 @@ func (i *erroringInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *erroringInput) Start(acc telegraf.Accumulator) error { +func (i *erroringInput) Start(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/common/shim/input_test.go b/plugins/common/shim/input_test.go index 32f97d5924bc5..7cbfe6413975f 100644 --- a/plugins/common/shim/input_test.go +++ b/plugins/common/shim/input_test.go @@ -100,7 +100,7 @@ func (i *testInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *testInput) Start(acc telegraf.Accumulator) error { +func (i *testInput) Start(_ telegraf.Accumulator) error { return nil } @@ -133,7 +133,7 @@ func (i *serviceInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *serviceInput) Start(acc telegraf.Accumulator) error { +func (i *serviceInput) Start(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index b5c13ddc45a7b..0c88ba840f822 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -486,14 +486,6 @@ func parseAerospikeValue(key string, v string) interface{} { } } -func copyTags(m map[string]string) map[string]string { - out := make(map[string]string) - for k, v := range m { - out[k] = v - } - return out -} - func init() { inputs.Add("aerospike", func() telegraf.Input { return &Aerospike{} diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go index f0ac60e932761..a844ab4ee7a3a 100644 --- a/plugins/inputs/aliyuncms/aliyuncms_test.go +++ b/plugins/inputs/aliyuncms/aliyuncms_test.go @@ -71,7 +71,7 @@ type mockAliyunSDKCli struct { resp *responses.CommonResponse } -func (m *mockAliyunSDKCli) ProcessCommonRequest(req *requests.CommonRequest) (response *responses.CommonResponse, err error) { +func (m *mockAliyunSDKCli) ProcessCommonRequest(_ *requests.CommonRequest) (response *responses.CommonResponse, err error) { return m.resp, nil } diff --git a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go index 7ab64ba114cc6..3cd90812bba15 100644 --- a/plugins/inputs/apcupsd/apcupsd_test.go +++ b/plugins/inputs/apcupsd/apcupsd_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestApcupsdDocs(t *testing.T) { +func TestApcupsdDocs(_ *testing.T) { apc := &ApcUpsd{} apc.Description() apc.SampleConfig() diff --git a/plugins/inputs/cassandra/cassandra_test.go b/plugins/inputs/cassandra/cassandra_test.go index 9b0798207ef16..325c267d9274b 100644 --- a/plugins/inputs/cassandra/cassandra_test.go +++ b/plugins/inputs/cassandra/cassandra_test.go @@ -77,19 +77,6 @@ const validCassandraNestedMultiValueJSON = ` } }` -const validSingleValueJSON = ` -{ - "request":{ - "path":"used", - "mbean":"java.lang:type=Memory", - "attribute":"HeapMemoryUsage", - "type":"read" - }, - "value":209274376, - "timestamp":1446129256, - "status":200 -}` - const validJavaMultiTypeJSON = ` { "request":{ @@ -104,8 +91,6 @@ const validJavaMultiTypeJSON = ` const invalidJSON = "I don't think this is JSON" -const empty = "" - var Servers = []string{"10.10.10.10:8778"} var AuthServers = []string{"user:passwd@10.10.10.10:8778"} var MultipleServers = []string{"10.10.10.10:8778", "10.10.10.11:8778"} @@ -121,7 +106,7 @@ type jolokiaClientStub struct { statusCode int } -func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) { +func (c jolokiaClientStub) MakeRequest(_ *http.Request) (*http.Response, error) { resp := http.Response{} resp.StatusCode = c.statusCode resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index e46a18049c354..08075fd03be49 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -86,7 +86,7 @@ func TestDecodeOSDPoolStats(t *testing.T) { } } -func TestGather(t *testing.T) { +func TestGather(_ *testing.T) { saveFind := findSockets saveDump := perfDump defer func() { diff --git a/plugins/inputs/chrony/chrony_test.go b/plugins/inputs/chrony/chrony_test.go index 4b8ad85ccf932..7c614dbbc75ce 100644 --- a/plugins/inputs/chrony/chrony_test.go +++ b/plugins/inputs/chrony/chrony_test.go @@ -65,7 +65,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking // it returns below mockData. -func TestHelperProcess(t *testing.T) { +func TestHelperProcess(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index e0cbb87d4371c..6dad06061f1cd 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -88,15 +88,15 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { // Invert aliases list c.warned = make(map[string]struct{}) c.aliases = make(map[string]string, len(c.Aliases)) - for alias, path := range c.Aliases { - c.aliases[path] = alias + for alias, encodingPath := range c.Aliases { + c.aliases[encodingPath] = alias } c.initDb() c.dmesFuncs = make(map[string]string, len(c.Dmes)) - for dme, path := range c.Dmes { - c.dmesFuncs[path] = dme - switch path { + for dme, dmeKey := range c.Dmes { + c.dmesFuncs[dmeKey] = dme + switch dmeKey { case "uint64 to int": c.propMap[dme] = nxosValueXformUint64Toint64 case "uint64 to string": @@ -115,7 +115,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { } var jsStruct NxPayloadXfromStructure - err := json.Unmarshal([]byte(path), &jsStruct) + err := json.Unmarshal([]byte(dmeKey), &jsStruct) if err != nil { continue } @@ -449,9 +449,10 @@ func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemet } } -func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string, path string, tags map[string]string, timestamp time.Time) { +func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, + encodingPath string, tags map[string]string, timestamp time.Time) { // RIB - measurement := path + measurement := encodingPath for _, subfield := range field.Fields { //For Every table fill the keys which are vrfName, address and masklen switch subfield.Name { @@ -481,13 +482,14 @@ func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telem } } -func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string, path string, tags map[string]string, timestamp time.Time) { +func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, + encodingPath string, tags map[string]string, timestamp time.Time) { // DME structure: https://developer.cisco.com/site/nxapi-dme-model-reference-api/ var nxAttributes *telemetry.TelemetryField - isDme := strings.Contains(path, "sys/") - if path == "rib" { + isDme := strings.Contains(encodingPath, "sys/") + if encodingPath == "rib" { //handle native data path rib - c.parseRib(grouper, field, prefix, path, tags, timestamp) + c.parseRib(grouper, field, encodingPath, tags, timestamp) return } if field == nil || !isDme || len(field.Fields) == 0 || len(field.Fields[0].Fields) == 0 || len(field.Fields[0].Fields[0].Fields) == 0 { @@ -503,13 +505,13 @@ func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGroup if subfield.Name == "dn" { tags["dn"] = decodeTag(subfield) } else { - c.parseContentField(grouper, subfield, "", path, tags, timestamp) + c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp) } } } func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string, - path string, tags map[string]string, timestamp time.Time) { + encodingPath string, tags map[string]string, timestamp time.Time) { name := strings.Replace(field.Name, "-", "_", -1) if (name == "modTs" || name == "createTs") && decodeValue(field) == "never" { @@ -521,23 +523,23 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie name = prefix + "/" + name } - extraTags := c.extraTags[strings.Replace(path, "-", "_", -1)+"/"+name] + extraTags := c.extraTags[strings.Replace(encodingPath, "-", "_", -1)+"/"+name] if value := decodeValue(field); value != nil { // Do alias lookup, to shorten measurement names - measurement := path - if alias, ok := c.aliases[path]; ok { + measurement := encodingPath + if alias, ok := c.aliases[encodingPath]; ok { measurement = alias } else { c.mutex.Lock() - if _, haveWarned := c.warned[path]; !haveWarned { - c.Log.Debugf("No measurement alias for encoding path: %s", path) - c.warned[path] = struct{}{} + if _, haveWarned := c.warned[encodingPath]; !haveWarned { + c.Log.Debugf("No measurement alias for encoding path: %s", encodingPath) + c.warned[encodingPath] = struct{}{} } c.mutex.Unlock() } - if val := c.nxosValueXform(field, value, path); val != nil { + if val := c.nxosValueXform(field, value, encodingPath); val != nil { grouper.Add(measurement, tags, timestamp, name, val) } else { grouper.Add(measurement, tags, timestamp, name, value) @@ -554,8 +556,8 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie } var nxAttributes, nxChildren, nxRows *telemetry.TelemetryField - isNXOS := !strings.ContainsRune(path, ':') // IOS-XR and IOS-XE have a colon in their encoding path, NX-OS does not - isEVENT := isNXOS && strings.Contains(path, "EVENT-LIST") + isNXOS := !strings.ContainsRune(encodingPath, ':') // IOS-XR and IOS-XE have a colon in their encoding path, NX-OS does not + isEVENT := isNXOS && strings.Contains(encodingPath, "EVENT-LIST") nxChildren = nil nxAttributes = nil for _, subfield := range field.Fields { @@ -574,13 +576,13 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie if nxAttributes == nil { //call function walking over walking list. for _, sub := range subfield.Fields { - c.parseClassAttributeField(grouper, sub, name, path, tags, timestamp) + c.parseClassAttributeField(grouper, sub, encodingPath, tags, timestamp) } } } else if isNXOS && strings.HasPrefix(subfield.Name, "ROW_") { nxRows = subfield } else if _, isExtraTag := extraTags[subfield.Name]; !isExtraTag { // Regular telemetry decoding - c.parseContentField(grouper, subfield, name, path, tags, timestamp) + c.parseContentField(grouper, subfield, name, encodingPath, tags, timestamp) } } @@ -595,10 +597,10 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie //We can have subfield so recursively handle it. if len(row.Fields) == 1 { tags["row_number"] = strconv.FormatInt(int64(i), 10) - c.parseContentField(grouper, subfield, "", path, tags, timestamp) + c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp) } } else { - c.parseContentField(grouper, subfield, "", path, tags, timestamp) + c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp) } // Nxapi we can't identify keys always from prefix tags["row_number"] = strconv.FormatInt(int64(i), 10) @@ -629,14 +631,14 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie for _, subfield := range nxAttributes.Fields { if subfield.Name != "rn" { - c.parseContentField(grouper, subfield, "", path, tags, timestamp) + c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp) } } if nxChildren != nil { // This is a nested structure, children will inherit relative name keys of parent for _, subfield := range nxChildren.Fields { - c.parseContentField(grouper, subfield, prefix, path, tags, timestamp) + c.parseContentField(grouper, subfield, prefix, encodingPath, tags, timestamp) } } delete(tags, prefix) diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go index 60140c030fed2..52f3e6fd59021 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go @@ -39,7 +39,7 @@ func nxosValueXformUint64Toint64(field *telemetry.TelemetryField, value interfac } //xform string to float -func nxosValueXformStringTofloat(field *telemetry.TelemetryField, value interface{}) interface{} { +func nxosValueXformStringTofloat(field *telemetry.TelemetryField, _ interface{}) interface{} { //convert property to float from string. vals := field.GetStringValue() if vals != "" { @@ -51,7 +51,7 @@ func nxosValueXformStringTofloat(field *telemetry.TelemetryField, value interfac } //xform string to uint64 -func nxosValueXformStringToUint64(field *telemetry.TelemetryField, value interface{}) interface{} { +func nxosValueXformStringToUint64(field *telemetry.TelemetryField, _ interface{}) interface{} { //string to uint64 vals := field.GetStringValue() if vals != "" { @@ -63,7 +63,7 @@ func nxosValueXformStringToUint64(field *telemetry.TelemetryField, value interfa } //xform string to int64 -func nxosValueXformStringToInt64(field *telemetry.TelemetryField, value interface{}) interface{} { +func nxosValueXformStringToInt64(field *telemetry.TelemetryField, _ interface{}) interface{} { //string to int64 vals := field.GetStringValue() if vals != "" { @@ -74,26 +74,8 @@ func nxosValueXformStringToInt64(field *telemetry.TelemetryField, value interfac return nil } -//auto-xform -func nxosValueAutoXform(field *telemetry.TelemetryField, value interface{}) interface{} { - //check if we want auto xformation - vals := field.GetStringValue() - if vals != "" { - if val64, err := strconv.ParseUint(vals, 10, 64); err == nil { - return val64 - } - if valf, err := strconv.ParseFloat(vals, 64); err == nil { - return valf - } - if val64, err := strconv.ParseInt(vals, 10, 64); err == nil { - return val64 - } - } // switch - return nil -} - //auto-xform float properties -func nxosValueAutoXformFloatProp(field *telemetry.TelemetryField, value interface{}) interface{} { +func nxosValueAutoXformFloatProp(field *telemetry.TelemetryField, _ interface{}) interface{} { //check if we want auto xformation vals := field.GetStringValue() if vals != "" { @@ -105,7 +87,7 @@ func nxosValueAutoXformFloatProp(field *telemetry.TelemetryField, value interfac } //xform uint64 to string -func nxosValueXformUint64ToString(field *telemetry.TelemetryField, value interface{}) interface{} { +func nxosValueXformUint64ToString(field *telemetry.TelemetryField, _ interface{}) interface{} { switch val := field.ValueByType.(type) { case *telemetry.TelemetryField_StringValue: if len(val.StringValue) > 0 { diff --git a/plugins/inputs/clickhouse/clickhouse_test.go b/plugins/inputs/clickhouse/clickhouse_test.go index c69c455b94c5e..bf53fdae007d8 100644 --- a/plugins/inputs/clickhouse/clickhouse_test.go +++ b/plugins/inputs/clickhouse/clickhouse_test.go @@ -548,7 +548,7 @@ func TestOfflineServer(t *testing.T) { assert.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) } -func TestAutoDiscovery(t *testing.T) { +func TestAutoDiscovery(_ *testing.T) { var ( ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { type result struct { diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/pubsub.go index 0ac40a2cf551d..41ecf09ec3051 100644 --- a/plugins/inputs/cloud_pubsub/pubsub.go +++ b/plugins/inputs/cloud_pubsub/pubsub.go @@ -67,7 +67,7 @@ func (ps *PubSub) SampleConfig() string { } // Gather does nothing for this service input. -func (ps *PubSub) Gather(acc telegraf.Accumulator) error { +func (ps *PubSub) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/cloud_pubsub/pubsub_test.go b/plugins/inputs/cloud_pubsub/pubsub_test.go index 0adc024872df7..d07dfe34f2290 100644 --- a/plugins/inputs/cloud_pubsub/pubsub_test.go +++ b/plugins/inputs/cloud_pubsub/pubsub_test.go @@ -207,7 +207,7 @@ func TestRunErrorInSubscriber(t *testing.T) { messages: make(chan *testMsg, 100), } fakeErrStr := "a fake error" - sub.receiver = testMessagesError(sub, errors.New("a fake error")) + sub.receiver = testMessagesError(errors.New("a fake error")) ps := &PubSub{ Log: testutil.Logger{}, diff --git a/plugins/inputs/cloud_pubsub/subscription_stub.go b/plugins/inputs/cloud_pubsub/subscription_stub.go index e061728caf7fe..1e5bd009bc138 100644 --- a/plugins/inputs/cloud_pubsub/subscription_stub.go +++ b/plugins/inputs/cloud_pubsub/subscription_stub.go @@ -22,7 +22,7 @@ func (s *stubSub) Receive(ctx context.Context, f func(context.Context, message)) type receiveFunc func(ctx context.Context, f func(context.Context, message)) error -func testMessagesError(s *stubSub, expectedErr error) receiveFunc { +func testMessagesError(expectedErr error) receiveFunc { return func(ctx context.Context, f func(context.Context, message)) error { return expectedErr } diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push.go b/plugins/inputs/cloud_pubsub_push/pubsub_push.go index b320daedbacc1..575bdae61f8c9 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push.go @@ -6,7 +6,6 @@ import ( "encoding/base64" "encoding/json" "io/ioutil" - "net" "net/http" "sync" "time" @@ -39,13 +38,12 @@ type PubSubPush struct { tlsint.ServerConfig parsers.Parser - listener net.Listener - server *http.Server - acc telegraf.TrackingAccumulator - ctx context.Context - cancel context.CancelFunc - wg *sync.WaitGroup - mu *sync.Mutex + server *http.Server + acc telegraf.TrackingAccumulator + ctx context.Context + cancel context.CancelFunc + wg *sync.WaitGroup + mu *sync.Mutex undelivered map[telegraf.TrackingID]chan bool sem chan struct{} diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go index ae7601b20cccc..ccce488a81c6e 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go @@ -144,7 +144,7 @@ func TestServeHTTP(t *testing.T) { pubPush.SetParser(p) dst := make(chan telegraf.Metric, 1) - ro := models.NewRunningOutput("test", &testOutput{failWrite: test.fail}, &models.OutputConfig{}, 1, 1) + ro := models.NewRunningOutput(&testOutput{failWrite: test.fail}, &models.OutputConfig{}, 1, 1) pubPush.acc = agent.NewAccumulator(&testMetricMaker{}, dst).WithTracking(1) wg.Add(1) @@ -154,13 +154,13 @@ func TestServeHTTP(t *testing.T) { }() wg.Add(1) - go func(status int, d chan telegraf.Metric) { + go func(d chan telegraf.Metric) { defer wg.Done() for m := range d { ro.AddMetric(m) ro.Write() } - }(test.status, dst) + }(dst) ctx, cancel := context.WithTimeout(req.Context(), pubPush.WriteTimeout.Duration) req = req.WithContext(ctx) @@ -218,7 +218,7 @@ func (*testOutput) SampleConfig() string { return "" } -func (t *testOutput) Write(metrics []telegraf.Metric) error { +func (t *testOutput) Write(_ []telegraf.Metric) error { if t.failWrite { return fmt.Errorf("failed write") } diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 1bc5379e56419..22fdcab38e0b6 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -208,11 +208,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { c.updateWindow(time.Now()) // Get all of the possible queries so we can send groups of 100. - queries, err := c.getDataQueries(filteredMetrics) - if err != nil { - return err - } - + queries := c.getDataQueries(filteredMetrics) if len(queries) == 0 { return nil } @@ -441,9 +437,9 @@ func (c *CloudWatch) updateWindow(relativeTo time.Time) { } // getDataQueries gets all of the possible queries so we can maximize the request payload. -func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudwatch.MetricDataQuery, error) { +func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) []*cloudwatch.MetricDataQuery { if c.metricCache != nil && c.metricCache.queries != nil && c.metricCache.isValid() { - return c.metricCache.queries, nil + return c.metricCache.queries } c.queryDimensions = map[string]*map[string]string{} @@ -518,7 +514,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudw if len(dataQueries) == 0 { c.Log.Debug("no metrics found to collect") - return nil, nil + return nil } if c.metricCache == nil { @@ -531,7 +527,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudw c.metricCache.queries = dataQueries } - return dataQueries, nil + return dataQueries } // gatherMetrics gets metric data from Cloudwatch. diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 798cdff1f2bed..43fb01f058821 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -135,7 +135,7 @@ func TestGather(t *testing.T) { type mockSelectMetricsCloudWatchClient struct{} -func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { +func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { metrics := []*cloudwatch.Metric{} // 4 metrics are available metricNames := []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"} @@ -182,7 +182,7 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM return result, nil } -func (m *mockSelectMetricsCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { +func (m *mockSelectMetricsCloudWatchClient) GetMetricData(_ *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { return nil, nil } @@ -246,7 +246,7 @@ func TestGenerateStatisticsInputParams(t *testing.T) { c.updateWindow(now) statFilter, _ := filter.NewIncludeExcludeFilter(nil, nil) - queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) + queries := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) params := c.getDataInputs(queries) assert.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) @@ -283,7 +283,7 @@ func TestGenerateStatisticsInputParamsFiltered(t *testing.T) { c.updateWindow(now) statFilter, _ := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil) - queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) + queries := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) params := c.getDataInputs(queries) assert.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) diff --git a/plugins/inputs/cpu/cpu_test.go b/plugins/inputs/cpu/cpu_test.go index bf356ec7b945c..d3849a5198038 100644 --- a/plugins/inputs/cpu/cpu_test.go +++ b/plugins/inputs/cpu/cpu_test.go @@ -48,26 +48,22 @@ func TestCPUStats(t *testing.T) { cs := NewCPUStats(&mps) - cputags := map[string]string{ - "cpu": "cpu0", - } - err := cs.Gather(&acc) require.NoError(t, err) // Computed values are checked with delta > 0 because of floating point arithmetic // imprecision - assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 8.8, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_active", 19.9, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 1.3, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.8389, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 0.6, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.11, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.0511, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 3.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 0.324, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_user", 8.8, 0) + assertContainsTaggedFloat(t, &acc, "time_system", 8.2, 0) + assertContainsTaggedFloat(t, &acc, "time_idle", 80.1, 0) + assertContainsTaggedFloat(t, &acc, "time_active", 19.9, 0.0005) + assertContainsTaggedFloat(t, &acc, "time_nice", 1.3, 0) + assertContainsTaggedFloat(t, &acc, "time_iowait", 0.8389, 0) + assertContainsTaggedFloat(t, &acc, "time_irq", 0.6, 0) + assertContainsTaggedFloat(t, &acc, "time_softirq", 0.11, 0) + assertContainsTaggedFloat(t, &acc, "time_steal", 0.0511, 0) + assertContainsTaggedFloat(t, &acc, "time_guest", 3.1, 0) + assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0) mps2 := system.MockPS{} mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) @@ -77,29 +73,29 @@ func TestCPUStats(t *testing.T) { err = cs.Gather(&acc) require.NoError(t, err) - assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 24.9, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 10.9, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 157.9798, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_active", 42.0202, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 3.5, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.929, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 1.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.31, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.2812, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 11.4, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 2.524, 0, cputags) - - assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 7.8, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_system", 2.7, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 77.8798, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_active", 22.1202, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_nice", 0, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 0.0901, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_irq", 0.6, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_softirq", 0.2, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_steal", 0.2301, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest", 8.3, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest_nice", 2.2, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "time_user", 24.9, 0) + assertContainsTaggedFloat(t, &acc, "time_system", 10.9, 0) + assertContainsTaggedFloat(t, &acc, "time_idle", 157.9798, 0) + assertContainsTaggedFloat(t, &acc, "time_active", 42.0202, 0.0005) + assertContainsTaggedFloat(t, &acc, "time_nice", 3.5, 0) + assertContainsTaggedFloat(t, &acc, "time_iowait", 0.929, 0) + assertContainsTaggedFloat(t, &acc, "time_irq", 1.2, 0) + assertContainsTaggedFloat(t, &acc, "time_softirq", 0.31, 0) + assertContainsTaggedFloat(t, &acc, "time_steal", 0.2812, 0) + assertContainsTaggedFloat(t, &acc, "time_guest", 11.4, 0) + assertContainsTaggedFloat(t, &acc, "time_guest_nice", 2.524, 0) + + assertContainsTaggedFloat(t, &acc, "usage_user", 7.8, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_system", 2.7, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_idle", 77.8798, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_active", 22.1202, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_nice", 0, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_iowait", 0.0901, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_irq", 0.6, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_softirq", 0.2, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_steal", 0.2301, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_guest", 8.3, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_guest_nice", 2.2, 0.0005) } // Asserts that a given accumulator contains a measurement of type float64 with @@ -109,24 +105,21 @@ func TestCPUStats(t *testing.T) { // Parameters: // t *testing.T : Testing object to use // acc testutil.Accumulator: Accumulator to examine -// measurement string : Name of the measurement to examine +// field string : Name of field to examine // expectedValue float64 : Value to search for within the measurement // delta float64 : Maximum acceptable distance of an accumulated value // from the expectedValue parameter. Useful when // floating-point arithmetic imprecision makes looking // for an exact match impractical -// tags map[string]string : Tag set the found measurement must have. Set to nil to -// ignore the tag set. func assertContainsTaggedFloat( t *testing.T, acc *testutil.Accumulator, - measurement string, field string, expectedValue float64, delta float64, - tags map[string]string, ) { var actualValue float64 + measurement := "cpu" // always cpu for _, pt := range acc.Metrics { if pt.Measurement == measurement { for fieldname, value := range pt.Fields { @@ -218,18 +211,14 @@ func TestCPUTimesDecrease(t *testing.T) { cs := NewCPUStats(&mps) - cputags := map[string]string{ - "cpu": "cpu0", - } - err := cs.Gather(&acc) require.NoError(t, err) // Computed values are checked with delta > 0 because of floating point arithmetic // imprecision - assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 18, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 2, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_user", 18, 0) + assertContainsTaggedFloat(t, &acc, "time_idle", 80, 0) + assertContainsTaggedFloat(t, &acc, "time_iowait", 2, 0) mps2 := system.MockPS{} mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) @@ -246,11 +235,11 @@ func TestCPUTimesDecrease(t *testing.T) { err = cs.Gather(&acc) require.NoError(t, err) - assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 56, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 120, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 3, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_user", 56, 0) + assertContainsTaggedFloat(t, &acc, "time_idle", 120, 0) + assertContainsTaggedFloat(t, &acc, "time_iowait", 3, 0) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 18, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 80, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 2, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "usage_user", 18, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_idle", 80, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_iowait", 2, 0.0005) } diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index 32eab80867cd4..f31c4588ba555 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -92,11 +92,10 @@ type AuthToken struct { // ClusterClient is a Client that uses the cluster URL. type ClusterClient struct { - clusterURL *url.URL - httpClient *http.Client - credentials *Credentials - token string - semaphore chan struct{} + clusterURL *url.URL + httpClient *http.Client + token string + semaphore chan struct{} } type claims struct { diff --git a/plugins/inputs/dcos/creds.go b/plugins/inputs/dcos/creds.go index 0178315bb7076..2fd5f078e46e5 100644 --- a/plugins/inputs/dcos/creds.go +++ b/plugins/inputs/dcos/creds.go @@ -47,13 +47,13 @@ func (c *ServiceAccount) IsExpired() bool { return c.auth.Text != "" || c.auth.Expire.Add(relogDuration).After(time.Now()) } -func (c *TokenCreds) Token(ctx context.Context, client Client) (string, error) { +func (c *TokenCreds) Token(_ context.Context, _ Client) (string, error) { octets, err := ioutil.ReadFile(c.Path) if err != nil { - return "", fmt.Errorf("Error reading token file %q: %s", c.Path, err) + return "", fmt.Errorf("error reading token file %q: %s", c.Path, err) } if !utf8.Valid(octets) { - return "", fmt.Errorf("Token file does not contain utf-8 encoded text: %s", c.Path) + return "", fmt.Errorf("token file does not contain utf-8 encoded text: %s", c.Path) } token := strings.TrimSpace(string(octets)) return token, nil @@ -63,7 +63,7 @@ func (c *TokenCreds) IsExpired() bool { return true } -func (c *NullCreds) Token(ctx context.Context, client Client) (string, error) { +func (c *NullCreds) Token(_ context.Context, _ Client) (string, error) { return "", nil } diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index 1cdd40f1112fc..20cb47fd34288 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -66,7 +66,6 @@ type DCOS struct { nodeFilter filter.Filter containerFilter filter.Filter appFilter filter.Filter - taskNameFilter filter.Filter } func (d *DCOS) Description() string { @@ -223,7 +222,7 @@ type point struct { fields map[string]interface{} } -func (d *DCOS) createPoints(acc telegraf.Accumulator, m *Metrics) []*point { +func (d *DCOS) createPoints(m *Metrics) []*point { points := make(map[string]*point) for _, dp := range m.Datapoints { fieldKey := strings.Replace(dp.Name, ".", "_", -1) @@ -288,7 +287,7 @@ func (d *DCOS) createPoints(acc telegraf.Accumulator, m *Metrics) []*point { func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *Metrics, tagDimensions []string) { tm := time.Now() - points := d.createPoints(acc, m) + points := d.createPoints(m) for _, p := range points { tags := make(map[string]string) diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go index 30820659338d8..06ac3ea9568f5 100644 --- a/plugins/inputs/directory_monitor/directory_monitor.go +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -106,7 +106,7 @@ func (monitor *DirectoryMonitor) Description() string { return "Ingests files in a directory and then moves them to a target directory." } -func (monitor *DirectoryMonitor) Gather(acc telegraf.Accumulator) error { +func (monitor *DirectoryMonitor) Gather(_ telegraf.Accumulator) error { // Get all files sitting in the directory. files, err := ioutil.ReadDir(monitor.Directory) if err != nil { @@ -130,7 +130,7 @@ func (monitor *DirectoryMonitor) Gather(acc telegraf.Accumulator) error { // If file is decaying, process it. if timeThresholdExceeded { - monitor.processFile(file, acc) + monitor.processFile(file) } } @@ -149,7 +149,7 @@ func (monitor *DirectoryMonitor) Start(acc telegraf.Accumulator) error { // Monitor the files channel and read what they receive. monitor.waitGroup.Add(1) go func() { - monitor.Monitor(acc) + monitor.Monitor() monitor.waitGroup.Done() }() @@ -164,7 +164,7 @@ func (monitor *DirectoryMonitor) Stop() { monitor.waitGroup.Wait() } -func (monitor *DirectoryMonitor) Monitor(acc telegraf.Accumulator) { +func (monitor *DirectoryMonitor) Monitor() { for filePath := range monitor.filesToProcess { if monitor.context.Err() != nil { return @@ -182,7 +182,7 @@ func (monitor *DirectoryMonitor) Monitor(acc telegraf.Accumulator) { } } -func (monitor *DirectoryMonitor) processFile(file os.FileInfo, acc telegraf.Accumulator) { +func (monitor *DirectoryMonitor) processFile(file os.FileInfo) { if file.IsDir() { return } diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go index 265f371885f29..b9cfbad8df42a 100644 --- a/plugins/inputs/directory_monitor/directory_monitor_test.go +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -77,13 +77,6 @@ func TestCSVGZImport(t *testing.T) { require.NoError(t, err) } -// For JSON data. -type event struct { - Name string - Speed float64 - Length float64 -} - func TestMultipleJSONFileImports(t *testing.T) { acc := testutil.Accumulator{} testJsonFile := "test.json" diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index 6cdff83ee3f16..8ae098011b0a1 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -18,8 +18,7 @@ import ( type Disque struct { Servers []string - c net.Conn - buf []byte + c net.Conn } var sampleConfig = ` @@ -87,10 +86,10 @@ func (d *Disque) Gather(acc telegraf.Accumulator) error { u.Path = "" } wg.Add(1) - go func(serv string) { + go func() { defer wg.Done() acc.AddError(d.gatherServer(u, acc)) - }(serv) + }() } wg.Wait() diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index d3f4c23976cee..ec3453eda042b 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "net/http" "regexp" "strconv" "strings" @@ -57,7 +56,6 @@ type Docker struct { newClient func(string, *tls.Config) (Client, error) client Client - httpClient *http.Client engineHost string serverVersion string filtersCreated bool @@ -937,15 +935,6 @@ func copyTags(in map[string]string) map[string]string { return out } -func sliceContains(in string, sl []string) bool { - for _, str := range sl { - if str == in { - return true - } - } - return false -} - // Parses the human-readable size string into the amount it represents. func parseSize(sizeStr string) (int64, error) { matches := sizeRegex.FindStringSubmatch(sizeStr) diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index b0c9f9791ec8b..e6ecce32323f8 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -99,7 +99,7 @@ var baseClient = MockClient{ }, } -func newClient(host string, tlsConfig *tls.Config) (Client, error) { +func newClient(_ string, _ *tls.Config) (Client, error) { return &baseClient, nil } @@ -1127,7 +1127,6 @@ func TestHostnameFromID(t *testing.T) { func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { type args struct { stat *types.StatsJSON - acc telegraf.Accumulator tags map[string]string id string perDeviceInclude []string diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index 4ae09e71cca65..878bf4a63fbe3 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -64,11 +64,6 @@ var sampleConfig = ` const ( defaultEndpoint = "unix:///var/run/docker.sock" - - // Maximum bytes of a log line before it will be split, size is mirroring - // docker code: - // https://github.com/moby/moby/blob/master/daemon/logger/copier.go#L21 - maxLineBytes = 16 * 1024 ) var ( @@ -160,18 +155,16 @@ func (d *DockerLogs) Init() error { return nil } -func (d *DockerLogs) addToContainerList(containerID string, cancel context.CancelFunc) error { +func (d *DockerLogs) addToContainerList(containerID string, cancel context.CancelFunc) { d.mu.Lock() defer d.mu.Unlock() d.containerList[containerID] = cancel - return nil } -func (d *DockerLogs) removeFromContainerList(containerID string) error { +func (d *DockerLogs) removeFromContainerList(containerID string) { d.mu.Lock() defer d.mu.Unlock() delete(d.containerList, containerID) - return nil } func (d *DockerLogs) containerInContainerList(containerID string) bool { @@ -181,13 +174,12 @@ func (d *DockerLogs) containerInContainerList(containerID string) bool { return ok } -func (d *DockerLogs) cancelTails() error { +func (d *DockerLogs) cancelTails() { d.mu.Lock() defer d.mu.Unlock() for _, cancel := range d.containerList { cancel() } - return nil } func (d *DockerLogs) matchedContainerName(names []string) string { diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index b9875079d9feb..be2ea49d48134 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -48,8 +48,6 @@ var validQuery = map[string]bool{ func (d *Dovecot) SampleConfig() string { return sampleConfig } -const defaultPort = "24242" - // Reads stats from all configured servers. func (d *Dovecot) Gather(acc telegraf.Accumulator) error { if !validQuery[d.Type] { diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index f8064e606e57f..505bb69835041 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -558,11 +558,7 @@ func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator) // gatherSortedIndicesStats gathers stats for all indices in no particular order. func (e *Elasticsearch) gatherIndividualIndicesStats(indices map[string]indexStat, now time.Time, acc telegraf.Accumulator) error { // Sort indices into buckets based on their configured prefix, if any matches. - categorizedIndexNames, err := e.categorizeIndices(indices) - if err != nil { - return err - } - + categorizedIndexNames := e.categorizeIndices(indices) for _, matchingIndices := range categorizedIndexNames { // Establish the number of each category of indices to use. User can configure to use only the latest 'X' amount. indicesCount := len(matchingIndices) @@ -590,7 +586,7 @@ func (e *Elasticsearch) gatherIndividualIndicesStats(indices map[string]indexSta return nil } -func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) (map[string][]string, error) { +func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) map[string][]string { categorizedIndexNames := map[string][]string{} // If all indices are configured to be gathered, bucket them all together. @@ -599,7 +595,7 @@ func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) (map[str categorizedIndexNames["_all"] = append(categorizedIndexNames["_all"], indexName) } - return categorizedIndexNames, nil + return categorizedIndexNames } // Bucket each returned index with its associated configured index (if any match). @@ -617,7 +613,7 @@ func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) (map[str categorizedIndexNames[match] = append(categorizedIndexNames[match], indexName) } - return categorizedIndexNames, nil + return categorizedIndexNames } func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now time.Time, acc telegraf.Accumulator) error { diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index 0700c7833dc15..184acbbbcbf57 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -33,9 +33,9 @@ type transportMock struct { body string } -func newTransportMock(statusCode int, body string) http.RoundTripper { +func newTransportMock(body string) http.RoundTripper { return &transportMock{ - statusCode: statusCode, + statusCode: http.StatusOK, body: body, } } @@ -77,7 +77,7 @@ func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) { func TestGather(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) + es.client.Transport = newTransportMock(nodeStatsResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() @@ -94,7 +94,7 @@ func TestGatherIndividualStats(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} es.NodeStats = []string{"jvm", "process"} - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponseJVMProcess) + es.client.Transport = newTransportMock(nodeStatsResponseJVMProcess) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() @@ -120,7 +120,7 @@ func TestGatherIndividualStats(t *testing.T) { func TestGatherNodeStats(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) + es.client.Transport = newTransportMock(nodeStatsResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() @@ -138,7 +138,7 @@ func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) { es.Servers = []string{"http://example.com:9200"} es.ClusterHealth = true es.ClusterHealthLevel = "" - es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse) + es.client.Transport = newTransportMock(clusterHealthResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() @@ -165,7 +165,7 @@ func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) { es.Servers = []string{"http://example.com:9200"} es.ClusterHealth = true es.ClusterHealthLevel = "cluster" - es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse) + es.client.Transport = newTransportMock(clusterHealthResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() @@ -192,7 +192,7 @@ func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) { es.Servers = []string{"http://example.com:9200"} es.ClusterHealth = true es.ClusterHealthLevel = "indices" - es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponseWithIndices) + es.client.Transport = newTransportMock(clusterHealthResponseWithIndices) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() @@ -223,7 +223,7 @@ func TestGatherClusterStatsMaster(t *testing.T) { info := serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""} // first get catMaster - es.client.Transport = newTransportMock(http.StatusOK, IsMasterResult) + es.client.Transport = newTransportMock(IsMasterResult) masterID, err := es.getCatMaster("junk") require.NoError(t, err) info.masterID = masterID @@ -238,7 +238,7 @@ func TestGatherClusterStatsMaster(t *testing.T) { // now get node status, which determines whether we're master var acc testutil.Accumulator es.Local = true - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) + es.client.Transport = newTransportMock(nodeStatsResponse) if err := es.gatherNodeStats("junk", &acc); err != nil { t.Fatal(err) } @@ -247,7 +247,7 @@ func TestGatherClusterStatsMaster(t *testing.T) { checkNodeStatsResult(t, &acc) // now test the clusterstats method - es.client.Transport = newTransportMock(http.StatusOK, clusterStatsResponse) + es.client.Transport = newTransportMock(clusterStatsResponse) require.NoError(t, es.gatherClusterStats("junk", &acc)) tags := map[string]string{ @@ -269,7 +269,7 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { es.serverInfo["http://example.com:9200"] = serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""} // first get catMaster - es.client.Transport = newTransportMock(http.StatusOK, IsNotMasterResult) + es.client.Transport = newTransportMock(IsNotMasterResult) masterID, err := es.getCatMaster("junk") require.NoError(t, err) @@ -282,7 +282,7 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { // now get node status, which determines whether we're master var acc testutil.Accumulator es.Local = true - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) + es.client.Transport = newTransportMock(nodeStatsResponse) if err := es.gatherNodeStats("junk", &acc); err != nil { t.Fatal(err) } @@ -296,7 +296,7 @@ func TestGatherClusterIndicesStats(t *testing.T) { es := newElasticsearchWithClient() es.IndicesInclude = []string{"_all"} es.Servers = []string{"http://example.com:9200"} - es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesResponse) + es.client.Transport = newTransportMock(clusterIndicesResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() @@ -315,7 +315,7 @@ func TestGatherDateStampedIndicesStats(t *testing.T) { es.IndicesInclude = []string{"twitter*", "influx*", "penguins"} es.NumMostRecentIndices = 2 es.Servers = []string{"http://example.com:9200"} - es.client.Transport = newTransportMock(http.StatusOK, dateStampedIndicesResponse) + es.client.Transport = newTransportMock(dateStampedIndicesResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() es.Init() @@ -357,7 +357,7 @@ func TestGatherClusterIndiceShardsStats(t *testing.T) { es := newElasticsearchWithClient() es.IndicesLevel = "shards" es.Servers = []string{"http://example.com:9200"} - es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesShardsResponse) + es.client.Transport = newTransportMock(clusterIndicesShardsResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() diff --git a/plugins/inputs/eventhub_consumer/eventhub_consumer.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go index 17092de3217eb..da66872da6284 100644 --- a/plugins/inputs/eventhub_consumer/eventhub_consumer.go +++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go @@ -207,11 +207,7 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error { }() // Configure receiver options - receiveOpts, err := e.configureReceiver() - if err != nil { - return err - } - + receiveOpts := e.configureReceiver() partitions := e.PartitionIDs if len(e.PartitionIDs) == 0 { @@ -224,7 +220,7 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error { } for _, partitionID := range partitions { - _, err = e.hub.Receive(ctx, partitionID, e.onMessage, receiveOpts...) + _, err := e.hub.Receive(ctx, partitionID, e.onMessage, receiveOpts...) if err != nil { return fmt.Errorf("creating receiver for partition %q: %v", partitionID, err) } @@ -233,7 +229,7 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error { return nil } -func (e *EventHub) configureReceiver() ([]eventhub.ReceiveOption, error) { +func (e *EventHub) configureReceiver() []eventhub.ReceiveOption { receiveOpts := []eventhub.ReceiveOption{} if e.ConsumerGroup != "" { @@ -254,7 +250,7 @@ func (e *EventHub) configureReceiver() ([]eventhub.ReceiveOption, error) { receiveOpts = append(receiveOpts, eventhub.ReceiveWithEpoch(e.Epoch)) } - return receiveOpts, nil + return receiveOpts } // OnMessage handles an Event. When this function returns without error the diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index 8d77f0cef4757..67609bf64af9e 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -18,9 +18,6 @@ import ( "github.com/stretchr/testify/require" ) -// Midnight 9/22/2015 -const baseTimeSeconds = 1442905200 - const validJSON = ` { "status": "green", @@ -40,20 +37,6 @@ const malformedJSON = ` "status": "green", ` -const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1\n" -const lineProtocolEmpty = "" -const lineProtocolShort = "ab" - -const lineProtocolMulti = ` -cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -` - type CarriageReturnTest struct { input []byte output []byte @@ -91,7 +74,7 @@ func newRunnerMock(out []byte, errout []byte, err error) Runner { } } -func (r runnerMock) Run(command string, _ time.Duration) ([]byte, []byte, error) { +func (r runnerMock) Run(_ string, _ time.Duration) ([]byte, []byte, error) { return r.out, r.errout, r.err } diff --git a/plugins/inputs/execd/execd_posix.go b/plugins/inputs/execd/execd_posix.go index 4d8789a8d3215..08275c62db5be 100644 --- a/plugins/inputs/execd/execd_posix.go +++ b/plugins/inputs/execd/execd_posix.go @@ -12,7 +12,7 @@ import ( "github.com/influxdata/telegraf" ) -func (e *Execd) Gather(acc telegraf.Accumulator) error { +func (e *Execd) Gather(_ telegraf.Accumulator) error { if e.process == nil || e.process.Cmd == nil { return nil } diff --git a/plugins/inputs/execd/shim/goshim.go b/plugins/inputs/execd/shim/goshim.go index 2ea0b839b3e2f..920d40f8dfddf 100644 --- a/plugins/inputs/execd/shim/goshim.go +++ b/plugins/inputs/execd/shim/goshim.go @@ -26,7 +26,6 @@ import ( type empty struct{} var ( - forever = 100 * 365 * 24 * time.Hour envVarEscaper = strings.NewReplacer( `"`, `\"`, `\`, `\\`, diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go index dbc3462211222..07afde130e04c 100644 --- a/plugins/inputs/execd/shim/shim_test.go +++ b/plugins/inputs/execd/shim/shim_test.go @@ -104,7 +104,7 @@ func (i *testInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *testInput) Start(acc telegraf.Accumulator) error { +func (i *testInput) Start(_ telegraf.Accumulator) error { return nil } @@ -156,7 +156,7 @@ func (i *serviceInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *serviceInput) Start(acc telegraf.Accumulator) error { +func (i *serviceInput) Start(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/fail2ban/fail2ban_test.go b/plugins/inputs/fail2ban/fail2ban_test.go index b28d824ee3aed..ecb539acd3166 100644 --- a/plugins/inputs/fail2ban/fail2ban_test.go +++ b/plugins/inputs/fail2ban/fail2ban_test.go @@ -92,7 +92,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { return cmd } -func TestHelperProcess(t *testing.T) { +func TestHelperProcess(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } diff --git a/plugins/inputs/fluentd/fluentd_test.go b/plugins/inputs/fluentd/fluentd_test.go index 795b6dbcf546d..6279f6cf5ef7e 100644 --- a/plugins/inputs/fluentd/fluentd_test.go +++ b/plugins/inputs/fluentd/fluentd_test.go @@ -96,8 +96,6 @@ const sampleJSON = ` var ( zero float64 - err error - pluginOutput []pluginData expectedOutput = []pluginData{ // {"object:f48698", "dummy", "input", nil, nil, nil}, // {"object:e27138", "dummy", "input", nil, nil, nil}, diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index bc286631fcd7f..585a05ee32bea 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -19,7 +19,6 @@ import ( ) type ResponseMetrics struct { - total int Metrics []Metric `json:"metrics"` } diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index 6d7301bbcf320..79fceb72e8129 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -12,7 +12,7 @@ import ( type mockFetcher struct { } -func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) { +func (h *mockFetcher) Fetch(_ string) ([]hddtemp.Disk, error) { return []hddtemp.Disk{ { DeviceName: "Disk1", diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index cd41f303e7feb..0c94437354feb 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -319,11 +319,6 @@ func methodNotAllowed(res http.ResponseWriter) { res.Write([]byte(`{"error":"http: method not allowed"}`)) } -func internalServerError(res http.ResponseWriter) { - res.Header().Set("Content-Type", "application/json") - res.WriteHeader(http.StatusInternalServerError) -} - func badRequest(res http.ResponseWriter) { res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusBadRequest) diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 24ded226346b6..5c05d84264112 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -912,10 +912,8 @@ type fakeClient struct { err error } -func (f *fakeClient) Do(req *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: f.statusCode, - }, f.err +func (f *fakeClient) Do(_ *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: f.statusCode}, f.err } func TestNetworkErrors(t *testing.T) { diff --git a/plugins/inputs/intel_powerstat/intel_powerstat_test.go b/plugins/inputs/intel_powerstat/intel_powerstat_test.go index 13006de3c6e81..d65756595927e 100644 --- a/plugins/inputs/intel_powerstat/intel_powerstat_test.go +++ b/plugins/inputs/intel_powerstat/intel_powerstat_test.go @@ -196,9 +196,9 @@ func TestAddMetricsForSingleCoreNegative(t *testing.T) { func TestAddCPUFrequencyMetric(t *testing.T) { var acc testutil.Accumulator - cpuID := "0" - coreID := "2" - packageID := "1" + cpuID := "1" + coreID := "3" + packageID := "0" frequency := 1200000.2 power, _, _, msrMock := getPowerWithMockedServices() prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) diff --git a/plugins/inputs/intel_rdt/publisher.go b/plugins/inputs/intel_rdt/publisher.go index 5ca9890472b27..ca36e40525c12 100644 --- a/plugins/inputs/intel_rdt/publisher.go +++ b/plugins/inputs/intel_rdt/publisher.go @@ -18,7 +18,6 @@ type Publisher struct { BufferChanProcess chan processMeasurement BufferChanCores chan string errChan chan error - stopChan chan bool } func NewPublisher(acc telegraf.Accumulator, log telegraf.Logger, shortenedMetrics bool) Publisher { diff --git a/plugins/inputs/ipmi_sensor/connection_test.go b/plugins/inputs/ipmi_sensor/connection_test.go index 21d1957c95126..0f40464fbd83a 100644 --- a/plugins/inputs/ipmi_sensor/connection_test.go +++ b/plugins/inputs/ipmi_sensor/connection_test.go @@ -6,11 +6,6 @@ import ( "github.com/stretchr/testify/require" ) -type conTest struct { - Got string - Want *Connection -} - func TestNewConnection(t *testing.T) { testData := []struct { addr string diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index 422d2ab38471e..cb85d8fbc419b 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -227,7 +227,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking // it returns below mockData. -func TestHelperProcess(t *testing.T) { +func TestHelperProcess(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } @@ -546,7 +546,7 @@ func fakeExecCommandV2(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcessV2 -- chrony tracking // it returns below mockData. -func TestHelperProcessV2(t *testing.T) { +func TestHelperProcessV2(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index a4b34baaa4f1c..5844f0e7a8a69 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -295,18 +295,6 @@ func (j *Jenkins) gatherJobs(acc telegraf.Accumulator) { wg.Wait() } -// wrap the tcp request with doGet -// block tcp request if buffered channel is full -func (j *Jenkins) doGet(tcp func() error) error { - j.semaphore <- struct{}{} - if err := tcp(); err != nil { - <-j.semaphore - return err - } - <-j.semaphore - return nil -} - func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { if j.MaxSubJobDepth > 0 && jr.layer == j.MaxSubJobDepth { return nil @@ -451,7 +439,6 @@ type jobRequest struct { name string parents []string layer int - number int64 } func (jr jobRequest) combined() []string { diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index 3f05274eb11a6..5c1bc50aa2ae7 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -98,25 +98,8 @@ const validMultiValueJSON = ` } ]` -const validSingleValueJSON = ` -[ - { - "request":{ - "path":"used", - "mbean":"java.lang:type=Memory", - "attribute":"HeapMemoryUsage", - "type":"read" - }, - "value":209274376, - "timestamp":1446129256, - "status":200 - } -]` - const invalidJSON = "I don't think this is JSON" -const empty = "" - var Servers = []Server{{Name: "as1", Host: "127.0.0.1", Port: "8080"}} var HeapMetric = Metric{Name: "heap_memory_usage", Mbean: "java.lang:type=Memory", Attribute: "HeapMemoryUsage"} @@ -130,7 +113,7 @@ type jolokiaClientStub struct { statusCode int } -func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) { +func (c jolokiaClientStub) MakeRequest(_ *http.Request) (*http.Response, error) { resp := http.Response{} resp.StatusCode = c.statusCode resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) diff --git a/plugins/inputs/jolokia2/jolokia_test.go b/plugins/inputs/jolokia2/jolokia_test.go index 61c410c0b2067..4fe8b26290da6 100644 --- a/plugins/inputs/jolokia2/jolokia_test.go +++ b/plugins/inputs/jolokia2/jolokia_test.go @@ -74,7 +74,7 @@ func TestJolokia2_ScalarValues(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -234,7 +234,7 @@ func TestJolokia2_ObjectValues(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -322,7 +322,7 @@ func TestJolokia2_StatusCodes(t *testing.T) { "status": 500 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -372,7 +372,7 @@ func TestJolokia2_TagRenaming(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -465,7 +465,7 @@ func TestJolokia2_FieldRenaming(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -573,7 +573,7 @@ func TestJolokia2_MetricMbeanMatching(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -666,7 +666,7 @@ func TestJolokia2_MetricCompaction(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -727,7 +727,7 @@ func TestJolokia2_ProxyTargets(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) @@ -762,7 +762,7 @@ func TestFillFields(t *testing.T) { assert.Equal(t, map[string]interface{}{}, results) } -func setupServer(status int, resp string) *httptest.Server { +func setupServer(resp string) *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) //body, err := ioutil.ReadAll(r.Body) diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index e423588eed41f..ef8a1400b212b 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -102,7 +102,7 @@ func (m *OpenConfigTelemetry) Description() string { return "Read JTI OpenConfig Telemetry from listed sensors" } -func (m *OpenConfigTelemetry) Gather(acc telegraf.Accumulator) error { +func (m *OpenConfigTelemetry) Gather(_ telegraf.Accumulator) error { return nil } @@ -278,9 +278,12 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int { } // Subscribes and collects OpenConfig telemetry data from given server -func (m *OpenConfigTelemetry) collectData(ctx context.Context, - grpcServer string, grpcClientConn *grpc.ClientConn, - acc telegraf.Accumulator) error { +func (m *OpenConfigTelemetry) collectData( + ctx context.Context, + grpcServer string, + grpcClientConn *grpc.ClientConn, + acc telegraf.Accumulator, +) { c := telemetry.NewOpenConfigTelemetryClient(grpcClientConn) for _, sensor := range m.sensorsConfig { m.wg.Add(1) @@ -342,8 +345,6 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, } }(ctx, sensor) } - - return nil } func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go index 784b6a8c12526..d32866f2efbe6 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go @@ -63,19 +63,19 @@ func (s *openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.Subscripti return nil } -func (s *openConfigTelemetryServer) CancelTelemetrySubscription(ctx context.Context, req *telemetry.CancelSubscriptionRequest) (*telemetry.CancelSubscriptionReply, error) { +func (s *openConfigTelemetryServer) CancelTelemetrySubscription(_ context.Context, _ *telemetry.CancelSubscriptionRequest) (*telemetry.CancelSubscriptionReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetTelemetrySubscriptions(ctx context.Context, req *telemetry.GetSubscriptionsRequest) (*telemetry.GetSubscriptionsReply, error) { +func (s *openConfigTelemetryServer) GetTelemetrySubscriptions(_ context.Context, _ *telemetry.GetSubscriptionsRequest) (*telemetry.GetSubscriptionsReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetTelemetryOperationalState(ctx context.Context, req *telemetry.GetOperationalStateRequest) (*telemetry.GetOperationalStateReply, error) { +func (s *openConfigTelemetryServer) GetTelemetryOperationalState(_ context.Context, _ *telemetry.GetOperationalStateRequest) (*telemetry.GetOperationalStateReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetDataEncodings(ctx context.Context, req *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) { +func (s *openConfigTelemetryServer) GetDataEncodings(_ context.Context, _ *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) { return nil, nil } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 78feacdd30850..fe24f51724dad 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -107,7 +107,6 @@ const sampleConfig = ` const ( defaultMaxUndeliveredMessages = 1000 - defaultMaxMessageLen = 1000000 defaultConsumerGroup = "telegraf_metrics_consumers" reconnectDelay = 5 * time.Second ) @@ -256,7 +255,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { return nil } -func (k *KafkaConsumer) Gather(acc telegraf.Accumulator) error { +func (k *KafkaConsumer) Gather(_ telegraf.Accumulator) error { return nil } @@ -314,11 +313,11 @@ func (h *ConsumerGroupHandler) Setup(sarama.ConsumerGroupSession) error { } // Run processes any delivered metrics during the lifetime of the session. -func (h *ConsumerGroupHandler) run(ctx context.Context) error { +func (h *ConsumerGroupHandler) run(ctx context.Context) { for { select { case <-ctx.Done(): - return nil + return case track := <-h.acc.Delivered(): h.onDelivery(track) } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 90c362b9c01ea..f6aca25c7ed9a 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -23,7 +23,7 @@ type FakeConsumerGroup struct { errors chan error } -func (g *FakeConsumerGroup) Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error { +func (g *FakeConsumerGroup) Consume(_ context.Context, _ []string, handler sarama.ConsumerGroupHandler) error { g.handler = handler g.handler.Setup(nil) return nil @@ -213,15 +213,15 @@ func (s *FakeConsumerGroupSession) GenerationID() int32 { panic("not implemented") } -func (s *FakeConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { +func (s *FakeConsumerGroupSession) MarkOffset(_ string, _ int32, _ int64, _ string) { panic("not implemented") } -func (s *FakeConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { +func (s *FakeConsumerGroupSession) ResetOffset(_ string, _ int32, _ int64, _ string) { panic("not implemented") } -func (s *FakeConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { +func (s *FakeConsumerGroupSession) MarkMessage(_ *sarama.ConsumerMessage, _ string) { } func (s *FakeConsumerGroupSession) Context() context.Context { diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go index bc884a118c69d..8690b1637bac1 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go @@ -177,7 +177,7 @@ func (k *Kafka) Stop() { } } -func (k *Kafka) Gather(acc telegraf.Accumulator) error { +func (k *Kafka) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go index 31bea2210b741..976412a7196b5 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go @@ -78,8 +78,8 @@ func TestReadsMetricsFromKafka(t *testing.T) { } } -// Waits for the metric that was sent to the kafka broker to arrive at the kafka -// consumer +//nolint:unused // Used in skipped tests +// Waits for the metric that was sent to the kafka broker to arrive at the kafka consumer func waitForPoint(acc *testutil.Accumulator, t *testing.T) { // Give the kafka container up to 2 seconds to get the point to the consumer ticker := time.NewTicker(5 * time.Millisecond) diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 0a57955ce7f7b..bf63795f553b9 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -49,7 +49,6 @@ type ( cons *consumer.Consumer parser parsers.Parser cancel context.CancelFunc - ctx context.Context acc telegraf.TrackingAccumulator sem chan struct{} diff --git a/plugins/inputs/kube_inventory/client_test.go b/plugins/inputs/kube_inventory/client_test.go index 48874dca55209..622e35c65c57f 100644 --- a/plugins/inputs/kube_inventory/client_test.go +++ b/plugins/inputs/kube_inventory/client_test.go @@ -5,7 +5,6 @@ import ( "time" "github.com/influxdata/telegraf/plugins/common/tls" - "k8s.io/apimachinery/pkg/util/intstr" ) type mockHandler struct { @@ -20,21 +19,10 @@ func toInt32Ptr(i int32) *int32 { return &i } -func toInt64Ptr(i int64) *int64 { - return &i -} - func toBoolPtr(b bool) *bool { return &b } -func toIntStrPtrS(s string) *intstr.IntOrString { - return &intstr.IntOrString{StrVal: s} -} - -func toIntStrPtrI(i int32) *intstr.IntOrString { - return &intstr.IntOrString{IntVal: i} -} func TestNewClient(t *testing.T) { _, err := newClient("https://127.0.0.1:443/", "default", "abc123", time.Second, tls.ClientConfig{}) if err != nil { diff --git a/plugins/inputs/kube_inventory/daemonset.go b/plugins/inputs/kube_inventory/daemonset.go index b169ea16dbac6..c365d169cacdb 100644 --- a/plugins/inputs/kube_inventory/daemonset.go +++ b/plugins/inputs/kube_inventory/daemonset.go @@ -15,14 +15,11 @@ func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *Kubern return } for _, d := range list.Items { - if err = ki.gatherDaemonSet(d, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherDaemonSet(d, acc) } } -func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) { fields := map[string]interface{}{ "generation": d.Generation, "current_number_scheduled": d.Status.CurrentNumberScheduled, @@ -48,6 +45,4 @@ func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accu } acc.AddFields(daemonSetMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/daemonset_test.go b/plugins/inputs/kube_inventory/daemonset_test.go index dede3d9a534f1..f67707d2c3d21 100644 --- a/plugins/inputs/kube_inventory/daemonset_test.go +++ b/plugins/inputs/kube_inventory/daemonset_test.go @@ -108,10 +108,7 @@ func TestDaemonSet(t *testing.T) { ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { - err := ks.gatherDaemonSet(dset, acc) - if err != nil { - t.Errorf("Failed to gather daemonset - %s", err.Error()) - } + ks.gatherDaemonSet(dset, acc) } err := acc.FirstError() @@ -284,10 +281,7 @@ func TestDaemonSetSelectorFilter(t *testing.T) { ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { - err := ks.gatherDaemonSet(dset, acc) - if err != nil { - t.Errorf("Failed to gather daemonset - %s", err.Error()) - } + ks.gatherDaemonSet(dset, acc) } // Grab selector tags diff --git a/plugins/inputs/kube_inventory/deployment.go b/plugins/inputs/kube_inventory/deployment.go index 613f9dff82b72..510cc68cecaa7 100644 --- a/plugins/inputs/kube_inventory/deployment.go +++ b/plugins/inputs/kube_inventory/deployment.go @@ -14,14 +14,11 @@ func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *Kuber return } for _, d := range list.Items { - if err = ki.gatherDeployment(d, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherDeployment(d, acc) } } -func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) { fields := map[string]interface{}{ "replicas_available": d.Status.AvailableReplicas, "replicas_unavailable": d.Status.UnavailableReplicas, @@ -38,6 +35,4 @@ func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Ac } acc.AddFields(deploymentMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/deployment_test.go b/plugins/inputs/kube_inventory/deployment_test.go index bb5e9101eb42c..9b4c74c9ad856 100644 --- a/plugins/inputs/kube_inventory/deployment_test.go +++ b/plugins/inputs/kube_inventory/deployment_test.go @@ -114,10 +114,7 @@ func TestDeployment(t *testing.T) { ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { - err := ks.gatherDeployment(deployment, acc) - if err != nil { - t.Errorf("Failed to gather deployment - %s", err.Error()) - } + ks.gatherDeployment(deployment, acc) } err := acc.FirstError() @@ -299,10 +296,7 @@ func TestDeploymentSelectorFilter(t *testing.T) { ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { - err := ks.gatherDeployment(deployment, acc) - if err != nil { - t.Errorf("Failed to gather deployment - %s", err.Error()) - } + ks.gatherDeployment(deployment, acc) } // Grab selector tags diff --git a/plugins/inputs/kube_inventory/statefulset.go b/plugins/inputs/kube_inventory/statefulset.go index 22bc7c8bc0c75..22b235116b22c 100644 --- a/plugins/inputs/kube_inventory/statefulset.go +++ b/plugins/inputs/kube_inventory/statefulset.go @@ -15,14 +15,11 @@ func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *Kube return } for _, s := range list.Items { - if err = ki.gatherStatefulSet(s, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherStatefulSet(s, acc) } } -func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) { status := s.Status fields := map[string]interface{}{ "created": s.GetCreationTimestamp().UnixNano(), @@ -45,6 +42,4 @@ func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf. } acc.AddFields(statefulSetMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/statefulset_test.go b/plugins/inputs/kube_inventory/statefulset_test.go index dee8b08b5b887..a6d703c205acf 100644 --- a/plugins/inputs/kube_inventory/statefulset_test.go +++ b/plugins/inputs/kube_inventory/statefulset_test.go @@ -102,10 +102,7 @@ func TestStatefulSet(t *testing.T) { ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { - err := ks.gatherStatefulSet(ss, acc) - if err != nil { - t.Errorf("Failed to gather ss - %s", err.Error()) - } + ks.gatherStatefulSet(ss, acc) } err := acc.FirstError() @@ -273,10 +270,7 @@ func TestStatefulSetSelectorFilter(t *testing.T) { ks.createSelectorFilters() acc := new(testutil.Accumulator) for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { - err := ks.gatherStatefulSet(ss, acc) - if err != nil { - t.Errorf("Failed to gather ss - %s", err.Error()) - } + ks.gatherStatefulSet(ss, acc) } // Grab selector tags diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index 2516d084c3285..e4ebe268755ec 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -5,7 +5,6 @@ import ( "fmt" "io/ioutil" "net/http" - "net/url" "strings" "time" @@ -65,7 +64,6 @@ var sampleConfig = ` ` const ( - summaryEndpoint = `%s/stats/summary` defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token" ) @@ -117,15 +115,6 @@ func (k *Kubernetes) Gather(acc telegraf.Accumulator) error { return nil } -func buildURL(endpoint string, base string) (*url.URL, error) { - u := fmt.Sprintf(endpoint, base) - addr, err := url.Parse(u) - if err != nil { - return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err) - } - return addr, nil -} - func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error { summaryMetrics := &SummaryMetrics{} err := k.LoadJSON(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics) @@ -139,7 +128,7 @@ func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) err } buildSystemContainerMetrics(summaryMetrics, acc) buildNodeMetrics(summaryMetrics, acc) - buildPodMetrics(baseURL, summaryMetrics, podInfos, k.labelFilter, acc) + buildPodMetrics(summaryMetrics, podInfos, k.labelFilter, acc) return nil } @@ -243,7 +232,7 @@ func (k *Kubernetes) LoadJSON(url string, v interface{}) error { return nil } -func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, acc telegraf.Accumulator) { +func buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, acc telegraf.Accumulator) { for _, pod := range summaryMetrics.Pods { for _, container := range pod.Containers { tags := map[string]string{ diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go index 6a833175fde9f..86bb93a8f754b 100644 --- a/plugins/inputs/lanz/lanz.go +++ b/plugins/inputs/lanz/lanz.go @@ -43,7 +43,7 @@ func (l *Lanz) Description() string { return "Read metrics off Arista LANZ, via socket" } -func (l *Lanz) Gather(acc telegraf.Accumulator) error { +func (l *Lanz) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index fc38b467e2505..5fec865eaa8d7 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -143,7 +143,7 @@ func (l *LogParserPlugin) Init() error { } // Gather is the primary function to collect the metrics for the plugin -func (l *LogParserPlugin) Gather(acc telegraf.Accumulator) error { +func (l *LogParserPlugin) Gather(_ telegraf.Accumulator) error { l.Lock() defer l.Unlock() diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index ecaafb50f86b3..5327386339f84 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -54,7 +54,6 @@ type mapping struct { inProc string // What to look for at the start of a line in /proc/fs/lustre/* field uint32 // which field to extract from that line reportAs string // What measurement name to use - tag string // Additional tag to add for this metric } var wantedOstFields = []*mapping{ diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 0b3c7d26fa5e3..7f3d08b118176 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -532,49 +532,6 @@ type TaskStats struct { Statistics map[string]interface{} `json:"statistics"` } -func (m *Mesos) gatherSlaveTaskMetrics(u *url.URL, acc telegraf.Accumulator) error { - var metrics []TaskStats - - tags := map[string]string{ - "server": u.Hostname(), - "url": urlTag(u), - } - - resp, err := m.client.Get(withPath(u, "/monitor/statistics").String()) - - if err != nil { - return err - } - - data, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return err - } - - if err = json.Unmarshal([]byte(data), &metrics); err != nil { - return errors.New("Error decoding JSON response") - } - - for _, task := range metrics { - tags["framework_id"] = task.FrameworkID - - jf := jsonparser.JSONFlattener{} - err = jf.FlattenJSON("", task.Statistics) - - if err != nil { - return err - } - - timestamp := time.Unix(int64(jf.Fields["timestamp"].(float64)), 0) - jf.Fields["executor_id"] = task.ExecutorID - - acc.AddFields("mesos_tasks", jf.Fields, tags, timestamp) - } - - return nil -} - func withPath(u *url.URL, path string) *url.URL { c := *u c.Path = path diff --git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go index bb829f6903581..641a8ae75db9f 100644 --- a/plugins/inputs/minecraft/client.go +++ b/plugins/inputs/minecraft/client.go @@ -25,12 +25,12 @@ type Connector interface { Connect() (Connection, error) } -func newConnector(hostname, port, password string) (*connector, error) { +func newConnector(hostname, port, password string) *connector { return &connector{ hostname: hostname, port: port, password: password, - }, nil + } } type connector struct { @@ -58,8 +58,8 @@ func (c *connector) Connect() (Connection, error) { return &connection{rcon: rcon}, nil } -func newClient(connector Connector) (*client, error) { - return &client{connector: connector}, nil +func newClient(connector Connector) *client { + return &client{connector: connector} } type client struct { @@ -90,13 +90,7 @@ func (c *client) Players() ([]string, error) { return nil, err } - players, err := parsePlayers(resp) - if err != nil { - c.conn = nil - return nil, err - } - - return players, nil + return parsePlayers(resp), nil } func (c *client) Scores(player string) ([]Score, error) { @@ -113,13 +107,7 @@ func (c *client) Scores(player string) ([]Score, error) { return nil, err } - scores, err := parseScores(resp) - if err != nil { - c.conn = nil - return nil, err - } - - return scores, nil + return parseScores(resp), nil } type connection struct { @@ -134,10 +122,10 @@ func (c *connection) Execute(command string) (string, error) { return packet.Body, nil } -func parsePlayers(input string) ([]string, error) { +func parsePlayers(input string) []string { parts := strings.SplitAfterN(input, ":", 2) if len(parts) != 2 { - return []string{}, nil + return []string{} } names := strings.Split(parts[1], ",") @@ -158,7 +146,7 @@ func parsePlayers(input string) ([]string, error) { } players = append(players, name) } - return players, nil + return players } // Score is an individual tracked scoreboard stat. @@ -167,9 +155,9 @@ type Score struct { Value int64 } -func parseScores(input string) ([]Score, error) { +func parseScores(input string) []Score { if strings.Contains(input, "has no scores") { - return []Score{}, nil + return []Score{} } // Detect Minecraft <= 1.12 @@ -200,5 +188,6 @@ func parseScores(input string) ([]Score, error) { } scores = append(scores, score) } - return scores, nil + + return scores } diff --git a/plugins/inputs/minecraft/client_test.go b/plugins/inputs/minecraft/client_test.go index 4a5ceb9db9cb7..59db9bf34a8d6 100644 --- a/plugins/inputs/minecraft/client_test.go +++ b/plugins/inputs/minecraft/client_test.go @@ -98,9 +98,7 @@ func TestClient_Player(t *testing.T) { conn: &MockConnection{commands: tt.commands}, } - client, err := newClient(connector) - require.NoError(t, err) - + client := newClient(connector) actual, err := client.Players() require.NoError(t, err) @@ -183,9 +181,7 @@ func TestClient_Scores(t *testing.T) { conn: &MockConnection{commands: tt.commands}, } - client, err := newClient(connector) - require.NoError(t, err) - + client := newClient(connector) actual, err := client.Scores(tt.player) require.NoError(t, err) diff --git a/plugins/inputs/minecraft/minecraft.go b/plugins/inputs/minecraft/minecraft.go index 939cc2c42a7a3..e953b3c2b5d7f 100644 --- a/plugins/inputs/minecraft/minecraft.go +++ b/plugins/inputs/minecraft/minecraft.go @@ -50,17 +50,8 @@ func (s *Minecraft) SampleConfig() string { func (s *Minecraft) Gather(acc telegraf.Accumulator) error { if s.client == nil { - connector, err := newConnector(s.Server, s.Port, s.Password) - if err != nil { - return err - } - - client, err := newClient(connector) - if err != nil { - return err - } - - s.client = client + connector := newConnector(s.Server, s.Port, s.Password) + s.client = newClient(connector) } players, err := s.client.Players() diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index 1a2970f1e2338..0381998d13ba0 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -17,7 +17,7 @@ import ( type transportMock struct { } -func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { +func (t *transportMock) RoundTrip(_ *http.Request) (*http.Response, error) { errorString := "Get http://127.0.0.1:2812/_status?format=xml: " + "read tcp 192.168.10.2:55610->127.0.0.1:2812: " + "read: connection reset by peer" diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 006aaac2538a2..d5f5616b11e27 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -248,14 +248,14 @@ func (m *MQTTConsumer) connect() error { return nil } -func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) { +func (m *MQTTConsumer) onConnectionLost(_ mqtt.Client, err error) { m.acc.AddError(fmt.Errorf("connection lost: %v", err)) m.Log.Debugf("Disconnected %v", m.Servers) m.state = Disconnected return } -func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) { +func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) { for { select { case track := <-m.acc.Delivered(): @@ -310,7 +310,7 @@ func (m *MQTTConsumer) Stop() { m.cancel() } -func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error { +func (m *MQTTConsumer) Gather(_ telegraf.Accumulator) error { if m.state == Disconnected { m.state = Connecting m.Log.Debugf("Connecting %v", m.Servers) diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index efa921cb1dd49..a9b85c108ab65 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -49,15 +49,15 @@ type FakeParser struct { // FakeParser satisfies parsers.Parser var _ parsers.Parser = &FakeParser{} -func (p *FakeParser) Parse(buf []byte) ([]telegraf.Metric, error) { +func (p *FakeParser) Parse(_ []byte) ([]telegraf.Metric, error) { panic("not implemented") } -func (p *FakeParser) ParseLine(line string) (telegraf.Metric, error) { +func (p *FakeParser) ParseLine(_ string) (telegraf.Metric, error) { panic("not implemented") } -func (p *FakeParser) SetDefaultTags(tags map[string]string) { +func (p *FakeParser) SetDefaultTags(_ map[string]string) { panic("not implemented") } diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 057c77ee795c4..70c3287d12299 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -264,7 +264,7 @@ func (n *natsConsumer) Stop() { n.clean() } -func (n *natsConsumer) Gather(acc telegraf.Accumulator) error { +func (n *natsConsumer) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/net_response/net_response_test.go b/plugins/inputs/net_response/net_response_test.go index 8f01d687927f3..a64d553164a45 100644 --- a/plugins/inputs/net_response/net_response_test.go +++ b/plugins/inputs/net_response/net_response_test.go @@ -125,7 +125,7 @@ func TestTCPOK1(t *testing.T) { } // Start TCP server wg.Add(1) - go TCPServer(t, &wg) + go TCPServer(&wg) wg.Wait() // Connect wg.Add(1) @@ -169,7 +169,7 @@ func TestTCPOK2(t *testing.T) { } // Start TCP server wg.Add(1) - go TCPServer(t, &wg) + go TCPServer(&wg) wg.Wait() // Connect wg.Add(1) @@ -247,7 +247,7 @@ func TestUDPOK1(t *testing.T) { } // Start UDP server wg.Add(1) - go UDPServer(t, &wg) + go UDPServer(&wg) wg.Wait() // Connect wg.Add(1) @@ -277,7 +277,7 @@ func TestUDPOK1(t *testing.T) { wg.Wait() } -func UDPServer(t *testing.T, wg *sync.WaitGroup) { +func UDPServer(wg *sync.WaitGroup) { udpAddr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:2004") conn, _ := net.ListenUDP("udp", udpAddr) wg.Done() @@ -288,7 +288,7 @@ func UDPServer(t *testing.T, wg *sync.WaitGroup) { wg.Done() } -func TCPServer(t *testing.T, wg *sync.WaitGroup) { +func TCPServer(wg *sync.WaitGroup) { tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2004") tcpServer, _ := net.ListenTCP("tcp", tcpAddr) wg.Done() diff --git a/plugins/inputs/nfsclient/nfsclient.go b/plugins/inputs/nfsclient/nfsclient.go index e4005b57685a9..c2823dfa598d4 100644 --- a/plugins/inputs/nfsclient/nfsclient.go +++ b/plugins/inputs/nfsclient/nfsclient.go @@ -93,13 +93,13 @@ func convertToInt64(line []string) []int64 { return nline } -func (n *NFSClient) parseStat(mountpoint string, export string, version string, line []string, fullstat bool, acc telegraf.Accumulator) error { +func (n *NFSClient) parseStat(mountpoint string, export string, version string, line []string, acc telegraf.Accumulator) { tags := map[string]string{"mountpoint": mountpoint, "serverexport": export} nline := convertToInt64(line) if len(nline) == 0 { n.Log.Warnf("Parsing Stat line with one field: %s\n", line) - return nil + return } first := strings.Replace(line[0], ":", "", 1) @@ -191,7 +191,7 @@ func (n *NFSClient) parseStat(mountpoint string, export string, version string, acc.AddFields("nfsstat", fields, tags) } - if fullstat { + if n.Fullstat { switch first { case "events": if len(nline) >= len(eventsFields) { @@ -240,11 +240,9 @@ func (n *NFSClient) parseStat(mountpoint string, export string, version string, } } } - - return nil } -func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator) error { +func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator) { var mount string var version string var export string @@ -252,10 +250,9 @@ func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator for scanner.Scan() { line := strings.Fields(scanner.Text()) + lineLength := len(line) - line_len := len(line) - - if line_len == 0 { + if lineLength == 0 { continue } @@ -263,10 +260,10 @@ func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator // This denotes a new mount has been found, so set // mount and export, and stop skipping (for now) - if line_len > 4 && choice.Contains("fstype", line) && (choice.Contains("nfs", line) || choice.Contains("nfs4", line)) { + if lineLength > 4 && choice.Contains("fstype", line) && (choice.Contains("nfs", line) || choice.Contains("nfs4", line)) { mount = line[4] export = line[1] - } else if line_len > 5 && (choice.Contains("(nfs)", line) || choice.Contains("(nfs4)", line)) { + } else if lineLength > 5 && (choice.Contains("(nfs)", line) || choice.Contains("(nfs4)", line)) { version = strings.Split(line[5], "/")[1] } @@ -296,10 +293,9 @@ func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator } if !skip { - n.parseStat(mount, export, version, line, n.Fullstat, acc) + n.parseStat(mount, export, version, line, acc) } } - return nil } func (n *NFSClient) getMountStatsPath() string { diff --git a/plugins/inputs/nfsclient/nfsclient_test.go b/plugins/inputs/nfsclient/nfsclient_test.go index 72813cc10be4e..4dab7b320f0c5 100644 --- a/plugins/inputs/nfsclient/nfsclient_test.go +++ b/plugins/inputs/nfsclient/nfsclient_test.go @@ -20,11 +20,11 @@ func getMountStatsPath() string { func TestNFSClientParsev3(t *testing.T) { var acc testutil.Accumulator - nfsclient := NFSClient{} + nfsclient := NFSClient{Fullstat: true} nfsclient.nfs3Ops = map[string]bool{"READLINK": true, "GETATTR": false} nfsclient.nfs4Ops = map[string]bool{"READLINK": true, "GETATTR": false} data := strings.Fields(" READLINK: 500 501 502 503 504 505 506 507") - nfsclient.parseStat("1.2.3.4:/storage/NFS", "/A", "3", data, true, &acc) + nfsclient.parseStat("1.2.3.4:/storage/NFS", "/A", "3", data, &acc) fields_ops := map[string]interface{}{ "ops": int64(500), @@ -42,11 +42,11 @@ func TestNFSClientParsev3(t *testing.T) { func TestNFSClientParsev4(t *testing.T) { var acc testutil.Accumulator - nfsclient := NFSClient{} + nfsclient := NFSClient{Fullstat: true} nfsclient.nfs3Ops = map[string]bool{"DESTROY_SESSION": true, "GETATTR": false} nfsclient.nfs4Ops = map[string]bool{"DESTROY_SESSION": true, "GETATTR": false} data := strings.Fields(" DESTROY_SESSION: 500 501 502 503 504 505 506 507") - nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, true, &acc) + nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, &acc) fields_ops := map[string]interface{}{ "ops": int64(500), diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go index 5e05e9f0d4ac9..9ae9e43c29f7a 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -520,7 +520,7 @@ const streamServerZonesPayload = ` ` func TestGatherProcessesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, processesPath, defaultAPIVersion, processesPayload) + ts, n := prepareEndpoint(t, processesPath, processesPayload) defer ts.Close() var acc testutil.Accumulator @@ -541,7 +541,7 @@ func TestGatherProcessesMetrics(t *testing.T) { } func TestGatherConnectionsMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, connectionsPath, defaultAPIVersion, connectionsPayload) + ts, n := prepareEndpoint(t, connectionsPath, connectionsPayload) defer ts.Close() var acc testutil.Accumulator @@ -565,7 +565,7 @@ func TestGatherConnectionsMetrics(t *testing.T) { } func TestGatherSslMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, sslPath, defaultAPIVersion, sslPayload) + ts, n := prepareEndpoint(t, sslPath, sslPayload) defer ts.Close() var acc testutil.Accumulator @@ -588,7 +588,7 @@ func TestGatherSslMetrics(t *testing.T) { } func TestGatherHttpRequestsMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpRequestsPath, defaultAPIVersion, httpRequestsPayload) + ts, n := prepareEndpoint(t, httpRequestsPath, httpRequestsPayload) defer ts.Close() var acc testutil.Accumulator @@ -610,7 +610,7 @@ func TestGatherHttpRequestsMetrics(t *testing.T) { } func TestGatherHttpServerZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpServerZonesPath, defaultAPIVersion, httpServerZonesPayload) + ts, n := prepareEndpoint(t, httpServerZonesPath, httpServerZonesPayload) defer ts.Close() var acc testutil.Accumulator @@ -664,7 +664,7 @@ func TestGatherHttpServerZonesMetrics(t *testing.T) { } func TestGatherHttpLocationZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpLocationZonesPath, defaultAPIVersion, httpLocationZonesPayload) + ts, n := prepareEndpoint(t, httpLocationZonesPath, httpLocationZonesPayload) defer ts.Close() var acc testutil.Accumulator @@ -716,7 +716,7 @@ func TestGatherHttpLocationZonesMetrics(t *testing.T) { } func TestGatherHttpUpstreamsMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultAPIVersion, httpUpstreamsPayload) + ts, n := prepareEndpoint(t, httpUpstreamsPath, httpUpstreamsPayload) defer ts.Close() var acc testutil.Accumulator @@ -888,7 +888,7 @@ func TestGatherHttpUpstreamsMetrics(t *testing.T) { } func TestGatherHttpCachesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpCachesPath, defaultAPIVersion, httpCachesPayload) + ts, n := prepareEndpoint(t, httpCachesPath, httpCachesPayload) defer ts.Close() var acc testutil.Accumulator @@ -966,7 +966,7 @@ func TestGatherHttpCachesMetrics(t *testing.T) { } func TestGatherResolverZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, resolverZonesPath, defaultAPIVersion, resolverZonesPayload) + ts, n := prepareEndpoint(t, resolverZonesPath, resolverZonesPayload) defer ts.Close() var acc testutil.Accumulator @@ -1020,7 +1020,7 @@ func TestGatherResolverZonesMetrics(t *testing.T) { } func TestGatherStreamUpstreams(t *testing.T) { - ts, n := prepareEndpoint(t, streamUpstreamsPath, defaultAPIVersion, streamUpstreamsPayload) + ts, n := prepareEndpoint(t, streamUpstreamsPath, streamUpstreamsPayload) defer ts.Close() var acc testutil.Accumulator @@ -1162,7 +1162,7 @@ func TestGatherStreamUpstreams(t *testing.T) { } func TestGatherStreamServerZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, streamServerZonesPath, defaultAPIVersion, streamServerZonesPayload) + ts, n := prepareEndpoint(t, streamServerZonesPath, streamServerZonesPayload) defer ts.Close() var acc testutil.Accumulator @@ -1305,11 +1305,11 @@ func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) { return addr, host, port } -func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusAPI) { +func prepareEndpoint(t *testing.T, path string, payload string) (*httptest.Server, *NginxPlusAPI) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - if r.URL.Path == fmt.Sprintf("/api/%d/%s", apiVersion, path) { + if r.URL.Path == fmt.Sprintf("/api/%d/%s", defaultAPIVersion, path) { rsp = payload w.Header()["Content-Type"] = []string{"application/json"} } else { @@ -1321,7 +1321,7 @@ func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string n := &NginxPlusAPI{ Urls: []string{fmt.Sprintf("%s/api", ts.URL)}, - APIVersion: apiVersion, + APIVersion: defaultAPIVersion, } client, err := n.createHTTPClient() diff --git a/plugins/inputs/nsd/nsd.go b/plugins/inputs/nsd/nsd.go index 37a7c482020b2..ef6c20a9aad4c 100644 --- a/plugins/inputs/nsd/nsd.go +++ b/plugins/inputs/nsd/nsd.go @@ -11,7 +11,6 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -26,8 +25,7 @@ type NSD struct { Server string ConfigFile string - filter filter.Filter - run runner + run runner } var defaultBinary = "/usr/sbin/nsd-control" diff --git a/plugins/inputs/nsd/nsd_test.go b/plugins/inputs/nsd/nsd_test.go index fbe66ca9e3bdb..67ea6863c5208 100644 --- a/plugins/inputs/nsd/nsd_test.go +++ b/plugins/inputs/nsd/nsd_test.go @@ -3,16 +3,14 @@ package nsd import ( "bytes" "testing" - "time" + + "github.com/stretchr/testify/assert" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) -var TestTimeout = internal.Duration{Duration: time.Second} - -func NSDControl(output string, Timeout internal.Duration, useSudo bool, Server string, ConfigFile string) func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) { +func NSDControl(output string) func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) { return func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } @@ -21,7 +19,7 @@ func NSDControl(output string, Timeout internal.Duration, useSudo bool, Server s func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &NSD{ - run: NSDControl(fullOutput, TestTimeout, true, "", ""), + run: NSDControl(fullOutput), } err := v.Gather(acc) diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index 2c25cce7d8114..1b731a07b3fa0 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -21,7 +21,7 @@ type logger struct { log telegraf.Logger } -func (l *logger) Output(calldepth int, s string) error { +func (l *logger) Output(_ int, s string) error { l.log.Debug(s) return nil } @@ -179,7 +179,7 @@ func (n *NSQConsumer) Stop() { } // Gather is a noop -func (n *NSQConsumer) Gather(acc telegraf.Accumulator) error { +func (n *NSQConsumer) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index 4b25a44c0ab7d..5bc2bc85a3136 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -68,68 +68,47 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { } // collect netstat data - err = ns.gatherNetstat(netstat, acc) - if err != nil { - return err - } + ns.gatherNetstat(netstat, acc) // collect SNMP data snmp, err := ioutil.ReadFile(ns.ProcNetSNMP) if err != nil { return err } - err = ns.gatherSNMP(snmp, acc) - if err != nil { - return err - } + ns.gatherSNMP(snmp, acc) // collect SNMP6 data, if SNMP6 directory exists (IPv6 enabled) snmp6, err := ioutil.ReadFile(ns.ProcNetSNMP6) if err == nil { - err = ns.gatherSNMP6(snmp6, acc) - if err != nil { - return err - } + ns.gatherSNMP6(snmp6, acc) } else if !os.IsNotExist(err) { return err } return nil } -func (ns *Nstat) gatherNetstat(data []byte, acc telegraf.Accumulator) error { - metrics, err := loadUglyTable(data, ns.DumpZeros) - if err != nil { - return err - } +func (ns *Nstat) gatherNetstat(data []byte, acc telegraf.Accumulator) { + metrics := ns.loadUglyTable(data) tags := map[string]string{ "name": "netstat", } acc.AddFields("nstat", metrics, tags) - return nil } -func (ns *Nstat) gatherSNMP(data []byte, acc telegraf.Accumulator) error { - metrics, err := loadUglyTable(data, ns.DumpZeros) - if err != nil { - return err - } +func (ns *Nstat) gatherSNMP(data []byte, acc telegraf.Accumulator) { + metrics := ns.loadUglyTable(data) tags := map[string]string{ "name": "snmp", } acc.AddFields("nstat", metrics, tags) - return nil } -func (ns *Nstat) gatherSNMP6(data []byte, acc telegraf.Accumulator) error { - metrics, err := loadGoodTable(data, ns.DumpZeros) - if err != nil { - return err - } +func (ns *Nstat) gatherSNMP6(data []byte, acc telegraf.Accumulator) { + metrics := ns.loadGoodTable(data) tags := map[string]string{ "name": "snmp6", } acc.AddFields("nstat", metrics, tags) - return nil } // loadPaths can be used to read paths firstly from config @@ -148,7 +127,7 @@ func (ns *Nstat) loadPaths() { // loadGoodTable can be used to parse string heap that // headers and values are arranged in right order -func loadGoodTable(table []byte, dumpZeros bool) (map[string]interface{}, error) { +func (ns *Nstat) loadGoodTable(table []byte) map[string]interface{} { entries := map[string]interface{}{} fields := bytes.Fields(table) var value int64 @@ -158,7 +137,7 @@ func loadGoodTable(table []byte, dumpZeros bool) (map[string]interface{}, error) for i := 0; i < len(fields); i = i + 2 { // counter is zero if bytes.Equal(fields[i+1], zeroByte) { - if !dumpZeros { + if !ns.DumpZeros { continue } else { entries[string(fields[i])] = int64(0) @@ -171,12 +150,12 @@ func loadGoodTable(table []byte, dumpZeros bool) (map[string]interface{}, error) entries[string(fields[i])] = value } } - return entries, nil + return entries } // loadUglyTable can be used to parse string heap that // the headers and values are splitted with a newline -func loadUglyTable(table []byte, dumpZeros bool) (map[string]interface{}, error) { +func (ns *Nstat) loadUglyTable(table []byte) map[string]interface{} { entries := map[string]interface{}{} // split the lines by newline lines := bytes.Split(table, newLineByte) @@ -196,7 +175,7 @@ func loadUglyTable(table []byte, dumpZeros bool) (map[string]interface{}, error) for j := 1; j < len(headers); j++ { // counter is zero if bytes.Equal(metrics[j], zeroByte) { - if !dumpZeros { + if !ns.DumpZeros { continue } else { entries[string(append(prefix, headers[j]...))] = int64(0) @@ -210,7 +189,7 @@ func loadUglyTable(table []byte, dumpZeros bool) (map[string]interface{}, error) } } } - return entries, nil + return entries } // proc can be used to read file paths from env diff --git a/plugins/inputs/nstat/nstat_test.go b/plugins/inputs/nstat/nstat_test.go index 7f4c09ce4d4be..95b64777b08af 100644 --- a/plugins/inputs/nstat/nstat_test.go +++ b/plugins/inputs/nstat/nstat_test.go @@ -12,11 +12,8 @@ func TestLoadUglyTable(t *testing.T) { "IpExtInCEPkts": int64(2660494435), } - got, err := loadUglyTable([]byte(uglyStr), true) - if err != nil { - t.Fatal(err) - } - + n := Nstat{DumpZeros: true} + got := n.loadUglyTable([]byte(uglyStr)) if len(got) == 0 { t.Fatalf("want %+v, got %+v", parsed, got) } @@ -40,10 +37,8 @@ func TestLoadGoodTable(t *testing.T) { "Ip6InDelivers": int64(62), "Ip6InMcastOctets": int64(1242966), } - got, err := loadGoodTable([]byte(goodStr), true) - if err != nil { - t.Fatal(err) - } + n := Nstat{DumpZeros: true} + got := n.loadGoodTable([]byte(goodStr)) if len(got) == 0 { t.Fatalf("want %+v, got %+v", parsed, got) } diff --git a/plugins/inputs/openntpd/openntpd.go b/plugins/inputs/openntpd/openntpd.go index c3fc3e92e74fa..aedff242e9f07 100644 --- a/plugins/inputs/openntpd/openntpd.go +++ b/plugins/inputs/openntpd/openntpd.go @@ -10,16 +10,10 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -// Mapping of ntpctl header names to tag keys -var tagHeaders = map[string]string{ - "st": "stratum", -} - // Mapping of the ntpctl tag key to the index in the command output var tagI = map[string]int{ "stratum": 2, @@ -48,8 +42,7 @@ type Openntpd struct { Timeout internal.Duration UseSudo bool - filter filter.Filter - run runner + run runner } var defaultBinary = "/usr/sbin/ntpctl" diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go index d629949a533c4..f9823e355f69a 100644 --- a/plugins/inputs/openntpd/openntpd_test.go +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -3,16 +3,14 @@ package openntpd import ( "bytes" "testing" - "time" + + "github.com/stretchr/testify/assert" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) -var TestTimeout = internal.Duration{Duration: time.Second} - -func OpenntpdCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) { +func OpenntpdCTL(output string) func(string, internal.Duration, bool) (*bytes.Buffer, error) { return func(string, internal.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } @@ -21,7 +19,7 @@ func OpenntpdCTL(output string, Timeout internal.Duration, useSudo bool) func(st func TestParseSimpleOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutput, TestTimeout, false), + run: OpenntpdCTL(simpleOutput), } err := v.Gather(acc) @@ -52,7 +50,7 @@ func TestParseSimpleOutput(t *testing.T) { func TestParseSimpleOutputwithStatePrefix(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputwithStatePrefix, TestTimeout, false), + run: OpenntpdCTL(simpleOutputwithStatePrefix), } err := v.Gather(acc) @@ -84,7 +82,7 @@ func TestParseSimpleOutputwithStatePrefix(t *testing.T) { func TestParseSimpleOutputInvalidPeer(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputInvalidPeer, TestTimeout, false), + run: OpenntpdCTL(simpleOutputInvalidPeer), } err := v.Gather(acc) @@ -112,7 +110,7 @@ func TestParseSimpleOutputInvalidPeer(t *testing.T) { func TestParseSimpleOutputServersDNSError(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputServersDNSError, TestTimeout, false), + run: OpenntpdCTL(simpleOutputServersDNSError), } err := v.Gather(acc) @@ -154,7 +152,7 @@ func TestParseSimpleOutputServersDNSError(t *testing.T) { func TestParseSimpleOutputServerDNSError(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputServerDNSError, TestTimeout, false), + run: OpenntpdCTL(simpleOutputServerDNSError), } err := v.Gather(acc) @@ -182,7 +180,7 @@ func TestParseSimpleOutputServerDNSError(t *testing.T) { func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(fullOutput, TestTimeout, false), + run: OpenntpdCTL(fullOutput), } err := v.Gather(acc) diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index cea266cc81991..47850db09f012 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -23,8 +23,7 @@ type Opensmtpd struct { Timeout internal.Duration UseSudo bool - filter filter.Filter - run runner + run runner } var defaultBinary = "/usr/sbin/smtpctl" diff --git a/plugins/inputs/opensmtpd/opensmtpd_test.go b/plugins/inputs/opensmtpd/opensmtpd_test.go index 2a3f4cdcfb970..4ae3eb9868d40 100644 --- a/plugins/inputs/opensmtpd/opensmtpd_test.go +++ b/plugins/inputs/opensmtpd/opensmtpd_test.go @@ -2,17 +2,13 @@ package opensmtpd import ( "bytes" - "testing" - "time" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "testing" ) -var TestTimeout = internal.Duration{Duration: time.Second} - -func SMTPCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) { +func SMTPCTL(output string) func(string, internal.Duration, bool) (*bytes.Buffer, error) { return func(string, internal.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } @@ -21,7 +17,7 @@ func SMTPCTL(output string, Timeout internal.Duration, useSudo bool) func(string func TestFilterSomeStats(t *testing.T) { acc := &testutil.Accumulator{} v := &Opensmtpd{ - run: SMTPCTL(fullOutput, TestTimeout, false), + run: SMTPCTL(fullOutput), } err := v.Gather(acc) diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index 688f97782b027..426d412d09114 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -131,7 +131,7 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { return nil } -func (n *OpenWeatherMap) createHTTPClient() (*http.Client, error) { +func (n *OpenWeatherMap) createHTTPClient() *http.Client { if n.ResponseTimeout.Duration < time.Second { n.ResponseTimeout.Duration = defaultResponseTimeout } @@ -141,7 +141,7 @@ func (n *OpenWeatherMap) createHTTPClient() (*http.Client, error) { Timeout: n.ResponseTimeout.Duration, } - return client, nil + return client } func (n *OpenWeatherMap) gatherURL(addr string) (*Status, error) { @@ -318,10 +318,7 @@ func (n *OpenWeatherMap) Init() error { // Create an HTTP client that is re-used for each // collection interval - n.client, err = n.createHTTPClient() - if err != nil { - return err - } + n.client = n.createHTTPClient() switch n.Units { case "imperial", "standard", "metric": diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index 0b8c8c16acd02..17cdaea6966d3 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -61,7 +61,7 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { } for rows.Next() { - tags, columnMap, err := p.accRow(rows, acc, columns) + tags, columnMap, err := p.accRow(rows, columns) if err != nil { return err @@ -111,7 +111,7 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { } for poolRows.Next() { - tags, columnMap, err := p.accRow(poolRows, acc, columns) + tags, columnMap, err := p.accRow(poolRows, columns) if err != nil { return err } @@ -145,7 +145,7 @@ type scanner interface { Scan(dest ...interface{}) error } -func (p *PgBouncer) accRow(row scanner, acc telegraf.Accumulator, columns []string) (map[string]string, +func (p *PgBouncer) accRow(row scanner, columns []string) (map[string]string, map[string]*interface{}, error) { var columnVars []interface{} var dbname bytes.Buffer diff --git a/plugins/inputs/phpfpm/fcgi.go b/plugins/inputs/phpfpm/fcgi.go index 551164d15c490..b3ee3f475248b 100644 --- a/plugins/inputs/phpfpm/fcgi.go +++ b/plugins/inputs/phpfpm/fcgi.go @@ -45,12 +45,14 @@ const ( maxPad = 255 ) +//nolint:varcheck // For having proper order const ( roleResponder = iota + 1 // only Responders are implemented. roleAuthorizer roleFilter ) +//nolint:varcheck // For having proper order const ( statusRequestComplete = iota statusCantMultiplex @@ -58,8 +60,6 @@ const ( statusUnknownRole ) -const headerLen = 8 - type header struct { Version uint8 Type recType @@ -72,7 +72,7 @@ type header struct { type beginRequest struct { role uint16 flags uint8 - reserved [5]uint8 + reserved [5]uint8 //nolint:unused // Memory reservation } func (br *beginRequest) read(content []byte) error { diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go index 7060955e0a10f..a7234225806cc 100644 --- a/plugins/inputs/phpfpm/fcgi_test.go +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -13,6 +13,8 @@ import ( "testing" ) +const requestID uint16 = 1 + var sizeTests = []struct { size uint32 bytes []byte @@ -124,7 +126,7 @@ func (c *writeOnlyConn) Write(p []byte) (int, error) { return len(p), nil } -func (c *writeOnlyConn) Read(p []byte) (int, error) { +func (c *writeOnlyConn) Read(_ []byte) (int, error) { return 0, errors.New("conn is write-only") } @@ -164,7 +166,6 @@ func nameValuePair11(nameData, valueData string) []byte { func makeRecord( recordType recType, - requestID uint16, contentData []byte, ) []byte { requestIDB1 := byte(requestID >> 8) @@ -185,14 +186,13 @@ func makeRecord( // request body var streamBeginTypeStdin = bytes.Join([][]byte{ // set up request 1 - makeRecord(typeBeginRequest, 1, - []byte{0, byte(roleResponder), 0, 0, 0, 0, 0, 0}), + makeRecord(typeBeginRequest, []byte{0, byte(roleResponder), 0, 0, 0, 0, 0, 0}), // add required parameters to request 1 - makeRecord(typeParams, 1, nameValuePair11("REQUEST_METHOD", "GET")), - makeRecord(typeParams, 1, nameValuePair11("SERVER_PROTOCOL", "HTTP/1.1")), - makeRecord(typeParams, 1, nil), + makeRecord(typeParams, nameValuePair11("REQUEST_METHOD", "GET")), + makeRecord(typeParams, nameValuePair11("SERVER_PROTOCOL", "HTTP/1.1")), + makeRecord(typeParams, nil), // begin sending body of request 1 - makeRecord(typeStdin, 1, []byte("0123456789abcdef")), + makeRecord(typeStdin, []byte("0123456789abcdef")), }, nil) @@ -204,7 +204,7 @@ var cleanUpTests = []struct { { bytes.Join([][]byte{ streamBeginTypeStdin, - makeRecord(typeAbortRequest, 1, nil), + makeRecord(typeAbortRequest, nil), }, nil), ErrRequestAborted, @@ -265,7 +265,7 @@ func (rwNopCloser) Close() error { } // Verifies it doesn't crash. Issue 11824. -func TestMalformedParams(t *testing.T) { +func TestMalformedParams(_ *testing.T) { input := []byte{ // beginRequest, requestId=1, contentLength=8, role=1, keepConn=1 1, 1, 0, 1, 0, 8, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index c71d3290666ad..cc326c3b55577 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -219,7 +219,7 @@ func (p *phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error { } // Import stat data into Telegraf system -func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat { +func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) { stats := make(poolStat) var currentPool string @@ -271,8 +271,6 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat { } acc.AddFields("phpfpm", fields, tags) } - - return stats } func expandUrls(urls []string) ([]string, error) { diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index b077f7955b037..5f0be8999e81c 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -23,7 +23,7 @@ import ( type statServer struct{} // We create a fake server to return test data -func (s statServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (s statServer) ServeHTTP(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "text/plain") w.Header().Set("Content-Length", fmt.Sprint(len(outputSample))) fmt.Fprint(w, outputSample) diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index e42f4b97a81f4..6d06988dbd6a0 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -3,10 +3,8 @@ package ping import ( - "context" "errors" "fmt" - "net" "reflect" "sort" "testing" @@ -231,7 +229,7 @@ func TestArguments(t *testing.T) { } } -func mockHostPinger(binary string, timeout float64, args ...string) (string, error) { +func mockHostPinger(_ string, _ float64, _ ...string) (string, error) { return linuxPingOutput, nil } @@ -289,7 +287,7 @@ PING www.google.com (216.58.218.164) 56(84) bytes of data. rtt min/avg/max/mdev = 35.225/44.033/51.806/5.325 ms ` -func mockLossyHostPinger(binary string, timeout float64, args ...string) (string, error) { +func mockLossyHostPinger(_ string, _ float64, _ ...string) (string, error) { return lossyPingOutput, nil } @@ -325,7 +323,7 @@ Request timeout for icmp_seq 0 2 packets transmitted, 0 packets received, 100.0% packet loss ` -func mockErrorHostPinger(binary string, timeout float64, args ...string) (string, error) { +func mockErrorHostPinger(_ string, _ float64, _ ...string) (string, error) { // This error will not trigger correct error paths return errorPingOutput, nil } @@ -350,7 +348,7 @@ func TestBadPingGather(t *testing.T) { acc.AssertContainsTaggedFields(t, "ping", fields, tags) } -func mockFatalHostPinger(binary string, timeout float64, args ...string) (string, error) { +func mockFatalHostPinger(_ string, _ float64, _ ...string) (string, error) { return fatalPingOutput, errors.New("So very bad") } @@ -415,12 +413,6 @@ func TestPingBinary(t *testing.T) { acc.GatherError(p.Gather) } -func mockHostResolver(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) { - ipaddr := net.IPAddr{} - ipaddr.IP = net.IPv4(127, 0, 0, 1) - return &ipaddr, nil -} - // Test that Gather function works using native ping func TestPingGatherNative(t *testing.T) { type test struct { diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go index 25d39dcd45560..ad0d9ab941ded 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go @@ -13,8 +13,6 @@ import ( "github.com/stretchr/testify/require" ) -type statServer struct{} - var metrics = "all-outqueries\t3591637\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" + "answers10-100\t1238786\nanswers100-1000\t402917\nauth-zone-queries\t4\nauth4-answers-slow\t44248\n" + "auth4-answers0-1\t59169\nauth4-answers1-10\t1747403\nauth4-answers10-100\t1315621\n" + diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index 8e3e934bbdc55..a8d8f3f51bfbd 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -68,7 +68,7 @@ func (p *Proc) Username() (string, error) { return p.Process.Username() } -func (p *Proc) Percent(interval time.Duration) (float64, error) { +func (p *Proc) Percent(_ time.Duration) (float64, error) { cpuPerc, err := p.Process.Percent(time.Duration(0)) if !p.hasCPUTimes && err == nil { p.hasCPUTimes = true diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 62de739b212bd..b3fa30a56992d 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -133,20 +133,14 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { return err } - procs, err := p.updateProcesses(pids, tags, p.procs) - if err != nil { - acc.AddError(fmt.Errorf("procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", - p.Exe, p.PidFile, p.Pattern, p.User, err.Error())) - } - p.procs = procs - + p.procs = p.updateProcesses(pids, tags, p.procs) for _, proc := range p.procs { p.addMetric(proc, acc, now) } fields := map[string]interface{}{ "pid_count": len(pids), - "running": len(procs), + "running": len(p.procs), "result_code": 0, } tags["pid_finder"] = p.PidFinder @@ -319,7 +313,7 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time } // Update monitored Processes -func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo map[PID]Process) (map[PID]Process, error) { +func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo map[PID]Process) map[PID]Process { procs := make(map[PID]Process, len(prevInfo)) for _, pid := range pids { @@ -356,7 +350,8 @@ func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo } } } - return procs, nil + + return procs } // Create and return PIDGatherer lazily diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 4f1c15f40150e..d59e327027cff 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -27,7 +27,7 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd { cmd.Stderr = os.Stderr return cmd } -func TestMockExecCommand(t *testing.T) { +func TestMockExecCommand(_ *testing.T) { var cmd []string for _, arg := range os.Args { if arg == "--" { @@ -63,11 +63,11 @@ type testPgrep struct { err error } -func pidFinder(pids []PID, err error) func() (PIDFinder, error) { +func pidFinder(pids []PID) func() (PIDFinder, error) { return func() (PIDFinder, error) { return &testPgrep{ pids: pids, - err: err, + err: nil, }, nil } } @@ -176,7 +176,7 @@ func TestGather_CreateProcessErrorOk(t *testing.T) { p := Procstat{ Exe: exe, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: func(PID) (Process, error) { return nil, fmt.Errorf("createProcess error") }, @@ -202,7 +202,7 @@ func TestGather_ProcessName(t *testing.T) { p := Procstat{ Exe: exe, ProcessName: "custom_name", - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -216,7 +216,7 @@ func TestGather_NoProcessNameUsesReal(t *testing.T) { p := Procstat{ Exe: exe, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -229,7 +229,7 @@ func TestGather_NoPidTag(t *testing.T) { p := Procstat{ Exe: exe, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -243,7 +243,7 @@ func TestGather_PidTag(t *testing.T) { p := Procstat{ Exe: exe, PidTag: true, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -257,7 +257,7 @@ func TestGather_Prefix(t *testing.T) { p := Procstat{ Exe: exe, Prefix: "custom_prefix", - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -269,7 +269,7 @@ func TestGather_Exe(t *testing.T) { p := Procstat{ Exe: exe, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -283,7 +283,7 @@ func TestGather_User(t *testing.T) { p := Procstat{ User: user, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -297,7 +297,7 @@ func TestGather_Pattern(t *testing.T) { p := Procstat{ Pattern: pattern, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -309,7 +309,7 @@ func TestGather_MissingPidMethod(t *testing.T) { var acc testutil.Accumulator p := Procstat{ - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.Error(t, acc.GatherError(p.Gather)) @@ -321,7 +321,7 @@ func TestGather_PidFile(t *testing.T) { p := Procstat{ PidFile: pidfile, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -336,7 +336,7 @@ func TestGather_PercentFirstPass(t *testing.T) { p := Procstat{ Pattern: "foo", PidTag: true, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: NewProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -352,7 +352,7 @@ func TestGather_PercentSecondPass(t *testing.T) { p := Procstat{ Pattern: "foo", PidTag: true, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: NewProc, } require.NoError(t, acc.GatherError(p.Gather)) @@ -364,7 +364,7 @@ func TestGather_PercentSecondPass(t *testing.T) { func TestGather_systemdUnitPIDs(t *testing.T) { p := Procstat{ - createPIDFinder: pidFinder([]PID{}, nil), + createPIDFinder: pidFinder([]PID{}), SystemdUnit: "TestGather_systemdUnitPIDs", } pids, tags, err := p.findPids() @@ -385,7 +385,7 @@ func TestGather_cgroupPIDs(t *testing.T) { require.NoError(t, err) p := Procstat{ - createPIDFinder: pidFinder([]PID{}, nil), + createPIDFinder: pidFinder([]PID{}), CGroup: td, } pids, tags, err := p.findPids() @@ -396,7 +396,7 @@ func TestGather_cgroupPIDs(t *testing.T) { func TestProcstatLookupMetric(t *testing.T) { p := Procstat{ - createPIDFinder: pidFinder([]PID{543}, nil), + createPIDFinder: pidFinder([]PID{543}), Exe: "-Gsys", } var acc testutil.Accumulator @@ -411,7 +411,7 @@ func TestGather_SameTimestamps(t *testing.T) { p := Procstat{ PidFile: pidfile, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) diff --git a/plugins/inputs/procstat/win_service_notwindows.go b/plugins/inputs/procstat/win_service_notwindows.go index 3d539d9f9918c..a0a776d33736f 100644 --- a/plugins/inputs/procstat/win_service_notwindows.go +++ b/plugins/inputs/procstat/win_service_notwindows.go @@ -6,6 +6,6 @@ import ( "fmt" ) -func queryPidWithWinServiceName(winServiceName string) (uint32, error) { +func queryPidWithWinServiceName(_ string) (uint32, error) { return 0, fmt.Errorf("os not support win_service option") } diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 97473ef96c7e3..d42e98dd7813b 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -25,11 +25,6 @@ import ( "k8s.io/client-go/rest" ) -type payload struct { - eventype string - pod *corev1.Pod -} - type podMetadata struct { ResourceVersion string `json:"resourceVersion"` SelfLink string `json:"selfLink"` diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 18d9aa603a24c..662af9fc46fb9 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -160,7 +160,3 @@ func pod() *corev1.Pod { p.Namespace = "default" return p } - -func str(x string) *string { - return &x -} diff --git a/plugins/inputs/prometheus/parser_test.go b/plugins/inputs/prometheus/parser_test.go index a80cdbd5a48a3..293e1968d2b5d 100644 --- a/plugins/inputs/prometheus/parser_test.go +++ b/plugins/inputs/prometheus/parser_test.go @@ -3,13 +3,10 @@ package prometheus import ( "net/http" "testing" - "time" "github.com/stretchr/testify/assert" ) -var exptime = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) - const validUniqueGauge = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision. # TYPE cadvisor_version_info gauge cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1 @@ -20,9 +17,6 @@ const validUniqueCounter = `# HELP get_token_fail_count Counter of failed Token( get_token_fail_count 0 ` -const validUniqueLine = `# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source -` - const validUniqueSummary = `# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. # TYPE http_request_duration_microseconds summary http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506 @@ -46,61 +40,6 @@ apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08 apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 ` -const validData = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision. -# TYPE cadvisor_version_info gauge -cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1 -# HELP go_gc_duration_seconds A summary of the GC invocation durations. -# TYPE go_gc_duration_seconds summary -go_gc_duration_seconds{quantile="0"} 0.013534896000000001 -go_gc_duration_seconds{quantile="0.25"} 0.02469263 -go_gc_duration_seconds{quantile="0.5"} 0.033727822000000005 -go_gc_duration_seconds{quantile="0.75"} 0.03840335 -go_gc_duration_seconds{quantile="1"} 0.049956604 -go_gc_duration_seconds_sum 1970.341293002 -go_gc_duration_seconds_count 65952 -# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. -# TYPE http_request_duration_microseconds summary -http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 5.876804288e+06 -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 5.876804288e+06 -http_request_duration_microseconds_sum{handler="prometheus"} 1.8909097205e+07 -http_request_duration_microseconds_count{handler="prometheus"} 9 -# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source -# TYPE get_token_fail_count counter -get_token_fail_count 0 -# HELP apiserver_request_latencies Response latency distribution in microseconds for each verb, resource and client. -# TYPE apiserver_request_latencies histogram -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="125000"} 1994 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="250000"} 1997 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="500000"} 2000 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="1e+06"} 2005 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="2e+06"} 2012 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="4e+06"} 2017 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="8e+06"} 2024 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="+Inf"} 2025 -apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08 -apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 -` - -const prometheusMulti = ` -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -` - -const prometheusMultiSomeInvalid = ` -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu4 , usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -` - func TestParseValidPrometheus(t *testing.T) { // Gauge value metrics, err := Parse([]byte(validUniqueGauge), http.Header{}) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index cdf3bc84bd7dd..c3ceb01c73ae8 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -74,7 +74,6 @@ type Prometheus struct { // Only for monitor_kubernetes_pods=true and pod_scrape_scope="node" podLabelSelector labels.Selector podFieldSelector fields.Selector - nodeIP string isNodeScrapeScope bool } @@ -456,7 +455,7 @@ func fieldSelectorIsSupported(fieldSelector fields.Selector) (bool, string) { } // Start will start the Kubernetes scraping if enabled in the configuration -func (p *Prometheus) Start(a telegraf.Accumulator) error { +func (p *Prometheus) Start(_ telegraf.Accumulator) error { if p.MonitorPods { var ctx context.Context ctx, p.cancel = context.WithCancel(context.Background()) diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index 423070a357c23..a66aa5286fac8 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -157,12 +157,7 @@ func gatherVMData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { return } - fields, err := getFields(currentVMStatus) - if err != nil { - px.Log.Errorf("Error getting VM measurements: %v", err) - return - } - + fields := getFields(currentVMStatus) acc.AddFields("proxmox", fields, tags) } } @@ -216,7 +211,7 @@ func getVMConfig(px *Proxmox, vmID string, rt ResourceType) (VMConfig, error) { return vmConfig, nil } -func getFields(vmStat VMStat) (map[string]interface{}, error) { +func getFields(vmStat VMStat) map[string]interface{} { memTotal, memUsed, memFree, memUsedPercentage := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem) swapTotal, swapUsed, swapFree, swapUsedPercentage := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap) diskTotal, diskUsed, diskFree, diskUsedPercentage := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk) @@ -237,7 +232,7 @@ func getFields(vmStat VMStat) (map[string]interface{}, error) { "disk_total": diskTotal, "disk_free": diskFree, "disk_used_percentage": diskUsedPercentage, - }, nil + } } func getByteMetrics(total json.Number, used json.Number) (int64, int64, int64, float64) { diff --git a/plugins/inputs/proxmox/proxmox_test.go b/plugins/inputs/proxmox/proxmox_test.go index 35ae559ed96c7..f05b6450bd7be 100644 --- a/plugins/inputs/proxmox/proxmox_test.go +++ b/plugins/inputs/proxmox/proxmox_test.go @@ -18,7 +18,7 @@ var lxcConfigTestData = `{"data":{"hostname":"container1","searchdomain":"test.e var lxcCurrentStatusTestData = `{"data":{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}}` var qemuCurrentStatusTestData = `{"data":{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}}` -func performTestRequest(px *Proxmox, apiURL string, method string, data url.Values) ([]byte, error) { +func performTestRequest(_ *Proxmox, apiURL string, _ string, _ url.Values) ([]byte, error) { var bytedata = []byte("") if strings.HasSuffix(apiURL, "dns") { diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index 7ce1112787d4f..6f8abbda6be0c 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -24,7 +24,7 @@ func (t *testClient) Info() *redis.StringCmd { return nil } -func (t *testClient) Do(returnType string, args ...interface{}) (interface{}, error) { +func (t *testClient) Do(_ string, _ ...interface{}) (interface{}, error) { return 2, nil } diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go index e59350b5c69ca..35994cea65f40 100644 --- a/plugins/inputs/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -62,10 +62,10 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { u.Host = serv } wg.Add(1) - go func(serv string) { + go func() { defer wg.Done() acc.AddError(r.gatherServer(&Server{URL: u}, acc)) - }(serv) + }() } wg.Wait() diff --git a/plugins/inputs/riemann_listener/riemann_listener_test.go b/plugins/inputs/riemann_listener/riemann_listener_test.go index 6948354a85b8d..92af46e8afd03 100644 --- a/plugins/inputs/riemann_listener/riemann_listener_test.go +++ b/plugins/inputs/riemann_listener/riemann_listener_test.go @@ -25,10 +25,10 @@ func TestSocketListener_tcp(t *testing.T) { require.NoError(t, err) defer sl.Stop() - testStats(t, sl) - testMissingService(t, sl) + testStats(t) + testMissingService(t) } -func testStats(t *testing.T, sl *RiemannSocketListener) { +func testStats(t *testing.T) { c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second) err := c.Connect() if err != nil { @@ -41,7 +41,7 @@ func testStats(t *testing.T, sl *RiemannSocketListener) { }) assert.Equal(t, result.GetOk(), true) } -func testMissingService(t *testing.T, sl *RiemannSocketListener) { +func testMissingService(t *testing.T) { c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second) err := c.Connect() if err != nil { diff --git a/plugins/inputs/sensors/sensors_test.go b/plugins/inputs/sensors/sensors_test.go index 75fd3e46825ed..e5105caa3f787 100644 --- a/plugins/inputs/sensors/sensors_test.go +++ b/plugins/inputs/sensors/sensors_test.go @@ -306,7 +306,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking // it returns below mockData. -func TestHelperProcess(t *testing.T) { +func TestHelperProcess(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } diff --git a/plugins/inputs/sflow/sflow.go b/plugins/inputs/sflow/sflow.go index f4283cabec095..2876cebe3dc0f 100644 --- a/plugins/inputs/sflow/sflow.go +++ b/plugins/inputs/sflow/sflow.go @@ -2,7 +2,6 @@ package sflow import ( "bytes" - "context" "fmt" "io" "net" @@ -40,7 +39,6 @@ type SFlow struct { addr net.Addr decoder *PacketDecoder closer io.Closer - cancel context.CancelFunc wg sync.WaitGroup } diff --git a/plugins/inputs/sflow/types.go b/plugins/inputs/sflow/types.go index c415db26f784a..7efd59aff0c71 100644 --- a/plugins/inputs/sflow/types.go +++ b/plugins/inputs/sflow/types.go @@ -6,13 +6,8 @@ import ( ) const ( - AddressTypeIPv6 uint32 = 2 // sflow_version_5.txt line: 1384 - AddressTypeIPv4 uint32 = 1 // sflow_version_5.txt line: 1383 - IPProtocolTCP uint8 = 6 IPProtocolUDP uint8 = 17 - - metricName = "sflow" ) var ETypeMap = map[uint16]string{ @@ -20,11 +15,6 @@ var ETypeMap = map[uint16]string{ 0x86DD: "IPv6", } -var IPvMap = map[uint32]string{ - 1: "IPV4", // sflow_version_5.txt line: 1383 - 2: "IPV6", // sflow_version_5.txt line: 1384 -} - type ContainsMetricData interface { GetTags() map[string]string GetFields() map[string]interface{} diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go index 5b3bbd767e67d..a3b8fb8e69a6d 100644 --- a/plugins/inputs/snmp/snmp_mocks_test.go +++ b/plugins/inputs/snmp/snmp_mocks_test.go @@ -24,7 +24,7 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd { // This is not a real test. This is just a way of mocking out commands. // // Idea based on https://github.com/golang/go/blob/7c31043/src/os/exec/exec_test.go#L568 -func TestMockExecCommand(t *testing.T) { +func TestMockExecCommand(_ *testing.T) { var cmd []string for _, arg := range os.Args { if string(arg) == "--" { diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index cff524b6b5390..5f6ecd3e0bd4f 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -395,7 +395,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { // to do it only the first time // only if len(s.OidInstanceMapping) == 0 if len(host.OidInstanceMapping) >= 0 { - if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil { + if err := host.SNMPMap(s.nameToOid, s.subTableMap); err != nil { s.Log.Errorf("Mapping error for host %q: %s", host.Address, err.Error()) continue } @@ -412,7 +412,6 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } func (h *Host) SNMPMap( - acc telegraf.Accumulator, nameToOid map[string]string, subTableMap map[string]Subtable, ) error { diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index a887d53897461..b02483af768c1 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -111,11 +111,11 @@ func init() { }) } -func realExecCmd(Timeout internal.Duration, arg0 string, args ...string) ([]byte, error) { +func realExecCmd(timeout internal.Duration, arg0 string, args ...string) ([]byte, error) { cmd := exec.Command(arg0, args...) var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, timeout.Duration) if err != nil { return nil, err } diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index a8c3d01c78bb5..424098cfae365 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -40,104 +40,111 @@ func fakeExecCmd(_ internal.Duration, x string, y ...string) ([]byte, error) { return nil, fmt.Errorf("mock " + x + " " + strings.Join(y, " ")) } -func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, version gosnmp.SnmpVersion, secLevel string, username string, authProto string, authPass string, privProto string, privPass string, contextName string, engineID string) { - var s gosnmp.GoSNMP +func newMsgFlagsV3(secLevel string) gosnmp.SnmpV3MsgFlags { + var msgFlags gosnmp.SnmpV3MsgFlags + switch strings.ToLower(secLevel) { + case "noauthnopriv", "": + msgFlags = gosnmp.NoAuthNoPriv + case "authnopriv": + msgFlags = gosnmp.AuthNoPriv + case "authpriv": + msgFlags = gosnmp.AuthPriv + default: + msgFlags = gosnmp.NoAuthNoPriv + } - if version == gosnmp.Version3 { - var msgFlags gosnmp.SnmpV3MsgFlags - switch strings.ToLower(secLevel) { - case "noauthnopriv", "": - msgFlags = gosnmp.NoAuthNoPriv - case "authnopriv": - msgFlags = gosnmp.AuthNoPriv - case "authpriv": - msgFlags = gosnmp.AuthPriv - default: - msgFlags = gosnmp.NoAuthNoPriv - } + return msgFlags +} - var authenticationProtocol gosnmp.SnmpV3AuthProtocol - switch strings.ToLower(authProto) { - case "md5": - authenticationProtocol = gosnmp.MD5 - case "sha": - authenticationProtocol = gosnmp.SHA - //case "sha224": - // authenticationProtocol = gosnmp.SHA224 - //case "sha256": - // authenticationProtocol = gosnmp.SHA256 - //case "sha384": - // authenticationProtocol = gosnmp.SHA384 - //case "sha512": - // authenticationProtocol = gosnmp.SHA512 - case "": - authenticationProtocol = gosnmp.NoAuth - default: - authenticationProtocol = gosnmp.NoAuth - } +func newUsmSecurityParametersForV3(authProto string, privProto string, username string, privPass string, authPass string) *gosnmp.UsmSecurityParameters { + var authenticationProtocol gosnmp.SnmpV3AuthProtocol + switch strings.ToLower(authProto) { + case "md5": + authenticationProtocol = gosnmp.MD5 + case "sha": + authenticationProtocol = gosnmp.SHA + //case "sha224": + // authenticationProtocol = gosnmp.SHA224 + //case "sha256": + // authenticationProtocol = gosnmp.SHA256 + //case "sha384": + // authenticationProtocol = gosnmp.SHA384 + //case "sha512": + // authenticationProtocol = gosnmp.SHA512 + case "": + authenticationProtocol = gosnmp.NoAuth + default: + authenticationProtocol = gosnmp.NoAuth + } - var privacyProtocol gosnmp.SnmpV3PrivProtocol - switch strings.ToLower(privProto) { - case "aes": - privacyProtocol = gosnmp.AES - case "des": - privacyProtocol = gosnmp.DES - case "aes192": - privacyProtocol = gosnmp.AES192 - case "aes192c": - privacyProtocol = gosnmp.AES192C - case "aes256": - privacyProtocol = gosnmp.AES256 - case "aes256c": - privacyProtocol = gosnmp.AES256C - case "": - privacyProtocol = gosnmp.NoPriv - default: - privacyProtocol = gosnmp.NoPriv - } + var privacyProtocol gosnmp.SnmpV3PrivProtocol + switch strings.ToLower(privProto) { + case "aes": + privacyProtocol = gosnmp.AES + case "des": + privacyProtocol = gosnmp.DES + case "aes192": + privacyProtocol = gosnmp.AES192 + case "aes192c": + privacyProtocol = gosnmp.AES192C + case "aes256": + privacyProtocol = gosnmp.AES256 + case "aes256c": + privacyProtocol = gosnmp.AES256C + case "": + privacyProtocol = gosnmp.NoPriv + default: + privacyProtocol = gosnmp.NoPriv + } - sp := &gosnmp.UsmSecurityParameters{ - AuthoritativeEngineID: "1", - AuthoritativeEngineBoots: 1, - AuthoritativeEngineTime: 1, - UserName: username, - PrivacyProtocol: privacyProtocol, - PrivacyPassphrase: privPass, - AuthenticationPassphrase: authPass, - AuthenticationProtocol: authenticationProtocol, - } - s = gosnmp.GoSNMP{ - Port: port, - Version: version, - Timeout: time.Duration(2) * time.Second, - Retries: 1, - MaxOids: gosnmp.MaxOids, - Target: "127.0.0.1", - SecurityParameters: sp, - SecurityModel: gosnmp.UserSecurityModel, - MsgFlags: msgFlags, - ContextName: contextName, - ContextEngineID: engineID, - } - } else { - s = gosnmp.GoSNMP{ - Port: port, - Version: version, - Timeout: time.Duration(2) * time.Second, - Retries: 1, - MaxOids: gosnmp.MaxOids, - Target: "127.0.0.1", - Community: "public", - } + return &gosnmp.UsmSecurityParameters{ + AuthoritativeEngineID: "1", + AuthoritativeEngineBoots: 1, + AuthoritativeEngineTime: 1, + UserName: username, + PrivacyProtocol: privacyProtocol, + PrivacyPassphrase: privPass, + AuthenticationPassphrase: authPass, + AuthenticationProtocol: authenticationProtocol, } +} - err := s.Connect() +func newGoSNMPV3(port uint16, contextName string, engineID string, msgFlags gosnmp.SnmpV3MsgFlags, sp *gosnmp.UsmSecurityParameters) gosnmp.GoSNMP { + return gosnmp.GoSNMP{ + Port: port, + Version: gosnmp.Version3, + Timeout: time.Duration(2) * time.Second, + Retries: 1, + MaxOids: gosnmp.MaxOids, + Target: "127.0.0.1", + SecurityParameters: sp, + SecurityModel: gosnmp.UserSecurityModel, + MsgFlags: msgFlags, + ContextName: contextName, + ContextEngineID: engineID, + } +} + +func newGoSNMP(version gosnmp.SnmpVersion, port uint16) gosnmp.GoSNMP { + return gosnmp.GoSNMP{ + Port: port, + Version: version, + Timeout: time.Duration(2) * time.Second, + Retries: 1, + MaxOids: gosnmp.MaxOids, + Target: "127.0.0.1", + Community: "public", + } +} + +func sendTrap(t *testing.T, goSNMP gosnmp.GoSNMP, trap gosnmp.SnmpTrap) { + err := goSNMP.Connect() if err != nil { t.Errorf("Connect() err: %v", err) } - defer s.Conn.Close() + defer goSNMP.Conn.Close() - _, err = s.SendTrap(trap) + _, err = goSNMP.SendTrap(trap) if err != nil { t.Errorf("SendTrap() err: %v", err) } @@ -1302,8 +1309,17 @@ func TestReceiveTrap(t *testing.T) { s.load(entry.oid, entry.e) } + var goSNMP gosnmp.GoSNMP + if tt.version == gosnmp.Version3 { + msgFlags := newMsgFlagsV3(tt.secLevel) + sp := newUsmSecurityParametersForV3(tt.authProto, tt.privProto, tt.secName, tt.privPass, tt.authPass) + goSNMP = newGoSNMPV3(port, tt.contextName, tt.engineID, msgFlags, sp) + } else { + goSNMP = newGoSNMP(tt.version, port) + } + // Send the trap - sendTrap(t, port, now, tt.trap, tt.version, tt.secLevel, tt.secName, tt.authProto, tt.authPass, tt.privProto, tt.privPass, tt.contextName, tt.engineID) + sendTrap(t, goSNMP, tt.trap) // Wait for trap to be received select { diff --git a/plugins/inputs/stackdriver/stackdriver_test.go b/plugins/inputs/stackdriver/stackdriver_test.go index ee1f7ac9b01eb..0502c7bed9765 100644 --- a/plugins/inputs/stackdriver/stackdriver_test.go +++ b/plugins/inputs/stackdriver/stackdriver_test.go @@ -1125,8 +1125,8 @@ func TestListMetricDescriptorFilter(t *testing.T) { } } -func TestNewListTimeSeriesFilter(t *testing.T) { +func TestNewListTimeSeriesFilter(_ *testing.T) { } -func TestTimeSeriesConfCacheIsValid(t *testing.T) { +func TestTimeSeriesConfCacheIsValid(_ *testing.T) { } diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index bb9e9664683f8..bf63b6ee41a4d 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -92,8 +92,6 @@ type Statsd struct { accept chan bool // drops tracks the number of dropped metrics. drops int - // malformed tracks the number of malformed packets - malformed int // Channel for all incoming statsd packets in chan input @@ -538,11 +536,11 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { // parser monitors the s.in channel, if there is a packet ready, it parses the // packet into statsd strings and then calls parseStatsdLine, which parses a // single statsd metric into a struct. -func (s *Statsd) parser() error { +func (s *Statsd) parser() { for { select { case <-s.done: - return nil + return case in := <-s.in: start := time.Now() lines := strings.Split(in.Buffer.String(), "\n") diff --git a/plugins/inputs/suricata/suricata.go b/plugins/inputs/suricata/suricata.go index 17c0b571510b0..8cf374df65d3f 100644 --- a/plugins/inputs/suricata/suricata.go +++ b/plugins/inputs/suricata/suricata.go @@ -215,7 +215,7 @@ func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { // Gather measures and submits one full set of telemetry to Telegraf. // Not used here, submission is completely input-driven. -func (s *Suricata) Gather(acc telegraf.Accumulator) error { +func (s *Suricata) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/suricata/suricata_testutil.go b/plugins/inputs/suricata/suricata_testutil.go deleted file mode 100644 index 55aa2bb9bae69..0000000000000 --- a/plugins/inputs/suricata/suricata_testutil.go +++ /dev/null @@ -1,38 +0,0 @@ -package suricata - -import ( - "bytes" - "sync" -) - -// A thread-safe Buffer wrapper to enable concurrent access to log output. -type buffer struct { - b bytes.Buffer - m sync.Mutex -} - -func (b *buffer) Read(p []byte) (n int, err error) { - b.m.Lock() - defer b.m.Unlock() - return b.b.Read(p) -} -func (b *buffer) Write(p []byte) (n int, err error) { - b.m.Lock() - defer b.m.Unlock() - return b.b.Write(p) -} -func (b *buffer) String() string { - b.m.Lock() - defer b.m.Unlock() - return b.b.String() -} -func (b *buffer) Reset() { - b.m.Lock() - defer b.m.Unlock() - b.b.Reset() -} -func (b *buffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.b.Bytes() -} diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go index 544b31929f123..b71ddfee1a762 100644 --- a/plugins/inputs/syslog/nontransparent_test.go +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -140,7 +140,7 @@ func testStrictNonTransparent(t *testing.T, protocol string, address string, wan for _, tc := range getTestCasesForNonTransparent() { t.Run(tc.name, func(t *testing.T) { // Creation of a strict mode receiver - receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, framing.NonTransparent) + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 10, false, framing.NonTransparent) require.NotNil(t, receiver) if wantTLS { receiver.ServerConfig = *pki.TLSServerConfig() @@ -192,11 +192,12 @@ func testStrictNonTransparent(t *testing.T, protocol string, address string, wan } } -func testBestEffortNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { +func testBestEffortNonTransparent(t *testing.T, protocol string, address string, wantTLS bool) { + keepAlive := (*internal.Duration)(nil) for _, tc := range getTestCasesForNonTransparent() { t.Run(tc.name, func(t *testing.T) { // Creation of a best effort mode receiver - receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, framing.NonTransparent) + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 10, true, framing.NonTransparent) require.NotNil(t, receiver) if wantTLS { receiver.ServerConfig = *pki.TLSServerConfig() @@ -245,7 +246,7 @@ func TestNonTransparentStrict_tcp(t *testing.T) { } func TestNonTransparentBestEffort_tcp(t *testing.T) { - testBestEffortNonTransparent(t, "tcp", address, false, nil) + testBestEffortNonTransparent(t, "tcp", address, false) } func TestNonTransparentStrict_tcp_tls(t *testing.T) { @@ -253,7 +254,7 @@ func TestNonTransparentStrict_tcp_tls(t *testing.T) { } func TestNonTransparentBestEffort_tcp_tls(t *testing.T) { - testBestEffortNonTransparent(t, "tcp", address, true, nil) + testBestEffortNonTransparent(t, "tcp", address, true) } func TestNonTransparentStrictWithKeepAlive_tcp_tls(t *testing.T) { @@ -277,7 +278,7 @@ func TestNonTransparentBestEffort_unix(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") - testBestEffortNonTransparent(t, "unix", sock, false, nil) + testBestEffortNonTransparent(t, "unix", sock, false) } func TestNonTransparentStrict_unix_tls(t *testing.T) { @@ -293,5 +294,5 @@ func TestNonTransparentBestEffort_unix_tls(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") - testBestEffortNonTransparent(t, "unix", sock, true, nil) + testBestEffortNonTransparent(t, "unix", sock, true) } diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index 87909fcec2dd3..199c380601955 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -392,7 +392,8 @@ func testStrictOctetCounting(t *testing.T, protocol string, address string, want } } -func testBestEffortOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { +func testBestEffortOctetCounting(t *testing.T, protocol string, address string, wantTLS bool) { + keepAlive := (*internal.Duration)(nil) for _, tc := range getTestCasesForOctetCounting() { t.Run(tc.name, func(t *testing.T) { // Creation of a best effort mode receiver @@ -445,7 +446,7 @@ func TestOctetCountingStrict_tcp(t *testing.T) { } func TestOctetCountingBestEffort_tcp(t *testing.T) { - testBestEffortOctetCounting(t, "tcp", address, false, nil) + testBestEffortOctetCounting(t, "tcp", address, false) } func TestOctetCountingStrict_tcp_tls(t *testing.T) { @@ -453,7 +454,7 @@ func TestOctetCountingStrict_tcp_tls(t *testing.T) { } func TestOctetCountingBestEffort_tcp_tls(t *testing.T) { - testBestEffortOctetCounting(t, "tcp", address, true, nil) + testBestEffortOctetCounting(t, "tcp", address, true) } func TestOctetCountingStrictWithKeepAlive_tcp_tls(t *testing.T) { @@ -477,7 +478,7 @@ func TestOctetCountingBestEffort_unix(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") - testBestEffortOctetCounting(t, "unix", sock, false, nil) + testBestEffortOctetCounting(t, "unix", sock, false) } func TestOctetCountingStrict_unix_tls(t *testing.T) { @@ -493,5 +494,5 @@ func TestOctetCountingBestEffort_unix_tls(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") - testBestEffortOctetCounting(t, "unix", sock, true, nil) + testBestEffortOctetCounting(t, "unix", sock, true) } diff --git a/plugins/inputs/sysstat/sysstat_test.go b/plugins/inputs/sysstat/sysstat_test.go index 4aecfaacc2a15..0ef97f0e7c999 100644 --- a/plugins/inputs/sysstat/sysstat_test.go +++ b/plugins/inputs/sysstat/sysstat_test.go @@ -260,7 +260,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- sadf -p -- -p -C tmpFile // it returns mockData["C"] output. -func TestHelperProcess(t *testing.T) { +func TestHelperProcess(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go index b3cf2c1707f5d..e1bd4f84b48e7 100644 --- a/plugins/inputs/system/mock_PS.go +++ b/plugins/inputs/system/mock_PS.go @@ -37,7 +37,7 @@ func (m *MockPS) LoadAvg() (*load.AvgStat, error) { return r0, r1 } -func (m *MockPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) { +func (m *MockPS) CPUTimes(_, _ bool) ([]cpu.TimesStat, error) { ret := m.Called() r0 := ret.Get(0).([]cpu.TimesStat) @@ -74,7 +74,7 @@ func (m *MockPS) NetProto() ([]net.ProtoCountersStat, error) { return r0, r1 } -func (m *MockPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) { +func (m *MockPS) DiskIO(_ []string) (map[string]disk.IOCountersStat, error) { ret := m.Called() r0 := ret.Get(0).(map[string]disk.IOCountersStat) diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index abda443152359..d835d02633d02 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -5,7 +5,6 @@ import ( "path/filepath" "strings" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/shirou/gopsutil/cpu" @@ -34,13 +33,6 @@ type PSDiskDeps interface { PSDiskUsage(path string) (*disk.UsageStat, error) } -func add(acc telegraf.Accumulator, - name string, val float64, tags map[string]string) { - if val >= 0 { - acc.AddFields(name, map[string]interface{}{"value": val}, tags) - } -} - func NewSystemPS() *SystemPS { return &SystemPS{&SystemPSDisk{}} } diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 54d42e44ada59..c7b16eb7a4631 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -22,8 +22,7 @@ import ( ) const ( - defaultWatchMethod = "inotify" - defaultMaxUndeliveredLines = 1000 + defaultWatchMethod = "inotify" ) var ( @@ -157,7 +156,7 @@ func (t *Tail) Init() error { return err } -func (t *Tail) Gather(acc telegraf.Accumulator) error { +func (t *Tail) Gather(_ telegraf.Accumulator) error { return t.tailNewFiles(true) } diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go index 9016061cb565e..53297c4a68fb8 100644 --- a/plugins/inputs/tcp_listener/tcp_listener.go +++ b/plugins/inputs/tcp_listener/tcp_listener.go @@ -243,7 +243,7 @@ func (t *TCPListener) handler(conn *net.TCPConn, id string) { } // tcpParser parses the incoming tcp byte packets -func (t *TCPListener) tcpParser() error { +func (t *TCPListener) tcpParser() { defer t.wg.Done() var packet []byte @@ -254,7 +254,7 @@ func (t *TCPListener) tcpParser() error { case <-t.done: // drain input packets before finishing: if len(t.in) == 0 { - return nil + return } case packet = <-t.in: if len(packet) == 0 { diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index 5c476703e54cd..d6781b55020c3 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -254,7 +254,7 @@ func TestRunParser(t *testing.T) { ) } -func TestRunParserInvalidMsg(t *testing.T) { +func TestRunParserInvalidMsg(_ *testing.T) { var testmsg = []byte("cpu_load_short") listener, in := newTestTCPListener() diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index 4833b0fdfd132..7222f3b1fb6af 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -189,7 +189,7 @@ func (u *UDPListener) udpListenLoop() { } } -func (u *UDPListener) udpParser() error { +func (u *UDPListener) udpParser() { defer u.wg.Done() var packet []byte @@ -199,7 +199,7 @@ func (u *UDPListener) udpParser() error { select { case <-u.done: if len(u.in) == 0 { - return nil + return } case packet = <-u.in: metrics, err = u.parser.Parse(packet) diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index f3e034363e471..b6c0b5f09b082 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -136,7 +136,7 @@ func TestRunParser(t *testing.T) { ) } -func TestRunParserInvalidMsg(t *testing.T) { +func TestRunParserInvalidMsg(_ *testing.T) { log.SetOutput(ioutil.Discard) var testmsg = []byte("cpu_load_short") diff --git a/plugins/inputs/unbound/unbound.go b/plugins/inputs/unbound/unbound.go index 9b04bbf27f888..441d44c852f92 100644 --- a/plugins/inputs/unbound/unbound.go +++ b/plugins/inputs/unbound/unbound.go @@ -17,19 +17,18 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool, ConfigFile string) (*bytes.Buffer, error) +type runner func(unbound Unbound) (*bytes.Buffer, error) // Unbound is used to store configuration values type Unbound struct { - Binary string - Timeout internal.Duration - UseSudo bool - Server string - ThreadAsTag bool - ConfigFile string - - filter filter.Filter - run runner + Binary string `toml:"binary"` + Timeout internal.Duration `toml:"timeout"` + UseSudo bool `toml:"use_sudo"` + Server string `toml:"server"` + ThreadAsTag bool `toml:"thread_as_tag"` + ConfigFile string `toml:"config_file"` + + run runner } var defaultBinary = "/usr/sbin/unbound-control" @@ -71,26 +70,26 @@ func (s *Unbound) SampleConfig() string { } // Shell out to unbound_stat and return the output -func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool, ConfigFile string) (*bytes.Buffer, error) { +func unboundRunner(unbound Unbound) (*bytes.Buffer, error) { cmdArgs := []string{"stats_noreset"} - if Server != "" { - host, port, err := net.SplitHostPort(Server) + if unbound.Server != "" { + host, port, err := net.SplitHostPort(unbound.Server) if err != nil { // No port was specified - host = Server + host = unbound.Server port = "" } // Unbound control requires an IP address, and we want to be nice to the user resolver := net.Resolver{} - ctx, lookUpCancel := context.WithTimeout(context.Background(), Timeout.Duration) + ctx, lookUpCancel := context.WithTimeout(context.Background(), unbound.Timeout.Duration) defer lookUpCancel() serverIps, err := resolver.LookupIPAddr(ctx, host) if err != nil { - return nil, fmt.Errorf("error looking up ip for server: %s: %s", Server, err) + return nil, fmt.Errorf("error looking up ip for server: %s: %s", unbound.Server, err) } if len(serverIps) == 0 { - return nil, fmt.Errorf("error no ip for server: %s: %s", Server, err) + return nil, fmt.Errorf("error no ip for server: %s: %s", unbound.Server, err) } server := serverIps[0].IP.String() if port != "" { @@ -100,22 +99,22 @@ func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Serv cmdArgs = append([]string{"-s", server}, cmdArgs...) } - if ConfigFile != "" { - cmdArgs = append([]string{"-c", ConfigFile}, cmdArgs...) + if unbound.ConfigFile != "" { + cmdArgs = append([]string{"-c", unbound.ConfigFile}, cmdArgs...) } - cmd := exec.Command(cmdName, cmdArgs...) + cmd := exec.Command(unbound.Binary, cmdArgs...) - if UseSudo { - cmdArgs = append([]string{cmdName}, cmdArgs...) + if unbound.UseSudo { + cmdArgs = append([]string{unbound.Binary}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) } var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, unbound.Timeout.Duration) if err != nil { - return &out, fmt.Errorf("error running unbound-control: %s (%s %v)", err, cmdName, cmdArgs) + return &out, fmt.Errorf("error running unbound-control: %s (%s %v)", err, unbound.Binary, cmdArgs) } return &out, nil @@ -132,7 +131,7 @@ func (s *Unbound) Gather(acc telegraf.Accumulator) error { return err } - out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ThreadAsTag, s.ConfigFile) + out, err := s.run(*s) if err != nil { return fmt.Errorf("error gathering metrics: %s", err) } diff --git a/plugins/inputs/unbound/unbound_test.go b/plugins/inputs/unbound/unbound_test.go index cc4b99daecc59..cac0316d7db01 100644 --- a/plugins/inputs/unbound/unbound_test.go +++ b/plugins/inputs/unbound/unbound_test.go @@ -3,17 +3,14 @@ package unbound import ( "bytes" "testing" - "time" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" -) -var TestTimeout = internal.Duration{Duration: time.Second} + "github.com/influxdata/telegraf/testutil" +) -func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Server string, ThreadAsTag bool, ConfigFile string) func(string, internal.Duration, bool, string, bool, string) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool, string, bool, string) (*bytes.Buffer, error) { +func UnboundControl(output string) func(unbound Unbound) (*bytes.Buffer, error) { + return func(unbound Unbound) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -21,7 +18,7 @@ func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Serv func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Unbound{ - run: UnboundControl(fullOutput, TestTimeout, true, "", false, ""), + run: UnboundControl(fullOutput), } err := v.Gather(acc) @@ -38,7 +35,7 @@ func TestParseFullOutput(t *testing.T) { func TestParseFullOutputThreadAsTag(t *testing.T) { acc := &testutil.Accumulator{} v := &Unbound{ - run: UnboundControl(fullOutput, TestTimeout, true, "", true, ""), + run: UnboundControl(fullOutput), ThreadAsTag: true, } err := v.Gather(acc) diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index 96e5c35562208..ee89105363235 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -7,14 +7,14 @@ import ( "fmt" "strings" "testing" - "time" + + "github.com/stretchr/testify/assert" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) -func fakeVarnishStat(output string, useSudo bool, InstanceName string, Timeout internal.Duration) func(string, bool, string, internal.Duration) (*bytes.Buffer, error) { +func fakeVarnishStat(output string) func(string, bool, string, internal.Duration) (*bytes.Buffer, error) { return func(string, bool, string, internal.Duration) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } @@ -23,7 +23,7 @@ func fakeVarnishStat(output string, useSudo bool, InstanceName string, Timeout i func TestGather(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(smOutput, false, "", internal.Duration{Duration: time.Second}), + run: fakeVarnishStat(smOutput), Stats: []string{"*"}, } v.Gather(acc) @@ -39,7 +39,7 @@ func TestGather(t *testing.T) { func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, true, "", internal.Duration{Duration: time.Second}), + run: fakeVarnishStat(fullOutput), Stats: []string{"*"}, } err := v.Gather(acc) @@ -54,7 +54,7 @@ func TestParseFullOutput(t *testing.T) { func TestFilterSomeStats(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, false, "", internal.Duration{Duration: time.Second}), + run: fakeVarnishStat(fullOutput), Stats: []string{"MGT.*", "VBE.*"}, } err := v.Gather(acc) @@ -77,7 +77,7 @@ func TestFieldConfig(t *testing.T) { for fieldCfg, expected := range expect { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, true, "", internal.Duration{Duration: time.Second}), + run: fakeVarnishStat(fullOutput), Stats: strings.Split(fieldCfg, ","), } err := v.Gather(acc) diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go index b3096f7be300b..0eae3be6ffb97 100644 --- a/plugins/inputs/vsphere/client.go +++ b/plugins/inputs/vsphere/client.go @@ -10,7 +10,6 @@ import ( "sync" "time" - "github.com/influxdata/telegraf" "github.com/vmware/govmomi" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/performance" @@ -20,6 +19,8 @@ import ( "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" + + "github.com/influxdata/telegraf" ) // The highest number of metrics we can query for, no matter what settings @@ -30,10 +31,10 @@ const absoluteMaxMetrics = 10000 // a single Client is reused across all functions and goroutines, but the client // is periodically recycled to avoid authentication expiration issues. type ClientFactory struct { - client *Client - mux sync.Mutex - url *url.URL - parent *VSphere + client *Client + mux sync.Mutex + vSphereURL *url.URL + parent *VSphere } // Client represents a connection to vSphere and is backed by a govmomi connection @@ -49,11 +50,11 @@ type Client struct { } // NewClientFactory creates a new ClientFactory and prepares it for use. -func NewClientFactory(ctx context.Context, url *url.URL, parent *VSphere) *ClientFactory { +func NewClientFactory(vSphereURL *url.URL, parent *VSphere) *ClientFactory { return &ClientFactory{ - client: nil, - parent: parent, - url: url, + client: nil, + parent: parent, + vSphereURL: vSphereURL, } } @@ -66,7 +67,7 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { for { if cf.client == nil { var err error - if cf.client, err = NewClient(ctx, cf.url, cf.parent); err != nil { + if cf.client, err = NewClient(ctx, cf.vSphereURL, cf.parent); err != nil { return nil, err } } @@ -98,8 +99,8 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { } // NewClient creates a new vSphere client based on the url and setting passed as parameters. -func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { - sw := NewStopwatch("connect", u.Host) +func NewClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*Client, error) { + sw := NewStopwatch("connect", vSphereURL.Host) defer sw.Stop() tlsCfg, err := vs.ClientConfig.TLSConfig() @@ -111,14 +112,14 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { tlsCfg = &tls.Config{} } if vs.Username != "" { - u.User = url.UserPassword(vs.Username, vs.Password) + vSphereURL.User = url.UserPassword(vs.Username, vs.Password) } - vs.Log.Debugf("Creating client: %s", u.Host) - soapClient := soap.NewClient(u, tlsCfg.InsecureSkipVerify) + vs.Log.Debugf("Creating client: %s", vSphereURL.Host) + soapClient := soap.NewClient(vSphereURL, tlsCfg.InsecureSkipVerify) // Add certificate if we have it. Use it to log us in. - if tlsCfg != nil && len(tlsCfg.Certificates) > 0 { + if len(tlsCfg.Certificates) > 0 { soapClient.SetCertificate(tlsCfg.Certificates[0]) } @@ -154,8 +155,8 @@ func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { } // Only login if the URL contains user information. - if u.User != nil { - if err := c.Login(ctx, u.User); err != nil { + if vSphereURL.User != nil { + if err := c.Login(ctx, vSphereURL.User); err != nil { return nil, err } } diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 0eb1b0ab935dc..f8916242286e1 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -31,8 +31,6 @@ var isIPv6 = regexp.MustCompile("^(?:[A-Fa-f0-9]{0,4}:){1,7}[A-Fa-f0-9]{1,4}$") const metricLookback = 3 // Number of time periods to look back at for non-realtime metrics -const rtMetricLookback = 3 // Number of time periods to look back at for realtime metrics - const maxSampleConst = 10 // Absolute maximum number of samples regardless of period const maxMetadataSamples = 100 // Number of resources to sample for metric metadata @@ -126,7 +124,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL, log telegra hwMarks: NewTSCache(hwMarkTTL), lun2ds: make(map[string]string), initialized: false, - clientFactory: NewClientFactory(ctx, url, parent), + clientFactory: NewClientFactory(url, parent), customAttrFilter: newFilterOrPanic(parent.CustomAttributeInclude, parent.CustomAttributeExclude), customAttrEnabled: anythingEnabled(parent.CustomAttributeExclude), log: log, @@ -875,7 +873,7 @@ func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job queryJob, pq }) } -func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, acc telegraf.Accumulator, job queryJob) { +func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, job queryJob) { te := NewThrottledExecutor(e.Parent.CollectConcurrency) maxMetrics := e.Parent.MaxQueryMetrics if maxMetrics < 1 { @@ -1017,9 +1015,9 @@ func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc latestSample := time.Time{} // Divide workload into chunks and process them concurrently - e.chunkify(ctx, res, now, latest, acc, + e.chunkify(ctx, res, now, latest, func(chunk queryChunk) { - n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, now, estInterval) + n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, estInterval) e.log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n) if err != nil { acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error())) @@ -1081,7 +1079,7 @@ func (e *Endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, int return rInfo, rValues } -func (e *Endpoint) collectChunk(ctx context.Context, pqs queryChunk, res *resourceKind, acc telegraf.Accumulator, now time.Time, interval time.Duration) (int, time.Time, error) { +func (e *Endpoint) collectChunk(ctx context.Context, pqs queryChunk, res *resourceKind, acc telegraf.Accumulator, interval time.Duration) (int, time.Time, error) { e.log.Debugf("Query for %s has %d QuerySpecs", res.name, len(pqs)) latestSample := time.Time{} count := 0 diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 300261358270d..a18a5ca70f982 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -258,7 +258,7 @@ func (v *VSphere) Description() string { // Start is called from telegraf core when a plugin is started and allows it to // perform initialization tasks. -func (v *VSphere) Start(acc telegraf.Accumulator) error { +func (v *VSphere) Start(_ telegraf.Accumulator) error { v.Log.Info("Starting plugin") ctx, cancel := context.WithCancel(context.Background()) v.cancel = cancel diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index 60e7e1c8dbf27..e0bcaac1c8eca 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -517,11 +517,11 @@ func testCollection(t *testing.T, excludeClusters bool) { hostMoid = hosts[0].Reference().Value hostCache[hostName] = hostMoid } - if isInCluster(t, v, client, cache, "HostSystem", hostMoid) { // If the VM lives in a cluster + if isInCluster(v, client, cache, "HostSystem", hostMoid) { // If the VM lives in a cluster mustContainAll(t, m.Tags, []string{"clustername"}) } } else if strings.HasPrefix(m.Measurement, "vsphere.host.") { - if isInCluster(t, v, client, cache, "HostSystem", m.Tags["moid"]) { // If the host lives in a cluster + if isInCluster(v, client, cache, "HostSystem", m.Tags["moid"]) { // If the host lives in a cluster mustContainAll(t, m.Tags, []string{"esxhostname", "clustername", "moid", "dcname"}) } else { mustContainAll(t, m.Tags, []string{"esxhostname", "moid", "dcname"}) @@ -535,7 +535,7 @@ func testCollection(t *testing.T, excludeClusters bool) { require.Empty(t, mustHaveMetrics, "Some metrics were not found") } -func isInCluster(t *testing.T, v *VSphere, client *Client, cache map[string]string, resourceKind, moid string) bool { +func isInCluster(v *VSphere, client *Client, cache map[string]string, resourceKind, moid string) bool { ctx := context.Background() ref := types.ManagedObjectReference{ Type: resourceKind, diff --git a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go index 4a14c88947f97..a7e219c53c905 100644 --- a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go +++ b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go @@ -25,7 +25,7 @@ func (md *MandrillWebhook) Register(router *mux.Router, acc telegraf.Accumulator md.acc = acc } -func (md *MandrillWebhook) returnOK(w http.ResponseWriter, r *http.Request) { +func (md *MandrillWebhook) returnOK(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) } diff --git a/plugins/inputs/zfs/zfs.go b/plugins/inputs/zfs/zfs.go index 297e3cc07ec42..4e1999cf68d30 100644 --- a/plugins/inputs/zfs/zfs.go +++ b/plugins/inputs/zfs/zfs.go @@ -13,9 +13,9 @@ type Zfs struct { KstatMetrics []string PoolMetrics bool DatasetMetrics bool - sysctl Sysctl - zpool Zpool - zdataset Zdataset + sysctl Sysctl //nolint:varcheck,unused // False positive - this var is used for non-default build tag: freebsd + zpool Zpool //nolint:varcheck,unused // False positive - this var is used for non-default build tag: freebsd + zdataset Zdataset //nolint:varcheck,unused // False positive - this var is used for non-default build tag: freebsd Log telegraf.Logger `toml:"-"` } diff --git a/plugins/inputs/zfs/zfs_linux_test.go b/plugins/inputs/zfs/zfs_linux_test.go index 8690fee4c3bf9..7d8aff81c689c 100644 --- a/plugins/inputs/zfs/zfs_linux_test.go +++ b/plugins/inputs/zfs/zfs_linux_test.go @@ -182,67 +182,6 @@ scatter_page_alloc_retry 4 99311 scatter_sg_table_retry 4 99221 ` -const dbufcachestatsContents = ` -15 1 0x01 11 2992 6257505590736 8516276189184 -name type data -size 4 242688 -size_max 4 338944 -max_bytes 4 62834368 -lowater_bytes 4 56550932 -hiwater_bytes 4 69117804 -total_evicts 4 0 -hash_collisions 4 0 -hash_elements 4 31 -hash_elements_max 4 32 -hash_chains 4 0 -hash_chain_max 4 0 -` - -const dnodestatsContents = ` -10 1 0x01 28 7616 6257498525011 8671911551753 -name type data -dnode_hold_dbuf_hold 4 0 -dnode_hold_dbuf_read 4 0 -dnode_hold_alloc_hits 4 1460 -dnode_hold_alloc_misses 4 0 -dnode_hold_alloc_interior 4 0 -dnode_hold_alloc_lock_retry 4 0 -dnode_hold_alloc_lock_misses 4 0 -dnode_hold_alloc_type_none 4 0 -dnode_hold_free_hits 4 2 -dnode_hold_free_misses 4 0 -dnode_hold_free_lock_misses 4 0 -dnode_hold_free_lock_retry 4 0 -dnode_hold_free_overflow 4 0 -dnode_hold_free_refcount 4 0 -dnode_hold_free_txg 4 0 -dnode_allocate 4 2 -dnode_reallocate 4 0 -dnode_buf_evict 4 6 -dnode_alloc_next_chunk 4 1 -dnode_alloc_race 4 0 -dnode_alloc_next_block 4 0 -dnode_move_invalid 4 0 -dnode_move_recheck1 4 0 -dnode_move_recheck2 4 0 -dnode_move_special 4 0 -dnode_move_handle 4 0 -dnode_move_rwlock 4 0 -dnode_move_active 4 0 -` - -const vdevmirrorcachestatsContents = ` -18 1 0x01 7 1904 6257505684227 9638257816287 -name type data -rotating_linear 4 0 -rotating_offset 4 0 -rotating_seek 4 0 -non_rotating_linear 4 0 -non_rotating_seek 4 0 -preferred_found 4 0 -preferred_not_found 4 43 -` - var testKstatPath = os.TempDir() + "/telegraf/proc/spl/kstat/zfs" func TestZfsPoolMetrics(t *testing.T) { diff --git a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go index ea25b49a0fcca..61c2eda12bd96 100644 --- a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go +++ b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go @@ -35,8 +35,6 @@ var ( ZipkinServerHost string ) -const usage = `./stress_test_write -batch_size= -max_backlog= -batch_interval= -span_count -zipkin_host=` - func init() { flag.IntVar(&BatchSize, "batch_size", 10000, "") flag.IntVar(&MaxBackLog, "max_backlog", 100000, "") diff --git a/plugins/inputs/zipkin/codec/codec_test.go b/plugins/inputs/zipkin/codec/codec_test.go index 3525f30c201d6..c3d2fa655dcc6 100644 --- a/plugins/inputs/zipkin/codec/codec_test.go +++ b/plugins/inputs/zipkin/codec/codec_test.go @@ -12,9 +12,6 @@ import ( ) func Test_MicroToTime(t *testing.T) { - type args struct { - micro int64 - } tests := []struct { name string micro int64 diff --git a/plugins/inputs/zipkin/zipkin.go b/plugins/inputs/zipkin/zipkin.go index 4224fea3d2928..d0cf9b38dda64 100644 --- a/plugins/inputs/zipkin/zipkin.go +++ b/plugins/inputs/zipkin/zipkin.go @@ -79,7 +79,7 @@ func (z Zipkin) SampleConfig() string { // Gather is empty for the zipkin plugin; all gathering is done through // the separate goroutine launched in (*Zipkin).Start() -func (z *Zipkin) Gather(acc telegraf.Accumulator) error { return nil } +func (z *Zipkin) Gather(_ telegraf.Accumulator) error { return nil } // Start launches a separate goroutine for collecting zipkin client http requests, // passing in a telegraf.Accumulator such that data can be collected. diff --git a/plugins/outputs/amqp/amqp_test.go b/plugins/outputs/amqp/amqp_test.go index 32a9145281e48..537dd7d049b8c 100644 --- a/plugins/outputs/amqp/amqp_test.go +++ b/plugins/outputs/amqp/amqp_test.go @@ -15,8 +15,6 @@ type MockClient struct { PublishCallCount int CloseCallCount int - - t *testing.T } func (c *MockClient) Publish(key string, body []byte) error { @@ -29,10 +27,6 @@ func (c *MockClient) Close() error { return c.CloseF() } -func MockConnect(config *ClientConfig) (Client, error) { - return &MockClient{}, nil -} - func NewMockClient() Client { return &MockClient{ PublishF: func(key string, body []byte) error { diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index ceaad4a4e9472..8d4fb755646f5 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -459,15 +459,6 @@ func findTransmittedTelemetry(transmitter *mocks.Transmitter, telemetryName stri return nil } -func keys(m map[string]string) []string { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - return keys -} - func assertMapContains(assert *assert.Assertions, expected, actual map[string]string) { if expected == nil && actual == nil { return diff --git a/plugins/outputs/cloud_pubsub/topic_gcp.go b/plugins/outputs/cloud_pubsub/topic_gcp.go index a85c6f39eb8f6..72ef50efcba79 100644 --- a/plugins/outputs/cloud_pubsub/topic_gcp.go +++ b/plugins/outputs/cloud_pubsub/topic_gcp.go @@ -6,8 +6,6 @@ import ( ) type ( - topicFactory func(string) (topic, error) - topic interface { ID() string Stop() diff --git a/plugins/outputs/cratedb/cratedb_test.go b/plugins/outputs/cratedb/cratedb_test.go index 993c9d7cbfeb2..43297afe2e5e7 100644 --- a/plugins/outputs/cratedb/cratedb_test.go +++ b/plugins/outputs/cratedb/cratedb_test.go @@ -216,6 +216,7 @@ func Test_hashID(t *testing.T) { } } +//nolint:unused // Used in skipped tests func testURL() string { url := os.Getenv("CRATE_URL") if url == "" { diff --git a/plugins/outputs/discard/discard.go b/plugins/outputs/discard/discard.go index 919f74b477ffa..de3696c3e6148 100644 --- a/plugins/outputs/discard/discard.go +++ b/plugins/outputs/discard/discard.go @@ -11,7 +11,7 @@ func (d *Discard) Connect() error { return nil } func (d *Discard) Close() error { return nil } func (d *Discard) SampleConfig() string { return "" } func (d *Discard) Description() string { return "Send metrics to nowhere at all" } -func (d *Discard) Write(metrics []telegraf.Metric) error { +func (d *Discard) Write(_ []telegraf.Metric) error { return nil } diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index c6cad89c0ba51..05feafe9effc1 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -70,13 +70,13 @@ func (g *Gelf) Write(message []byte) (n int, err error) { for i, index := 0, 0; i < length; i, index = i+chunksize, index+1 { packet := g.createChunkedMessage(index, chunkCountInt, id, &compressed) - _, err = g.send(packet.Bytes()) + err = g.send(packet.Bytes()) if err != nil { return 0, err } } } else { - _, err = g.send(compressed.Bytes()) + err = g.send(compressed.Bytes()) if err != nil { return 0, err } @@ -133,19 +133,19 @@ func (g *Gelf) compress(b []byte) bytes.Buffer { return buf } -func (g *Gelf) send(b []byte) (n int, err error) { +func (g *Gelf) send(b []byte) error { udpAddr, err := net.ResolveUDPAddr("udp", g.GelfConfig.GraylogEndpoint) if err != nil { - return + return err } conn, err := net.DialUDP("udp", nil, udpAddr) if err != nil { - return + return err } - n, err = conn.Write(b) - return + _, err = conn.Write(b) + return err } type Graylog struct { diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go index c7b584076e779..7447c404c9b2d 100644 --- a/plugins/outputs/health/health.go +++ b/plugins/outputs/health/health.go @@ -178,7 +178,7 @@ func (h *Health) listen() (net.Listener, error) { return net.Listen(h.network, h.address) } -func (h *Health) ServeHTTP(rw http.ResponseWriter, req *http.Request) { +func (h *Health) ServeHTTP(rw http.ResponseWriter, _ *http.Request) { var code = http.StatusOK if !h.isHealthy() { code = http.StatusServiceUnavailable diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index 510df9463265b..62848417b124c 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -115,7 +115,7 @@ func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error return nil } -func (c *udpClient) CreateDatabase(ctx context.Context, database string) error { +func (c *udpClient) CreateDatabase(_ context.Context, _ string) error { return nil } diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go index 2e60c586c7a03..1c5696cf10fe5 100644 --- a/plugins/outputs/influxdb/udp_test.go +++ b/plugins/outputs/influxdb/udp_test.go @@ -62,7 +62,7 @@ type MockDialer struct { DialContextF func(network, address string) (influxdb.Conn, error) } -func (d *MockDialer) DialContext(ctx context.Context, network string, address string) (influxdb.Conn, error) { +func (d *MockDialer) DialContext(_ context.Context, network string, address string) (influxdb.Conn, error) { return d.DialContextF(network, address) } diff --git a/plugins/outputs/influxdb_v2/influxdb_test.go b/plugins/outputs/influxdb_v2/influxdb_test.go index 90a3823915a5b..b16fd944d28db 100644 --- a/plugins/outputs/influxdb_v2/influxdb_test.go +++ b/plugins/outputs/influxdb_v2/influxdb_test.go @@ -94,7 +94,7 @@ func TestConnect(t *testing.T) { } } -func TestUnused(t *testing.T) { +func TestUnused(_ *testing.T) { thing := influxdb.InfluxDB{} thing.Close() thing.Description() diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index dde2c8e62e4d0..5aad62f48e408 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -1,7 +1,6 @@ package kafka import ( - "crypto/tls" "fmt" "log" "strings" @@ -44,8 +43,6 @@ type Kafka struct { Log telegraf.Logger `toml:"-"` - tlsConfig tls.Config - producerFunc func(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) producer sarama.SyncProducer diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index 52b020813975b..2d786013c9a24 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -179,7 +179,7 @@ func (p *MockProducer) Close() error { return nil } -func NewMockProducer(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) { +func NewMockProducer(_ []string, _ *sarama.Config) (sarama.SyncProducer, error) { return &MockProducer{}, nil } diff --git a/plugins/outputs/newrelic/newrelic_test.go b/plugins/outputs/newrelic/newrelic_test.go index 1eedc63f44116..2d679bf3cecbc 100644 --- a/plugins/outputs/newrelic/newrelic_test.go +++ b/plugins/outputs/newrelic/newrelic_test.go @@ -31,9 +31,6 @@ func TestBasic(t *testing.T) { } func TestNewRelic_Write(t *testing.T) { - type args struct { - metrics []telegraf.Metric - } tests := []struct { name string metrics []telegraf.Metric diff --git a/plugins/outputs/prometheus_client/v2/collector.go b/plugins/outputs/prometheus_client/v2/collector.go index b28a4deab1cc9..5c569685de5cb 100644 --- a/plugins/outputs/prometheus_client/v2/collector.go +++ b/plugins/outputs/prometheus_client/v2/collector.go @@ -59,7 +59,7 @@ func NewCollector(expire time.Duration, stringsAsLabel bool, exportTimestamp boo } } -func (c *Collector) Describe(ch chan<- *prometheus.Desc) { +func (c *Collector) Describe(_ chan<- *prometheus.Desc) { // Sending no descriptor at all marks the Collector as "unchecked", // i.e. no checks will be performed at registration time, and the // Collector may yield any Metric it sees fit in its Collect method. diff --git a/plugins/outputs/signalfx/signalfx.go b/plugins/outputs/signalfx/signalfx.go index 87285750735c5..b5552ee0e3830 100644 --- a/plugins/outputs/signalfx/signalfx.go +++ b/plugins/outputs/signalfx/signalfx.go @@ -6,8 +6,6 @@ import ( "fmt" "strings" - "sync" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" "github.com/signalfx/golib/v3/datapoint" @@ -37,7 +35,6 @@ type SignalFx struct { ctx context.Context cancel context.CancelFunc - wg sync.WaitGroup } var sampleConfig = ` diff --git a/plugins/outputs/signalfx/signalfx_test.go b/plugins/outputs/signalfx/signalfx_test.go index 3c04c1ef100e2..3f081a16cbbd8 100644 --- a/plugins/outputs/signalfx/signalfx_test.go +++ b/plugins/outputs/signalfx/signalfx_test.go @@ -21,11 +21,11 @@ type sink struct { evs []*event.Event } -func (s *sink) AddDatapoints(ctx context.Context, points []*datapoint.Datapoint) error { +func (s *sink) AddDatapoints(_ context.Context, points []*datapoint.Datapoint) error { s.dps = append(s.dps, points...) return nil } -func (s *sink) AddEvents(ctx context.Context, events []*event.Event) error { +func (s *sink) AddEvents(_ context.Context, events []*event.Event) error { s.evs = append(s.evs, events...) return nil } @@ -35,10 +35,10 @@ type errorsink struct { evs []*event.Event } -func (e *errorsink) AddDatapoints(ctx context.Context, points []*datapoint.Datapoint) error { +func (e *errorsink) AddDatapoints(_ context.Context, _ []*datapoint.Datapoint) error { return errors.New("not sending datapoints") } -func (e *errorsink) AddEvents(ctx context.Context, events []*event.Event) error { +func (e *errorsink) AddEvents(_ context.Context, _ []*event.Event) error { return errors.New("not sending events") } func TestSignalFx_SignalFx(t *testing.T) { diff --git a/plugins/outputs/sumologic/sumologic.go b/plugins/outputs/sumologic/sumologic.go index da86eadfae585..22b64a8e6a114 100644 --- a/plugins/outputs/sumologic/sumologic.go +++ b/plugins/outputs/sumologic/sumologic.go @@ -3,7 +3,6 @@ package sumologic import ( "bytes" "compress/gzip" - "context" "log" "net/http" "time" @@ -139,13 +138,13 @@ func (s *SumoLogic) SetSerializer(serializer serializers.Serializer) { s.serializer = serializer } -func (s *SumoLogic) createClient(ctx context.Context) (*http.Client, error) { +func (s *SumoLogic) createClient() *http.Client { return &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, Timeout: s.Timeout.Duration, - }, nil + } } func (s *SumoLogic) Connect() error { @@ -157,12 +156,7 @@ func (s *SumoLogic) Connect() error { s.Timeout.Duration = defaultClientTimeout } - client, err := s.createClient(context.Background()) - if err != nil { - return err - } - - s.client = client + s.client = s.createClient() return nil } diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index 48450ab450f3e..d6fe2731fcd3e 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -39,7 +39,8 @@ func getMetric(t *testing.T) telegraf.Metric { return m } -func getMetrics(t *testing.T, count int) []telegraf.Metric { +func getMetrics(t *testing.T) []telegraf.Metric { + const count = 100 var metrics = make([]telegraf.Metric, count) for i := 0; i < count; i++ { @@ -450,8 +451,6 @@ func TestMaxRequestBodySize(t *testing.T) { u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) require.NoError(t, err) - const count = 100 - testcases := []struct { name string plugin func() *SumoLogic @@ -479,7 +478,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.URL = u.String() return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 1, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -494,7 +493,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 43_749 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 2, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -507,7 +506,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 10_000 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 5, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -520,7 +519,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 5_000 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 10, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -533,7 +532,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 2_500 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 20, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -546,7 +545,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 1_000 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 50, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -559,7 +558,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 500 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 100, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -572,7 +571,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 300 return s }, - metrics: getMetrics(t, count), + metrics: getMetrics(t), expectedError: false, expectedRequestCount: 100, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index 2e601fc16f29a..1988bc6e1963f 100644 --- a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -214,10 +214,6 @@ func boolToString(inputBool bool) string { return strconv.FormatBool(inputBool) } -func uIntToString(inputNum uint64) string { - return strconv.FormatUint(inputNum, 10) -} - func floatToString(inputNum float64) string { return strconv.FormatFloat(inputNum, 'f', 6, 64) } diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go index db62358777c9a..a3a7ea04d60d4 100644 --- a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring_test.go @@ -14,12 +14,12 @@ import ( ) func TestWrite(t *testing.T) { - readBody := func(r *http.Request) (yandexCloudMonitoringMessage, error) { + readBody := func(r *http.Request) yandexCloudMonitoringMessage { decoder := json.NewDecoder(r.Body) var message yandexCloudMonitoringMessage err := decoder.Decode(&message) require.NoError(t, err) - return message, nil + return message } testMetadataHTTPServer := httptest.NewServer( @@ -67,8 +67,7 @@ func TestWrite(t *testing.T) { ), }, handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - message, err := readBody(r) - require.NoError(t, err) + message := readBody(r) require.Len(t, message.Metrics, 1) require.Equal(t, "cpu", message.Metrics[0].Name) require.Equal(t, 42.0, message.Metrics[0].Value) diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 3f370b507dc4f..9b3219a0580fb 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -80,7 +80,7 @@ func (p *Parser) SetTimeFunc(fn TimeFunc) { p.TimeFunc = fn } -func (p *Parser) compile(r io.Reader) (*csv.Reader, error) { +func (p *Parser) compile(r io.Reader) *csv.Reader { csvReader := csv.NewReader(r) // ensures that the reader reads records of different lengths without an error csvReader.FieldsPerRecord = -1 @@ -91,15 +91,12 @@ func (p *Parser) compile(r io.Reader) (*csv.Reader, error) { csvReader.Comment = []rune(p.Comment)[0] } csvReader.TrimLeadingSpace = p.TrimSpace - return csvReader, nil + return csvReader } func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { r := bytes.NewReader(buf) - csvReader, err := p.compile(r) - if err != nil { - return nil, err - } + csvReader := p.compile(r) // skip first rows for i := 0; i < p.SkipRows; i++ { _, err := csvReader.Read() @@ -163,11 +160,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { // it will also not skip any rows func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { r := bytes.NewReader([]byte(line)) - csvReader, err := p.compile(r) - if err != nil { - return nil, err - } - + csvReader := p.compile(r) // if there is nothing in DataColumns, ParseLine will fail if len(p.ColumnNames) == 0 { return nil, fmt.Errorf("[parsers.csv] data columns must be specified") diff --git a/plugins/parsers/dropwizard/parser.go b/plugins/parsers/dropwizard/parser.go index 179de7dd77e37..43b8c139f3220 100644 --- a/plugins/parsers/dropwizard/parser.go +++ b/plugins/parsers/dropwizard/parser.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "log" - "strings" "time" "github.com/influxdata/telegraf" @@ -14,16 +13,12 @@ import ( "github.com/tidwall/gjson" ) -var fieldEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") -var keyEscaper = strings.NewReplacer(" ", "\\ ", ",", "\\,", "=", "\\=") - type TimeFunc func() time.Time // Parser parses json inputs containing dropwizard metrics, // either top-level or embedded inside a json field. // This parser is using gjson for retrieving paths within the json file. type parser struct { - // an optional json path containing the metric registry object // if left empty, the whole json object is parsed as a metric registry MetricRegistryPath string diff --git a/plugins/parsers/influx/handler.go b/plugins/parsers/influx/handler.go index ae08d5a7c0870..efd7329ca9c29 100644 --- a/plugins/parsers/influx/handler.go +++ b/plugins/parsers/influx/handler.go @@ -12,7 +12,6 @@ import ( // MetricHandler implements the Handler interface and produces telegraf.Metric. type MetricHandler struct { - err error timePrecision time.Duration timeFunc TimeFunc metric telegraf.Metric diff --git a/plugins/parsers/influx/machine_test.go b/plugins/parsers/influx/machine_test.go index de5353da0c446..735b5b9114ddd 100644 --- a/plugins/parsers/influx/machine_test.go +++ b/plugins/parsers/influx/machine_test.go @@ -169,35 +169,35 @@ func (h *TestingHandler) Results() []Result { type BenchmarkingHandler struct { } -func (h *BenchmarkingHandler) SetMeasurement(name []byte) error { +func (h *BenchmarkingHandler) SetMeasurement(_ []byte) error { return nil } -func (h *BenchmarkingHandler) AddTag(key []byte, value []byte) error { +func (h *BenchmarkingHandler) AddTag(_ []byte, _ []byte) error { return nil } -func (h *BenchmarkingHandler) AddInt(key []byte, value []byte) error { +func (h *BenchmarkingHandler) AddInt(_ []byte, _ []byte) error { return nil } -func (h *BenchmarkingHandler) AddUint(key []byte, value []byte) error { +func (h *BenchmarkingHandler) AddUint(_ []byte, _ []byte) error { return nil } -func (h *BenchmarkingHandler) AddFloat(key []byte, value []byte) error { +func (h *BenchmarkingHandler) AddFloat(_ []byte, _ []byte) error { return nil } -func (h *BenchmarkingHandler) AddString(key []byte, value []byte) error { +func (h *BenchmarkingHandler) AddString(_ []byte, _ []byte) error { return nil } -func (h *BenchmarkingHandler) AddBool(key []byte, value []byte) error { +func (h *BenchmarkingHandler) AddBool(_ []byte, _ []byte) error { return nil } -func (h *BenchmarkingHandler) SetTimestamp(tm []byte) error { +func (h *BenchmarkingHandler) SetTimestamp(_ []byte) error { return nil } @@ -1832,7 +1832,7 @@ func BenchmarkMachine(b *testing.B) { } } -func TestMachineProcstat(t *testing.T) { +func TestMachineProcstat(_ *testing.T) { input := []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000") handler := &TestingHandler{} fsm := influx.NewMachine(handler) diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go index 7f5b5937ec0c9..d269debd62ce7 100644 --- a/plugins/parsers/nagios/parser_test.go +++ b/plugins/parsers/nagios/parser_test.go @@ -65,14 +65,6 @@ func (b *metricBuilder) n(v string) *metricBuilder { return b } -func (b *metricBuilder) t(k, v string) *metricBuilder { - if b.tags == nil { - b.tags = make(map[string]string) - } - b.tags[k] = v - return b -} - func (b *metricBuilder) f(k string, v interface{}) *metricBuilder { if b.fields == nil { b.fields = make(map[string]interface{}) @@ -81,11 +73,6 @@ func (b *metricBuilder) f(k string, v interface{}) *metricBuilder { return b } -func (b *metricBuilder) ts(v time.Time) *metricBuilder { - b.timestamp = v - return b -} - func (b *metricBuilder) b() telegraf.Metric { m, err := metric.New(b.name, b.tags, b.fields, b.timestamp) if err != nil { diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go index 8b8a4ad2ff7b0..f53b926bda4a5 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -24,9 +24,6 @@ cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8. get_token_fail_count 0 ` - validUniqueLine = `# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source -` - validUniqueSummary = `# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. # TYPE http_request_duration_microseconds summary http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506 diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go index 86fd166ffc8c3..4afa199663733 100644 --- a/plugins/parsers/wavefront/element.go +++ b/plugins/parsers/wavefront/element.go @@ -30,9 +30,6 @@ type LoopedParser struct { wrappedParser ElementParser wsParser *WhiteSpaceParser } -type LiteralParser struct { - literal string -} func (ep *NameParser) parse(p *PointParser, pt *Point) error { //Valid characters are: a-z, A-Z, 0-9, hyphen ("-"), underscore ("_"), dot ("."). @@ -168,7 +165,7 @@ func (ep *TagParser) parse(p *PointParser, pt *Point) error { return nil } -func (ep *WhiteSpaceParser) parse(p *PointParser, pt *Point) error { +func (ep *WhiteSpaceParser) parse(p *PointParser, _ *Point) error { tok := Ws for tok != EOF && tok == Ws { tok, _ = p.scan() @@ -184,18 +181,6 @@ func (ep *WhiteSpaceParser) parse(p *PointParser, pt *Point) error { return nil } -func (ep *LiteralParser) parse(p *PointParser, pt *Point) error { - l, err := parseLiteral(p) - if err != nil { - return err - } - - if l != ep.literal { - return fmt.Errorf("found %s, expected %s", l, ep.literal) - } - return nil -} - func parseQuotedLiteral(p *PointParser) (string, error) { p.writeBuf.Reset() diff --git a/plugins/parsers/wavefront/scanner.go b/plugins/parsers/wavefront/scanner.go index 5d22c53896762..70558e604dbb8 100644 --- a/plugins/parsers/wavefront/scanner.go +++ b/plugins/parsers/wavefront/scanner.go @@ -24,11 +24,6 @@ func (s *PointScanner) read() rune { return ch } -// unread places the previously read rune back on the reader. -func (s *PointScanner) unread() { - _ = s.r.UnreadRune() -} - // Scan returns the next token and literal value. func (s *PointScanner) Scan() (Token, string) { // Read the next rune diff --git a/plugins/processors/aws/ec2/ec2.go b/plugins/processors/aws/ec2/ec2.go index 8d22a65305ccd..7126214152a51 100644 --- a/plugins/processors/aws/ec2/ec2.go +++ b/plugins/processors/aws/ec2/ec2.go @@ -113,7 +113,7 @@ func (r *AwsEc2Processor) Description() string { return "Attach AWS EC2 metadata to metrics" } -func (r *AwsEc2Processor) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { +func (r *AwsEc2Processor) Add(metric telegraf.Metric, _ telegraf.Accumulator) error { r.parallel.Enqueue(metric) return nil } diff --git a/plugins/processors/dedup/dedup_test.go b/plugins/processors/dedup/dedup_test.go index cae2bf1a529ed..b2fc9ca1fc546 100644 --- a/plugins/processors/dedup/dedup_test.go +++ b/plugins/processors/dedup/dedup_test.go @@ -11,8 +11,10 @@ import ( "github.com/influxdata/telegraf/metric" ) -func createMetric(name string, value int64, when time.Time) telegraf.Metric { - m, _ := metric.New(name, +const metricName = "m1" + +func createMetric(value int64, when time.Time) telegraf.Metric { + m, _ := metric.New(metricName, map[string]string{"tag": "tag_value"}, map[string]interface{}{"value": value}, when, @@ -70,7 +72,7 @@ func assertMetricPassed(t *testing.T, target []telegraf.Metric, source telegraf. // target is not empty require.NotEqual(t, 0, len(target)) // target has metric with proper name - require.Equal(t, "m1", target[0].Name()) + require.Equal(t, metricName, target[0].Name()) // target metric has proper field tValue, present := target[0].GetField("value") require.True(t, present) @@ -80,14 +82,14 @@ func assertMetricPassed(t *testing.T, target []telegraf.Metric, source telegraf. require.Equal(t, target[0].Time(), source.Time()) } -func assertMetricSuppressed(t *testing.T, target []telegraf.Metric, source telegraf.Metric) { +func assertMetricSuppressed(t *testing.T, target []telegraf.Metric) { // target is empty require.Equal(t, 0, len(target)) } func TestProcRetainsMetric(t *testing.T) { deduplicate := createDedup(time.Now()) - source := createMetric("m1", 1, time.Now()) + source := createMetric(1, time.Now()) target := deduplicate.Apply(source) assertCacheRefresh(t, &deduplicate, source) @@ -97,21 +99,21 @@ func TestProcRetainsMetric(t *testing.T) { func TestSuppressRepeatedValue(t *testing.T) { deduplicate := createDedup(time.Now()) // Create metric in the past - source := createMetric("m1", 1, time.Now().Add(-1*time.Second)) + source := createMetric(1, time.Now().Add(-1*time.Second)) target := deduplicate.Apply(source) - source = createMetric("m1", 1, time.Now()) + source = createMetric(1, time.Now()) target = deduplicate.Apply(source) assertCacheHit(t, &deduplicate, source) - assertMetricSuppressed(t, target, source) + assertMetricSuppressed(t, target) } func TestPassUpdatedValue(t *testing.T) { deduplicate := createDedup(time.Now()) // Create metric in the past - source := createMetric("m1", 1, time.Now().Add(-1*time.Second)) + source := createMetric(1, time.Now().Add(-1*time.Second)) target := deduplicate.Apply(source) - source = createMetric("m1", 2, time.Now()) + source = createMetric(2, time.Now()) target = deduplicate.Apply(source) assertCacheRefresh(t, &deduplicate, source) @@ -121,9 +123,9 @@ func TestPassUpdatedValue(t *testing.T) { func TestPassAfterCacheExpire(t *testing.T) { deduplicate := createDedup(time.Now()) // Create metric in the past - source := createMetric("m1", 1, time.Now().Add(-1*time.Hour)) + source := createMetric(1, time.Now().Add(-1*time.Hour)) target := deduplicate.Apply(source) - source = createMetric("m1", 1, time.Now()) + source = createMetric(1, time.Now()) target = deduplicate.Apply(source) assertCacheRefresh(t, &deduplicate, source) @@ -133,12 +135,12 @@ func TestPassAfterCacheExpire(t *testing.T) { func TestCacheRetainsMetrics(t *testing.T) { deduplicate := createDedup(time.Now()) // Create metric in the past 3sec - source := createMetric("m1", 1, time.Now().Add(-3*time.Hour)) + source := createMetric(1, time.Now().Add(-3*time.Hour)) deduplicate.Apply(source) // Create metric in the past 2sec - source = createMetric("m1", 1, time.Now().Add(-2*time.Hour)) + source = createMetric(1, time.Now().Add(-2*time.Hour)) deduplicate.Apply(source) - source = createMetric("m1", 1, time.Now()) + source = createMetric(1, time.Now()) deduplicate.Apply(source) assertCacheRefresh(t, &deduplicate, source) @@ -148,7 +150,7 @@ func TestCacheShrink(t *testing.T) { // Time offset is more than 2 * DedupInterval deduplicate := createDedup(time.Now().Add(-2 * time.Hour)) // Time offset is more than 1 * DedupInterval - source := createMetric("m1", 1, time.Now().Add(-1*time.Hour)) + source := createMetric(1, time.Now().Add(-1*time.Hour)) deduplicate.Apply(source) require.Equal(t, 0, len(deduplicate.Cache)) diff --git a/plugins/processors/execd/execd.go b/plugins/processors/execd/execd.go index 3d11bac4969fe..992452561db29 100644 --- a/plugins/processors/execd/execd.go +++ b/plugins/processors/execd/execd.go @@ -94,7 +94,7 @@ func (e *Execd) Start(acc telegraf.Accumulator) error { return nil } -func (e *Execd) Add(m telegraf.Metric, acc telegraf.Accumulator) error { +func (e *Execd) Add(m telegraf.Metric, _ telegraf.Accumulator) error { b, err := e.serializer.Serialize(m) if err != nil { return fmt.Errorf("metric serializing error: %w", err) diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index d6c696b75bcf1..10cf38a3cec8a 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -226,7 +226,7 @@ func (d *IfName) Start(acc telegraf.Accumulator) error { return nil } -func (d *IfName) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { +func (d *IfName) Add(metric telegraf.Metric, _ telegraf.Accumulator) error { d.parallel.Enqueue(metric) return nil } diff --git a/plugins/processors/reverse_dns/rdnscache_test.go b/plugins/processors/reverse_dns/rdnscache_test.go index e8466c27fd315..97cc8abdbdff8 100644 --- a/plugins/processors/reverse_dns/rdnscache_test.go +++ b/plugins/processors/reverse_dns/rdnscache_test.go @@ -125,12 +125,12 @@ func TestLookupTimeout(t *testing.T) { type timeoutResolver struct{} -func (r *timeoutResolver) LookupAddr(ctx context.Context, addr string) (names []string, err error) { +func (r *timeoutResolver) LookupAddr(_ context.Context, _ string) (names []string, err error) { return nil, errors.New("timeout") } type localResolver struct{} -func (r *localResolver) LookupAddr(ctx context.Context, addr string) (names []string, err error) { +func (r *localResolver) LookupAddr(_ context.Context, _ string) (names []string, err error) { return []string{"localhost"}, nil } diff --git a/plugins/processors/reverse_dns/reversedns.go b/plugins/processors/reverse_dns/reversedns.go index 616294fc5e54d..966748420bc8d 100644 --- a/plugins/processors/reverse_dns/reversedns.go +++ b/plugins/processors/reverse_dns/reversedns.go @@ -104,7 +104,7 @@ func (r *ReverseDNS) Stop() error { return nil } -func (r *ReverseDNS) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { +func (r *ReverseDNS) Add(metric telegraf.Metric, _ telegraf.Accumulator) error { r.parallel.Enqueue(metric) return nil } diff --git a/plugins/processors/starlark/builtins.go b/plugins/processors/starlark/builtins.go index 53e31fb3a988c..8537c92f34953 100644 --- a/plugins/processors/starlark/builtins.go +++ b/plugins/processors/starlark/builtins.go @@ -9,7 +9,7 @@ import ( "go.starlark.net/starlark" ) -func newMetric(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func newMetric(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var name starlark.String if err := starlark.UnpackPositionalArgs("Metric", args, kwargs, 1, &name); err != nil { return nil, err @@ -23,7 +23,7 @@ func newMetric(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple return &Metric{metric: m}, nil } -func deepcopy(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func deepcopy(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var sm *Metric if err := starlark.UnpackPositionalArgs("deepcopy", args, kwargs, 1, &sm); err != nil { return nil, err @@ -71,12 +71,6 @@ func builtinAttrNames(methods map[string]builtinMethod) []string { return names } -// nameErr returns an error message of the form "name: msg" -// where name is b.Name() and msg is a string or error. -func nameErr(b *starlark.Builtin, msg interface{}) error { - return fmt.Errorf("%s: %v", b.Name(), msg) -} - // --- dictionary methods --- // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·clear diff --git a/plugins/processors/starlark/logging.go b/plugins/processors/starlark/logging.go index 35ba65d1db80f..35efa6a7effba 100644 --- a/plugins/processors/starlark/logging.go +++ b/plugins/processors/starlark/logging.go @@ -12,7 +12,7 @@ import ( // Builds a module that defines all the supported logging functions which will log using the provided logger func LogModule(logger telegraf.Logger) *starlarkstruct.Module { var logFunc = func(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - return log(t, b, args, kwargs, logger) + return log(b, args, kwargs, logger) } return &starlarkstruct.Module{ Name: "log", @@ -26,7 +26,7 @@ func LogModule(logger telegraf.Logger) *starlarkstruct.Module { } // Logs the provided message according to the level chosen -func log(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple, logger telegraf.Logger) (starlark.Value, error) { +func log(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple, logger telegraf.Logger) (starlark.Value, error) { var msg starlark.String if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &msg); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go index 64666398d2e50..ffd13680f88e5 100644 --- a/plugins/processors/starlark/starlark.go +++ b/plugins/processors/starlark/starlark.go @@ -61,7 +61,7 @@ func (s *Starlark) Init() error { s.thread = &starlark.Thread{ Print: func(_ *starlark.Thread, msg string) { s.Log.Debug(msg) }, Load: func(thread *starlark.Thread, module string) (starlark.StringDict, error) { - return loadFunc(thread, module, s.Log) + return loadFunc(module, s.Log) }, } @@ -136,7 +136,7 @@ func (s *Starlark) Description() string { return description } -func (s *Starlark) Start(acc telegraf.Accumulator) error { +func (s *Starlark) Start(_ telegraf.Accumulator) error { return nil } @@ -242,7 +242,7 @@ func init() { }) } -func loadFunc(thread *starlark.Thread, module string, logger telegraf.Logger) (starlark.StringDict, error) { +func loadFunc(module string, logger telegraf.Logger) (starlark.StringDict, error) { switch module { case "json.star": return starlark.StringDict{ diff --git a/plugins/processors/topk/topk_test.go b/plugins/processors/topk/topk_test.go index 858859de6261b..79c6b81db4f38 100644 --- a/plugins/processors/topk/topk_test.go +++ b/plugins/processors/topk/topk_test.go @@ -9,6 +9,8 @@ import ( "github.com/influxdata/telegraf/testutil" ) +var oneSecondDuration = internal.Duration{Duration: time.Second} + // Key, value pair that represents a telegraf.Metric Field type field struct { key string @@ -117,10 +119,6 @@ func equalSets(l1 []telegraf.Metric, l2 []telegraf.Metric) bool { return subSet(l1, l2) && subSet(l2, l1) } -func createDuration(t int) internal.Duration { - return internal.Duration{Duration: time.Second * time.Duration(t)} -} - func runAndCompare(topk *TopK, metrics []telegraf.Metric, answer []telegraf.Metric, testID string, t *testing.T) { // Sleep for `period`, otherwise the processor will only // cache the metrics, but it will not process them @@ -142,7 +140,7 @@ func TestTopkAggregatorsSmokeTests(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.Fields = []string{"a"} topk.GroupBy = []string{"tag_name"} @@ -164,7 +162,7 @@ func TestTopkMeanAddAggregateFields(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.Aggregation = "mean" topk.AddAggregateFields = []string{"a"} topk.Fields = []string{"a"} @@ -193,7 +191,7 @@ func TestTopkSumAddAggregateFields(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.Aggregation = "sum" topk.AddAggregateFields = []string{"a"} topk.Fields = []string{"a"} @@ -222,7 +220,7 @@ func TestTopkMaxAddAggregateFields(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.Aggregation = "max" topk.AddAggregateFields = []string{"a"} topk.Fields = []string{"a"} @@ -251,7 +249,7 @@ func TestTopkMinAddAggregateFields(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.Aggregation = "min" topk.AddAggregateFields = []string{"a"} topk.Fields = []string{"a"} @@ -280,7 +278,7 @@ func TestTopkGroupby1(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.K = 3 topk.Aggregation = "sum" topk.AddAggregateFields = []string{"value"} @@ -305,7 +303,7 @@ func TestTopkGroupby2(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.K = 3 topk.Aggregation = "mean" topk.AddAggregateFields = []string{"value"} @@ -334,7 +332,7 @@ func TestTopkGroupby3(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.K = 1 topk.Aggregation = "min" topk.AddAggregateFields = []string{"value"} @@ -360,7 +358,7 @@ func TestTopkGroupbyFields1(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.K = 4 // This settings generate less than 3 groups topk.Aggregation = "mean" topk.AddAggregateFields = []string{"A"} @@ -387,7 +385,7 @@ func TestTopkGroupbyFields2(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.K = 2 topk.Aggregation = "sum" topk.AddAggregateFields = []string{"B", "C"} @@ -415,7 +413,7 @@ func TestTopkGroupbyMetricName1(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.K = 1 topk.Aggregation = "sum" topk.AddAggregateFields = []string{"value"} @@ -441,7 +439,7 @@ func TestTopkGroupbyMetricName2(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.K = 2 topk.Aggregation = "sum" topk.AddAggregateFields = []string{"A", "value"} @@ -469,7 +467,7 @@ func TestTopkBottomk(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.K = 3 topk.Aggregation = "sum" topk.GroupBy = []string{"tag1", "tag3"} @@ -495,7 +493,7 @@ func TestTopkGroupByKeyTag(t *testing.T) { // Build the processor var topk TopK topk = *New() - topk.Period = createDuration(1) + topk.Period = oneSecondDuration topk.K = 3 topk.Aggregation = "sum" topk.GroupBy = []string{"tag1", "tag3"} diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go index f3a3ecebc5c1c..fc9ffe61ecfe4 100644 --- a/plugins/serializers/splunkmetric/splunkmetric.go +++ b/plugins/serializers/splunkmetric/splunkmetric.go @@ -2,7 +2,6 @@ package splunkmetric import ( "encoding/json" - "fmt" "log" "github.com/influxdata/telegraf" @@ -40,22 +39,15 @@ func NewSerializer(splunkmetricHecRouting bool, splunkmetricMultimetric bool) (* } func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { - m, err := s.createObject(metric) - if err != nil { - return nil, fmt.Errorf("D! [serializer.splunkmetric] Dropping invalid metric: %s", metric.Name()) - } - - return m, nil + return s.createObject(metric), nil } func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { var serialized []byte for _, metric := range metrics { - m, err := s.createObject(metric) - if err != nil { - return nil, fmt.Errorf("D! [serializer.splunkmetric] Dropping invalid metric: %s", metric.Name()) - } else if m != nil { + m := s.createObject(metric) + if m != nil { serialized = append(serialized, m...) } } @@ -157,7 +149,7 @@ func (s *serializer) createSingle(metric telegraf.Metric, dataGroup HECTimeSerie return metricGroup, nil } -func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, err error) { +func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte) { /* Splunk supports one metric json object, and does _not_ support an array of JSON objects. ** Splunk has the following required names for the metric store: ** metric_name: The name of the metric @@ -194,7 +186,7 @@ func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, e } // Return the metric group regardless of if it's multimetric or single metric. - return metricGroup, nil + return metricGroup } func verifyValue(v interface{}) (value interface{}, valid bool) { diff --git a/plugins/serializers/wavefront/wavefront.go b/plugins/serializers/wavefront/wavefront.go index 2538d402298de..0abcf799d2a0f 100755 --- a/plugins/serializers/wavefront/wavefront.go +++ b/plugins/serializers/wavefront/wavefront.go @@ -49,7 +49,7 @@ func NewSerializer(prefix string, useStrict bool, sourceOverride []string) (*Wav return s, nil } -func (s *WavefrontSerializer) serialize(buf *buffer, m telegraf.Metric) { +func (s *WavefrontSerializer) serialize(m telegraf.Metric) { const metricSeparator = "." for fieldName, value := range m.Fields() { @@ -90,7 +90,7 @@ func (s *WavefrontSerializer) serialize(buf *buffer, m telegraf.Metric) { func (s *WavefrontSerializer) Serialize(m telegraf.Metric) ([]byte, error) { s.mu.Lock() s.scratch.Reset() - s.serialize(&s.scratch, m) + s.serialize(m) out := s.scratch.Copy() s.mu.Unlock() return out, nil @@ -100,7 +100,7 @@ func (s *WavefrontSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, s.mu.Lock() s.scratch.Reset() for _, m := range metrics { - s.serialize(&s.scratch, m) + s.serialize(m) } out := s.scratch.Copy() s.mu.Unlock() diff --git a/selfstat/stat.go b/selfstat/stat.go index e1905baf57878..4ca12a0557dce 100644 --- a/selfstat/stat.go +++ b/selfstat/stat.go @@ -9,7 +9,6 @@ type stat struct { measurement string field string tags map[string]string - key uint64 } func (s *stat) Incr(v int64) { diff --git a/selfstat/timingStat.go b/selfstat/timingStat.go index 13f8400bc7a48..e6184dc05c582 100644 --- a/selfstat/timingStat.go +++ b/selfstat/timingStat.go @@ -8,7 +8,6 @@ type timingStat struct { measurement string field string tags map[string]string - key uint64 v int64 prev int64 count int64 diff --git a/testutil/accumulator.go b/testutil/accumulator.go index c09857d15f3e2..baf09f60f1234 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -196,7 +196,7 @@ func (a *Accumulator) AddMetric(m telegraf.Metric) { a.addFields(m.Name(), m.Tags(), m.Fields(), m.Type(), m.Time()) } -func (a *Accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator { +func (a *Accumulator) WithTracking(_ int) telegraf.TrackingAccumulator { return a } @@ -234,7 +234,7 @@ func (a *Accumulator) AddError(err error) { a.Unlock() } -func (a *Accumulator) SetPrecision(precision time.Duration) { +func (a *Accumulator) SetPrecision(_ time.Duration) { return } @@ -728,17 +728,17 @@ func (a *Accumulator) BoolField(measurement string, field string) (bool, bool) { // telegraf accumulator machinery. type NopAccumulator struct{} -func (n *NopAccumulator) AddFields(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +func (n *NopAccumulator) AddFields(_ string, _ map[string]interface{}, _ map[string]string, _ ...time.Time) { } -func (n *NopAccumulator) AddGauge(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +func (n *NopAccumulator) AddGauge(_ string, _ map[string]interface{}, _ map[string]string, _ ...time.Time) { } -func (n *NopAccumulator) AddCounter(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +func (n *NopAccumulator) AddCounter(_ string, _ map[string]interface{}, _ map[string]string, _ ...time.Time) { } -func (n *NopAccumulator) AddSummary(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +func (n *NopAccumulator) AddSummary(_ string, _ map[string]interface{}, _ map[string]string, _ ...time.Time) { } -func (n *NopAccumulator) AddHistogram(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +func (n *NopAccumulator) AddHistogram(_ string, _ map[string]interface{}, _ map[string]string, _ ...time.Time) { } -func (n *NopAccumulator) AddMetric(telegraf.Metric) {} -func (n *NopAccumulator) SetPrecision(precision time.Duration) {} -func (n *NopAccumulator) AddError(err error) {} -func (n *NopAccumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator { return nil } +func (n *NopAccumulator) AddMetric(telegraf.Metric) {} +func (n *NopAccumulator) SetPrecision(_ time.Duration) {} +func (n *NopAccumulator) AddError(_ error) {} +func (n *NopAccumulator) WithTracking(_ int) telegraf.TrackingAccumulator { return nil } From dc8e4ef62e2e9291c0bc9a0ef97564cf3ab9f827 Mon Sep 17 00:00:00 2001 From: Aladex Date: Mon, 22 Mar 2021 22:01:25 +0300 Subject: [PATCH 324/761] check for length of perusage for stat gathering and removed not used function (#9009) --- plugins/inputs/docker/docker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index ec3453eda042b..087e106ad4112 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -732,7 +732,7 @@ func parseContainerStats( acc.AddFields("docker_container_cpu", cpufields, cputags, tm) } - if choice.Contains("cpu", perDeviceInclude) { + if choice.Contains("cpu", perDeviceInclude) && len(stat.CPUStats.CPUUsage.PercpuUsage) > 0 { // If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs // (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400) var percpuusage []uint64 From f4a51a4c33adc527ffc90bd6f9015e4a0f87ad70 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 23 Mar 2021 10:09:51 -0400 Subject: [PATCH 325/761] Fix ipmi panic (#9035) --- plugins/inputs/ipmi_sensor/connection.go | 6 ++++-- plugins/inputs/ipmi_sensor/connection_test.go | 12 ++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/ipmi_sensor/connection.go b/plugins/inputs/ipmi_sensor/connection.go index 7a1fb71df359a..b67ba06b9a619 100644 --- a/plugins/inputs/ipmi_sensor/connection.go +++ b/plugins/inputs/ipmi_sensor/connection.go @@ -32,8 +32,10 @@ func NewConnection(server, privilege, hexKey string) *Connection { security := server[0:inx1] connstr = server[inx1+1:] up := strings.SplitN(security, ":", 2) - conn.Username = up[0] - conn.Password = up[1] + if len(up) == 2 { + conn.Username = up[0] + conn.Password = up[1] + } } if inx2 > 0 { diff --git a/plugins/inputs/ipmi_sensor/connection_test.go b/plugins/inputs/ipmi_sensor/connection_test.go index 0f40464fbd83a..3be902e3264bc 100644 --- a/plugins/inputs/ipmi_sensor/connection_test.go +++ b/plugins/inputs/ipmi_sensor/connection_test.go @@ -33,6 +33,18 @@ func TestNewConnection(t *testing.T) { HexKey: "0001", }, }, + // test connection doesn't panic if incorrect symbol used + { + "USERID@PASSW0RD@lan(192.168.1.1)", + &Connection{ + Hostname: "192.168.1.1", + Username: "", + Password: "", + Interface: "lan", + Privilege: "USER", + HexKey: "0001", + }, + }, } for _, v := range testData { From f267f342aec81655eefab9d3d817ab0c0bee2fd5 Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Tue, 23 Mar 2021 18:45:29 +0100 Subject: [PATCH 326/761] Add support of the time module in Starlark Processor (#9004) --- go.mod | 6 ++--- go.sum | 14 +++++++---- plugins/processors/starlark/README.md | 5 +++- plugins/processors/starlark/starlark.go | 5 ++++ .../starlark/testdata/time_date.star | 19 +++++++++++++++ .../starlark/testdata/time_duration.star | 17 ++++++++++++++ .../starlark/testdata/time_timestamp.star | 23 +++++++++++++++++++ 7 files changed, 80 insertions(+), 9 deletions(-) create mode 100644 plugins/processors/starlark/testdata/time_date.star create mode 100644 plugins/processors/starlark/testdata/time_duration.star create mode 100644 plugins/processors/starlark/testdata/time_timestamp.star diff --git a/go.mod b/go.mod index 1dfe1d9a2d712..54eef23f3daf2 100644 --- a/go.mod +++ b/go.mod @@ -141,11 +141,11 @@ require ( github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect - go.starlark.net v0.0.0-20200901195727-6e684ef5eeee - golang.org/x/net v0.0.0-20201110031124-69a78807bb2b + go.starlark.net v0.0.0-20210312235212-74c10e2c17dc + golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 - golang.org/x/sys v0.0.0-20201112073958-5cba982894dd + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 golang.org/x/text v0.3.4 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.20.0 diff --git a/go.sum b/go.sum index 26aea881ce100..4c5884da2485b 100644 --- a/go.sum +++ b/go.sum @@ -335,6 +335,7 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -353,6 +354,7 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -742,8 +744,8 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.starlark.net v0.0.0-20200901195727-6e684ef5eeee h1:N4eRtIIYHZE5Mw/Km/orb+naLdwAe+lv2HCxRR5rEBw= -go.starlark.net v0.0.0-20200901195727-6e684ef5eeee/go.mod h1:f0znQkUKRrkk36XxWbGjMqQM8wGv/xHBVE2qc3B5oFU= +go.starlark.net v0.0.0-20210312235212-74c10e2c17dc h1:pVkptfeOTFfx+zXZo7HEHN3d5LmhatBFvHdm/f2QnpY= +go.starlark.net v0.0.0-20210312235212-74c10e2c17dc/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -838,8 +840,9 @@ golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 h1:lwlPPsmjDKK0J6eG6xDWd5XPehI0R024zxjDnw3esPA= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -891,11 +894,12 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index 7e1015674df7c..1f5adbec1f472 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -227,7 +227,10 @@ def apply(metric): - [ratio](/plugins/processors/starlark/testdata/ratio.star) - Compute the ratio of two integer fields - [rename](/plugins/processors/starlark/testdata/rename.star) - Rename tags or fields using a name mapping. - [scale](/plugins/processors/starlark/testdata/scale.star) - Multiply any field by a number -- [value filter](/plugins/processors/starlark/testdata/value_filter.star) - remove a metric based on a field value. +- [time date](/plugins/processors/starlark/testdata/time_date.star) - Parse a date and extract the year, month and day from it. +- [time duration](/plugins/processors/starlark/testdata/time_duration.star) - Parse a duration and convert it into a total amount of seconds. +- [time timestamp](/plugins/processors/starlark/testdata/time_timestamp.star) - Filter metrics based on the timestamp. +- [value filter](/plugins/processors/starlark/testdata/value_filter.star) - Remove a metric based on a field value. - [logging](/plugins/processors/starlark/testdata/logging.star) - Log messages with the logger of Telegraf - [multiple metrics](/plugins/processors/starlark/testdata/multiple_metrics.star) - Return multiple metrics by using [a list](https://docs.bazel.build/versions/master/skylark/lib/list.html) of metrics. - [multiple metrics from json array](/plugins/processors/starlark/testdata/multiple_metrics_with_json.star) - Builds a new metric from each element of a json array then returns all the created metrics. diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go index ffd13680f88e5..968908c6589a6 100644 --- a/plugins/processors/starlark/starlark.go +++ b/plugins/processors/starlark/starlark.go @@ -7,6 +7,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/processors" + "go.starlark.net/lib/time" "go.starlark.net/resolve" "go.starlark.net/starlark" "go.starlark.net/starlarkjson" @@ -252,6 +253,10 @@ func loadFunc(module string, logger telegraf.Logger) (starlark.StringDict, error return starlark.StringDict{ "log": LogModule(logger), }, nil + case "time.star": + return starlark.StringDict{ + "time": time.Module, + }, nil default: return nil, errors.New("module " + module + " is not available") } diff --git a/plugins/processors/starlark/testdata/time_date.star b/plugins/processors/starlark/testdata/time_date.star new file mode 100644 index 0000000000000..7be7f8fa7fcf2 --- /dev/null +++ b/plugins/processors/starlark/testdata/time_date.star @@ -0,0 +1,19 @@ +# Example of parsing a date out of a field and modifying the metric to inject the year, month and day. +# +# Example Input: +# time value="2009-06-12T12:06:10.000000099" 1465839830100400201 +# +# Example Output: +# time year=2009i,month=6i,day=12i 1465839830100400201 + +load('time.star', 'time') +# loads time.parse_duration(), time.is_valid_timezone(), time.now(), time.time(), +# time.parse_time() and time.from_timestamp() + +def apply(metric): + date = time.parse_time(metric.fields.get('value'), format="2006-01-02T15:04:05.999999999", location="UTC") + metric.fields.pop('value') + metric.fields["year"] = date.year + metric.fields["month"] = date.month + metric.fields["day"] = date.day + return metric diff --git a/plugins/processors/starlark/testdata/time_duration.star b/plugins/processors/starlark/testdata/time_duration.star new file mode 100644 index 0000000000000..773e20744cce6 --- /dev/null +++ b/plugins/processors/starlark/testdata/time_duration.star @@ -0,0 +1,17 @@ +# Example of parsing a duration out of a field and modifying the metric to inject the equivalent in seconds. +# +# Example Input: +# time value="3m35s" 1465839830100400201 +# +# Example Output: +# time seconds=215 1465839830100400201 + +load('time.star', 'time') +# loads time.parse_duration(), time.is_valid_timezone(), time.now(), time.time(), +# time.parse_time() and time.from_timestamp() + +def apply(metric): + duration = time.parse_duration(metric.fields.get('value')) + metric.fields.pop('value') + metric.fields["seconds"] = duration.seconds + return metric diff --git a/plugins/processors/starlark/testdata/time_timestamp.star b/plugins/processors/starlark/testdata/time_timestamp.star new file mode 100644 index 0000000000000..dc1cbaea0296d --- /dev/null +++ b/plugins/processors/starlark/testdata/time_timestamp.star @@ -0,0 +1,23 @@ +# Example of filtering metrics based on the timestamp. Beware the built-in function from_timestamp +# only supports timestamps in seconds. +# +# Example Input: +# time result="KO" 1616020365100400201 +# time result="OK" 1616150517100400201 +# +# Example Output: +# time result="OK" 1616150517100400201 + +load('time.star', 'time') +# loads time.parse_duration(), time.is_valid_timezone(), time.now(), time.time(), +# time.parse_time() and time.from_timestamp() + +def apply(metric): + # 1616198400 sec = Saturday, March 20, 2021 0:00:00 GMT + refDate = time.from_timestamp(1616198400) + # 1616020365 sec = Wednesday, March 17, 2021 22:32:45 GMT + # 1616150517 sec = Friday, March 19, 2021 10:41:57 GMT + metric_date = time.from_timestamp(int(metric.time / 1e9)) + # Only keep metrics with a timestamp that is not more than 24 hours before the reference date + if refDate - time.parse_duration("24h") < metric_date: + return metric From b2b361356e6248dc099842784f8fe4bd9ae007bf Mon Sep 17 00:00:00 2001 From: jaroug Date: Tue, 23 Mar 2021 22:31:15 +0100 Subject: [PATCH 327/761] Wildcard support for x509_cert files (#6952) * Accept standard unix glob matching rules * comply with indentation * update readme * move globing expand and url parsing into Init() * chore: rebase branch on upstream master * rename refreshFilePaths to expandFilePaths * expandFilePaths handles '/path/to/*.pem' and 'files:///path/to/*.pem' * update sample config * fix: recompile files globing pattern at every gather tic * add var globFilePathsToUrls to stack files path * add var globpaths to stack compiled globpath * rework sourcesToURLs to compile files path and stack them * rename expandFilePaths to expandFilePathsToUrls * rework expandFilePathsToUrls to only match compiled globpath * rework the `Gather` ticker to match globpath at each call * fix: comply with requested changes * add specifics regarding relative paths in sample config * add logger and use it in expandFilePathsToUrls() * precompile glob for `files://`, `/` and `://` * fix: update README to match last changes * fix: comply with last requested changes * rename expandFilePathsToUrls() to collectCertURLs() * collectCertURLs() now returns []*url.URL to avoid extra field globFilePathsToUrls in structure * update the Gather() ticker accordingly * fix(windows): do not try to compile glopath for windows path as it's not supposed to be supported by the OS * fix(ci): apply go fmt * fix(ci): empty-lines/import-shadowing Co-authored-by: Anthony LE BERRE --- plugins/inputs/x509_cert/README.md | 6 +- plugins/inputs/x509_cert/x509_cert.go | 86 ++++++++++++++++++++------- 2 files changed, 68 insertions(+), 24 deletions(-) diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md index 42adc39217358..f206f6c0979a5 100644 --- a/plugins/inputs/x509_cert/README.md +++ b/plugins/inputs/x509_cert/README.md @@ -9,8 +9,10 @@ file or network connection. ```toml # Reads metrics from a SSL certificate [[inputs.x509_cert]] - ## List certificate sources - sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "https://example.org:443"] + ## List certificate sources, support wildcard expands for files + ## Prefix your entry with 'file://' if you intend to use relative paths + sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443", + "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] ## Timeout for SSL connection # timeout = "5s" diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 6ad87a9e0fdda..92fbcb4066e61 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -16,13 +16,16 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/globpath" _tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) const sampleConfig = ` ## List certificate sources - sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] + ## Prefix your entry with 'file://' if you intend to use relative paths + sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443", + "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] ## Timeout for SSL connection # timeout = "5s" @@ -45,6 +48,9 @@ type X509Cert struct { ServerName string `toml:"server_name"` tlsCfg *tls.Config _tls.ClientConfig + locations []*url.URL + globpaths []*globpath.GlobPath + Log telegraf.Logger } // Description returns description of the plugin. @@ -57,20 +63,31 @@ func (c *X509Cert) SampleConfig() string { return sampleConfig } -func (c *X509Cert) locationToURL(location string) (*url.URL, error) { - if strings.HasPrefix(location, "/") { - location = "file://" + location - } - if strings.Index(location, ":\\") == 1 { - location = "file://" + filepath.ToSlash(location) - } +func (c *X509Cert) sourcesToURLs() error { + for _, source := range c.Sources { + if strings.HasPrefix(source, "file://") || + strings.HasPrefix(source, "/") || + strings.Index(source, ":\\") != 1 { + source = filepath.ToSlash(strings.TrimPrefix(source, "file://")) + g, err := globpath.Compile(source) + if err != nil { + return fmt.Errorf("could not compile glob %v: %v", source, err) + } + c.globpaths = append(c.globpaths, g) + } else { + if strings.Index(source, ":\\") == 1 { + source = "file://" + filepath.ToSlash(source) + } + u, err := url.Parse(source) + if err != nil { + return fmt.Errorf("failed to parse cert location - %s", err.Error()) + } - u, err := url.Parse(location) - if err != nil { - return nil, fmt.Errorf("failed to parse cert location - %s", err.Error()) + c.locations = append(c.locations, u) + } } - return u, nil + return nil } func (c *X509Cert) serverName(u *url.URL) (string, error) { @@ -204,25 +221,45 @@ func getTags(cert *x509.Certificate, location string) map[string]string { return tags } +func (c *X509Cert) collectCertURLs() ([]*url.URL, error) { + var urls []*url.URL + + for _, path := range c.globpaths { + files := path.Match() + if len(files) <= 0 { + c.Log.Errorf("could not find file: %v", path) + continue + } + for _, file := range files { + file = "file://" + file + u, err := url.Parse(file) + if err != nil { + return urls, fmt.Errorf("failed to parse cert location - %s", err.Error()) + } + urls = append(urls, u) + } + } + + return urls, nil +} + // Gather adds metrics into the accumulator. func (c *X509Cert) Gather(acc telegraf.Accumulator) error { now := time.Now() + collectedUrls, err := c.collectCertURLs() + if err != nil { + acc.AddError(fmt.Errorf("cannot get file: %s", err.Error())) + } - for _, location := range c.Sources { - u, err := c.locationToURL(location) - if err != nil { - acc.AddError(err) - return nil - } - - certs, err := c.getCert(u, c.Timeout.Duration) + for _, location := range append(c.locations, collectedUrls...) { + certs, err := c.getCert(location, c.Timeout.Duration*time.Second) if err != nil { acc.AddError(fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error())) } for i, cert := range certs { fields := getFields(cert, now) - tags := getTags(cert, location) + tags := getTags(cert, location.String()) // The first certificate is the leaf/end-entity certificate which needs DNS // name validation against the URL hostname. @@ -231,7 +268,7 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, } if i == 0 { - opts.DNSName, err = c.serverName(u) + opts.DNSName, err = c.serverName(location) if err != nil { return err } @@ -263,6 +300,11 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { } func (c *X509Cert) Init() error { + err := c.sourcesToURLs() + if err != nil { + return err + } + tlsCfg, err := c.ClientConfig.TLSConfig() if err != nil { return err From 9aaaf72a96ee77c05c6c26879d513996dc21c23e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A1udio=20Silva?= Date: Tue, 23 Mar 2021 21:45:27 +0000 Subject: [PATCH 328/761] SQLServer - Fixes sqlserver_process_cpu calculation (#8549) --- plugins/inputs/sqlserver/sqlqueriesV2.go | 75 ++++++++++++------- plugins/inputs/sqlserver/sqlserverqueries.go | 77 +++++++++++++------- 2 files changed, 100 insertions(+), 52 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlqueriesV2.go b/plugins/inputs/sqlserver/sqlqueriesV2.go index a6c68f5c0d98e..3521cc9571661 100644 --- a/plugins/inputs/sqlserver/sqlqueriesV2.go +++ b/plugins/inputs/sqlserver/sqlqueriesV2.go @@ -1352,33 +1352,58 @@ const sqlServerCPUV2 string = ` /*The ring buffer has a new value every minute*/ IF SERVERPROPERTY('EngineEdition') IN (2,3,4) /*Standard,Enterpris,Express*/ BEGIN -SELECT - 'sqlserver_cpu' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,[SQLProcessUtilization] AS [sqlserver_process_cpu] - ,[SystemIdle] AS [system_idle_cpu] - ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] -FROM ( - SELECT TOP 1 - [record_id] - /*,dateadd(ms, (y.[timestamp] - (SELECT CAST([ms_ticks] AS BIGINT) FROM sys.dm_os_sys_info)), GETDATE()) AS [EventTime] --use for check/debug purpose*/ - ,[SQLProcessUtilization] - ,[SystemIdle] +;WITH utilization_cte AS +( + SELECT + [SQLProcessUtilization] AS [sqlserver_process_cpu] + ,[SystemIdle] AS [system_idle_cpu] + ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] FROM ( - SELECT record.value('(./Record/@id)[1]', 'int') AS [record_id] - ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle] - ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization] - ,[TIMESTAMP] + SELECT TOP 1 + [record_id] + ,[SQLProcessUtilization] + ,[SystemIdle] FROM ( - SELECT [TIMESTAMP] - ,convert(XML, [record]) AS [record] - FROM sys.dm_os_ring_buffers - WHERE [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR' - AND [record] LIKE '%%' - ) AS x - ) AS y - ORDER BY record_id DESC -) as z + SELECT + record.value('(./Record/@id)[1]', 'int') AS [record_id] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization] + ,[TIMESTAMP] + FROM ( + SELECT + [TIMESTAMP] + ,convert(XML, [record]) AS [record] + FROM sys.dm_os_ring_buffers + WHERE + [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR' + AND [record] LIKE '%%' + ) AS x + ) AS y + ORDER BY [record_id] DESC + ) AS z +), +processor_Info_cte AS +( + SELECT (cpu_count / hyperthread_ratio) as number_of_physical_cpus +  FROM sys.dm_os_sys_info +) +SELECT + 'sqlserver_cpu' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[sqlserver_process_cpu] + ,[system_idle_cpu] + ,100 - [system_idle_cpu] - [sqlserver_process_cpu] AS [other_process_cpu] +FROM + ( + SELECT + (case + when [other_process_cpu] < 0 then [sqlserver_process_cpu] / a.number_of_physical_cpus + else [sqlserver_process_cpu] +  end) as [sqlserver_process_cpu] + ,[system_idle_cpu] + FROM utilization_cte + CROSS APPLY processor_Info_cte a + ) AS b END ` diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index 41fd848a1b36a..76a7712522189 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -1136,37 +1136,60 @@ IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterp DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. Check the database_type parameter in the telegraf configuration.'; RAISERROR (@ErrorMessage,11,1) RETURN -END +END; -SELECT - 'sqlserver_cpu' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,[SQLProcessUtilization] AS [sqlserver_process_cpu] - ,[SystemIdle] AS [system_idle_cpu] - ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] -FROM ( - SELECT TOP 1 - [record_id] - ,[SQLProcessUtilization] - ,[SystemIdle] +WITH utilization_cte AS +( + SELECT + [SQLProcessUtilization] AS [sqlserver_process_cpu] + ,[SystemIdle] AS [system_idle_cpu] + ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] FROM ( - SELECT - record.value('(./Record/@id)[1]', 'int') AS [record_id] - ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle] - ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization] - ,[TIMESTAMP] + SELECT TOP 1 + [record_id] + ,[SQLProcessUtilization] + ,[SystemIdle] FROM ( SELECT - [TIMESTAMP] - ,convert(XML, [record]) AS [record] - FROM sys.dm_os_ring_buffers - WHERE - [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR' - AND [record] LIKE '%%' - ) AS x - ) AS y - ORDER BY [record_id] DESC -) AS z + record.value('(./Record/@id)[1]', 'int') AS [record_id] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization] + ,[TIMESTAMP] + FROM ( + SELECT + [TIMESTAMP] + ,convert(XML, [record]) AS [record] + FROM sys.dm_os_ring_buffers + WHERE + [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR' + AND [record] LIKE '%%' + ) AS x + ) AS y + ORDER BY [record_id] DESC + ) AS z +), +processor_Info_cte AS +( + SELECT (cpu_count / hyperthread_ratio) as number_of_physical_cpus +  FROM sys.dm_os_sys_info +) +SELECT + 'sqlserver_cpu' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[sqlserver_process_cpu] + ,[system_idle_cpu] + ,100 - [system_idle_cpu] - [sqlserver_process_cpu] AS [other_process_cpu] +FROM + ( + SELECT + (case + when [other_process_cpu] < 0 then [sqlserver_process_cpu] / a.number_of_physical_cpus + else [sqlserver_process_cpu] +  end) as [sqlserver_process_cpu] + ,[system_idle_cpu] + FROM utilization_cte + CROSS APPLY processor_Info_cte a + ) AS b ` // Collects availability replica state information from `sys.dm_hadr_availability_replica_states` for a High Availability / Disaster Recovery (HADR) setup From 5de640b855fac74dde87d94936dc65c1c089d365 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 24 Mar 2021 16:27:46 +0100 Subject: [PATCH 329/761] Linter fixes - unconvert, revive:empty-lines, revive:var-naming, revive:unused-parameter (#9036) Co-authored-by: Pawel Zak --- internal/process/process.go | 4 +- metric/metric.go | 4 +- plugins/inputs/aliyuncms/aliyuncms.go | 16 ++-- plugins/inputs/aliyuncms/discovery.go | 26 +++---- plugins/inputs/apcupsd/apcupsd.go | 2 +- .../cisco_telemetry_util.go | 8 +- plugins/inputs/cloud_pubsub/pubsub.go | 2 +- plugins/inputs/couchdb/couchdb.go | 10 +-- plugins/inputs/csgo/csgo.go | 6 +- .../directory_monitor_test.go | 4 +- plugins/inputs/diskio/diskio_linux.go | 4 +- plugins/inputs/docker/docker_test.go | 12 +-- plugins/inputs/filecount/filecount_test.go | 22 +++--- .../filecount/filesystem_helpers_test.go | 6 +- plugins/inputs/fireboard/fireboard.go | 6 +- plugins/inputs/fluentd/fluentd.go | 14 ++-- plugins/inputs/fluentd/fluentd_test.go | 4 +- plugins/inputs/graylog/graylog.go | 10 +-- plugins/inputs/jenkins/jenkins.go | 4 +- plugins/inputs/jolokia/jolokia.go | 2 +- .../jti_openconfig_telemetry/collection.go | 2 +- plugins/inputs/kernel/kernel.go | 14 ++-- plugins/inputs/kernel_vmstat/kernel_vmstat.go | 2 +- plugins/inputs/kube_inventory/client.go | 1 - plugins/inputs/kube_inventory/kube_state.go | 2 +- plugins/inputs/mailchimp/chimp_api.go | 2 +- plugins/inputs/marklogic/marklogic_test.go | 2 +- plugins/inputs/mesos/mesos.go | 4 +- plugins/inputs/nats/nats.go | 2 +- .../inputs/neptune_apex/neptune_apex_test.go | 2 +- plugins/inputs/net_response/net_response.go | 10 +-- .../inputs/net_response/net_response_test.go | 8 +- plugins/inputs/nfsclient/nfsclient_test.go | 32 ++++---- plugins/inputs/nsq/nsq.go | 12 +-- plugins/inputs/openntpd/openntpd.go | 4 +- plugins/inputs/prometheus/kubernetes.go | 16 ++-- plugins/inputs/raindrops/raindrops.go | 6 +- plugins/inputs/raindrops/raindrops_test.go | 2 +- plugins/inputs/ravendb/ravendb.go | 74 +++++++++---------- plugins/inputs/ravendb/ravendb_dto.go | 24 +++--- plugins/inputs/redfish/redfish.go | 16 ++-- plugins/inputs/riak/riak.go | 4 +- plugins/inputs/salesforce/salesforce.go | 6 +- plugins/inputs/salesforce/salesforce_test.go | 4 +- plugins/inputs/snmp/snmp_mocks_test.go | 4 +- plugins/inputs/snmp_legacy/snmp_legacy.go | 8 +- plugins/inputs/sqlserver/connectionstring.go | 10 +-- plugins/inputs/sqlserver/sqlserver_test.go | 4 +- plugins/inputs/statsd/datadog.go | 16 ++-- plugins/inputs/suricata/suricata.go | 4 +- plugins/inputs/suricata/suricata_test.go | 8 +- plugins/inputs/vsphere/endpoint.go | 2 +- plugins/inputs/zookeeper/zookeeper.go | 4 +- plugins/outputs/amon/amon.go | 6 +- plugins/outputs/cloud_pubsub/pubsub_test.go | 2 +- plugins/outputs/dynatrace/dynatrace.go | 4 +- plugins/outputs/librato/librato.go | 2 +- plugins/outputs/sensu/sensu.go | 70 +++++++++--------- plugins/outputs/sensu/sensu_test.go | 50 ++++++------- plugins/outputs/stackdriver/stackdriver.go | 8 +- plugins/outputs/timestream/timestream_test.go | 8 +- plugins/outputs/wavefront/wavefront.go | 2 +- plugins/parsers/influx/machine_test.go | 4 +- plugins/parsers/nagios/parser.go | 14 ++-- plugins/parsers/value/parser.go | 4 +- .../splunkmetric/splunkmetric_test.go | 22 +++--- 66 files changed, 337 insertions(+), 336 deletions(-) diff --git a/internal/process/process.go b/internal/process/process.go index 3f88aac57b317..6da98d211a43b 100644 --- a/internal/process/process.go +++ b/internal/process/process.go @@ -126,12 +126,12 @@ func (p *Process) cmdLoop(ctx context.Context) error { } p.Log.Errorf("Process %s exited: %v", p.Cmd.Path, err) - p.Log.Infof("Restarting in %s...", time.Duration(p.RestartDelay)) + p.Log.Infof("Restarting in %s...", p.RestartDelay) select { case <-ctx.Done(): return nil - case <-time.After(time.Duration(p.RestartDelay)): + case <-time.After(p.RestartDelay): // Continue the loop and restart the process if err := p.cmdStart(); err != nil { return err diff --git a/metric/metric.go b/metric/metric.go index b1a6edcfe91c7..e3b49c3a287fe 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -297,7 +297,7 @@ func convertField(v interface{}) interface{} { case uint: return uint64(v) case uint64: - return uint64(v) + return v case []byte: return string(v) case int32: @@ -340,7 +340,7 @@ func convertField(v interface{}) interface{} { } case *uint64: if v != nil { - return uint64(*v) + return *v } case *[]byte: if v != nil { diff --git a/plugins/inputs/aliyuncms/aliyuncms.go b/plugins/inputs/aliyuncms/aliyuncms.go index e5ce3824101dc..6aebf99b836fa 100644 --- a/plugins/inputs/aliyuncms/aliyuncms.go +++ b/plugins/inputs/aliyuncms/aliyuncms.go @@ -458,11 +458,11 @@ L: metric.requestDimensions = make([]map[string]string, 0, len(s.discoveryData)) //Preparing tags & dims... - for instanceId, elem := range s.discoveryData { + for instanceID, elem := range s.discoveryData { //Start filing tags //Remove old value if exist - delete(metric.discoveryTags, instanceId) - metric.discoveryTags[instanceId] = make(map[string]string, len(metric.TagsQueryPath)+len(defaulTags)) + delete(metric.discoveryTags, instanceID) + metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaulTags)) for _, tagQueryPath := range metric.TagsQueryPath { tagKey, tagValue, err := parseTag(tagQueryPath, elem) @@ -471,11 +471,11 @@ L: continue } if err == nil && tagValue == "" { //Nothing found - s.Log.Debugf("Data by query path %q: is not found, for instance %q", tagQueryPath, instanceId) + s.Log.Debugf("Data by query path %q: is not found, for instance %q", tagQueryPath, instanceID) continue } - metric.discoveryTags[instanceId][tagKey] = tagValue + metric.discoveryTags[instanceID][tagKey] = tagValue } //Adding default tags if not already there @@ -489,17 +489,17 @@ L: if err == nil && tagValue == "" { //Nothing found s.Log.Debugf("Data by query path %q: is not found, for instance %q", - defaultTagQP, instanceId) + defaultTagQP, instanceID) continue } - metric.discoveryTags[instanceId][tagKey] = tagValue + metric.discoveryTags[instanceID][tagKey] = tagValue } //Preparing dimensions (first adding dimensions that comes from discovery data) metric.requestDimensions = append( metric.requestDimensions, - map[string]string{s.dimensionKey: instanceId}) + map[string]string{s.dimensionKey: instanceID}) } //Get final dimension (need to get full lis of diff --git a/plugins/inputs/aliyuncms/discovery.go b/plugins/inputs/aliyuncms/discovery.go index 904f9d1948d3e..7e33d7f92c64a 100644 --- a/plugins/inputs/aliyuncms/discovery.go +++ b/plugins/inputs/aliyuncms/discovery.go @@ -60,7 +60,7 @@ type discoveryTool struct { cli map[string]aliyunSdkClient //API client, which perform discovery request respRootKey string //Root key in JSON response where to look for discovery data - respObjectIdKey string //Key in element of array under root key, that stores object ID + respObjectIDKey string //Key in element of array under root key, that stores object ID //for ,majority of cases it would be InstanceId, for OSS it is BucketName. This key is also used in dimension filtering// ) wg sync.WaitGroup //WG for primary discovery goroutine interval time.Duration //Discovery interval @@ -69,9 +69,9 @@ type discoveryTool struct { lg telegraf.Logger //Telegraf logger (should be provided) } -//getRpcReqFromDiscoveryRequest - utility function to map between aliyun request primitives +//getRPCReqFromDiscoveryRequest - utility function to map between aliyun request primitives //discoveryRequest represents different type of discovery requests -func getRpcReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, error) { +func getRPCReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, error) { if reflect.ValueOf(req).Type().Kind() != reflect.Ptr || reflect.ValueOf(req).IsNil() { return nil, errors.Errorf("Not expected type of the discovery request object: %q, %q", reflect.ValueOf(req).Type(), reflect.ValueOf(req).Kind()) @@ -109,7 +109,7 @@ func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred cli = map[string]aliyunSdkClient{} parseRootKey = regexp.MustCompile(`Describe(.*)`) responseRootKey string - responseObjectIdKey string + responseObjectIDKey string err error noDiscoverySupportErr = errors.Errorf("no discovery support for project %q", project) ) @@ -127,13 +127,13 @@ func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred switch project { case "acs_ecs_dashboard": dscReq[region] = ecs.CreateDescribeInstancesRequest() - responseObjectIdKey = "InstanceId" + responseObjectIDKey = "InstanceId" case "acs_rds_dashboard": dscReq[region] = rds.CreateDescribeDBInstancesRequest() - responseObjectIdKey = "DBInstanceId" + responseObjectIDKey = "DBInstanceId" case "acs_slb_dashboard": dscReq[region] = slb.CreateDescribeLoadBalancersRequest() - responseObjectIdKey = "LoadBalancerId" + responseObjectIDKey = "LoadBalancerId" case "acs_memcache": return nil, noDiscoverySupportErr case "acs_ocs": @@ -152,7 +152,7 @@ func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred //req.InitWithApiInfo("oss", "2014-08-15", "DescribeDBInstances", "oss", "openAPI") case "acs_vpc_eip": dscReq[region] = vpc.CreateDescribeEipAddressesRequest() - responseObjectIdKey = "AllocationId" + responseObjectIDKey = "AllocationId" case "acs_kvstore": return nil, noDiscoverySupportErr case "acs_mns_new": @@ -253,7 +253,7 @@ func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred //Getting response root key (if not set already). This is to be able to parse discovery responses //As they differ per object type //Discovery requests are of the same type per every region, so pick the first one - rpcReq, err := getRpcReqFromDiscoveryRequest(dscReq[regions[0]]) + rpcReq, err := getRPCReqFromDiscoveryRequest(dscReq[regions[0]]) //This means that the discovery request is not of proper type/kind if err != nil { return nil, errors.Errorf("Can't parse rpc request object from discovery request %v", dscReq[regions[0]]) @@ -283,7 +283,7 @@ func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred req: dscReq, cli: cli, respRootKey: responseRootKey, - respObjectIdKey: responseObjectIdKey, + respObjectIDKey: responseObjectIDKey, rateLimit: rateLimit, interval: discoveryInterval, reqDefaultPageSize: 20, @@ -380,8 +380,8 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com for _, raw := range discoveryData { if elem, ok := raw.(map[string]interface{}); ok { - if objectId, ok := elem[dt.respObjectIdKey].(string); ok { - preparedData[objectId] = elem + if objectID, ok := elem[dt.respObjectIDKey].(string); ok { + preparedData[objectID] = elem } } else { return nil, errors.Errorf("Can't parse input data element, not a map[string]interface{} type") @@ -407,7 +407,7 @@ func (dt *discoveryTool) getDiscoveryDataAllRegions(limiter chan bool) (map[stri return nil, errors.Errorf("Error building common discovery request: not valid region %q", region) } - rpcReq, err := getRpcReqFromDiscoveryRequest(dscReq) + rpcReq, err := getRPCReqFromDiscoveryRequest(dscReq) if err != nil { return nil, err } diff --git a/plugins/inputs/apcupsd/apcupsd.go b/plugins/inputs/apcupsd/apcupsd.go index a862bbfc881f8..4acadffe38dd2 100644 --- a/plugins/inputs/apcupsd/apcupsd.go +++ b/plugins/inputs/apcupsd/apcupsd.go @@ -15,7 +15,7 @@ import ( const defaultAddress = "tcp://127.0.0.1:3551" -var defaultTimeout = internal.Duration{Duration: time.Duration(time.Second * 5)} +var defaultTimeout = internal.Duration{Duration: time.Second * 5} type ApcUpsd struct { Servers []string diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go index 52f3e6fd59021..e585b6fe0bda7 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go @@ -171,7 +171,7 @@ func (c *CiscoTelemetryMDT) initBgpV4() { c.nxpathMap[key]["aspath"] = "string" } -func (c *CiscoTelemetryMDT) initCpu() { +func (c *CiscoTelemetryMDT) initCPU() { key := "show processes cpu" c.nxpathMap[key] = make(map[string]string, 5) c.nxpathMap[key]["kernel_percent"] = "float" @@ -654,7 +654,7 @@ func (c *CiscoTelemetryMDT) initPimVrf() { c.nxpathMap[key]["table-id"] = "string" } -func (c *CiscoTelemetryMDT) initIpMroute() { +func (c *CiscoTelemetryMDT) initIPMroute() { key := "show ip mroute summary vrf all" c.nxpathMap[key] = make(map[string]string, 40) c.nxpathMap[key]["nat-mode"] = "string" @@ -842,7 +842,7 @@ func (c *CiscoTelemetryMDT) initDb() { c.initPower() c.initMemPhys() c.initBgpV4() - c.initCpu() + c.initCPU() c.initResources() c.initPtpCorrection() c.initTrans() @@ -861,7 +861,7 @@ func (c *CiscoTelemetryMDT) initDb() { c.initPimStats() c.initIntfBrief() c.initPimVrf() - c.initIpMroute() + c.initIPMroute() c.initIpv6Mroute() c.initVpc() c.initBgp() diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/pubsub.go index 41ecf09ec3051..230c459045727 100644 --- a/plugins/inputs/cloud_pubsub/pubsub.go +++ b/plugins/inputs/cloud_pubsub/pubsub.go @@ -180,7 +180,7 @@ func (ps *PubSub) onMessage(ctx context.Context, msg message) error { if err != nil { return fmt.Errorf("unable to base64 decode message: %v", err) } - data = []byte(strData) + data = strData } else { data = msg.Data() } diff --git a/plugins/inputs/couchdb/couchdb.go b/plugins/inputs/couchdb/couchdb.go index 1b542d042dd30..d96c73f836977 100644 --- a/plugins/inputs/couchdb/couchdb.go +++ b/plugins/inputs/couchdb/couchdb.go @@ -125,9 +125,9 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri if c.client == nil { c.client = &http.Client{ Transport: &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, }, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } } @@ -147,7 +147,7 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri defer response.Body.Close() if response.StatusCode != 200 { - return fmt.Errorf("Failed to get stats from couchdb: HTTP responded %d", response.StatusCode) + return fmt.Errorf("failed to get stats from couchdb: HTTP responded %d", response.StatusCode) } stats := Stats{} @@ -287,9 +287,9 @@ func init() { return &CouchDB{ client: &http.Client{ Transport: &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, }, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, }, } }) diff --git a/plugins/inputs/csgo/csgo.go b/plugins/inputs/csgo/csgo.go index 0fa18cab21d43..75cf8a9240099 100644 --- a/plugins/inputs/csgo/csgo.go +++ b/plugins/inputs/csgo/csgo.go @@ -176,15 +176,15 @@ func requestServer(url string, rconPw string) (string, error) { } defer remoteConsole.Close() - reqId, err := remoteConsole.Write("stats") + reqID, err := remoteConsole.Write("stats") if err != nil { return "", err } - resp, respReqId, err := remoteConsole.Read() + resp, respReqID, err := remoteConsole.Read() if err != nil { return "", err - } else if reqId != respReqId { + } else if reqID != respReqID { return "", errors.New("response/request mismatch") } else { return resp, nil diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go index b9cfbad8df42a..e74a1b27667de 100644 --- a/plugins/inputs/directory_monitor/directory_monitor_test.go +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -79,7 +79,7 @@ func TestCSVGZImport(t *testing.T) { func TestMultipleJSONFileImports(t *testing.T) { acc := testutil.Accumulator{} - testJsonFile := "test.json" + testJSONFile := "test.json" // Establish process directory and finished directory. finishedDirectory, err := ioutil.TempDir("", "finished") @@ -110,7 +110,7 @@ func TestMultipleJSONFileImports(t *testing.T) { // Let's drop a 5-line LINE-DELIMITED json. // Write csv file to process into the 'process' directory. - f, err := os.Create(filepath.Join(processDirectory, testJsonFile)) + f, err := os.Create(filepath.Join(processDirectory, testJSONFile)) require.NoError(t, err) f.WriteString("{\"Name\": \"event1\",\"Speed\": 100.1,\"Length\": 20.1}\n{\"Name\": \"event2\",\"Speed\": 500,\"Length\": 1.4}\n{\"Name\": \"event3\",\"Speed\": 200,\"Length\": 10.23}\n{\"Name\": \"event4\",\"Speed\": 80,\"Length\": 250}\n{\"Name\": \"event5\",\"Speed\": 120.77,\"Length\": 25.97}") f.Close() diff --git a/plugins/inputs/diskio/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go index bb11429f1d387..01ca7055e3db4 100644 --- a/plugins/inputs/diskio/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -41,8 +41,8 @@ func (d *DiskIO) diskInfo(devName string) (map[string]string, error) { // This allows us to also "poison" it during test scenarios udevDataPath = ic.udevDataPath } else { - major := unix.Major(uint64(stat.Rdev)) - minor := unix.Minor(uint64(stat.Rdev)) + major := unix.Major(uint64(stat.Rdev)) //nolint:unconvert // Conversion needed for some architectures + minor := unix.Minor(uint64(stat.Rdev)) //nolint:unconvert // Conversion needed for some architectures udevDataPath = fmt.Sprintf("/run/udev/data/b%d:%d", major, minor) _, err := os.Stat(udevDataPath) diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index e6ecce32323f8..c9c19da3c2f6e 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -1136,7 +1136,7 @@ func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { var ( testDate = time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC) - metricCpuTotal = testutil.MustMetric( + metricCPUTotal = testutil.MustMetric( "docker_container_cpu", map[string]string{ "cpu": "cpu-total", @@ -1144,14 +1144,14 @@ func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { map[string]interface{}{}, testDate) - metricCpu0 = testutil.MustMetric( + metricCPU0 = testutil.MustMetric( "docker_container_cpu", map[string]string{ "cpu": "cpu0", }, map[string]interface{}{}, testDate) - metricCpu1 = testutil.MustMetric( + metricCPU1 = testutil.MustMetric( "docker_container_cpu", map[string]string{ "cpu": "cpu1", @@ -1218,7 +1218,7 @@ func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { totalInclude: containerMetricClasses, }, expected: []telegraf.Metric{ - metricCpuTotal, metricCpu0, metricCpu1, + metricCPUTotal, metricCPU0, metricCPU1, metricNetworkTotal, metricNetworkEth0, metricNetworkEth1, metricBlkioTotal, metricBlkio6_0, metricBlkio6_1, }, @@ -1231,7 +1231,7 @@ func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { totalInclude: []string{}, }, expected: []telegraf.Metric{ - metricCpu0, metricCpu1, + metricCPU0, metricCPU1, metricNetworkEth0, metricNetworkEth1, metricBlkio6_0, metricBlkio6_1, }, @@ -1243,7 +1243,7 @@ func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { perDeviceInclude: []string{}, totalInclude: containerMetricClasses, }, - expected: []telegraf.Metric{metricCpuTotal, metricNetworkTotal, metricBlkioTotal}, + expected: []telegraf.Metric{metricCPUTotal, metricNetworkTotal, metricBlkioTotal}, }, { name: "Per device and total metrics disabled", diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 2136c348d1d6c..a4c073bf15d80 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -213,20 +213,20 @@ func getFakeFileSystem(basePath string) fakeFileSystem { var dmask uint32 = 0666 // set directory bit - dmask |= (1 << uint(32-1)) + dmask |= 1 << uint(32-1) // create a lookup map for getting "files" from the "filesystem" fileList := map[string]fakeFileInfo{ - basePath: {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, - basePath + "/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime}, - basePath + "/bar": {name: "bar", filemode: uint32(fmask), modtime: mtime}, - basePath + "/baz": {name: "baz", filemode: uint32(fmask), modtime: olderMtime}, - basePath + "/qux": {name: "qux", size: int64(400), filemode: uint32(fmask), modtime: mtime}, - basePath + "/subdir": {name: "subdir", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, - basePath + "/subdir/quux": {name: "quux", filemode: uint32(fmask), modtime: mtime}, - basePath + "/subdir/quuz": {name: "quuz", filemode: uint32(fmask), modtime: mtime}, - basePath + "/subdir/nested2": {name: "nested2", size: int64(200), filemode: uint32(dmask), modtime: mtime, isdir: true}, - basePath + "/subdir/nested2/qux": {name: "qux", filemode: uint32(fmask), modtime: mtime, size: int64(400)}, + basePath: {name: "testdata", size: int64(4096), filemode: dmask, modtime: mtime, isdir: true}, + basePath + "/foo": {name: "foo", filemode: fmask, modtime: mtime}, + basePath + "/bar": {name: "bar", filemode: fmask, modtime: mtime}, + basePath + "/baz": {name: "baz", filemode: fmask, modtime: olderMtime}, + basePath + "/qux": {name: "qux", size: int64(400), filemode: fmask, modtime: mtime}, + basePath + "/subdir": {name: "subdir", size: int64(4096), filemode: dmask, modtime: mtime, isdir: true}, + basePath + "/subdir/quux": {name: "quux", filemode: fmask, modtime: mtime}, + basePath + "/subdir/quuz": {name: "quuz", filemode: fmask, modtime: mtime}, + basePath + "/subdir/nested2": {name: "nested2", size: int64(200), filemode: dmask, modtime: mtime, isdir: true}, + basePath + "/subdir/nested2/qux": {name: "qux", filemode: fmask, modtime: mtime, size: int64(400)}, } return fakeFileSystem{files: fileList} diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go index 2203500726ba8..b1dacc25bc731 100644 --- a/plugins/inputs/filecount/filesystem_helpers_test.go +++ b/plugins/inputs/filecount/filesystem_helpers_test.go @@ -82,11 +82,11 @@ func getTestFileSystem() fakeFileSystem { var dmask uint32 = 0666 // set directory bit - dmask |= (1 << uint(32-1)) + dmask |= 1 << uint(32-1) fileList := map[string]fakeFileInfo{ - "/testdata": {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, - "/testdata/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime}, + "/testdata": {name: "testdata", size: int64(4096), filemode: dmask, modtime: mtime, isdir: true}, + "/testdata/foo": {name: "foo", filemode: fmask, modtime: mtime}, } return fakeFileSystem{files: fileList} diff --git a/plugins/inputs/fireboard/fireboard.go b/plugins/inputs/fireboard/fireboard.go index c9a79396ee313..92846a0760cfd 100644 --- a/plugins/inputs/fireboard/fireboard.go +++ b/plugins/inputs/fireboard/fireboard.go @@ -23,10 +23,10 @@ type Fireboard struct { // NewFireboard return a new instance of Fireboard with a default http client func NewFireboard() *Fireboard { - tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} + tr := &http.Transport{ResponseHeaderTimeout: 3 * time.Second} client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } return &Fireboard{client: client} } @@ -70,7 +70,7 @@ func (r *Fireboard) Description() string { // Init the things func (r *Fireboard) Init() error { if len(r.AuthToken) == 0 { - return fmt.Errorf("You must specify an authToken") + return fmt.Errorf("you must specify an authToken") } if len(r.URL) == 0 { r.URL = "https://fireboard.io/api/v1/devices.json" diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index 1d23259fa736b..42a2f1b52c0f6 100644 --- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -62,7 +62,7 @@ func parse(data []byte) (datapointArray []pluginData, err error) { var endpointData endpointInfo if err = json.Unmarshal(data, &endpointData); err != nil { - err = fmt.Errorf("Processing JSON structure") + err = fmt.Errorf("processing JSON structure") return } @@ -83,17 +83,17 @@ func (h *Fluentd) SampleConfig() string { return sampleConfig } func (h *Fluentd) Gather(acc telegraf.Accumulator) error { _, err := url.Parse(h.Endpoint) if err != nil { - return fmt.Errorf("Invalid URL \"%s\"", h.Endpoint) + return fmt.Errorf("invalid URL \"%s\"", h.Endpoint) } if h.client == nil { tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } h.client = client @@ -102,7 +102,7 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { resp, err := h.client.Get(h.Endpoint) if err != nil { - return fmt.Errorf("Unable to perform HTTP client GET on \"%s\": %s", h.Endpoint, err) + return fmt.Errorf("unable to perform HTTP client GET on \"%s\": %v", h.Endpoint, err) } defer resp.Body.Close() @@ -110,7 +110,7 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { body, err := ioutil.ReadAll(resp.Body) if err != nil { - return fmt.Errorf("Unable to read the HTTP body \"%s\": %s", string(body), err) + return fmt.Errorf("unable to read the HTTP body \"%s\": %v", string(body), err) } if resp.StatusCode != http.StatusOK { @@ -120,7 +120,7 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { dataPoints, err := parse(body) if err != nil { - return fmt.Errorf("Problem with parsing") + return fmt.Errorf("problem with parsing") } // Go through all plugins one by one diff --git a/plugins/inputs/fluentd/fluentd_test.go b/plugins/inputs/fluentd/fluentd_test.go index 6279f6cf5ef7e..41166085a8876 100644 --- a/plugins/inputs/fluentd/fluentd_test.go +++ b/plugins/inputs/fluentd/fluentd_test.go @@ -100,8 +100,8 @@ var ( // {"object:f48698", "dummy", "input", nil, nil, nil}, // {"object:e27138", "dummy", "input", nil, nil, nil}, // {"object:d74060", "monitor_agent", "input", nil, nil, nil}, - {"object:11a5e2c", "stdout", "output", (*float64)(&zero), nil, nil}, - {"object:11237ec", "s3", "output", (*float64)(&zero), (*float64)(&zero), (*float64)(&zero)}, + {"object:11a5e2c", "stdout", "output", &zero, nil, nil}, + {"object:11237ec", "s3", "output", &zero, &zero, &zero}, } fluentdTest = &Fluentd{ Endpoint: "http://localhost:8081", diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index 585a05ee32bea..af19450f1f560 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -128,12 +128,12 @@ func (h *GrayLog) Gather(acc telegraf.Accumulator) error { return err } tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } h.client.SetHTTPClient(client) } @@ -233,7 +233,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { // Prepare URL requestURL, err := url.Parse(serverURL) if err != nil { - return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL) + return "", -1, fmt.Errorf("invalid server URL \"%s\"", serverURL) } // Add X-Requested-By header headers["X-Requested-By"] = "Telegraf" @@ -242,7 +242,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { m := &Messagebody{Metrics: h.Metrics} httpBody, err := json.Marshal(m) if err != nil { - return "", -1, fmt.Errorf("Invalid list of Metrics %s", h.Metrics) + return "", -1, fmt.Errorf("invalid list of Metrics %s", h.Metrics) } method = "POST" content = bytes.NewBuffer(httpBody) @@ -271,7 +271,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", requestURL.String(), resp.StatusCode, http.StatusText(resp.StatusCode), diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index 5844f0e7a8a69..859121cf606ce 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -427,7 +427,7 @@ type buildResponse struct { } func (b *buildResponse) GetTimestamp() time.Time { - return time.Unix(0, int64(b.Timestamp)*int64(time.Millisecond)) + return time.Unix(0, b.Timestamp*int64(time.Millisecond)) } const ( @@ -501,7 +501,7 @@ func mapResultCode(s string) int { func init() { inputs.Add("jenkins", func() telegraf.Input { return &Jenkins{ - MaxBuildAge: internal.Duration{Duration: time.Duration(time.Hour)}, + MaxBuildAge: internal.Duration{Duration: time.Hour}, MaxConnections: 5, MaxSubJobPerLayer: 10, } diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index ed5922ddaa063..6e7a3d5a524fc 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -160,7 +160,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) // Unmarshal json var jsonOut []map[string]interface{} - if err = json.Unmarshal([]byte(body), &jsonOut); err != nil { + if err = json.Unmarshal(body, &jsonOut); err != nil { return nil, fmt.Errorf("error decoding JSON response: %s: %s", err, body) } diff --git a/plugins/inputs/jti_openconfig_telemetry/collection.go b/plugins/inputs/jti_openconfig_telemetry/collection.go index ffd9019f5f317..d1bad8b30c739 100644 --- a/plugins/inputs/jti_openconfig_telemetry/collection.go +++ b/plugins/inputs/jti_openconfig_telemetry/collection.go @@ -17,7 +17,7 @@ func (a CollectionByKeys) Less(i, j int) bool { return a[i].numKeys < a[j].numKe // Checks to see if there is already a group with these tags and returns its index. Returns -1 if unavailable. func (a CollectionByKeys) IsAvailable(tags map[string]string) *DataGroup { - sort.Sort(CollectionByKeys(a)) + sort.Sort(a) // Iterate through all the groups and see if we have group with these tags for _, group := range a { diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index 484c819cf7794..404c62d88c2b8 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -53,7 +53,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { fields := make(map[string]interface{}) - fields["entropy_avail"] = int64(entropyValue) + fields["entropy_avail"] = entropyValue dataFields := bytes.Fields(data) for i, field := range dataFields { @@ -63,25 +63,25 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - fields["interrupts"] = int64(m) + fields["interrupts"] = m case bytes.Equal(field, contextSwitches): m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err } - fields["context_switches"] = int64(m) + fields["context_switches"] = m case bytes.Equal(field, processesForked): m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err } - fields["processes_forked"] = int64(m) + fields["processes_forked"] = m case bytes.Equal(field, bootTime): m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err } - fields["boot_time"] = int64(m) + fields["boot_time"] = m case bytes.Equal(field, diskPages): in, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { @@ -91,8 +91,8 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - fields["disk_pages_in"] = int64(in) - fields["disk_pages_out"] = int64(out) + fields["disk_pages_in"] = in + fields["disk_pages_out"] = out } } diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat.go b/plugins/inputs/kernel_vmstat/kernel_vmstat.go index 7b0292937b1c0..66e7c7d664748 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat.go @@ -45,7 +45,7 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error { return err } - fields[string(field)] = int64(m) + fields[string(field)] = m } } diff --git a/plugins/inputs/kube_inventory/client.go b/plugins/inputs/kube_inventory/client.go index bc26d1a700ec3..5b53dd1fb98d1 100644 --- a/plugins/inputs/kube_inventory/client.go +++ b/plugins/inputs/kube_inventory/client.go @@ -21,7 +21,6 @@ type client struct { } func newClient(baseURL, namespace, bearerToken string, timeout time.Duration, tlsConfig tls.ClientConfig) (*client, error) { - c, err := kubernetes.NewForConfig(&rest.Config{ TLSClientConfig: rest.TLSClientConfig{ ServerName: baseURL, diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index 0a2a882974e67..6ea5de3525220 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -166,7 +166,7 @@ func atoi(s string) int64 { if err != nil { return 0 } - return int64(i) + return i } func convertQuantity(s string, m float64) int64 { diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index b36bbf322cdf7..0e62fccd6d5dd 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -124,7 +124,7 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { client := &http.Client{ Transport: api.Transport, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } var b bytes.Buffer diff --git a/plugins/inputs/marklogic/marklogic_test.go b/plugins/inputs/marklogic/marklogic_test.go index e6057f6e088af..a809f850ff3b4 100644 --- a/plugins/inputs/marklogic/marklogic_test.go +++ b/plugins/inputs/marklogic/marklogic_test.go @@ -27,7 +27,7 @@ func TestMarklogic(t *testing.T) { ml := &Marklogic{ Hosts: []string{"example1"}, - URL: string(ts.URL), + URL: ts.URL, //Sources: []string{"http://localhost:8002/manage/v2/hosts/hostname1?view=status&format=json"}, } diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 7f3d08b118176..1ebbc6bf290d1 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -568,8 +568,8 @@ func (m *Mesos) gatherMainMetrics(u *url.URL, role Role, acc telegraf.Accumulato return err } - if err = json.Unmarshal([]byte(data), &jsonOut); err != nil { - return errors.New("Error decoding JSON response") + if err = json.Unmarshal(data, &jsonOut); err != nil { + return errors.New("error decoding JSON response") } m.filterMetrics(role, &jsonOut) diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index 1afb0046dc3a5..94e1ad74e1d69 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -61,7 +61,7 @@ func (n *Nats) Gather(acc telegraf.Accumulator) error { } stats := new(gnatsd.Varz) - err = json.Unmarshal([]byte(bytes), &stats) + err = json.Unmarshal(bytes, &stats) if err != nil { return err } diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index 6cca64952637f..86e794575a669 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -363,7 +363,7 @@ func TestParseXML(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { var acc testutil.Accumulator - err := n.parseXML(&acc, []byte(test.xmlResponse)) + err := n.parseXML(&acc, test.xmlResponse) if (err != nil) != test.wantErr { t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) } diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index 023b4405e3609..0b092c36d1d73 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -117,7 +117,7 @@ func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]int } else { // Looking for string in answer RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) - find := RegEx.FindString(string(data)) + find := RegEx.FindString(data) if find != "" { setResult(Success, fields, tags, n.Expect) } else { @@ -198,10 +198,10 @@ func (n *NetResponse) Gather(acc telegraf.Accumulator) error { } // Check send and expected string if n.Protocol == "udp" && n.Send == "" { - return errors.New("Send string cannot be empty") + return errors.New("send string cannot be empty") } if n.Protocol == "udp" && n.Expect == "" { - return errors.New("Expected string cannot be empty") + return errors.New("expected string cannot be empty") } // Prepare host and port host, port, err := net.SplitHostPort(n.Address) @@ -212,7 +212,7 @@ func (n *NetResponse) Gather(acc telegraf.Accumulator) error { n.Address = "localhost:" + port } if port == "" { - return errors.New("Bad port") + return errors.New("bad port") } // Prepare data tags := map[string]string{"server": host, "port": port} @@ -226,7 +226,7 @@ func (n *NetResponse) Gather(acc telegraf.Accumulator) error { returnTags, fields = n.UDPGather() tags["protocol"] = "udp" } else { - return errors.New("Bad protocol") + return errors.New("bad protocol") } // Merge the tags for k, v := range returnTags { diff --git a/plugins/inputs/net_response/net_response_test.go b/plugins/inputs/net_response/net_response_test.go index a64d553164a45..3bb78b35121a3 100644 --- a/plugins/inputs/net_response/net_response_test.go +++ b/plugins/inputs/net_response/net_response_test.go @@ -38,7 +38,7 @@ func TestBadProtocol(t *testing.T) { // Error err1 := c.Gather(&acc) require.Error(t, err1) - assert.Equal(t, "Bad protocol", err1.Error()) + assert.Equal(t, "bad protocol", err1.Error()) } func TestNoPort(t *testing.T) { @@ -49,7 +49,7 @@ func TestNoPort(t *testing.T) { } err1 := c.Gather(&acc) require.Error(t, err1) - assert.Equal(t, "Bad port", err1.Error()) + assert.Equal(t, "bad port", err1.Error()) } func TestAddressOnly(t *testing.T) { @@ -79,10 +79,10 @@ func TestSendExpectStrings(t *testing.T) { } err1 := tc.Gather(&acc) require.Error(t, err1) - assert.Equal(t, "Send string cannot be empty", err1.Error()) + assert.Equal(t, "send string cannot be empty", err1.Error()) err2 := uc.Gather(&acc) require.Error(t, err2) - assert.Equal(t, "Expected string cannot be empty", err2.Error()) + assert.Equal(t, "expected string cannot be empty", err2.Error()) } func TestTCPError(t *testing.T) { diff --git a/plugins/inputs/nfsclient/nfsclient_test.go b/plugins/inputs/nfsclient/nfsclient_test.go index 4dab7b320f0c5..11a9e4dd37f08 100644 --- a/plugins/inputs/nfsclient/nfsclient_test.go +++ b/plugins/inputs/nfsclient/nfsclient_test.go @@ -26,7 +26,7 @@ func TestNFSClientParsev3(t *testing.T) { data := strings.Fields(" READLINK: 500 501 502 503 504 505 506 507") nfsclient.parseStat("1.2.3.4:/storage/NFS", "/A", "3", data, &acc) - fields_ops := map[string]interface{}{ + fieldsOps := map[string]interface{}{ "ops": int64(500), "trans": int64(501), "timeouts": int64(502), @@ -36,7 +36,7 @@ func TestNFSClientParsev3(t *testing.T) { "response_time": int64(506), "total_time": int64(507), } - acc.AssertContainsFields(t, "nfs_ops", fields_ops) + acc.AssertContainsFields(t, "nfs_ops", fieldsOps) } func TestNFSClientParsev4(t *testing.T) { @@ -48,7 +48,7 @@ func TestNFSClientParsev4(t *testing.T) { data := strings.Fields(" DESTROY_SESSION: 500 501 502 503 504 505 506 507") nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, &acc) - fields_ops := map[string]interface{}{ + fieldsOps := map[string]interface{}{ "ops": int64(500), "trans": int64(501), "timeouts": int64(502), @@ -58,7 +58,7 @@ func TestNFSClientParsev4(t *testing.T) { "response_time": int64(506), "total_time": int64(507), } - acc.AssertContainsFields(t, "nfs_ops", fields_ops) + acc.AssertContainsFields(t, "nfs_ops", fieldsOps) } func TestNFSClientProcessStat(t *testing.T) { @@ -74,7 +74,7 @@ func TestNFSClientProcessStat(t *testing.T) { nfsclient.processText(scanner, &acc) - fields_readstat := map[string]interface{}{ + fieldsReadstat := map[string]interface{}{ "ops": int64(600), "retrans": int64(1), "bytes": int64(1207), @@ -82,15 +82,15 @@ func TestNFSClientProcessStat(t *testing.T) { "exe": int64(607), } - read_tags := map[string]string{ + readTags := map[string]string{ "serverexport": "1.2.3.4:/storage/NFS", "mountpoint": "/A", "operation": "READ", } - acc.AssertContainsTaggedFields(t, "nfsstat", fields_readstat, read_tags) + acc.AssertContainsTaggedFields(t, "nfsstat", fieldsReadstat, readTags) - fields_writestat := map[string]interface{}{ + fieldsWritestat := map[string]interface{}{ "ops": int64(700), "retrans": int64(1), "bytes": int64(1407), @@ -98,12 +98,12 @@ func TestNFSClientProcessStat(t *testing.T) { "exe": int64(707), } - write_tags := map[string]string{ + writeTags := map[string]string{ "serverexport": "1.2.3.4:/storage/NFS", "mountpoint": "/A", "operation": "WRITE", } - acc.AssertContainsTaggedFields(t, "nfsstat", fields_writestat, write_tags) + acc.AssertContainsTaggedFields(t, "nfsstat", fieldsWritestat, writeTags) } func TestNFSClientProcessFull(t *testing.T) { @@ -119,7 +119,7 @@ func TestNFSClientProcessFull(t *testing.T) { nfsclient.processText(scanner, &acc) - fields_events := map[string]interface{}{ + fieldsEvents := map[string]interface{}{ "inoderevalidates": int64(301736), "dentryrevalidates": int64(22838), "datainvalidates": int64(410979), @@ -148,7 +148,7 @@ func TestNFSClientProcessFull(t *testing.T) { "pnfsreads": int64(0), "pnfswrites": int64(0), } - fields_bytes := map[string]interface{}{ + fieldsBytes := map[string]interface{}{ "normalreadbytes": int64(204440464584), "normalwritebytes": int64(110857586443), "directreadbytes": int64(783170354688), @@ -158,7 +158,7 @@ func TestNFSClientProcessFull(t *testing.T) { "readpages": int64(85749323), "writepages": int64(30784819), } - fields_xprt_tcp := map[string]interface{}{ + fieldsXprtTCP := map[string]interface{}{ "bind_count": int64(1), "connect_count": int64(1), "connect_time": int64(0), @@ -170,7 +170,7 @@ func TestNFSClientProcessFull(t *testing.T) { "backlogutil": int64(0), } - acc.AssertContainsFields(t, "nfs_events", fields_events) - acc.AssertContainsFields(t, "nfs_bytes", fields_bytes) - acc.AssertContainsFields(t, "nfs_xprt_tcp", fields_xprt_tcp) + acc.AssertContainsFields(t, "nfs_events", fieldsEvents) + acc.AssertContainsFields(t, "nfs_bytes", fieldsBytes) + acc.AssertContainsFields(t, "nfs_xprt_tcp", fieldsXprtTCP) } diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index 166444f857050..681c2f6e7f460 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -111,7 +111,7 @@ func (n *NSQ) getHTTPClient() (*http.Client, error) { } httpClient := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } return httpClient, nil } @@ -123,7 +123,7 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { } r, err := n.httpClient.Get(u.String()) if err != nil { - return fmt.Errorf("Error while polling %s: %s", u.String(), err) + return fmt.Errorf("error while polling %s: %s", u.String(), err) } defer r.Body.Close() @@ -133,20 +133,20 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { body, err := ioutil.ReadAll(r.Body) if err != nil { - return fmt.Errorf(`Error reading body: %s`, err) + return fmt.Errorf(`error reading body: %s`, err) } data := &NSQStatsData{} err = json.Unmarshal(body, data) if err != nil { - return fmt.Errorf(`Error parsing response: %s`, err) + return fmt.Errorf(`error parsing response: %s`, err) } // Data was not parsed correctly attempt to use old format. if len(data.Version) < 1 { wrapper := &NSQStats{} err = json.Unmarshal(body, wrapper) if err != nil { - return fmt.Errorf(`Error parsing response: %s`, err) + return fmt.Errorf(`error parsing response: %s`, err) } data = &wrapper.Data } @@ -176,7 +176,7 @@ func buildURL(e string) (*url.URL, error) { u := fmt.Sprintf(requestPattern, e) addr, err := url.Parse(u) if err != nil { - return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err) + return nil, fmt.Errorf("unable to parse address '%s': %s", u, err) } return addr, nil } diff --git a/plugins/inputs/openntpd/openntpd.go b/plugins/inputs/openntpd/openntpd.go index aedff242e9f07..b4a35fb55b8d7 100644 --- a/plugins/inputs/openntpd/openntpd.go +++ b/plugins/inputs/openntpd/openntpd.go @@ -126,8 +126,8 @@ func (n *Openntpd) Gather(acc telegraf.Accumulator) error { fields = strings.Fields(line) // if there is an ntpctl state prefix, remove it and make it it's own tag - if strings.ContainsAny(string(fields[0]), "*") { - tags["state_prefix"] = string(fields[0]) + if strings.ContainsAny(fields[0], "*") { + tags["state_prefix"] = fields[0] fields = fields[1:] } diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index d42e98dd7813b..941dada56cd5c 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -32,7 +32,7 @@ type podMetadata struct { type podResponse struct { Kind string `json:"kind"` - ApiVersion string `json:"apiVersion"` + APIVersion string `json:"apiVersion"` Metadata podMetadata `json:"metadata"` Items []*corev1.Pod `json:"items,string,omitempty"` } @@ -58,13 +58,13 @@ func loadClient(kubeconfigPath string) (*kubernetes.Clientset, error) { func (p *Prometheus) start(ctx context.Context) error { config, err := rest.InClusterConfig() if err != nil { - return fmt.Errorf("Failed to get InClusterConfig - %v", err) + return fmt.Errorf("failed to get InClusterConfig - %v", err) } client, err := kubernetes.NewForConfig(config) if err != nil { u, err := user.Current() if err != nil { - return fmt.Errorf("Failed to get current user - %v", err) + return fmt.Errorf("failed to get current user - %v", err) } configLocation := filepath.Join(u.HomeDir, ".kube/config") @@ -150,13 +150,13 @@ func (p *Prometheus) cAdvisor(ctx context.Context) error { podsURL := fmt.Sprintf("https://%s:10250/pods", p.NodeIP) req, err := http.NewRequest("GET", podsURL, nil) if err != nil { - return fmt.Errorf("Error when creating request to %s to get pod list: %w", podsURL, err) + return fmt.Errorf("error when creating request to %s to get pod list: %w", podsURL, err) } // Update right away so code is not waiting the length of the specified scrape interval initially err = updateCadvisorPodList(p, req) if err != nil { - return fmt.Errorf("Error initially updating pod list: %w", err) + return fmt.Errorf("error initially updating pod list: %w", err) } scrapeInterval := cAdvisorPodListDefaultInterval @@ -171,7 +171,7 @@ func (p *Prometheus) cAdvisor(ctx context.Context) error { case <-time.After(time.Duration(scrapeInterval) * time.Second): err := updateCadvisorPodList(p, req) if err != nil { - return fmt.Errorf("Error updating pod list: %w", err) + return fmt.Errorf("error updating pod list: %w", err) } } } @@ -183,12 +183,12 @@ func updateCadvisorPodList(p *Prometheus, req *http.Request) error { resp, err := httpClient.Do(req) if err != nil { - return fmt.Errorf("Error when making request for pod list: %w", err) + return fmt.Errorf("error when making request for pod list: %w", err) } // If err is nil, still check response code if resp.StatusCode != 200 { - return fmt.Errorf("Error when making request for pod list with status %s", resp.Status) + return fmt.Errorf("error when making request for pod list with status %s", resp.Status) } defer resp.Body.Close() diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index 5973390e94a82..904d5418ec8db 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -39,7 +39,7 @@ func (r *Raindrops) Gather(acc telegraf.Accumulator) error { for _, u := range r.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) continue } @@ -178,9 +178,9 @@ func init() { inputs.Add("raindrops", func() telegraf.Input { return &Raindrops{httpClient: &http.Client{ Transport: &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, }, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, }} }) } diff --git a/plugins/inputs/raindrops/raindrops_test.go b/plugins/inputs/raindrops/raindrops_test.go index 2fed0a35a9af8..f8b766101b189 100644 --- a/plugins/inputs/raindrops/raindrops_test.go +++ b/plugins/inputs/raindrops/raindrops_test.go @@ -62,7 +62,7 @@ func TestRaindropsGeneratesMetrics(t *testing.T) { n := &Raindrops{ Urls: []string{fmt.Sprintf("%s/_raindrops", ts.URL)}, httpClient: &http.Client{Transport: &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, }}, } diff --git a/plugins/inputs/ravendb/ravendb.go b/plugins/inputs/ravendb/ravendb.go index 42b50d0d3816f..f246bd8e97689 100644 --- a/plugins/inputs/ravendb/ravendb.go +++ b/plugins/inputs/ravendb/ravendb.go @@ -40,10 +40,10 @@ type RavenDB struct { Log telegraf.Logger `toml:"-"` client *http.Client - requestUrlServer string - requestUrlDatabases string - requestUrlIndexes string - requestUrlCollection string + requestURLServer string + requestURLDatabases string + requestURLIndexes string + requestURLCollection string } var sampleConfig = ` @@ -168,20 +168,20 @@ func (r *RavenDB) requestJSON(u string, target interface{}) error { func (r *RavenDB) gatherServer(acc telegraf.Accumulator) { serverResponse := &serverMetricsResponse{} - err := r.requestJSON(r.requestUrlServer, &serverResponse) + err := r.requestJSON(r.requestURLServer, &serverResponse) if err != nil { acc.AddError(err) return } tags := map[string]string{ - "cluster_id": serverResponse.Cluster.Id, + "cluster_id": serverResponse.Cluster.ID, "node_tag": serverResponse.Cluster.NodeTag, "url": r.URL, } - if serverResponse.Config.PublicServerUrl != nil { - tags["public_server_url"] = *serverResponse.Config.PublicServerUrl + if serverResponse.Config.PublicServerURL != nil { + tags["public_server_url"] = *serverResponse.Config.PublicServerURL } fields := map[string]interface{}{ @@ -192,13 +192,13 @@ func (r *RavenDB) gatherServer(acc telegraf.Accumulator) { "cluster_index": serverResponse.Cluster.Index, "cluster_node_state": serverResponse.Cluster.NodeState, "config_server_urls": strings.Join(serverResponse.Config.ServerUrls, ";"), - "cpu_assigned_processor_count": serverResponse.Cpu.AssignedProcessorCount, - "cpu_machine_io_wait": serverResponse.Cpu.MachineIoWait, - "cpu_machine_usage": serverResponse.Cpu.MachineUsage, - "cpu_process_usage": serverResponse.Cpu.ProcessUsage, - "cpu_processor_count": serverResponse.Cpu.ProcessorCount, - "cpu_thread_pool_available_worker_threads": serverResponse.Cpu.ThreadPoolAvailableWorkerThreads, - "cpu_thread_pool_available_completion_port_threads": serverResponse.Cpu.ThreadPoolAvailableCompletionPortThreads, + "cpu_assigned_processor_count": serverResponse.CPU.AssignedProcessorCount, + "cpu_machine_io_wait": serverResponse.CPU.MachineIoWait, + "cpu_machine_usage": serverResponse.CPU.MachineUsage, + "cpu_process_usage": serverResponse.CPU.ProcessUsage, + "cpu_processor_count": serverResponse.CPU.ProcessorCount, + "cpu_thread_pool_available_worker_threads": serverResponse.CPU.ThreadPoolAvailableWorkerThreads, + "cpu_thread_pool_available_completion_port_threads": serverResponse.CPU.ThreadPoolAvailableCompletionPortThreads, "databases_loaded_count": serverResponse.Databases.LoadedCount, "databases_total_count": serverResponse.Databases.TotalCount, "disk_remaining_storage_space_percentage": serverResponse.Disk.RemainingStorageSpacePercentage, @@ -208,7 +208,7 @@ func (r *RavenDB) gatherServer(acc telegraf.Accumulator) { "license_expiration_left_in_sec": serverResponse.License.ExpirationLeftInSec, "license_max_cores": serverResponse.License.MaxCores, "license_type": serverResponse.License.Type, - "license_utilized_cpu_cores": serverResponse.License.UtilizedCpuCores, + "license_utilized_cpu_cores": serverResponse.License.UtilizedCPUCores, "memory_allocated_in_mb": serverResponse.Memory.AllocatedMemoryInMb, "memory_installed_in_mb": serverResponse.Memory.InstalledMemoryInMb, "memory_low_memory_severity": serverResponse.Memory.LowMemorySeverity, @@ -221,20 +221,20 @@ func (r *RavenDB) gatherServer(acc telegraf.Accumulator) { "network_last_authorized_non_cluster_admin_request_time_in_sec": serverResponse.Network.LastAuthorizedNonClusterAdminRequestTimeInSec, "network_last_request_time_in_sec": serverResponse.Network.LastRequestTimeInSec, "network_requests_per_sec": serverResponse.Network.RequestsPerSec, - "network_tcp_active_connections": serverResponse.Network.TcpActiveConnections, + "network_tcp_active_connections": serverResponse.Network.TCPActiveConnections, "network_total_requests": serverResponse.Network.TotalRequests, "server_full_version": serverResponse.ServerFullVersion, - "server_process_id": serverResponse.ServerProcessId, + "server_process_id": serverResponse.ServerProcessID, "server_version": serverResponse.ServerVersion, "uptime_in_sec": serverResponse.UpTimeInSec, } - if serverResponse.Config.TcpServerUrls != nil { - fields["config_tcp_server_urls"] = strings.Join(serverResponse.Config.TcpServerUrls, ";") + if serverResponse.Config.TCPServerURLs != nil { + fields["config_tcp_server_urls"] = strings.Join(serverResponse.Config.TCPServerURLs, ";") } - if serverResponse.Config.PublicTcpServerUrls != nil { - fields["config_public_tcp_server_urls"] = strings.Join(serverResponse.Config.PublicTcpServerUrls, ";") + if serverResponse.Config.PublicTCPServerURLs != nil { + fields["config_public_tcp_server_urls"] = strings.Join(serverResponse.Config.PublicTCPServerURLs, ";") } if serverResponse.Certificate.WellKnownAdminCertificates != nil { @@ -247,7 +247,7 @@ func (r *RavenDB) gatherServer(acc telegraf.Accumulator) { func (r *RavenDB) gatherDatabases(acc telegraf.Accumulator) { databasesResponse := &databasesMetricResponse{} - err := r.requestJSON(r.requestUrlDatabases, &databasesResponse) + err := r.requestJSON(r.requestURLDatabases, &databasesResponse) if err != nil { acc.AddError(err) return @@ -255,14 +255,14 @@ func (r *RavenDB) gatherDatabases(acc telegraf.Accumulator) { for _, dbResponse := range databasesResponse.Results { tags := map[string]string{ - "database_id": dbResponse.DatabaseId, + "database_id": dbResponse.DatabaseID, "database_name": dbResponse.DatabaseName, "node_tag": databasesResponse.NodeTag, "url": r.URL, } - if databasesResponse.PublicServerUrl != nil { - tags["public_server_url"] = *databasesResponse.PublicServerUrl + if databasesResponse.PublicServerURL != nil { + tags["public_server_url"] = *databasesResponse.PublicServerURL } fields := map[string]interface{}{ @@ -306,7 +306,7 @@ func (r *RavenDB) gatherDatabases(acc telegraf.Accumulator) { func (r *RavenDB) gatherIndexes(acc telegraf.Accumulator) { indexesResponse := &indexesMetricResponse{} - err := r.requestJSON(r.requestUrlIndexes, &indexesResponse) + err := r.requestJSON(r.requestURLIndexes, &indexesResponse) if err != nil { acc.AddError(err) return @@ -321,8 +321,8 @@ func (r *RavenDB) gatherIndexes(acc telegraf.Accumulator) { "url": r.URL, } - if indexesResponse.PublicServerUrl != nil { - tags["public_server_url"] = *indexesResponse.PublicServerUrl + if indexesResponse.PublicServerURL != nil { + tags["public_server_url"] = *indexesResponse.PublicServerURL } fields := map[string]interface{}{ @@ -347,7 +347,7 @@ func (r *RavenDB) gatherIndexes(acc telegraf.Accumulator) { func (r *RavenDB) gatherCollections(acc telegraf.Accumulator) { collectionsResponse := &collectionsMetricResponse{} - err := r.requestJSON(r.requestUrlCollection, &collectionsResponse) + err := r.requestJSON(r.requestURLCollection, &collectionsResponse) if err != nil { acc.AddError(err) return @@ -362,8 +362,8 @@ func (r *RavenDB) gatherCollections(acc telegraf.Accumulator) { "url": r.URL, } - if collectionsResponse.PublicServerUrl != nil { - tags["public_server_url"] = *collectionsResponse.PublicServerUrl + if collectionsResponse.PublicServerURL != nil { + tags["public_server_url"] = *collectionsResponse.PublicServerURL } fields := map[string]interface{}{ @@ -379,7 +379,7 @@ func (r *RavenDB) gatherCollections(acc telegraf.Accumulator) { } } -func prepareDbNamesUrlPart(dbNames []string) string { +func prepareDBNamesURLPart(dbNames []string) string { if len(dbNames) == 0 { return "" } @@ -396,10 +396,10 @@ func (r *RavenDB) Init() error { r.URL = defaultURL } - r.requestUrlServer = r.URL + "/admin/monitoring/v1/server" - r.requestUrlDatabases = r.URL + "/admin/monitoring/v1/databases" + prepareDbNamesUrlPart(r.DbStatsDbs) - r.requestUrlIndexes = r.URL + "/admin/monitoring/v1/indexes" + prepareDbNamesUrlPart(r.IndexStatsDbs) - r.requestUrlCollection = r.URL + "/admin/monitoring/v1/collections" + prepareDbNamesUrlPart(r.IndexStatsDbs) + r.requestURLServer = r.URL + "/admin/monitoring/v1/server" + r.requestURLDatabases = r.URL + "/admin/monitoring/v1/databases" + prepareDBNamesURLPart(r.DbStatsDbs) + r.requestURLIndexes = r.URL + "/admin/monitoring/v1/indexes" + prepareDBNamesURLPart(r.IndexStatsDbs) + r.requestURLCollection = r.URL + "/admin/monitoring/v1/collections" + prepareDBNamesURLPart(r.IndexStatsDbs) err := choice.CheckSlice(r.StatsInclude, []string{"server", "databases", "indexes", "collections"}) if err != nil { diff --git a/plugins/inputs/ravendb/ravendb_dto.go b/plugins/inputs/ravendb/ravendb_dto.go index af4012f8ceecd..87ae34dccc541 100644 --- a/plugins/inputs/ravendb/ravendb_dto.go +++ b/plugins/inputs/ravendb/ravendb_dto.go @@ -4,10 +4,10 @@ type serverMetricsResponse struct { ServerVersion string `json:"ServerVersion"` ServerFullVersion string `json:"ServerFullVersion"` UpTimeInSec int32 `json:"UpTimeInSec"` - ServerProcessId int32 `json:"ServerProcessId"` + ServerProcessID int32 `json:"ServerProcessId"` Backup backupMetrics `json:"Backup"` Config configurationMetrics `json:"Config"` - Cpu cpuMetrics `json:"Cpu"` + CPU cpuMetrics `json:"Cpu"` Memory memoryMetrics `json:"Memory"` Disk diskMetrics `json:"Disk"` License licenseMetrics `json:"License"` @@ -24,9 +24,9 @@ type backupMetrics struct { type configurationMetrics struct { ServerUrls []string `json:"ServerUrls"` - PublicServerUrl *string `json:"PublicServerUrl"` - TcpServerUrls []string `json:"TcpServerUrls"` - PublicTcpServerUrls []string `json:"PublicTcpServerUrls"` + PublicServerURL *string `json:"PublicServerUrl"` + TCPServerURLs []string `json:"TcpServerUrls"` + PublicTCPServerURLs []string `json:"PublicTcpServerUrls"` } type cpuMetrics struct { @@ -60,12 +60,12 @@ type diskMetrics struct { type licenseMetrics struct { Type string `json:"Type"` ExpirationLeftInSec *float64 `json:"ExpirationLeftInSec"` - UtilizedCpuCores int32 `json:"UtilizedCpuCores"` + UtilizedCPUCores int32 `json:"UtilizedCpuCores"` MaxCores int32 `json:"MaxCores"` } type networkMetrics struct { - TcpActiveConnections int64 `json:"TcpActiveConnections"` + TCPActiveConnections int64 `json:"TcpActiveConnections"` ConcurrentRequestsCount int64 `json:"ConcurrentRequestsCount"` TotalRequests int64 `json:"TotalRequests"` RequestsPerSec float64 `json:"RequestsPerSec"` @@ -83,7 +83,7 @@ type clusterMetrics struct { NodeState string `json:"NodeState"` CurrentTerm int64 `json:"CurrentTerm"` Index int64 `json:"Index"` - Id string `json:"Id"` + ID string `json:"Id"` } type allDatabasesMetrics struct { @@ -93,13 +93,13 @@ type allDatabasesMetrics struct { type databasesMetricResponse struct { Results []*databaseMetrics `json:"Results"` - PublicServerUrl *string `json:"PublicServerUrl"` + PublicServerURL *string `json:"PublicServerUrl"` NodeTag string `json:"NodeTag"` } type databaseMetrics struct { DatabaseName string `json:"DatabaseName"` - DatabaseId string `json:"DatabaseId"` + DatabaseID string `json:"DatabaseId"` UptimeInSec float64 `json:"UptimeInSec"` TimeSinceLastBackupInSec *float64 `json:"TimeSinceLastBackupInSec"` @@ -153,7 +153,7 @@ type databaseStorageMetrics struct { type indexesMetricResponse struct { Results []*perDatabaseIndexMetrics `json:"Results"` - PublicServerUrl *string `json:"PublicServerUrl"` + PublicServerURL *string `json:"PublicServerUrl"` NodeTag string `json:"NodeTag"` } @@ -180,7 +180,7 @@ type indexMetrics struct { type collectionsMetricResponse struct { Results []*perDatabaseCollectionMetrics `json:"Results"` - PublicServerUrl *string `json:"PublicServerUrl"` + PublicServerURL *string `json:"PublicServerUrl"` NodeTag string `json:"NodeTag"` } diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go index efd9f9f3367ae..4d9e70a57a9bd 100644 --- a/plugins/inputs/redfish/redfish.go +++ b/plugins/inputs/redfish/redfish.go @@ -73,7 +73,7 @@ type Chassis struct { type Power struct { PowerSupplies []struct { Name string - MemberId string + MemberID string PowerInputWatts *float64 PowerCapacityWatts *float64 PowerOutputWatts *float64 @@ -83,7 +83,7 @@ type Power struct { } Voltages []struct { Name string - MemberId string + MemberID string ReadingVolts *float64 UpperThresholdCritical *float64 UpperThresholdFatal *float64 @@ -96,7 +96,7 @@ type Power struct { type Thermal struct { Fans []struct { Name string - MemberId string + MemberID string Reading *int64 ReadingUnits *string UpperThresholdCritical *int64 @@ -107,7 +107,7 @@ type Thermal struct { } Temperatures []struct { Name string - MemberId string + MemberID string ReadingCelsius *float64 UpperThresholdCritical *float64 UpperThresholdFatal *float64 @@ -276,7 +276,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range thermal.Temperatures { tags := map[string]string{} - tags["member_id"] = j.MemberId + tags["member_id"] = j.MemberID tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname @@ -301,7 +301,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range thermal.Fans { tags := map[string]string{} fields := make(map[string]interface{}) - tags["member_id"] = j.MemberId + tags["member_id"] = j.MemberID tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname @@ -333,7 +333,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range power.PowerSupplies { tags := map[string]string{} - tags["member_id"] = j.MemberId + tags["member_id"] = j.MemberID tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname @@ -357,7 +357,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range power.Voltages { tags := map[string]string{} - tags["member_id"] = j.MemberId + tags["member_id"] = j.MemberID tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname diff --git a/plugins/inputs/riak/riak.go b/plugins/inputs/riak/riak.go index c0f3990fa8b48..6a1a98e4586a1 100644 --- a/plugins/inputs/riak/riak.go +++ b/plugins/inputs/riak/riak.go @@ -21,10 +21,10 @@ type Riak struct { // NewRiak return a new instance of Riak with a default http client func NewRiak() *Riak { - tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} + tr := &http.Transport{ResponseHeaderTimeout: 3 * time.Second} client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } return &Riak{client: client} } diff --git a/plugins/inputs/salesforce/salesforce.go b/plugins/inputs/salesforce/salesforce.go index b66266d3f17d2..f1ecff8d61a83 100644 --- a/plugins/inputs/salesforce/salesforce.go +++ b/plugins/inputs/salesforce/salesforce.go @@ -62,11 +62,11 @@ const defaultEnvironment = "production" // returns a new Salesforce plugin instance func NewSalesforce() *Salesforce { tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(5 * time.Second), + ResponseHeaderTimeout: 5 * time.Second, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(10 * time.Second), + Timeout: 10 * time.Second, } return &Salesforce{ client: client, @@ -147,7 +147,7 @@ func (s *Salesforce) fetchLimits() (limits, error) { } if resp.StatusCode != http.StatusOK { - return l, fmt.Errorf("Salesforce responded with unexpected status code %d", resp.StatusCode) + return l, fmt.Errorf("salesforce responded with unexpected status code %d", resp.StatusCode) } l = limits{} diff --git a/plugins/inputs/salesforce/salesforce_test.go b/plugins/inputs/salesforce/salesforce_test.go index 288cc0f40af79..3d26d87dda964 100644 --- a/plugins/inputs/salesforce/salesforce_test.go +++ b/plugins/inputs/salesforce/salesforce_test.go @@ -14,7 +14,7 @@ import ( func Test_Gather(t *testing.T) { fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "application/json") - _, _ = w.Write([]byte(testJson)) + _, _ = w.Write([]byte(testJSON)) })) defer fakeServer.Close() @@ -35,7 +35,7 @@ func Test_Gather(t *testing.T) { require.Len(t, m.Tags, 2) } -var testJson = `{ +var testJSON = `{ "ConcurrentAsyncGetReportInstances" : { "Max" : 200, "Remaining" : 200 diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go index a3b8fb8e69a6d..80d6e2cbf74ce 100644 --- a/plugins/inputs/snmp/snmp_mocks_test.go +++ b/plugins/inputs/snmp/snmp_mocks_test.go @@ -27,14 +27,14 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd { func TestMockExecCommand(_ *testing.T) { var cmd []string for _, arg := range os.Args { - if string(arg) == "--" { + if arg == "--" { cmd = []string{} continue } if cmd == nil { continue } - cmd = append(cmd, string(arg)) + cmd = append(cmd, arg) } if cmd == nil { return diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index 5f6ecd3e0bd4f..f3f938657d09a 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -268,7 +268,7 @@ func findnodename(node Node, ids []string) (string, string) { return node.name, "0" } else if node.name != "" && len(ids) == 0 && id != "0" { // node with an instance - return node.name, string(id) + return node.name, id } else if node.name != "" && len(ids) > 0 { // node with subinstances return node.name, strings.Join(ids, ".") @@ -339,7 +339,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } else { oid.Name = oidstring oid.Oid = oidstring - if string(oidstring[:1]) != "." { + if oidstring[:1] != "." { oid.rawOid = "." + oidstring } else { oid.rawOid = oidstring @@ -764,7 +764,7 @@ func (h *Host) HandleResponse( var instance string // Get oidname and instance from translate file oidName, instance = findnodename(initNode, - strings.Split(string(variable.Name[1:]), ".")) + strings.Split(variable.Name[1:], ".")) // Set instance tag // From mapping table mapping, inMappingNoSubTable := h.OidInstanceMapping[oidKey] @@ -798,7 +798,7 @@ func (h *Host) HandleResponse( } tags["snmp_host"], _, _ = net.SplitHostPort(h.Address) fields := make(map[string]interface{}) - fields[string(fieldName)] = variable.Value + fields[fieldName] = variable.Value h.processedOids = append(h.processedOids, variable.Name) acc.AddFields(fieldName, fields, tags) diff --git a/plugins/inputs/sqlserver/connectionstring.go b/plugins/inputs/sqlserver/connectionstring.go index 54b5cd8ae6460..b5f530b9f9510 100644 --- a/plugins/inputs/sqlserver/connectionstring.go +++ b/plugins/inputs/sqlserver/connectionstring.go @@ -6,7 +6,7 @@ import ( ) const ( - emptySqlInstance = "" + emptySQLInstance = "" emptyDatabaseName = "" ) @@ -15,7 +15,7 @@ const ( // If the connection string could not be parsed or sqlInstance/databaseName were not present, a placeholder value is returned func getConnectionIdentifiers(connectionString string) (sqlInstance string, databaseName string) { if len(connectionString) == 0 { - return emptySqlInstance, emptyDatabaseName + return emptySQLInstance, emptyDatabaseName } trimmedConnectionString := strings.TrimSpace(connectionString) @@ -61,7 +61,7 @@ func parseConnectionStringKeyValue(connectionString string) (sqlInstance string, } if sqlInstance == "" { - sqlInstance = emptySqlInstance + sqlInstance = emptySQLInstance } if databaseName == "" { databaseName = emptyDatabaseName @@ -72,12 +72,12 @@ func parseConnectionStringKeyValue(connectionString string) (sqlInstance string, // parseConnectionStringURL parses a URL-formatted connection string and returns the SQL instance and database name func parseConnectionStringURL(connectionString string) (sqlInstance string, databaseName string) { - sqlInstance = emptySqlInstance + sqlInstance = emptySQLInstance databaseName = emptyDatabaseName u, err := url.Parse(connectionString) if err != nil { - return emptySqlInstance, emptyDatabaseName + return emptySQLInstance, emptyDatabaseName } sqlInstance = u.Hostname() diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index f462ebbf876bc..0e23c8635fcaa 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -297,7 +297,7 @@ func TestSqlServer_ConnectionString(t *testing.T) { connectionString = "invalid connection string" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, emptySqlInstance, sqlInstance) + assert.Equal(t, emptySQLInstance, sqlInstance) assert.Equal(t, emptyDatabaseName, database) // Key/value format @@ -323,7 +323,7 @@ func TestSqlServer_ConnectionString(t *testing.T) { connectionString = "invalid connection string" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, emptySqlInstance, sqlInstance) + assert.Equal(t, emptySQLInstance, sqlInstance) assert.Equal(t, emptyDatabaseName, database) } diff --git a/plugins/inputs/statsd/datadog.go b/plugins/inputs/statsd/datadog.go index 377db66e6d3ad..77a01f5586a7b 100644 --- a/plugins/inputs/statsd/datadog.go +++ b/plugins/inputs/statsd/datadog.go @@ -38,29 +38,29 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam // tag is key:value messageRaw := strings.SplitN(message, ":", 2) if len(messageRaw) < 2 || len(messageRaw[0]) < 7 || len(messageRaw[1]) < 3 { - return fmt.Errorf("Invalid message format") + return fmt.Errorf("invalid message format") } header := messageRaw[0] message = messageRaw[1] rawLen := strings.SplitN(header[3:], ",", 2) if len(rawLen) != 2 { - return fmt.Errorf("Invalid message format") + return fmt.Errorf("invalid message format") } titleLen, err := strconv.ParseInt(rawLen[0], 10, 64) if err != nil { - return fmt.Errorf("Invalid message format, could not parse title.length: '%s'", rawLen[0]) + return fmt.Errorf("invalid message format, could not parse title.length: '%s'", rawLen[0]) } if len(rawLen[1]) < 1 { - return fmt.Errorf("Invalid message format, could not parse text.length: '%s'", rawLen[0]) + return fmt.Errorf("invalid message format, could not parse text.length: '%s'", rawLen[0]) } textLen, err := strconv.ParseInt(rawLen[1][:len(rawLen[1])-1], 10, 64) if err != nil { - return fmt.Errorf("Invalid message format, could not parse text.length: '%s'", rawLen[0]) + return fmt.Errorf("invalid message format, could not parse text.length: '%s'", rawLen[0]) } if titleLen+textLen+1 > int64(len(message)) { - return fmt.Errorf("Invalid message format, title.length and text.length exceed total message length") + return fmt.Errorf("invalid message format, title.length and text.length exceed total message length") } rawTitle := message[:titleLen] @@ -68,14 +68,14 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam message = message[titleLen+1+textLen:] if len(rawTitle) == 0 || len(rawText) == 0 { - return fmt.Errorf("Invalid event message format: empty 'title' or 'text' field") + return fmt.Errorf("invalid event message format: empty 'title' or 'text' field") } name := rawTitle tags := make(map[string]string, strings.Count(message, ",")+2) // allocate for the approximate number of tags fields := make(map[string]interface{}, 9) fields["alert_type"] = eventInfo // default event type - fields["text"] = uncommenter.Replace(string(rawText)) + fields["text"] = uncommenter.Replace(rawText) if defaultHostname != "" { tags["source"] = defaultHostname } diff --git a/plugins/inputs/suricata/suricata.go b/plugins/inputs/suricata/suricata.go index 8cf374df65d3f..98ca348dce711 100644 --- a/plugins/inputs/suricata/suricata.go +++ b/plugins/inputs/suricata/suricata.go @@ -149,7 +149,7 @@ func flexFlatten(outmap map[string]interface{}, field string, v interface{}, del case float64: outmap[field] = v.(float64) default: - return fmt.Errorf("Unsupported type %T encountered", t) + return fmt.Errorf("unsupported type %T encountered", t) } return nil } @@ -157,7 +157,7 @@ func flexFlatten(outmap map[string]interface{}, field string, v interface{}, del func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { // initial parsing var result map[string]interface{} - err := json.Unmarshal([]byte(sjson), &result) + err := json.Unmarshal(sjson, &result) if err != nil { acc.AddError(err) return diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index 7259e658da5a9..0570c8135a418 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -42,9 +42,11 @@ func TestSuricataLarge(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(data)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write(data) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.Wait(1) } diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index f8916242286e1..fa669a2a024ed 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -37,7 +37,7 @@ const maxMetadataSamples = 100 // Number of resources to sample for metric metad const maxRealtimeMetrics = 50000 // Absolute maximum metrics per realtime query -const hwMarkTTL = time.Duration(4 * time.Hour) +const hwMarkTTL = 4 * time.Hour type queryChunk []types.PerfQuerySpec diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index 6b6d21fc0d4f0..29d88dbfdce05 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -137,7 +137,7 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr fields := make(map[string]interface{}) for scanner.Scan() { line := scanner.Text() - parts := zookeeperFormatRE.FindStringSubmatch(string(line)) + parts := zookeeperFormatRE.FindStringSubmatch(line) if len(parts) != 3 { return fmt.Errorf("unexpected line in mntr response: %q", line) @@ -147,7 +147,7 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr if measurement == "server_state" { zookeeperState = parts[2] } else { - sValue := string(parts[2]) + sValue := parts[2] iVal, err := strconv.ParseInt(sValue, 10, 64) if err == nil { diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go index 864bd60c853f1..0ee62f1e94fc2 100644 --- a/plugins/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -133,11 +133,11 @@ func buildMetrics(m telegraf.Metric) (map[string]Point, error) { func (p *Point) setValue(v interface{}) error { switch d := v.(type) { case int: - p[1] = float64(int(d)) + p[1] = float64(d) case int32: - p[1] = float64(int32(d)) + p[1] = float64(d) case int64: - p[1] = float64(int64(d)) + p[1] = float64(d) case float32: p[1] = float64(d) case float64: diff --git a/plugins/outputs/cloud_pubsub/pubsub_test.go b/plugins/outputs/cloud_pubsub/pubsub_test.go index b3948573b7163..967a33d742c3c 100644 --- a/plugins/outputs/cloud_pubsub/pubsub_test.go +++ b/plugins/outputs/cloud_pubsub/pubsub_test.go @@ -183,7 +183,7 @@ func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string if err != nil { t.Fatalf("Unable to decode expected base64-encoded message: %s", err) } - data = []byte(v) + data = v } parsed, err := p.Parse(data) diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 8931986dd2fba..2c57d6fc584a0 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -20,7 +20,7 @@ import ( const ( oneAgentMetricsURL = "http://127.0.0.1:14499/metrics/ingest" - dtIngestApiLineLimit = 1000 + dtIngestAPILineLimit = 1000 ) var ( @@ -235,7 +235,7 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { fmt.Fprintf(&buf, "%s%s %v\n", metricID, tagb.String(), value) } - if metricCounter%dtIngestApiLineLimit == 0 { + if metricCounter%dtIngestAPILineLimit == 0 { err = d.send(buf.Bytes()) if err != nil { return err diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index c91955ced2e0d..1d9f6725206a8 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -225,7 +225,7 @@ func verifyValue(v interface{}) bool { func (g *Gauge) setValue(v interface{}) error { switch d := v.(type) { case int64: - g.Value = float64(int64(d)) + g.Value = float64(d) case uint64: g.Value = float64(d) case float64: diff --git a/plugins/outputs/sensu/sensu.go b/plugins/outputs/sensu/sensu.go index a3857b2cfceb9..568f8f7a144e4 100644 --- a/plugins/outputs/sensu/sensu.go +++ b/plugins/outputs/sensu/sensu.go @@ -23,7 +23,7 @@ import ( ) const ( - defaultUrl = "http://127.0.0.1:3031" + defaultURL = "http://127.0.0.1:3031" defaultClientTimeout = 5 * time.Second defaultContentType = "application/json; charset=utf-8" ) @@ -82,9 +82,9 @@ type SensuMetrics struct { } type Sensu struct { - ApiKey *string `toml:"api_key"` - AgentApiUrl *string `toml:"agent_api_url"` - BackendApiUrl *string `toml:"backend_api_url"` + APIKey *string `toml:"api_key"` + AgentAPIURL *string `toml:"agent_api_url"` + BackendAPIURL *string `toml:"backend_api_url"` Entity *SensuEntity `toml:"entity"` Tags map[string]string `toml:"tags"` Metrics *SensuMetrics `toml:"metrics"` @@ -93,7 +93,7 @@ type Sensu struct { Timeout config.Duration `toml:"timeout"` ContentEncoding string `toml:"content_encoding"` - EndpointUrl string + EndpointURL string OutEntity *OutputEntity Log telegraf.Logger `toml:"-"` @@ -219,7 +219,7 @@ func (s *Sensu) createClient() (*http.Client, error) { } func (s *Sensu) Connect() error { - err := s.setEndpointUrl() + err := s.setEndpointURL() if err != nil { return err } @@ -292,7 +292,7 @@ func (s *Sensu) Write(metrics []telegraf.Metric) error { } } - reqBody, err := s.encodeToJson(points) + reqBody, err := s.encodeToJSON(points) if err != nil { return err } @@ -313,7 +313,7 @@ func (s *Sensu) write(reqBody []byte) error { reqBodyBuffer = rc } - req, err := http.NewRequest(method, s.EndpointUrl, reqBodyBuffer) + req, err := http.NewRequest(method, s.EndpointURL, reqBodyBuffer) if err != nil { return err } @@ -325,8 +325,8 @@ func (s *Sensu) write(reqBody []byte) error { req.Header.Set("Content-Encoding", "gzip") } - if s.ApiKey != nil { - req.Header.Set("Authorization", "Key "+*s.ApiKey) + if s.APIKey != nil { + req.Header.Set("Authorization", "Key "+*s.APIKey) } resp, err := s.client.Do(req) @@ -342,7 +342,7 @@ func (s *Sensu) write(reqBody []byte) error { } s.Log.Debugf("Failed to write, response: %v", string(bodyData)) if resp.StatusCode < 400 || resp.StatusCode > 499 { - return fmt.Errorf("when writing to [%s] received status code: %d", s.EndpointUrl, resp.StatusCode) + return fmt.Errorf("when writing to [%s] received status code: %d", s.EndpointURL, resp.StatusCode) } } @@ -350,37 +350,37 @@ func (s *Sensu) write(reqBody []byte) error { } // Resolves the event write endpoint -func (s *Sensu) setEndpointUrl() error { +func (s *Sensu) setEndpointURL() error { var ( - endpointUrl string - path_suffix string + endpointURL string + pathSuffix string ) - if s.BackendApiUrl != nil { - endpointUrl = *s.BackendApiUrl + if s.BackendAPIURL != nil { + endpointURL = *s.BackendAPIURL namespace := "default" if s.Entity != nil && s.Entity.Namespace != nil { namespace = *s.Entity.Namespace } - path_suffix = "/api/core/v2/namespaces/" + namespace + "/events" - } else if s.AgentApiUrl != nil { - endpointUrl = *s.AgentApiUrl - path_suffix = "/events" + pathSuffix = "/api/core/v2/namespaces/" + namespace + "/events" + } else if s.AgentAPIURL != nil { + endpointURL = *s.AgentAPIURL + pathSuffix = "/events" } - if len(endpointUrl) == 0 { - s.Log.Debugf("no backend or agent API URL provided, falling back to default agent API URL %s", defaultUrl) - endpointUrl = defaultUrl - path_suffix = "/events" + if len(endpointURL) == 0 { + s.Log.Debugf("no backend or agent API URL provided, falling back to default agent API URL %s", defaultURL) + endpointURL = defaultURL + pathSuffix = "/events" } - u, err := url.Parse(endpointUrl) + u, err := url.Parse(endpointURL) if err != nil { return err } - u.Path = path.Join(u.Path, path_suffix) - s.EndpointUrl = u.String() + u.Path = path.Join(u.Path, pathSuffix) + s.EndpointURL = u.String() return nil } @@ -389,12 +389,12 @@ func (s *Sensu) Init() error { if len(s.ContentEncoding) != 0 { validEncoding := []string{"identity", "gzip"} if !choice.Contains(s.ContentEncoding, validEncoding) { - return fmt.Errorf("Unsupported content_encoding [%q] specified", s.ContentEncoding) + return fmt.Errorf("unsupported content_encoding [%q] specified", s.ContentEncoding) } } - if s.BackendApiUrl != nil && s.ApiKey == nil { - return fmt.Errorf("backend_api_url [%q] specified, but no API Key provided", *s.BackendApiUrl) + if s.BackendAPIURL != nil && s.APIKey == nil { + return fmt.Errorf("backend_api_url [%q] specified, but no API Key provided", *s.BackendAPIURL) } return nil @@ -404,18 +404,18 @@ func init() { outputs.Add("sensu", func() telegraf.Output { // Default configuration values - // make a string from the defaultUrl const - agentApiUrl := defaultUrl + // make a string from the defaultURL const + agentAPIURL := defaultURL return &Sensu{ - AgentApiUrl: &agentApiUrl, + AgentAPIURL: &agentAPIURL, Timeout: config.Duration(defaultClientTimeout), ContentEncoding: "identity", } }) } -func (s *Sensu) encodeToJson(metricPoints []*OutputMetric) ([]byte, error) { +func (s *Sensu) encodeToJSON(metricPoints []*OutputMetric) ([]byte, error) { timestamp := time.Now().Unix() check, err := s.getCheck(metricPoints) @@ -439,7 +439,7 @@ func (s *Sensu) encodeToJson(metricPoints []*OutputMetric) ([]byte, error) { // Constructs the entity payload // Throws when no entity name is provided and fails resolve to hostname func (s *Sensu) setEntity() error { - if s.BackendApiUrl != nil { + if s.BackendAPIURL != nil { var entityName string if s.Entity != nil && s.Entity.Name != nil { entityName = *s.Entity.Name diff --git a/plugins/outputs/sensu/sensu_test.go b/plugins/outputs/sensu/sensu_test.go index 4184a9976fa89..249775727a481 100644 --- a/plugins/outputs/sensu/sensu_test.go +++ b/plugins/outputs/sensu/sensu_test.go @@ -17,58 +17,58 @@ import ( ) func TestResolveEventEndpointUrl(t *testing.T) { - agentApiUrl := "http://127.0.0.1:3031" - backendApiUrl := "http://127.0.0.1:8080" + agentAPIURL := "http://127.0.0.1:3031" + backendAPIURL := "http://127.0.0.1:8080" entityNamespace := "test-namespace" emptyString := "" tests := []struct { name string plugin *Sensu - expectedEndpointUrl string + expectedEndpointURL string }{ { name: "agent event endpoint", plugin: &Sensu{ - AgentApiUrl: &agentApiUrl, + AgentAPIURL: &agentAPIURL, Log: testutil.Logger{}, }, - expectedEndpointUrl: "http://127.0.0.1:3031/events", + expectedEndpointURL: "http://127.0.0.1:3031/events", }, { name: "backend event endpoint with default namespace", plugin: &Sensu{ - AgentApiUrl: &agentApiUrl, - BackendApiUrl: &backendApiUrl, + AgentAPIURL: &agentAPIURL, + BackendAPIURL: &backendAPIURL, Log: testutil.Logger{}, }, - expectedEndpointUrl: "http://127.0.0.1:8080/api/core/v2/namespaces/default/events", + expectedEndpointURL: "http://127.0.0.1:8080/api/core/v2/namespaces/default/events", }, { name: "backend event endpoint with namespace declared", plugin: &Sensu{ - AgentApiUrl: &agentApiUrl, - BackendApiUrl: &backendApiUrl, + AgentAPIURL: &agentAPIURL, + BackendAPIURL: &backendAPIURL, Entity: &SensuEntity{ Namespace: &entityNamespace, }, Log: testutil.Logger{}, }, - expectedEndpointUrl: "http://127.0.0.1:8080/api/core/v2/namespaces/test-namespace/events", + expectedEndpointURL: "http://127.0.0.1:8080/api/core/v2/namespaces/test-namespace/events", }, { - name: "agent event endpoint due to empty AgentApiUrl", + name: "agent event endpoint due to empty AgentAPIURL", plugin: &Sensu{ - AgentApiUrl: &emptyString, + AgentAPIURL: &emptyString, Log: testutil.Logger{}, }, - expectedEndpointUrl: "http://127.0.0.1:3031/events", + expectedEndpointURL: "http://127.0.0.1:3031/events", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.plugin.setEndpointUrl() + err := tt.plugin.setEndpointURL() require.Equal(t, err, error(nil)) - require.Equal(t, tt.expectedEndpointUrl, tt.plugin.EndpointUrl) + require.Equal(t, tt.expectedEndpointURL, tt.plugin.EndpointURL) }) } } @@ -77,23 +77,23 @@ func TestConnectAndWrite(t *testing.T) { ts := httptest.NewServer(http.NotFoundHandler()) defer ts.Close() - testUrl := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) - testApiKey := "a0b1c2d3-e4f5-g6h7-i8j9-k0l1m2n3o4p5" + testURL := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) + testAPIKey := "a0b1c2d3-e4f5-g6h7-i8j9-k0l1m2n3o4p5" testCheck := "telegraf" testEntity := "entity1" testNamespace := "default" testHandler := "influxdb" testTagName := "myTagName" testTagValue := "myTagValue" - expectedAuthHeader := fmt.Sprintf("Key %s", testApiKey) - expectedUrl := fmt.Sprintf("/api/core/v2/namespaces/%s/events", testNamespace) + expectedAuthHeader := fmt.Sprintf("Key %s", testAPIKey) + expectedURL := fmt.Sprintf("/api/core/v2/namespaces/%s/events", testNamespace) expectedPointName := "cpu" expectedPointValue := float64(42) plugin := &Sensu{ - AgentApiUrl: nil, - BackendApiUrl: &testUrl, - ApiKey: &testApiKey, + AgentAPIURL: nil, + BackendAPIURL: &testURL, + APIKey: &testAPIKey, Check: &SensuCheck{ Name: &testCheck, }, @@ -115,8 +115,8 @@ func TestConnectAndWrite(t *testing.T) { t.Run("write", func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, expectedUrl, r.URL.String()) - require.Equal(t, expectedAuthHeader, (r.Header.Get("Authorization"))) + require.Equal(t, expectedURL, r.URL.String()) + require.Equal(t, expectedAuthHeader, r.Header.Get("Authorization")) // let's make sure what we received is a valid Sensu event that contains all of the expected data body, err := ioutil.ReadAll(r.Body) require.NoError(t, err) diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index 3bd38614b985e..4d561a27b5007 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -71,11 +71,11 @@ var sampleConfig = ` // Connect initiates the primary connection to the GCP project. func (s *Stackdriver) Connect() error { if s.Project == "" { - return fmt.Errorf("Project is a required field for stackdriver output") + return fmt.Errorf("project is a required field for stackdriver output") } if s.Namespace == "" { - return fmt.Errorf("Namespace is a required field for stackdriver output") + return fmt.Errorf("namespace is a required field for stackdriver output") } if s.ResourceType == "" { @@ -300,7 +300,7 @@ func getStackdriverTypedValue(value interface{}) (*monitoringpb.TypedValue, erro case int64: return &monitoringpb.TypedValue{ Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: int64(v), + Int64Value: v, }, }, nil case float64: @@ -312,7 +312,7 @@ func getStackdriverTypedValue(value interface{}) (*monitoringpb.TypedValue, erro case bool: return &monitoringpb.TypedValue{ Value: &monitoringpb.TypedValue_BoolValue{ - BoolValue: bool(v), + BoolValue: v, }, }, nil case string: diff --git a/plugins/outputs/timestream/timestream_test.go b/plugins/outputs/timestream/timestream_test.go index 58984c50b8ad2..67cdb4495c1d8 100644 --- a/plugins/outputs/timestream/timestream_test.go +++ b/plugins/outputs/timestream/timestream_test.go @@ -635,8 +635,8 @@ func TestTransformMetricsUnsupportedFieldsAreSkipped(t *testing.T) { func comparisonTest(t *testing.T, mappingMode string, telegrafMetrics []telegraf.Metric, - timestreamRecords []*timestreamwrite.WriteRecordsInput) { - + timestreamRecords []*timestreamwrite.WriteRecordsInput, +) { var plugin ts.Timestream switch mappingMode { case ts.MappingModeSingleTable: @@ -668,8 +668,8 @@ func comparisonTest(t *testing.T, func arrayContains( array []*timestreamwrite.WriteRecordsInput, - element *timestreamwrite.WriteRecordsInput) bool { - + element *timestreamwrite.WriteRecordsInput, +) bool { sortWriteInputForComparison(*element) for _, a := range array { diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index f793d62b89a80..3ad4e803b9f6a 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -316,7 +316,7 @@ func buildValue(v interface{}, name string, w *Wavefront) (float64, error) { for prefix, mappings := range w.StringToNumber { if strings.HasPrefix(name, prefix) { for _, mapping := range mappings { - val, hasVal := mapping[string(p)] + val, hasVal := mapping[p] if hasVal { return val, nil } diff --git a/plugins/parsers/influx/machine_test.go b/plugins/parsers/influx/machine_test.go index 735b5b9114ddd..e8e0357fdb33f 100644 --- a/plugins/parsers/influx/machine_test.go +++ b/plugins/parsers/influx/machine_test.go @@ -2152,7 +2152,7 @@ func TestStreamMachine(t *testing.T) { for _, tt := range tests { tc = append(tc, testcase{ name: tt.name, - input: bytes.NewBuffer([]byte(tt.input)), + input: bytes.NewBuffer(tt.input), results: tt.results, err: tt.err, }) @@ -2191,7 +2191,7 @@ func TestStreamMachinePosition(t *testing.T) { for _, tt := range positionTests { tc = append(tc, testcase{ name: tt.name, - input: bytes.NewBuffer([]byte(tt.input)), + input: bytes.NewBuffer(tt.input), lineno: tt.lineno, column: tt.column, }) diff --git a/plugins/parsers/nagios/parser.go b/plugins/parsers/nagios/parser.go index e4058852bf2e2..b347195ab3c37 100644 --- a/plugins/parsers/nagios/parser.go +++ b/plugins/parsers/nagios/parser.go @@ -194,7 +194,7 @@ func parsePerfData(perfdatas string, timestamp time.Time) ([]telegraf.Metric, er fieldName := strings.Trim(perf[1], "'") tags := map[string]string{"perfdata": fieldName} if perf[3] != "" { - str := string(perf[3]) + str := perf[3] if str != "" { tags["unit"] = str } @@ -202,10 +202,10 @@ func parsePerfData(perfdatas string, timestamp time.Time) ([]telegraf.Metric, er fields := make(map[string]interface{}) if perf[2] == "U" { - return nil, errors.New("Value undetermined") + return nil, errors.New("value undetermined") } - f, err := strconv.ParseFloat(string(perf[2]), 64) + f, err := strconv.ParseFloat(perf[2], 64) if err == nil { fields["value"] = f } @@ -264,14 +264,14 @@ const ( MinFloat64 = 4.940656458412465441765687928682213723651e-324 // 1 / 2**(1023 - 1 + 52) ) -var ErrBadThresholdFormat = errors.New("Bad threshold format") +var ErrBadThresholdFormat = errors.New("bad threshold format") // Handles all cases from https://nagios-plugins.org/doc/guidelines.html#THRESHOLDFORMAT func parseThreshold(threshold string) (min float64, max float64, err error) { thresh := strings.Split(threshold, ":") switch len(thresh) { case 1: - max, err = strconv.ParseFloat(string(thresh[0]), 64) + max, err = strconv.ParseFloat(thresh[0], 64) if err != nil { return 0, 0, ErrBadThresholdFormat } @@ -281,7 +281,7 @@ func parseThreshold(threshold string) (min float64, max float64, err error) { if thresh[0] == "~" { min = MinFloat64 } else { - min, err = strconv.ParseFloat(string(thresh[0]), 64) + min, err = strconv.ParseFloat(thresh[0], 64) if err != nil { min = 0 } @@ -290,7 +290,7 @@ func parseThreshold(threshold string) (min float64, max float64, err error) { if thresh[1] == "" { max = MaxFloat64 } else { - max, err = strconv.ParseFloat(string(thresh[1]), 64) + max, err = strconv.ParseFloat(thresh[1], 64) if err != nil { return 0, 0, ErrBadThresholdFormat } diff --git a/plugins/parsers/value/parser.go b/plugins/parsers/value/parser.go index 95c87a2eae982..d4a9046e020a9 100644 --- a/plugins/parsers/value/parser.go +++ b/plugins/parsers/value/parser.go @@ -28,7 +28,7 @@ func (v *ValueParser) Parse(buf []byte) ([]telegraf.Metric, error) { if len(values) < 1 { return []telegraf.Metric{}, nil } - vStr = string(values[len(values)-1]) + vStr = values[len(values)-1] } var value interface{} @@ -65,7 +65,7 @@ func (v *ValueParser) ParseLine(line string) (telegraf.Metric, error) { } if len(metrics) < 1 { - return nil, fmt.Errorf("Can not parse the line: %s, for data format: value", line) + return nil, fmt.Errorf("can not parse the line: %s, for data format: value", line) } return metrics[0], nil diff --git a/plugins/serializers/splunkmetric/splunkmetric_test.go b/plugins/serializers/splunkmetric/splunkmetric_test.go index c00bcc7798aac..f00d5d8da2176 100644 --- a/plugins/serializers/splunkmetric/splunkmetric_test.go +++ b/plugins/serializers/splunkmetric/splunkmetric_test.go @@ -33,7 +33,7 @@ func TestSerializeMetricFloat(t *testing.T) { buf, err = s.Serialize(m) assert.NoError(t, err) expS := `{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":1529875740.819}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMetricFloatHec(t *testing.T) { @@ -53,7 +53,7 @@ func TestSerializeMetricFloatHec(t *testing.T) { buf, err = s.Serialize(m) assert.NoError(t, err) expS := `{"time":1529875740.819,"fields":{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMetricInt(t *testing.T) { @@ -73,7 +73,7 @@ func TestSerializeMetricInt(t *testing.T) { assert.NoError(t, err) expS := `{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMetricIntHec(t *testing.T) { @@ -93,7 +93,7 @@ func TestSerializeMetricIntHec(t *testing.T) { assert.NoError(t, err) expS := `{"time":0,"fields":{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMetricBool(t *testing.T) { @@ -113,7 +113,7 @@ func TestSerializeMetricBool(t *testing.T) { assert.NoError(t, err) expS := `{"_value":1,"container-name":"telegraf-test","metric_name":"docker.oomkiller","time":0}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMetricBoolHec(t *testing.T) { @@ -133,7 +133,7 @@ func TestSerializeMetricBoolHec(t *testing.T) { assert.NoError(t, err) expS := `{"time":0,"fields":{"_value":0,"container-name":"telegraf-test","metric_name":"docker.oomkiller"}}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMetricString(t *testing.T) { @@ -154,7 +154,7 @@ func TestSerializeMetricString(t *testing.T) { assert.NoError(t, err) expS := `{"_value":5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) assert.NoError(t, err) } @@ -186,7 +186,7 @@ func TestSerializeBatch(t *testing.T) { assert.NoError(t, err) expS := `{"_value":42,"metric_name":"cpu.value","time":0}{"_value":92,"metric_name":"cpu.value","time":0}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMulti(t *testing.T) { @@ -208,7 +208,7 @@ func TestSerializeMulti(t *testing.T) { assert.NoError(t, err) expS := `{"metric_name:cpu.system":8,"metric_name:cpu.user":42,"time":0}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeBatchHec(t *testing.T) { @@ -239,7 +239,7 @@ func TestSerializeBatchHec(t *testing.T) { assert.NoError(t, err) expS := `{"time":0,"fields":{"_value":42,"metric_name":"cpu.value"}}{"time":0,"fields":{"_value":92,"metric_name":"cpu.value"}}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } func TestSerializeMultiHec(t *testing.T) { @@ -261,5 +261,5 @@ func TestSerializeMultiHec(t *testing.T) { assert.NoError(t, err) expS := `{"time":0,"fields":{"metric_name:cpu.system":8,"metric_name:cpu.usage":42}}` - assert.Equal(t, string(expS), string(buf)) + assert.Equal(t, expS, string(buf)) } From 29ac77906d72af45b7c609fa1200aa44fb1c10d7 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 24 Mar 2021 10:59:05 -0500 Subject: [PATCH 330/761] Change duplicate kubernetes import and update protobuf to v1.5.1 (#9039) * Change import and update protobuf * fix check-deps * go mod tidy * keep imports consistent --- docs/LICENSE_OF_DEPENDENCIES.md | 2 -- go.mod | 6 ++---- go.sum | 15 ++++++++------- plugins/inputs/kube_inventory/kube_state.go | 2 +- plugins/inputs/prometheus/kubernetes.go | 4 ++-- plugins/inputs/prometheus/kubernetes_test.go | 4 ++-- plugins/inputs/prometheus/prometheus.go | 4 ++-- plugins/inputs/prometheus/prometheus_test.go | 2 +- 8 files changed, 18 insertions(+), 21 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 4ca2e10c52575..cbf0d5f290f78 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -115,7 +115,6 @@ following works: - github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) - github.com/klauspost/compress [BSD 3-Clause Clear License](https://github.com/klauspost/compress/blob/master/LICENSE) - github.com/konsorten/go-windows-terminal-sequences [MIT License](https://github.com/konsorten/go-windows-terminal-sequences/blob/master/LICENSE) -- github.com/kubernetes/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) - github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) - github.com/mattn/go-isatty [MIT License](https://github.com/mattn/go-isatty/blob/master/LICENSE) @@ -189,7 +188,6 @@ following works: - golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE) - golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE) - golang.org/x/time [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE) -- golang.org/x/xerrors [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE) - golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 54eef23f3daf2..34d3d9a56419a 100644 --- a/go.mod +++ b/go.mod @@ -66,9 +66,9 @@ require ( github.com/gofrs/uuid v2.1.0+incompatible github.com/gogo/protobuf v1.3.1 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec - github.com/golang/protobuf v1.4.3 + github.com/golang/protobuf v1.5.1 github.com/golang/snappy v0.0.1 - github.com/google/go-cmp v0.5.4 + github.com/google/go-cmp v0.5.5 github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.13 github.com/gorilla/mux v1.6.2 @@ -92,7 +92,6 @@ require ( github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee github.com/kylelemons/godebug v1.1.0 // indirect github.com/lib/pq v1.3.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 @@ -162,7 +161,6 @@ require ( k8s.io/api v0.20.4 k8s.io/apimachinery v0.20.4 k8s.io/client-go v0.20.4 - k8s.io/klog v1.0.0 // indirect modernc.org/sqlite v1.7.4 ) diff --git a/go.sum b/go.sum index 4c5884da2485b..4638a0892eb9c 100644 --- a/go.sum +++ b/go.sum @@ -336,8 +336,10 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -356,8 +358,9 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= @@ -503,8 +506,6 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee h1:MB75LRhfeLER2RF7neSVpYuX/lL8aPi3yPtv5vdOJmk= -github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= @@ -1030,8 +1031,10 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -1109,8 +1112,6 @@ k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp k8s.io/client-go v0.20.4 h1:85crgh1IotNkLpKYKZHVNI1JT86nr/iDCvq2iWKsql4= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index 6ea5de3525220..3aec920886f54 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -10,7 +10,7 @@ import ( "sync" "time" - "github.com/kubernetes/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/api/resource" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 941dada56cd5c..f3fe461450fd0 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -16,10 +16,10 @@ import ( "time" "github.com/ghodss/yaml" - "github.com/kubernetes/apimachinery/pkg/fields" - "github.com/kubernetes/apimachinery/pkg/labels" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 662af9fc46fb9..72f995c3112c9 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -4,9 +4,9 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/kubernetes/apimachinery/pkg/fields" - "github.com/kubernetes/apimachinery/pkg/labels" "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index c3ceb01c73ae8..f85ec44142019 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -18,8 +18,8 @@ import ( "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" parser_v2 "github.com/influxdata/telegraf/plugins/parsers/prometheus" - "github.com/kubernetes/apimachinery/pkg/fields" - "github.com/kubernetes/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" ) const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1` diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index 230934d0e5e67..f5b0d19e41a87 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -12,9 +12,9 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/kubernetes/apimachinery/pkg/fields" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/fields" ) const sampleTextFormat = `# HELP go_gc_duration_seconds A summary of the GC invocation durations. From 8564d928dfb7d0ed86b8f085984dc8be440c0688 Mon Sep 17 00:00:00 2001 From: Jeff Ashton Date: Wed, 24 Mar 2021 14:29:22 -0400 Subject: [PATCH 331/761] Simplifying the kinesis output tests (#8970) --- plugins/outputs/kinesis/kinesis_test.go | 184 ++++++++++-------------- 1 file changed, 74 insertions(+), 110 deletions(-) diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 24de7413c1718..22b8e83e48e24 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" @@ -16,6 +17,10 @@ import ( "github.com/stretchr/testify/require" ) +const testPartitionKey = "partitionKey" +const testShardID = "shardId-000000000003" +const testSequenceNumber = "49543463076570308322303623326179887152428262250726293588" +const testStreamName = "streamName" const zero int64 = 0 func TestPartitionKey(t *testing.T) { @@ -105,14 +110,9 @@ func TestPartitionKey(t *testing.T) { func TestWriteKinesis_WhenSuccess(t *testing.T) { assert := assert.New(t) - partitionKey := "partitionKey" - shard := "shard" - sequenceNumber := "sequenceNumber" - streamName := "stream" - records := []*kinesis.PutRecordsRequestEntry{ { - PartitionKey: &partitionKey, + PartitionKey: aws.String(testPartitionKey), Data: []byte{0x65}, }, } @@ -122,26 +122,24 @@ func TestWriteKinesis_WhenSuccess(t *testing.T) { 0, []*kinesis.PutRecordsResultEntry{ { - ErrorCode: nil, - ErrorMessage: nil, - SequenceNumber: &sequenceNumber, - ShardId: &shard, + SequenceNumber: aws.String(testSequenceNumber), + ShardId: aws.String(testShardID), }, }, ) k := KinesisOutput{ Log: testutil.Logger{}, - StreamName: streamName, + StreamName: testStreamName, svc: svc, } elapsed := k.writeKinesis(records) assert.GreaterOrEqual(elapsed.Nanoseconds(), zero) - svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { - StreamName: &streamName, + StreamName: aws.String(testStreamName), Records: records, }, }) @@ -150,14 +148,9 @@ func TestWriteKinesis_WhenSuccess(t *testing.T) { func TestWriteKinesis_WhenRecordErrors(t *testing.T) { assert := assert.New(t) - errorCode := "InternalFailure" - errorMessage := "Internal Service Failure" - partitionKey := "partitionKey" - streamName := "stream" - records := []*kinesis.PutRecordsRequestEntry{ { - PartitionKey: &partitionKey, + PartitionKey: aws.String(testPartitionKey), Data: []byte{0x66}, }, } @@ -167,26 +160,24 @@ func TestWriteKinesis_WhenRecordErrors(t *testing.T) { 1, []*kinesis.PutRecordsResultEntry{ { - ErrorCode: &errorCode, - ErrorMessage: &errorMessage, - SequenceNumber: nil, - ShardId: nil, + ErrorCode: aws.String("InternalFailure"), + ErrorMessage: aws.String("Internal Service Failure"), }, }, ) k := KinesisOutput{ Log: testutil.Logger{}, - StreamName: streamName, + StreamName: testStreamName, svc: svc, } elapsed := k.writeKinesis(records) assert.GreaterOrEqual(elapsed.Nanoseconds(), zero) - svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { - StreamName: &streamName, + StreamName: aws.String(testStreamName), Records: records, }, }) @@ -195,12 +186,9 @@ func TestWriteKinesis_WhenRecordErrors(t *testing.T) { func TestWriteKinesis_WhenServiceError(t *testing.T) { assert := assert.New(t) - partitionKey := "partitionKey" - streamName := "stream" - records := []*kinesis.PutRecordsRequestEntry{ { - PartitionKey: &partitionKey, + PartitionKey: aws.String(testPartitionKey), Data: []byte{}, }, } @@ -212,16 +200,16 @@ func TestWriteKinesis_WhenServiceError(t *testing.T) { k := KinesisOutput{ Log: testutil.Logger{}, - StreamName: streamName, + StreamName: testStreamName, svc: svc, } elapsed := k.writeKinesis(records) assert.GreaterOrEqual(elapsed.Nanoseconds(), zero) - svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { - StreamName: &streamName, + StreamName: aws.String(testStreamName), Records: records, }, }) @@ -246,14 +234,12 @@ func TestWrite_NoMetrics(t *testing.T) { err := k.Write([]telegraf.Metric{}) assert.Nil(err, "Should not return error") - svc.AssertRequests(assert, []*kinesis.PutRecordsInput{}) + svc.AssertRequests(t, []*kinesis.PutRecordsInput{}) } func TestWrite_SingleMetric(t *testing.T) { assert := assert.New(t) serializer := influx.NewSerializer() - partitionKey := "partitionKey" - streamName := "stream" svc := &mockKinesisPutRecords{} svc.SetupGenericResponse(1, 0) @@ -262,9 +248,9 @@ func TestWrite_SingleMetric(t *testing.T) { Log: testutil.Logger{}, Partition: &Partition{ Method: "static", - Key: partitionKey, + Key: testPartitionKey, }, - StreamName: streamName, + StreamName: testStreamName, serializer: serializer, svc: svc, } @@ -273,12 +259,12 @@ func TestWrite_SingleMetric(t *testing.T) { err := k.Write([]telegraf.Metric{metric}) assert.Nil(err, "Should not return error") - svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { - StreamName: &streamName, + StreamName: aws.String(testStreamName), Records: []*kinesis.PutRecordsRequestEntry{ { - PartitionKey: &partitionKey, + PartitionKey: aws.String(testPartitionKey), Data: metricData, }, }, @@ -289,8 +275,6 @@ func TestWrite_SingleMetric(t *testing.T) { func TestWrite_MultipleMetrics_SinglePartialRequest(t *testing.T) { assert := assert.New(t) serializer := influx.NewSerializer() - partitionKey := "partitionKey" - streamName := "stream" svc := &mockKinesisPutRecords{} svc.SetupGenericResponse(3, 0) @@ -299,9 +283,9 @@ func TestWrite_MultipleMetrics_SinglePartialRequest(t *testing.T) { Log: testutil.Logger{}, Partition: &Partition{ Method: "static", - Key: partitionKey, + Key: testPartitionKey, }, - StreamName: streamName, + StreamName: testStreamName, serializer: serializer, svc: svc, } @@ -310,12 +294,11 @@ func TestWrite_MultipleMetrics_SinglePartialRequest(t *testing.T) { err := k.Write(metrics) assert.Nil(err, "Should not return error") - svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { - StreamName: &streamName, + StreamName: aws.String(testStreamName), Records: createPutRecordsRequestEntries( metricsData, - &partitionKey, ), }, }) @@ -324,8 +307,6 @@ func TestWrite_MultipleMetrics_SinglePartialRequest(t *testing.T) { func TestWrite_MultipleMetrics_SingleFullRequest(t *testing.T) { assert := assert.New(t) serializer := influx.NewSerializer() - partitionKey := "partitionKey" - streamName := "stream" svc := &mockKinesisPutRecords{} svc.SetupGenericResponse(maxRecordsPerRequest, 0) @@ -334,9 +315,9 @@ func TestWrite_MultipleMetrics_SingleFullRequest(t *testing.T) { Log: testutil.Logger{}, Partition: &Partition{ Method: "static", - Key: partitionKey, + Key: testPartitionKey, }, - StreamName: streamName, + StreamName: testStreamName, serializer: serializer, svc: svc, } @@ -345,12 +326,11 @@ func TestWrite_MultipleMetrics_SingleFullRequest(t *testing.T) { err := k.Write(metrics) assert.Nil(err, "Should not return error") - svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { - StreamName: &streamName, + StreamName: aws.String(testStreamName), Records: createPutRecordsRequestEntries( metricsData, - &partitionKey, ), }, }) @@ -359,8 +339,6 @@ func TestWrite_MultipleMetrics_SingleFullRequest(t *testing.T) { func TestWrite_MultipleMetrics_MultipleRequests(t *testing.T) { assert := assert.New(t) serializer := influx.NewSerializer() - partitionKey := "partitionKey" - streamName := "stream" svc := &mockKinesisPutRecords{} svc.SetupGenericResponse(maxRecordsPerRequest, 0) @@ -370,9 +348,9 @@ func TestWrite_MultipleMetrics_MultipleRequests(t *testing.T) { Log: testutil.Logger{}, Partition: &Partition{ Method: "static", - Key: partitionKey, + Key: testPartitionKey, }, - StreamName: streamName, + StreamName: testStreamName, serializer: serializer, svc: svc, } @@ -381,19 +359,17 @@ func TestWrite_MultipleMetrics_MultipleRequests(t *testing.T) { err := k.Write(metrics) assert.Nil(err, "Should not return error") - svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { - StreamName: &streamName, + StreamName: aws.String(testStreamName), Records: createPutRecordsRequestEntries( metricsData[0:maxRecordsPerRequest], - &partitionKey, ), }, { - StreamName: &streamName, + StreamName: aws.String(testStreamName), Records: createPutRecordsRequestEntries( metricsData[maxRecordsPerRequest:], - &partitionKey, ), }, }) @@ -402,8 +378,6 @@ func TestWrite_MultipleMetrics_MultipleRequests(t *testing.T) { func TestWrite_MultipleMetrics_MultipleFullRequests(t *testing.T) { assert := assert.New(t) serializer := influx.NewSerializer() - partitionKey := "partitionKey" - streamName := "stream" svc := &mockKinesisPutRecords{} svc.SetupGenericResponse(maxRecordsPerRequest, 0) @@ -413,9 +387,9 @@ func TestWrite_MultipleMetrics_MultipleFullRequests(t *testing.T) { Log: testutil.Logger{}, Partition: &Partition{ Method: "static", - Key: partitionKey, + Key: testPartitionKey, }, - StreamName: streamName, + StreamName: testStreamName, serializer: serializer, svc: svc, } @@ -424,19 +398,17 @@ func TestWrite_MultipleMetrics_MultipleFullRequests(t *testing.T) { err := k.Write(metrics) assert.Nil(err, "Should not return error") - svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { - StreamName: &streamName, + StreamName: aws.String(testStreamName), Records: createPutRecordsRequestEntries( metricsData[0:maxRecordsPerRequest], - &partitionKey, ), }, { - StreamName: &streamName, + StreamName: aws.String(testStreamName), Records: createPutRecordsRequestEntries( metricsData[maxRecordsPerRequest:], - &partitionKey, ), }, }) @@ -445,8 +417,6 @@ func TestWrite_MultipleMetrics_MultipleFullRequests(t *testing.T) { func TestWrite_SerializerError(t *testing.T) { assert := assert.New(t) serializer := influx.NewSerializer() - partitionKey := "partitionKey" - streamName := "stream" svc := &mockKinesisPutRecords{} svc.SetupGenericResponse(2, 0) @@ -455,9 +425,9 @@ func TestWrite_SerializerError(t *testing.T) { Log: testutil.Logger{}, Partition: &Partition{ Method: "static", - Key: partitionKey, + Key: testPartitionKey, }, - StreamName: streamName, + StreamName: testStreamName, serializer: serializer, svc: svc, } @@ -476,16 +446,16 @@ func TestWrite_SerializerError(t *testing.T) { assert.Nil(err, "Should not return error") // remaining valid metrics should still get written - svc.AssertRequests(assert, []*kinesis.PutRecordsInput{ + svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { - StreamName: &streamName, + StreamName: aws.String(testStreamName), Records: []*kinesis.PutRecordsRequestEntry{ { - PartitionKey: &partitionKey, + PartitionKey: aws.String(testPartitionKey), Data: metric1Data, }, { - PartitionKey: &partitionKey, + PartitionKey: aws.String(testPartitionKey), Data: metric2Data, }, }, @@ -512,7 +482,7 @@ func (m *mockKinesisPutRecords) SetupResponse( m.responses = append(m.responses, &mockKinesisPutRecordsResponse{ Err: nil, Output: &kinesis.PutRecordsOutput{ - FailedRecordCount: &failedRecordCount, + FailedRecordCount: aws.Int64(failedRecordCount), Records: records, }, }) @@ -522,24 +492,19 @@ func (m *mockKinesisPutRecords) SetupGenericResponse( successfulRecordCount uint32, failedRecordCount uint32, ) { - errorCode := "InternalFailure" - errorMessage := "Internal Service Failure" - shard := "shardId-000000000003" - records := []*kinesis.PutRecordsResultEntry{} for i := uint32(0); i < successfulRecordCount; i++ { - sequenceNumber := fmt.Sprintf("%d", i) records = append(records, &kinesis.PutRecordsResultEntry{ - SequenceNumber: &sequenceNumber, - ShardId: &shard, + SequenceNumber: aws.String(testSequenceNumber), + ShardId: aws.String(testShardID), }) } for i := uint32(0); i < failedRecordCount; i++ { records = append(records, &kinesis.PutRecordsResultEntry{ - ErrorCode: &errorCode, - ErrorMessage: &errorMessage, + ErrorCode: aws.String("InternalFailure"), + ErrorMessage: aws.String("Internal Service Failure"), }) } @@ -566,49 +531,49 @@ func (m *mockKinesisPutRecords) PutRecords(input *kinesis.PutRecordsInput) (*kin } func (m *mockKinesisPutRecords) AssertRequests( - assert *assert.Assertions, + t *testing.T, expected []*kinesis.PutRecordsInput, ) { - assert.Equal( + require.Equalf(t, len(expected), len(m.requests), - fmt.Sprintf("Expected %v requests", len(expected)), + "Expected %v requests", len(expected), ) for i, expectedInput := range expected { actualInput := m.requests[i] - assert.Equal( + require.Equalf(t, expectedInput.StreamName, actualInput.StreamName, - fmt.Sprintf("Expected request %v to have correct StreamName", i), + "Expected request %v to have correct StreamName", i, ) - assert.Equal( + require.Equalf(t, len(expectedInput.Records), len(actualInput.Records), - fmt.Sprintf("Expected request %v to have %v Records", i, len(expectedInput.Records)), + "Expected request %v to have %v Records", i, len(expectedInput.Records), ) for r, expectedRecord := range expectedInput.Records { actualRecord := actualInput.Records[r] - assert.Equal( - &expectedRecord.PartitionKey, - &actualRecord.PartitionKey, - fmt.Sprintf("Expected (request %v, record %v) to have correct PartitionKey", i, r), + require.Equalf(t, + expectedRecord.PartitionKey, + actualRecord.PartitionKey, + "Expected (request %v, record %v) to have correct PartitionKey", i, r, ) - assert.Equal( - &expectedRecord.ExplicitHashKey, - &actualRecord.ExplicitHashKey, - fmt.Sprintf("Expected (request %v, record %v) to have correct ExplicitHashKey", i, r), + require.Equalf(t, + expectedRecord.ExplicitHashKey, + actualRecord.ExplicitHashKey, + "Expected (request %v, record %v) to have correct ExplicitHashKey", i, r, ) - assert.Equal( + require.Equalf(t, expectedRecord.Data, actualRecord.Data, - fmt.Sprintf("Expected (request %v, record %v) to have correct Data", i, r), + "Expected (request %v, record %v) to have correct Data", i, r, ) } } @@ -647,14 +612,13 @@ func createTestMetrics( func createPutRecordsRequestEntries( metricsData [][]byte, - partitionKey *string, ) []*kinesis.PutRecordsRequestEntry { count := len(metricsData) records := make([]*kinesis.PutRecordsRequestEntry, count) for i := 0; i < count; i++ { records[i] = &kinesis.PutRecordsRequestEntry{ - PartitionKey: partitionKey, + PartitionKey: aws.String(testPartitionKey), Data: metricsData[i], } } From 991efd5e12cffd58a5077dea321974a3f47175b2 Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Wed, 24 Mar 2021 19:51:15 +0100 Subject: [PATCH 332/761] Add the math module to the Starlark Processor (#9042) --- plugins/processors/starlark/README.md | 1 + plugins/processors/starlark/starlark.go | 5 +++++ plugins/processors/starlark/testdata/math.star | 14 ++++++++++++++ 3 files changed, 20 insertions(+) create mode 100644 plugins/processors/starlark/testdata/math.star diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index 1f5adbec1f472..62b5b85e766d4 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -222,6 +222,7 @@ def apply(metric): - [drop fields with unexpected type](/plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star) - Drop fields containing unexpected value types. - [iops](/plugins/processors/starlark/testdata/iops.star) - obtain IOPS (to aggregate, to produce max_iops) - [json](/plugins/processors/starlark/testdata/json.star) - an example of processing JSON from a field in a metric +- [math](/plugins/processors/starlark/testdata/math.star) - Use a math function to compute the value of a field. [The list of the supported math functions and constants](https://pkg.go.dev/go.starlark.net/lib/math). - [number logic](/plugins/processors/starlark/testdata/number_logic.star) - transform a numerical value to another numerical value - [pivot](/plugins/processors/starlark/testdata/pivot.star) - Pivots a key's value to be the key for another key. - [ratio](/plugins/processors/starlark/testdata/ratio.star) - Compute the ratio of two integer fields diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go index 968908c6589a6..dceee7bfb5f12 100644 --- a/plugins/processors/starlark/starlark.go +++ b/plugins/processors/starlark/starlark.go @@ -7,6 +7,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/processors" + "go.starlark.net/lib/math" "go.starlark.net/lib/time" "go.starlark.net/resolve" "go.starlark.net/starlark" @@ -253,6 +254,10 @@ func loadFunc(module string, logger telegraf.Logger) (starlark.StringDict, error return starlark.StringDict{ "log": LogModule(logger), }, nil + case "math.star": + return starlark.StringDict{ + "math": math.Module, + }, nil case "time.star": return starlark.StringDict{ "time": time.Module, diff --git a/plugins/processors/starlark/testdata/math.star b/plugins/processors/starlark/testdata/math.star new file mode 100644 index 0000000000000..f63669acebf82 --- /dev/null +++ b/plugins/processors/starlark/testdata/math.star @@ -0,0 +1,14 @@ +# Example showing how the math module can be used to compute the value of a field. +# +# Example Input: +# math value=10000i 1465839830100400201 +# +# Example Output: +# math result=4 1465839830100400201 + +load('math.star', 'math') +# loads all the functions and constants defined in the math module + +def apply(metric): + metric.fields["result"] = math.log(metric.fields.pop('value'), 10) + return metric From 5423e7a04983dd50cdea81e57d54c0f86f6cb723 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Wed, 24 Mar 2021 13:42:58 -0700 Subject: [PATCH 333/761] update: inputs.sqlserver support version in readme (#9040) --- plugins/inputs/sqlserver/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index e69a2d41f9e21..ee2dc52c369ca 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -1,12 +1,12 @@ # SQL Server Input Plugin -The `sqlserver` plugin provides metrics for your SQL Server instance. It -currently works with SQL Server 2008 SP3 and newer. Recorded metrics are +The `sqlserver` plugin provides metrics for your SQL Server instance. Recorded metrics are lightweight and use Dynamic Management Views supplied by SQL Server. ### The SQL Server plugin supports the following editions/versions of SQL Server - SQL Server - - 2008 SP3 (with CU3) - - SQL Server 2008 R2 SP3 and newer versions + - 2012 or newer (Plugin support aligned with the [official Microsoft SQL Server support](https://docs.microsoft.com/en-us/sql/sql-server/end-of-support/sql-server-end-of-life-overview?view=sql-server-ver15#lifecycle-dates)) + - End-of-life SQL Server versions are not guaranteed to be supported by Telegraf. Any issues with the SQL Server plugin for these EOL versions will + need to be addressed by the community. - Azure SQL Database (Single) - Azure SQL Managed Instance From 7c00fb120737303cfd693c4ca5305393a561c798 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 Mar 2021 15:49:23 -0500 Subject: [PATCH 334/761] Bump github.com/nats-io/nats.go from 1.9.1 to 1.10.0 (#8716) Bumps [github.com/nats-io/nats.go](https://github.com/nats-io/nats.go) from 1.9.1 to 1.10.0. - [Release notes](https://github.com/nats-io/nats.go/releases) - [Commits](https://github.com/nats-io/nats.go/compare/v1.9.1...v1.10.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 10 ++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 34d3d9a56419a..60590a57418b0 100644 --- a/go.mod +++ b/go.mod @@ -102,7 +102,7 @@ require ( github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect github.com/nats-io/nats-server/v2 v2.1.4 - github.com/nats-io/nats.go v1.9.1 + github.com/nats-io/nats.go v1.10.0 github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 github.com/nsqio/go-nsq v1.0.8 github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 diff --git a/go.sum b/go.sum index 4638a0892eb9c..38d78c7d04558 100644 --- a/go.sum +++ b/go.sum @@ -336,6 +336,7 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc= @@ -559,11 +560,13 @@ github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/nats-server/v2 v2.1.4 h1:BILRnsJ2Yb/fefiFbBWADpViGF69uh4sxe8poVDQ06g= github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg= -github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.10.0 h1:L8qnKaofSfNFbXg0C5F71LdjPRnmQwSsA4ukmkt1TvY= +github.com/nats-io/nats.go v1.10.0/go.mod h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.4 h1:aEsHIssIk6ETN5m2/MD8Y4B2X7FfXrBAUdkyRvbVYzA= +github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 h1:9YEHXplqlVkOltThchh+RxeODvTb1TBvQ1181aXg3pY= @@ -760,6 +763,7 @@ golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= @@ -1031,6 +1035,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= From d8b28bc60930026668213893ceb68a847413a4a0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 Mar 2021 17:26:02 -0500 Subject: [PATCH 335/761] Bump collectd.org from 0.3.0 to 0.5.0 (#8745) * Bump collectd.org from 0.3.0 to 0.5.0 Bumps [collectd.org](https://github.com/collectd/go-collectd) from 0.3.0 to 0.5.0. - [Release notes](https://github.com/collectd/go-collectd/releases) - [Commits](https://github.com/collectd/go-collectd/compare/v0.3.0...v0.5.0) Signed-off-by: dependabot[bot] * fix license doc Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sebastian Spaink --- docs/LICENSE_OF_DEPENDENCIES.md | 2 ++ go.mod | 3 ++- go.sum | 42 +++++---------------------------- 3 files changed, 10 insertions(+), 37 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index cbf0d5f290f78..ec9bd75689d0e 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -181,6 +181,8 @@ following works: - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) - go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) - go.starlark.net [BSD 3-Clause "New" or "Revised" License](https://github.com/google/starlark-go/blob/master/LICENSE) +- go.uber.org/atomic [MIT License](https://pkg.go.dev/go.uber.org/atomic?tab=licenses) +- go.uber.org/multierr [MIT License](https://pkg.go.dev/go.uber.org/multierr?tab=licenses) - golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE) - golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE) - golang.org/x/oauth2 [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/oauth2/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 60590a57418b0..a928474133f47 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( cloud.google.com/go/bigquery v1.4.0 cloud.google.com/go/pubsub v1.2.0 code.cloudfoundry.org/clock v1.0.0 // indirect - collectd.org v0.3.0 + collectd.org v0.5.0 github.com/Azure/azure-event-hubs-go/v3 v3.2.0 github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect @@ -141,6 +141,7 @@ require ( github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect go.starlark.net v0.0.0-20210312235212-74c10e2c17dc + go.uber.org/multierr v1.6.0 // indirect golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 diff --git a/go.sum b/go.sum index 38d78c7d04558..e98722fc7cb5c 100644 --- a/go.sum +++ b/go.sum @@ -28,8 +28,8 @@ cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= -collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00= -collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +collectd.org v0.5.0 h1:y4uFSAuOmeVhG3GCRa3/oH+ysePfO/+eGJNfd0Qa3d8= +collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc= github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= @@ -266,7 +266,6 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -313,7 +312,6 @@ github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXg github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -335,8 +333,6 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc= @@ -351,9 +347,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -377,7 +370,6 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -679,8 +671,6 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff h1:JcVn27VGCEwd33jyNj+3IqEbOmzAX9f9LILt3SoGPHU= @@ -745,17 +735,19 @@ go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.starlark.net v0.0.0-20210312235212-74c10e2c17dc h1:pVkptfeOTFfx+zXZo7HEHN3d5LmhatBFvHdm/f2QnpY= go.starlark.net v0.0.0-20210312235212-74c10e2c17dc/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -788,8 +780,6 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -800,7 +790,6 @@ golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -824,7 +813,6 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -839,10 +827,6 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -875,7 +859,6 @@ golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -896,9 +879,6 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -925,7 +905,6 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -935,7 +914,6 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190906203814-12febf440ab1/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -959,10 +937,8 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9 h1:sEvmEcJVKBNUvgCUClbUQeHOAa9U0I2Ce1BooMvVCY4= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.zx2c4.com/wireguard v0.0.20200121 h1:vcswa5Q6f+sylDfjqyrVNNrjsFUUbPsgAQTBCAg/Qf8= @@ -997,7 +973,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -1018,7 +993,6 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -1035,8 +1009,6 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= @@ -1096,7 +1068,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= @@ -1108,7 +1079,6 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY= From e4bbcc447dc6bce844914d40ebf61d887b066158 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Thu, 25 Mar 2021 09:20:41 -0400 Subject: [PATCH 336/761] Handle error when initializing the auth object in Azure Monitor output plugin. (#9048) --- plugins/outputs/azure_monitor/azure_monitor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index 2bf1d2899fb9b..c295b553f963a 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -193,7 +193,7 @@ func (a *AzureMonitor) Connect() error { a.auth, err = auth.NewAuthorizerFromEnvironmentWithResource(defaultAuthResource) if err != nil { - return nil + return err } a.Reset() From e96955d1bb47480b938858a3576aa6862ec17e51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patryk=20Ma=C5=82ek?= <69143962+pmalek-sumo@users.noreply.github.com> Date: Thu, 25 Mar 2021 17:18:50 +0100 Subject: [PATCH 337/761] Bump prometheus dependency to v2.21.0 (#8795) * Bump prometheus dependency to v2.21.0 * Make golangci-lint happy --- docs/LICENSE_OF_DEPENDENCIES.md | 10 +- go.mod | 52 +- go.sum | 643 ++++++++++++++++-- .../prometheusremotewrite/parser_test.go | 14 +- .../prometheusremotewrite.go | 51 +- .../prometheusremotewrite_test.go | 6 +- 6 files changed, 645 insertions(+), 131 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index ec9bd75689d0e..df9ddeee6a54f 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -27,6 +27,7 @@ following works: - github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) - github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE) - github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING) +- github.com/armon/go-metrics [MIT License](https://github.com/armon/go-metrics/blob/master/LICENSE) - github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) - github.com/aws/aws-sdk-go-v2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/config [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) @@ -62,6 +63,7 @@ following works: - github.com/eapache/go-xerial-snappy [MIT License](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE) - github.com/eapache/queue [MIT License](https://github.com/eapache/queue/blob/master/LICENSE) - github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 1.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE) +- github.com/fatih/color [MIT License](https://github.com/fatih/color/blob/master/LICENSE.md) - github.com/form3tech-oss/jwt-go [MIT License](https://github.com/form3tech-oss/jwt-go/blob/master/LICENSE) - github.com/ghodss/yaml [MIT License](https://github.com/ghodss/yaml/blob/master/LICENSE) - github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) @@ -94,10 +96,13 @@ following works: - github.com/grpc-ecosystem/grpc-gateway [BSD 3-Clause "New" or "Revised" License](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/LICENSE.txt) - github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) - github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE) -- github.com/hashicorp/consul [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) +- github.com/hashicorp/consul/api [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) - github.com/hashicorp/go-cleanhttp [Mozilla Public License 2.0](https://github.com/hashicorp/go-cleanhttp/blob/master/LICENSE) +- github.com/hashicorp/go-hclog [Mozilla Public License 2.0](https://github.com/hashicorp/go-hclog/LICENSE) +- github.com/hashicorp/go-immutable-radix [Mozilla Public License 2.0](https://github.com/hashicorp/go-immutable-radix/LICENSE) - github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE) -- github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/LICENSE) +- github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/blob/master/LICENSE) +- github.com/hashicorp/golang-lru [Mozilla Public License 2.0](https://github.com/hashicorp/golang-lru/blob/master/LICENSE) - github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE) - github.com/influxdata/go-syslog [MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE) - github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt) @@ -117,6 +122,7 @@ following works: - github.com/konsorten/go-windows-terminal-sequences [MIT License](https://github.com/konsorten/go-windows-terminal-sequences/blob/master/LICENSE) - github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) +- github.com/mattn/go-colorable [MIT License](https://github.com/mattn/go-colorable/blob/master/LICENSE) - github.com/mattn/go-isatty [MIT License](https://github.com/mattn/go-isatty/blob/master/LICENSE) - github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) - github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md) diff --git a/go.mod b/go.mod index a928474133f47..0e8bbb04a277d 100644 --- a/go.mod +++ b/go.mod @@ -3,31 +3,29 @@ module github.com/influxdata/telegraf go 1.16 require ( - cloud.google.com/go v0.54.0 + cloud.google.com/go v0.56.0 cloud.google.com/go/bigquery v1.4.0 cloud.google.com/go/pubsub v1.2.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.5.0 github.com/Azure/azure-event-hubs-go/v3 v3.2.0 github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 - github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect - github.com/Azure/go-autorest/autorest v0.11.1 + github.com/Azure/go-autorest/autorest v0.11.4 github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 github.com/BurntSushi/toml v0.3.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee github.com/Microsoft/ApplicationInsights-Go v0.4.2 - github.com/Microsoft/go-winio v0.4.9 // indirect github.com/Shopify/sarama v1.27.2 + github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect github.com/aerospike/aerospike-client-go v1.27.0 - github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 - github.com/aliyun/alibaba-cloud-sdk-go v1.61.785 + github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d + github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 - github.com/antchfx/xmlquery v1.3.3 + github.com/antchfx/xmlquery v1.3.5 github.com/antchfx/xpath v1.1.11 - github.com/apache/thrift v0.12.0 + github.com/apache/thrift v0.13.0 github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 - github.com/armon/go-metrics v0.3.0 // indirect github.com/aws/aws-sdk-go v1.34.34 github.com/aws/aws-sdk-go-v2 v1.1.0 github.com/aws/aws-sdk-go-v2/config v1.1.0 @@ -39,7 +37,6 @@ require ( github.com/bmatcuk/doublestar/v3 v3.0.0 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 github.com/caio/go-tdigest v3.1.0+incompatible - github.com/cenkalti/backoff v2.0.0+incompatible // indirect github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 github.com/cockroachdb/apd v1.1.0 // indirect github.com/containerd/containerd v1.4.1 // indirect @@ -49,14 +46,11 @@ require ( github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.0 - github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible // indirect github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible - github.com/docker/go-connections v0.3.0 // indirect - github.com/docker/go-units v0.3.3 // indirect github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 github.com/eclipse/paho.mqtt.golang v1.3.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/go-logfmt/logfmt v0.4.0 + github.com/go-logfmt/logfmt v0.5.0 github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible github.com/go-sql-driver/mysql v1.5.0 @@ -71,16 +65,13 @@ require ( github.com/google/go-cmp v0.5.5 github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.13 - github.com/gorilla/mux v1.6.2 + github.com/gorilla/mux v1.7.3 github.com/gosnmp/gosnmp v1.30.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 - github.com/hashicorp/consul v1.2.1 + github.com/hashicorp/consul/api v1.6.0 github.com/hashicorp/go-msgpack v0.5.5 // indirect - github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect - github.com/hashicorp/memberlist v0.1.5 // indirect - github.com/hashicorp/serf v0.8.1 // indirect github.com/influxdata/go-syslog/v2 v2.0.1 github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 @@ -92,13 +83,10 @@ require ( github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/kylelemons/godebug v1.1.0 // indirect github.com/lib/pq v1.3.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b - github.com/miekg/dns v1.0.14 - github.com/mitchellh/go-testing-interface v1.0.0 // indirect - github.com/morikuni/aec v1.0.0 // indirect + github.com/miekg/dns v1.1.31 github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect github.com/nats-io/nats-server/v2 v2.1.4 @@ -106,26 +94,22 @@ require ( github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 github.com/nsqio/go-nsq v1.0.8 github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 - github.com/opencontainers/go-digest v1.0.0-rc1 // indirect - github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/openzipkin/zipkin-go-opentracing v0.3.4 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.5.1 + github.com/prometheus/client_golang v1.7.1 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.9.1 - github.com/prometheus/procfs v0.0.8 - github.com/prometheus/prometheus v2.5.0+incompatible + github.com/prometheus/common v0.13.0 + github.com/prometheus/procfs v0.1.3 + github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 github.com/riemann/riemann-go-client v0.5.0 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 - github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/sensu/sensu-go/api/core/v2 v2.6.0 github.com/shirou/gopsutil v3.20.11+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/signalfx/golib/v3 v3.3.0 github.com/sirupsen/logrus v1.6.0 github.com/soniah/gosnmp v1.25.0 - github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 + github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/tidwall/gjson v1.6.0 @@ -148,8 +132,8 @@ require ( golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 golang.org/x/text v0.3.4 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 - google.golang.org/api v0.20.0 - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 + google.golang.org/api v0.29.0 + google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70 google.golang.org/grpc v1.33.1 gopkg.in/djherbis/times.v1 v1.2.0 gopkg.in/fatih/pool.v2 v2.0.0 // indirect diff --git a/go.sum b/go.sum index e98722fc7cb5c..5f7f430cbf9fa 100644 --- a/go.sum +++ b/go.sum @@ -2,19 +2,23 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= @@ -28,6 +32,7 @@ cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= collectd.org v0.5.0 h1:y4uFSAuOmeVhG3GCRa3/oH+ysePfO/+eGJNfd0Qa3d8= collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -38,8 +43,9 @@ github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4c github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.1.9 h1:u7JFb9fFTE6Y/j8ae2VK33ePrRqJqoCM/IWkQdAZ+rg= github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= -github.com/Azure/azure-sdk-for-go v37.1.0+incompatible h1:aFlw3lP7ZHQi4m1kWCpcwYtczhDkGhDoRaMTaxcOf68= github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v45.1.0+incompatible h1:kxtaPD8n2z5Za+9e3sKsYG2IX6PG2R6VXtgS7gAbh3A= +github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 h1:7MiZ6Th+YTmwUdrKmFg5OMsGYz7IdQwjqL0RPxkhhOQ= github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= @@ -51,12 +57,15 @@ github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= -github.com/Azure/go-autorest/autorest v0.11.1 h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.4 h1:iWJqGEvip7mjibEqC/srXNdo+4wLEPiwlP/7dZLtoPc= +github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= @@ -86,53 +95,83 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= github.com/Microsoft/ApplicationInsights-Go v0.4.2 h1:HIZoGXMiKNwAtMAgCSSX35j9mP+DjGF9ezfBvxMDLLg= github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg= -github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ= -github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.27.2 h1:1EyY1dsxNDUQEv0O/4TsjosHI2CgB1uo9H/v56xzTxc= github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 h1:5sXbqlSomvdjlRbWyNqkPsJ3Fg+tQZCbgeX1VGljbQY= +github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE= github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/aliyun/alibaba-cloud-sdk-go v1.61.785 h1:3PVbcCSPY0f4timzlCQbDzL/7y/Z0d4YdEl23iAhSTE= -github.com/aliyun/alibaba-cloud-sdk-go v1.61.785/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 h1:YtaYjXmemIMyySUbs0VGFPqsLpsNHf4TW/L6yqpJQ9s= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= +github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= +github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= -github.com/antchfx/xmlquery v1.3.3 h1:HYmadPG0uz8CySdL68rB4DCLKXz2PurCjS3mnkVF4CQ= -github.com/antchfx/xmlquery v1.3.3/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/antchfx/xmlquery v1.3.5 h1:I7TuBRqsnfFuL11ruavGm911Awx9IqSdiU6W/ztSmVw= +github.com/antchfx/xmlquery v1.3.5/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= github.com/antchfx/xpath v1.1.11 h1:WOFtK8TVAjLm3lbgqeP0arlHpvCEeTANeWZ/csPpJkQ= github.com/antchfx/xpath v1.1.11/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= +github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= -github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYKI= github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYKI= github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.1.0 h1:sKP6QWxdN1oRYjl+k6S3bpgBI+XUx/0mqVOLIw4lR/Q= github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= github.com/aws/aws-sdk-go-v2/config v1.1.0 h1:f3QVGpAcKrWpYNhKB8hE/buMjcfei95buQ5xdr/xYcU= @@ -157,18 +196,26 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds= github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= -github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY= -github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -178,23 +225,33 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/coreos/etcd v3.3.22+incompatible h1:AnRMUyVdVvh1k7lHe61YEd227+CLoNogQuAypztGSK4= github.com/coreos/etcd v3.3.22+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 h1:F8nmbiuX+gCz9xvWMi6Ak8HQntB4ATFXP46gaxifbp4= github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8= github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -206,22 +263,28 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumC github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU= github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible h1:357nGVUC8gSpeSc2Axup8HfrfTLLUfWfCsCUhiQSKIg= -github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible h1:SiUATuP//KecDjpOK2tvZJgeScYAklvyjfK8JZlU6fo= github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o= -github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 h1:KgEcrKF0NWi9GT/OvDp9ioXZIrHRbP8S5o+sot9gznQ= github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -233,24 +296,34 @@ github.com/echlebek/crock v1.0.1 h1:KbzamClMIfVIkkjq/GTXf+N16KylYBpiaTitO3f1ujg= github.com/echlebek/crock v1.0.1/go.mod h1:/kvwHRX3ZXHj/kHWJkjXDmzzRow54EJuHtQ/PapL/HI= github.com/echlebek/timeproxy v1.0.0 h1:V41/v8tmmMDNMA2GrBPI45nlXb3F7+OY+nJz1BqKsCk= github.com/echlebek/timeproxy v1.0.0/go.mod h1:0dg2Lnb8no/jFwoMQKMTU6iAivgoMptGqSTprhnrRtk= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/eclipse/paho.mqtt.golang v1.3.0 h1:MU79lqr3FKNKbSrGN7d7bNYqh8MwWW7Zcx0iG+VIw9I= github.com/eclipse/paho.mqtt.golang v1.3.0/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk= github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk9aNQtiw4R4bEuzHjVmZikkUKCnO1v3lPQ21HZGk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk= github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -260,35 +333,125 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= +github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c h1:fWdhUpCuoeNIPiQ+pkAmmERYEjhVx5/cbVGK7T99OkI= github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/goburrow/modbus v0.1.0 h1:DejRZY73nEM6+bt5JSP6IsFolJ9dVcqxsYbpLbeW/ro= github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBTlzMcZBg= github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA= @@ -299,18 +462,25 @@ github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQI github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.3.1 h1:CzMaKrvF6Qa7XtRii064vKBQiyvmY8H8vG1xa1/W1JA= github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -320,19 +490,23 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc= @@ -344,6 +518,7 @@ github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -369,72 +544,120 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopcua/opcua v0.1.13 h1:UP746MKRFNbv+CQGfrPwgH7rGxOlSGzVu9ieZdcox4E= github.com/gopcua/opcua v0.1.13/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosnmp/gosnmp v1.30.0 h1:P6uUvPaoZCZh2EXvSUIgsxYZ1vdD/Sonl2BSVCGieG8= github.com/gosnmp/gosnmp v1.30.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ= github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= -github.com/hashicorp/consul v1.2.1 h1:66MuuTfV4aOXTQM7cjAIKUWFOITSk4XZlMhE09ymVbg= -github.com/hashicorp/consul v1.2.1/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.6.0 h1:SZB2hQW8AcTOpfDmiVblQbijxzsRuiyy0JpHfabvHio= +github.com/hashicorp/consul/api v1.6.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.6.0 h1:FfhMEkwvQl57CildXJyGHnwGGM4HMODGyfjGwNM1Vdw= +github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.12.2 h1:F1fdYblUEsxKiailtkhCCG2g4bipEgaHiDc8vffNpD4= +github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= +github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E= -github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= -github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM= -github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.1 h1:mYs6SMzu72+90OcPa5wr3nfznA4Dw9UyR791ZFNOIf4= -github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.3 h1:AVF6JDQQens6nMHT9OGERBvK0f8rPrAGILnsKLr6lzM= +github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hetznercloud/hcloud-go v1.21.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s= github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo= +github.com/influxdata/influxdb v1.8.2/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= +github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= +github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 h1:K3A5vHPs/p8OjI4SL3l1+hs/98mhxTVDcV1Ap0c265E= github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM= +github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= +github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= @@ -447,48 +670,69 @@ github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPI github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea h1:g2k+8WR7cHch4g0tBDhfiEvAp7fXxTNBiD1oC1Oxj3E= github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0= github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -505,13 +749,39 @@ github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+O github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8= +github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b h1:Kcr+kPbkWZHFHXwl87quXUAmavS4/IMgu2zck3aiE7k= @@ -522,34 +792,48 @@ github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg= github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= -github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo= +github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= +github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/multiplay/go-ts3 v1.0.0 h1:loxtEFqvYtpoGh1jOqEt6aDzctYuQsi3vb3dMpvWiWw= github.com/multiplay/go-ts3 v1.0.0/go.mod h1:14S6cS3fLNT3xOytrA/DkRyAFNuQLMLEqOYAsf87IbQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= github.com/nats-io/nats-server/v2 v2.1.4 h1:BILRnsJ2Yb/fefiFbBWADpViGF69uh4sxe8poVDQ06g= github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= @@ -567,6 +851,11 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk= github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -576,26 +865,45 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g= github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -603,60 +911,93 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.13.0 h1:vJlpe9wPgDRM1Z+7Wj3zUUjY1nr6/1jNKyl7llliccg= +github.com/prometheus/common v0.13.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= -github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 h1:IB/5RJRcJiR/YzKs4Aou86s/RaMepZOZVCArYNHJHWc= +github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2/go.mod h1:Td6hjwdXDmVt5CI9T03Sw+yBNxLBq/Yx3ZtmtP8zlCA= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/riemann/riemann-go-client v0.5.0 h1:yPP7tz1vSYJkSZvZFCsMiDsHHXX57x8/fEX3qyEXuAA= github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2eCbV1o7yqVwOBLbShQ= github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff h1:+6NUiITWwE5q1KO6SAfUX918c+Tab0+tGAM/mtdlUyA= github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75 h1:cA+Ubq9qEVIQhIWvP2kNuSZ2CmnfBJFSRq+kO1pu2cc= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= +github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sensu/sensu-go/api/core/v2 v2.6.0 h1:hEKPHFZZNDuWTlKr7Kgm2yog65ZdkBUqNesE5qaWEGo= github.com/sensu/sensu-go/api/core/v2 v2.6.0/go.mod h1:97IK4ZQuvVjWvvoLkp+NgrD6ot30WDRz3LEbFUc/N34= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.20.11+incompatible h1:LJr4ZQK4mPpIV5gOa4jCOKOGb4ty4DZO54I4FGqIpto= github.com/shirou/gopsutil v3.20.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= +github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/signalfx/com_signalfx_metrics_protobuf v0.0.0-20190222193949-1fb69526e884 h1:KgLGEw137KEUtQnWBGzneCetphBj4+kKHRnhpAkXJC0= github.com/signalfx/com_signalfx_metrics_protobuf v0.0.0-20190222193949-1fb69526e884/go.mod h1:muYA2clvwCdj7nzAJ5vJIXYpJsUumhAl4Uu1wUNpWzA= github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 h1:WsShHmu12ZztYPfh9b+I+VjYD1o8iOHhB67WZCMEEE8= @@ -667,26 +1008,48 @@ github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8 github.com/signalfx/sapm-proto v0.4.0 h1:5lQX++6FeIjUZEIcnSgBqhOpmSjMkRBW3y/4ZiKMo5E= github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbPU959ZNhM0Fk= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff h1:JcVn27VGCEwd33jyNj+3IqEbOmzAX9f9LILt3SoGPHU= github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= +github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= +github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 h1:l6epF6yBwuejBfhGkM5m8VSNM/QAm7ApGyH35ehA7eQ= -github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -703,13 +1066,21 @@ github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 h1:iBlTJosRsR70amr0zsmSPvaKNH8K/p3YlX/5SdPmSl8= github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330/go.mod h1:7+aWBsUJCo9OQRCgTypRmIQW9KKKcPMjtrdnYIBsS70= github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0= github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= @@ -720,18 +1091,36 @@ github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/wavefronthq/wavefront-sdk-go v0.9.7 h1:SrtABcXXeKCW5SerQYsnCzHo15GeggjZmL+DjtTy6CI= github.com/wavefronthq/wavefront-sdk-go v0.9.7/go.mod h1:JTGsu+KKgxx+GitC65VVdftN2iep1nVpQi/8EGR6v4Y= +github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOFra9xJfRXZcL2pLhMI8oNuDugNxg9Q= github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= +github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= +github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -739,22 +1128,40 @@ go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.starlark.net v0.0.0-20210312235212-74c10e2c17dc h1:pVkptfeOTFfx+zXZo7HEHN3d5LmhatBFvHdm/f2QnpY= go.starlark.net v0.0.0-20210312235212-74c10e2c17dc/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -796,17 +1203,21 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -814,9 +1225,14 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -824,7 +1240,10 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -842,49 +1261,81 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -892,40 +1343,60 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190906203814-12febf440ab1/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -934,6 +1405,9 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9 h1:sEvmEcJVKBNUvgCUClbUQeHOAa9U0I2Ce1BooMvVCY4= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -946,8 +1420,11 @@ golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/gonum v0.7.0 h1:Hdks0L0hgznZLG9nzXb8vZ0rRvqNvAcgAp84y7Mwkgw= gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= @@ -960,20 +1437,25 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -981,24 +1463,35 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70 h1:wboULUXGF3c5qdUnKp+6gLAccE6PRpa/czkYvQ4UXv8= +google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.1 h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1009,6 +1502,7 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= @@ -1022,6 +1516,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/djherbis/times.v1 v1.2.0 h1:UCvDKl1L/fmBygl2Y7hubXCnY7t4Yj46ZrBFNUipFbM= gopkg.in/djherbis/times.v1 v1.2.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -1030,6 +1525,8 @@ gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixt gopkg.in/fsnotify.v1 v1.2.1/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -1052,6 +1549,7 @@ gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3 gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE= gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI= gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1059,11 +1557,14 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs= gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1081,17 +1582,29 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= k8s.io/apimachinery v0.20.4 h1:vhxQ0PPUUU2Ns1b9r4/UFp13UPs8cw2iOoTjnY9faa0= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU= k8s.io/client-go v0.20.4 h1:85crgh1IotNkLpKYKZHVNI1JT86nr/iDCvq2iWKsql4= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/httpfs v1.0.0 h1:LtuKNg6JMiaBKVQHKd6Phhvk+2GFp+pUcmDQgRjrds0= @@ -1110,8 +1623,14 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/plugins/parsers/prometheusremotewrite/parser_test.go b/plugins/parsers/prometheusremotewrite/parser_test.go index d32b90673fdb3..7417c9f5fddaf 100644 --- a/plugins/parsers/prometheusremotewrite/parser_test.go +++ b/plugins/parsers/prometheusremotewrite/parser_test.go @@ -12,9 +12,9 @@ import ( func TestParse(t *testing.T) { prompbInput := prompb.WriteRequest{ - Timeseries: []*prompb.TimeSeries{ + Timeseries: []prompb.TimeSeries{ { - Labels: []*prompb.Label{ + Labels: []prompb.Label{ {Name: "__name__", Value: "go_gc_duration_seconds"}, {Name: "quantile", Value: "0.99"}, }, @@ -23,7 +23,7 @@ func TestParse(t *testing.T) { }, }, { - Labels: []*prompb.Label{ + Labels: []prompb.Label{ {Name: "__name__", Value: "prometheus_target_interval_length_seconds"}, {Name: "job", Value: "prometheus"}, }, @@ -72,9 +72,9 @@ func TestParse(t *testing.T) { func TestDefaultTags(t *testing.T) { prompbInput := prompb.WriteRequest{ - Timeseries: []*prompb.TimeSeries{ + Timeseries: []prompb.TimeSeries{ { - Labels: []*prompb.Label{ + Labels: []prompb.Label{ {Name: "__name__", Value: "foo"}, {Name: "__eg__", Value: "bar"}, }, @@ -118,9 +118,9 @@ func TestMetricsWithTimestamp(t *testing.T) { testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC) testTimeUnix := testTime.UnixNano() / int64(time.Millisecond) prompbInput := prompb.WriteRequest{ - Timeseries: []*prompb.TimeSeries{ + Timeseries: []prompb.TimeSeries{ { - Labels: []*prompb.Label{ + Labels: []prompb.Label{ {Name: "__name__", Value: "foo"}, {Name: "__eg__", Value: "bar"}, }, diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go index 670d3d346a702..87c7f8f798ce0 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go @@ -3,15 +3,16 @@ package prometheusremotewrite import ( "bytes" "fmt" - "github.com/gogo/protobuf/proto" - "github.com/golang/snappy" - "github.com/influxdata/telegraf/plugins/serializers/prometheus" "hash/fnv" "sort" "strconv" "strings" "time" + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + "github.com/influxdata/telegraf/plugins/serializers/prometheus" + "github.com/influxdata/telegraf" "github.com/prometheus/prometheus/prompb" ) @@ -54,11 +55,11 @@ func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) { func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { var buf bytes.Buffer - var entries = make(map[MetricKey]*prompb.TimeSeries) + var entries = make(map[MetricKey]prompb.TimeSeries) for _, metric := range metrics { commonLabels := s.createLabels(metric) var metrickey MetricKey - var promts *prompb.TimeSeries + var promts prompb.TimeSeries for _, field := range metric.FieldList() { metricName := prometheus.MetricName(metric.Name(), field.Key, metric.Type()) metricName, ok := prometheus.SanitizeMetricName(metricName) @@ -88,9 +89,9 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { if _, ok = entries[metrickeycount]; !ok { entries[metrickeycount] = promtscount } - labels := make([]*prompb.Label, len(commonLabels), len(commonLabels)+1) + labels := make([]prompb.Label, len(commonLabels), len(commonLabels)+1) copy(labels, commonLabels) - labels = append(labels, &prompb.Label{ + labels = append(labels, prompb.Label{ Name: "le", Value: "+Inf", }) @@ -112,9 +113,9 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { continue } - labels = make([]*prompb.Label, len(commonLabels), len(commonLabels)+1) + labels = make([]prompb.Label, len(commonLabels), len(commonLabels)+1) copy(labels, commonLabels) - labels = append(labels, &prompb.Label{ + labels = append(labels, prompb.Label{ Name: "le", Value: fmt.Sprint(bound), }) @@ -133,9 +134,9 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { } // if no bucket generate +Inf entry - labels := make([]*prompb.Label, len(commonLabels), len(commonLabels)+1) + labels := make([]prompb.Label, len(commonLabels), len(commonLabels)+1) copy(labels, commonLabels) - labels = append(labels, &prompb.Label{ + labels = append(labels, prompb.Label{ Name: "le", Value: "+Inf", }) @@ -178,9 +179,9 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { continue } - labels := make([]*prompb.Label, len(commonLabels), len(commonLabels)+1) + labels := make([]prompb.Label, len(commonLabels), len(commonLabels)+1) copy(labels, commonLabels) - labels = append(labels, &prompb.Label{ + labels = append(labels, prompb.Label{ Name: "quantile", Value: fmt.Sprint(quantile), }) @@ -203,8 +204,8 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { } } - var promTS = make([]*prompb.TimeSeries, len(entries)) - var i int64 + var promTS = make([]prompb.TimeSeries, len(entries)) + var i int for _, promts := range entries { promTS[i] = promts i++ @@ -244,7 +245,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { return buf.Bytes(), nil } -func hasLabel(name string, labels []*prompb.Label) bool { +func hasLabel(name string, labels []prompb.Label) bool { for _, label := range labels { if name == label.Name { return true @@ -253,8 +254,8 @@ func hasLabel(name string, labels []*prompb.Label) bool { return false } -func (s *Serializer) createLabels(metric telegraf.Metric) []*prompb.Label { - labels := make([]*prompb.Label, 0, len(metric.TagList())) +func (s *Serializer) createLabels(metric telegraf.Metric) []prompb.Label { + labels := make([]prompb.Label, 0, len(metric.TagList())) for _, tag := range metric.TagList() { // Ignore special tags for histogram and summary types. switch metric.Type() { @@ -273,7 +274,7 @@ func (s *Serializer) createLabels(metric telegraf.Metric) []*prompb.Label { continue } - labels = append(labels, &prompb.Label{Name: name, Value: tag.Value}) + labels = append(labels, prompb.Label{Name: name, Value: tag.Value}) } if s.config.StringHandling != StringAsLabel { @@ -298,7 +299,7 @@ func (s *Serializer) createLabels(metric telegraf.Metric) []*prompb.Label { continue } - labels = append(labels, &prompb.Label{Name: name, Value: value}) + labels = append(labels, prompb.Label{Name: name, Value: value}) addedFieldLabel = true } @@ -311,7 +312,7 @@ func (s *Serializer) createLabels(metric telegraf.Metric) []*prompb.Label { return labels } -func MakeMetricKey(labels []*prompb.Label) MetricKey { +func MakeMetricKey(labels []prompb.Label) MetricKey { h := fnv.New64a() for _, label := range labels { h.Write([]byte(label.Name)) @@ -322,17 +323,17 @@ func MakeMetricKey(labels []*prompb.Label) MetricKey { return MetricKey(h.Sum64()) } -func getPromTS(name string, labels []*prompb.Label, value float64, ts time.Time) (MetricKey, *prompb.TimeSeries) { +func getPromTS(name string, labels []prompb.Label, value float64, ts time.Time) (MetricKey, prompb.TimeSeries) { sample := []prompb.Sample{{ // Timestamp is int milliseconds for remote write. Timestamp: ts.UnixNano() / int64(time.Millisecond), Value: value, }} - labelscopy := make([]*prompb.Label, len(labels), len(labels)+1) + labelscopy := make([]prompb.Label, len(labels), len(labels)+1) copy(labelscopy, labels) - labels = append(labelscopy, &prompb.Label{ + labels = append(labelscopy, prompb.Label{ Name: "__name__", Value: name, }) - return MakeMetricKey(labels), &prompb.TimeSeries{Labels: labels, Samples: sample} + return MakeMetricKey(labels), prompb.TimeSeries{Labels: labels, Samples: sample} } diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go index 32aba632082b6..03879e21915d1 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go @@ -141,6 +141,7 @@ http_request_duration_seconds_bucket{le="0.5"} 129389 }) require.NoError(t, err) data, err := s.Serialize(tt.metric) + require.NoError(t, err) actual, err := prompbToText(data) require.NoError(t, err) @@ -647,7 +648,10 @@ func prompbToText(data []byte) ([]byte, error) { } samples := protoToSamples(&req) for _, sample := range samples { - buf.Write([]byte(fmt.Sprintf("%s %s\n", sample.Metric.String(), sample.Value.String()))) + _, err = buf.Write([]byte(fmt.Sprintf("%s %s\n", sample.Metric.String(), sample.Value.String()))) + if err != nil { + return nil, err + } } if err != nil { return nil, err From d5b4c3e14899fc3d22d543f96dcfce9c16dda837 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Mar 2021 12:07:21 -0500 Subject: [PATCH 338/761] Bump github.com/Azure/go-autorest/autorest/azure/auth from 0.4.2 to 0.5.6 (#8746) * Bump github.com/Azure/go-autorest/autorest/azure/auth Bumps [github.com/Azure/go-autorest/autorest/azure/auth](https://github.com/Azure/go-autorest) from 0.4.2 to 0.5.6. - [Release notes](https://github.com/Azure/go-autorest/releases) - [Changelog](https://github.com/Azure/go-autorest/blob/master/CHANGELOG.md) - [Commits](https://github.com/Azure/go-autorest/compare/autorest/azure/cli/v0.4.2...autorest/azure/auth/v0.5.6) Signed-off-by: dependabot[bot] * fix license doc Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sebastian Spaink --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 6 +++--- go.sum | 24 ++++++++++++++++++------ 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index df9ddeee6a54f..8a05ab298f471 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -194,6 +194,7 @@ following works: - golang.org/x/oauth2 [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/oauth2/blob/master/LICENSE) - golang.org/x/sync [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/sync/blob/master/LICENSE) - golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE) +- golang.org/x/term [BSD 3-Clause License](https://pkg.go.dev/golang.org/x/term?tab=licenses) - golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE) - golang.org/x/time [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE) - golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) diff --git a/go.mod b/go.mod index 0e8bbb04a277d..5e8c475cc217a 100644 --- a/go.mod +++ b/go.mod @@ -10,8 +10,8 @@ require ( collectd.org v0.5.0 github.com/Azure/azure-event-hubs-go/v3 v3.2.0 github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 - github.com/Azure/go-autorest/autorest v0.11.4 - github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 + github.com/Azure/go-autorest/autorest v0.11.17 + github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 github.com/BurntSushi/toml v0.3.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee github.com/Microsoft/ApplicationInsights-Go v0.4.2 @@ -45,7 +45,7 @@ require ( github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 - github.com/dimchansky/utfbom v1.1.0 + github.com/dimchansky/utfbom v1.1.1 github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 github.com/eclipse/paho.mqtt.golang v1.3.0 diff --git a/go.sum b/go.sum index 5f7f430cbf9fa..3f8218900e265 100644 --- a/go.sum +++ b/go.sum @@ -58,20 +58,25 @@ github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.4 h1:iWJqGEvip7mjibEqC/srXNdo+4wLEPiwlP/7dZLtoPc= github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.17 h1:2zCdHwNgRH+St1J+ZMf66xI8aLr/5KMy+wWLH97zwYM= +github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= -github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.10 h1:r6fZHMaHD8B6LDCn0o5vyBFHIHrM6Ywwx7mb49lPItI= +github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 h1:cgiBtUxatlt/e3qY6fQJioqbocWHr5osz259MomF5M0= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.6/go.mod h1:nYlP+G+n8MhD5CjIi6W8nFTIJn/PnTHes5nUbK6BxD0= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= @@ -266,8 +271,10 @@ github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= -github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= @@ -1165,8 +1172,10 @@ golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1307,6 +1316,7 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1334,6 +1344,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 099ccda3f946bbcae5981a40f5a8d71d29d864ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Thu, 25 Mar 2021 18:57:01 +0100 Subject: [PATCH 339/761] Linter fixes - gosimple (#9046) --- filter/filter.go | 2 +- internal/globpath/globpath.go | 4 +- internal/process/process_test.go | 2 +- internal/templating/template.go | 2 - logger/logger.go | 3 +- models/filter.go | 26 +++++------- models/log.go | 2 - models/running_output_test.go | 4 +- plugins/common/shim/logger.go | 2 - plugins/inputs/aerospike/aerospike.go | 10 +---- plugins/inputs/amqp_consumer/amqp_consumer.go | 14 +++---- .../cisco_telemetry_util.go | 4 +- plugins/inputs/diskio/diskio.go | 2 +- plugins/inputs/diskio/diskio_linux_test.go | 2 +- .../elasticsearch/elasticsearch_test.go | 11 ++--- plugins/inputs/fibaro/fibaro.go | 4 +- plugins/inputs/fluentd/fluentd.go | 5 +-- plugins/inputs/graylog/graylog.go | 8 ++-- plugins/inputs/haproxy/haproxy.go | 4 +- plugins/inputs/http_response/http_response.go | 10 ++--- .../http_response/http_response_test.go | 1 - .../influxdb_listener/influxdb_listener.go | 2 +- plugins/inputs/intel_rdt/intel_rdt.go | 1 - plugins/inputs/intel_rdt/publisher.go | 2 - plugins/inputs/jenkins/jenkins.go | 2 +- plugins/inputs/jolokia2/gatherer.go | 10 ++--- plugins/inputs/jolokia2/jolokia_test.go | 3 +- .../openconfig_telemetry.go | 7 ---- plugins/inputs/kapacitor/kapacitor.go | 4 +- plugins/inputs/mesos/mesos.go | 12 ++---- plugins/inputs/modbus/modbus.go | 4 +- plugins/inputs/mongodb/mongostat.go | 4 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 1 - plugins/inputs/mysql/mysql.go | 4 +- plugins/inputs/mysql/v1/mysql.go | 4 +- plugins/inputs/mysql/v2/convert.go | 4 +- .../inputs/neptune_apex/neptune_apex_test.go | 2 +- plugins/inputs/openldap/openldap.go | 1 - plugins/inputs/postfix/postfix.go | 2 +- .../postgresql_extensible_test.go | 10 ++--- plugins/inputs/prometheus/prometheus.go | 12 +++--- plugins/inputs/prometheus/prometheus_test.go | 16 +++---- plugins/inputs/snmp_legacy/snmp_legacy.go | 2 +- plugins/inputs/snmp_trap/snmp_trap_test.go | 7 +--- plugins/inputs/statsd/running_stats_test.go | 5 +-- plugins/inputs/sysstat/sysstat.go | 2 +- plugins/inputs/tengine/tengine_test.go | 3 +- plugins/inputs/vsphere/endpoint.go | 2 +- plugins/inputs/vsphere/tscache.go | 2 +- plugins/inputs/webhooks/webhooks.go | 2 +- plugins/inputs/x509_cert/x509_cert.go | 2 +- plugins/outputs/amqp/amqp.go | 3 +- plugins/outputs/cloudwatch/cloudwatch.go | 8 ++-- .../outputs/prometheus_client/v2/collector.go | 1 - plugins/outputs/riemann/riemann.go | 12 +++--- plugins/outputs/riemann_legacy/riemann.go | 8 ++-- plugins/outputs/signalfx/signalfx.go | 4 +- plugins/outputs/warp10/warp10.go | 2 +- plugins/outputs/warp10/warp10_test.go | 7 ++-- plugins/parsers/collectd/parser.go | 7 ++-- plugins/parsers/graphite/parser.go | 2 +- plugins/parsers/xml/parser.go | 24 +++++------ plugins/processors/dedup/dedup.go | 2 +- plugins/processors/enum/enum.go | 6 +-- plugins/processors/topk/topk.go | 7 +--- plugins/processors/topk/topk_test.go | 42 +++++++------------ plugins/serializers/influx/reader_test.go | 4 +- selfstat/selfstat.go | 1 - testutil/accumulator.go | 3 -- testutil/metric.go | 8 +--- 70 files changed, 152 insertions(+), 255 deletions(-) diff --git a/filter/filter.go b/filter/filter.go index df171257bc789..29fcb8c4fafcc 100644 --- a/filter/filter.go +++ b/filter/filter.go @@ -47,7 +47,7 @@ func Compile(filters []string) (Filter, error) { // hasMeta reports whether path contains any magic glob characters. func hasMeta(s string) bool { - return strings.IndexAny(s, "*?[") >= 0 + return strings.ContainsAny(s, "*?[") } type filter struct { diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go index f69f5bfb60900..fb49c232ecc0b 100644 --- a/internal/globpath/globpath.go +++ b/internal/globpath/globpath.go @@ -84,10 +84,10 @@ func (g *GlobPath) GetRoots() []string { // hasMeta reports whether path contains any magic glob characters. func hasMeta(path string) bool { - return strings.IndexAny(path, "*?[") >= 0 + return strings.ContainsAny(path, "*?[") } // hasSuperMeta reports whether path contains any super magic glob characters (**). func hasSuperMeta(path string) bool { - return strings.Index(path, "**") >= 0 + return strings.Contains(path, "**") } diff --git a/internal/process/process_test.go b/internal/process/process_test.go index 7a7c8c6f33fd6..b9cad3598ce13 100644 --- a/internal/process/process_test.go +++ b/internal/process/process_test.go @@ -67,7 +67,7 @@ func TestMain(m *testing.M) { // externalProcess is an external "misbehaving" process that won't exit // cleanly. func externalProcess() { - wait := make(chan int, 0) + wait := make(chan int) fmt.Fprintln(os.Stdout, "started") <-wait os.Exit(2) diff --git a/internal/templating/template.go b/internal/templating/template.go index 235d2f2a58928..09b78e19fce66 100644 --- a/internal/templating/template.go +++ b/internal/templating/template.go @@ -59,10 +59,8 @@ func (t *Template) Apply(line string, joiner string) (string, map[string]string, field = append(field, fields[i]) case "field*": field = append(field, fields[i:]...) - break case "measurement*": measurement = append(measurement, fields[i:]...) - break default: tags[tag] = append(tags[tag], fields[i]) } diff --git a/logger/logger.go b/logger/logger.go index a276d2e807c6c..58a8b906f6596 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -69,8 +69,7 @@ func (t *telegrafLog) Write(b []byte) (n int, err error) { } func (t *telegrafLog) Close() error { - var stdErrWriter io.Writer - stdErrWriter = os.Stderr + stdErrWriter := os.Stderr // avoid closing stderr if t.internalWriter != stdErrWriter { closer, isCloser := t.internalWriter.(io.Closer) diff --git a/models/filter.go b/models/filter.go index 13627daad3434..8103c23173297 100644 --- a/models/filter.go +++ b/models/filter.go @@ -54,41 +54,41 @@ func (f *Filter) Compile() error { var err error f.nameDrop, err = filter.Compile(f.NameDrop) if err != nil { - return fmt.Errorf("Error compiling 'namedrop', %s", err) + return fmt.Errorf("error compiling 'namedrop', %s", err) } f.namePass, err = filter.Compile(f.NamePass) if err != nil { - return fmt.Errorf("Error compiling 'namepass', %s", err) + return fmt.Errorf("error compiling 'namepass', %s", err) } f.fieldDrop, err = filter.Compile(f.FieldDrop) if err != nil { - return fmt.Errorf("Error compiling 'fielddrop', %s", err) + return fmt.Errorf("error compiling 'fielddrop', %s", err) } f.fieldPass, err = filter.Compile(f.FieldPass) if err != nil { - return fmt.Errorf("Error compiling 'fieldpass', %s", err) + return fmt.Errorf("error compiling 'fieldpass', %s", err) } f.tagExclude, err = filter.Compile(f.TagExclude) if err != nil { - return fmt.Errorf("Error compiling 'tagexclude', %s", err) + return fmt.Errorf("error compiling 'tagexclude', %s", err) } f.tagInclude, err = filter.Compile(f.TagInclude) if err != nil { - return fmt.Errorf("Error compiling 'taginclude', %s", err) + return fmt.Errorf("error compiling 'taginclude', %s", err) } for i := range f.TagDrop { f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter) if err != nil { - return fmt.Errorf("Error compiling 'tagdrop', %s", err) + return fmt.Errorf("error compiling 'tagdrop', %s", err) } } for i := range f.TagPass { f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter) if err != nil { - return fmt.Errorf("Error compiling 'tagpass', %s", err) + return fmt.Errorf("error compiling 'tagpass', %s", err) } } return nil @@ -132,17 +132,11 @@ func (f *Filter) IsActive() bool { // based on the drop/pass filter parameters func (f *Filter) shouldNamePass(key string) bool { pass := func(f *Filter) bool { - if f.namePass.Match(key) { - return true - } - return false + return f.namePass.Match(key) } drop := func(f *Filter) bool { - if f.nameDrop.Match(key) { - return false - } - return true + return !f.nameDrop.Match(key) } if f.namePass != nil && f.nameDrop != nil { diff --git a/models/log.go b/models/log.go index c0b52a812d924..063a43d6ebeac 100644 --- a/models/log.go +++ b/models/log.go @@ -100,6 +100,4 @@ func SetLoggerOnPlugin(i interface{}, log telegraf.Logger) { log.Debugf("Plugin %q defines a 'Log' field on its struct of an unexpected type %q. Expected telegraf.Logger", valI.Type().Name(), field.Type().String()) } - - return } diff --git a/models/running_output_test.go b/models/running_output_test.go index feea970336817..8e8d9a995fdf8 100644 --- a/models/running_output_test.go +++ b/models/running_output_test.go @@ -539,9 +539,7 @@ func (m *mockOutput) Write(metrics []telegraf.Metric) error { m.metrics = []telegraf.Metric{} } - for _, metric := range metrics { - m.metrics = append(m.metrics, metric) - } + m.metrics = append(m.metrics, metrics...) return nil } diff --git a/plugins/common/shim/logger.go b/plugins/common/shim/logger.go index 88db63ab7d58c..c8a6ee12ba350 100644 --- a/plugins/common/shim/logger.go +++ b/plugins/common/shim/logger.go @@ -84,6 +84,4 @@ func setLoggerOnPlugin(i interface{}, log telegraf.Logger) { field.Set(reflect.ValueOf(log)) } } - - return } diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index 0c88ba840f822..38674d89a7595 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -250,8 +250,6 @@ func (a *Aerospike) parseNodeInfo(stats map[string]string, hostPort string, node fields[key] = parseAerospikeValue(key, v) } acc.AddFields("aerospike_node", fields, tags, time.Now()) - - return } func (a *Aerospike) getNamespaces(n *as.Node) ([]string, error) { @@ -295,8 +293,6 @@ func (a *Aerospike) parseNamespaceInfo(stats map[string]string, hostPort string, nFields[key] = parseAerospikeValue(key, parts[1]) } acc.AddFields("aerospike_namespace", nFields, nTags, time.Now()) - - return } func (a *Aerospike) getSets(n *as.Node) ([]string, error) { @@ -365,8 +361,6 @@ func (a *Aerospike) parseSetInfo(stats map[string]string, hostPort string, names nFields[key] = parseAerospikeValue(key, pieces[1]) } acc.AddFields("aerospike_set", nFields, nTags, time.Now()) - - return } func (a *Aerospike) getTTLHistogram(hostPort string, namespace string, set string, n *as.Node, acc telegraf.Accumulator) error { @@ -430,7 +424,7 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam // Normalize incase of less buckets than expected numRecordsPerBucket := 1 if len(buckets) > a.NumberHistogramBuckets { - numRecordsPerBucket = int(math.Ceil((float64(len(buckets)) / float64(a.NumberHistogramBuckets)))) + numRecordsPerBucket = int(math.Ceil(float64(len(buckets)) / float64(a.NumberHistogramBuckets))) } bucketCount := 0 @@ -462,8 +456,6 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam } acc.AddFields(fmt.Sprintf("aerospike_histogram_%v", strings.Replace(histogramType, "-", "_", -1)), nFields, nTags, time.Now()) - - return } func splitNamespaceSet(namespaceSet string) (string, string) { diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index d98b1c19f4ab3..39bfeeaede0b3 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -71,7 +71,7 @@ func (a *externalAuth) Mechanism() string { return "EXTERNAL" } func (a *externalAuth) Response() string { - return fmt.Sprintf("\000") + return "\000" } const ( @@ -288,7 +288,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err ch, err := a.conn.Channel() if err != nil { - return nil, fmt.Errorf("Failed to open a channel: %s", err.Error()) + return nil, fmt.Errorf("failed to open a channel: %s", err.Error()) } if a.Exchange != "" { @@ -335,7 +335,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err nil, ) if err != nil { - return nil, fmt.Errorf("Failed to bind a queue: %s", err) + return nil, fmt.Errorf("failed to bind a queue: %s", err) } } @@ -345,7 +345,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err false, // global ) if err != nil { - return nil, fmt.Errorf("Failed to set QoS: %s", err) + return nil, fmt.Errorf("failed to set QoS: %s", err) } msgs, err := ch.Consume( @@ -358,7 +358,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err nil, // arguments ) if err != nil { - return nil, fmt.Errorf("Failed establishing connection to queue: %s", err) + return nil, fmt.Errorf("failed establishing connection to queue: %s", err) } return msgs, err @@ -395,7 +395,7 @@ func declareExchange( ) } if err != nil { - return fmt.Errorf("Error declaring exchange: %v", err) + return fmt.Errorf("error declaring exchange: %v", err) } return nil } @@ -437,7 +437,7 @@ func declareQueue( ) } if err != nil { - return nil, fmt.Errorf("Error declaring queue: %v", err) + return nil, fmt.Errorf("error declaring queue: %v", err) } return &queue, nil } diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go index e585b6fe0bda7..e9fb4efe04103 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go @@ -127,12 +127,12 @@ func (c *CiscoTelemetryMDT) nxosValueXform(field *telemetry.TelemetryField, valu } case *telemetry.TelemetryField_Uint32Value: vali, ok := value.(uint32) - if ok == true { + if ok { return vali } case *telemetry.TelemetryField_Uint64Value: vali, ok := value.(uint64) - if ok == true { + if ok { return vali } } //switch diff --git a/plugins/inputs/diskio/diskio.go b/plugins/inputs/diskio/diskio.go index c586233a8e447..c347e90a36526 100644 --- a/plugins/inputs/diskio/diskio.go +++ b/plugins/inputs/diskio/diskio.go @@ -68,7 +68,7 @@ func (d *DiskIO) SampleConfig() string { // hasMeta reports whether s contains any special glob characters. func hasMeta(s string) bool { - return strings.IndexAny(s, "*?[") >= 0 + return strings.ContainsAny(s, "*?[") } func (d *DiskIO) init() error { diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index 9362b195cd3da..8895afeec1563 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -24,7 +24,7 @@ func setupNullDisk(t *testing.T, s *DiskIO, devName string) func() error { require.NoError(t, err) if s.infoCache == nil { - s.infoCache = make(map[string]diskInfoCache, 0) + s.infoCache = make(map[string]diskInfoCache) } ic, ok := s.infoCache[devName] if !ok { diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index 184acbbbcbf57..4a02e927678da 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -8,8 +8,6 @@ import ( "github.com/influxdata/telegraf/testutil" - "fmt" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -56,8 +54,7 @@ func (t *transportMock) CancelRequest(_ *http.Request) { func checkIsMaster(es *Elasticsearch, server string, expected bool, t *testing.T) { if es.serverInfo[server].isMaster() != expected { - msg := fmt.Sprintf("IsMaster set incorrectly") - assert.Fail(t, msg) + assert.Fail(t, "IsMaster set incorrectly") } } @@ -231,8 +228,7 @@ func TestGatherClusterStatsMaster(t *testing.T) { IsMasterResultTokens := strings.Split(string(IsMasterResult), " ") if masterID != IsMasterResultTokens[0] { - msg := fmt.Sprintf("catmaster is incorrect") - assert.Fail(t, msg) + assert.Fail(t, "catmaster is incorrect") } // now get node status, which determines whether we're master @@ -275,8 +271,7 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ") if masterID != IsNotMasterResultTokens[0] { - msg := fmt.Sprintf("catmaster is incorrect") - assert.Fail(t, msg) + assert.Fail(t, "catmaster is incorrect") } // now get node status, which determines whether we're master diff --git a/plugins/inputs/fibaro/fibaro.go b/plugins/inputs/fibaro/fibaro.go index 20b993576b664..6cfe9e64834cf 100644 --- a/plugins/inputs/fibaro/fibaro.go +++ b/plugins/inputs/fibaro/fibaro.go @@ -101,7 +101,7 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", requestURL, resp.StatusCode, http.StatusText(resp.StatusCode), @@ -159,7 +159,7 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { for _, device := range devices { // skip device in some cases if device.RoomID == 0 || - device.Enabled == false || + !device.Enabled || device.Properties.Dead == "true" || device.Type == "com.fibaro.zwaveDevice" { continue diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index 42a2f1b52c0f6..dac25769a207c 100644 --- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -66,10 +66,7 @@ func parse(data []byte) (datapointArray []pluginData, err error) { return } - for _, point := range endpointData.Payload { - datapointArray = append(datapointArray, point) - } - + datapointArray = append(datapointArray, endpointData.Payload...) return } diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index af19450f1f560..9b73991eb8227 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -203,13 +203,13 @@ func (h *GrayLog) flatten(item map[string]interface{}, fields map[string]interfa id = id + "_" } for k, i := range item { - switch i.(type) { + switch i := i.(type) { case int: - fields[id+k] = i.(float64) + fields[id+k] = float64(i) case float64: - fields[id+k] = i.(float64) + fields[id+k] = i case map[string]interface{}: - h.flatten(i.(map[string]interface{}), fields, id+k) + h.flatten(i, fields, id+k) default: } } diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index a3fe09072abc8..f95dbcc9f1045 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -95,9 +95,7 @@ func (h *haproxy) Gather(acc telegraf.Accumulator) error { if len(matches) == 0 { endpoints = append(endpoints, socketPath) } else { - for _, match := range matches { - endpoints = append(endpoints, match) - } + endpoints = append(endpoints, matches...) } } diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 3be16a9d40dd1..50315fceee5b0 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -188,7 +188,7 @@ func (h *HTTPResponse) createHTTPClient() (*http.Client, error) { Timeout: h.ResponseTimeout.Duration, } - if h.FollowRedirects == false { + if !h.FollowRedirects { client.CheckRedirect = func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse } @@ -247,10 +247,10 @@ func setError(err error, fields map[string]interface{}, tags map[string]string) opErr, isNetErr := (urlErr.Err).(*net.OpError) if isNetErr { switch e := (opErr.Err).(type) { - case (*net.DNSError): + case *net.DNSError: setResult("dns_error", fields, tags) return e - case (*net.ParseError): + case *net.ParseError: // Parse error has to do with parsing of IP addresses, so we // group it with address errors setResult("address_error", fields, tags) @@ -412,7 +412,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { var err error h.compiledStringMatch, err = regexp.Compile(h.ResponseStringMatch) if err != nil { - return fmt.Errorf("Failed to compile regular expression %s : %s", h.ResponseStringMatch, err) + return fmt.Errorf("failed to compile regular expression %s : %s", h.ResponseStringMatch, err) } } @@ -450,7 +450,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { } if addr.Scheme != "http" && addr.Scheme != "https" { - acc.AddError(errors.New("Only http and https are supported")) + acc.AddError(errors.New("only http and https are supported")) continue } diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 5c05d84264112..73ef9b0197160 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -130,7 +130,6 @@ func setUpTestMux() http.Handler { }) mux.HandleFunc("/twosecondnap", func(w http.ResponseWriter, req *http.Request) { time.Sleep(time.Second * 2) - return }) mux.HandleFunc("/nocontent", func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNoContent) diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go index de814a19806be..d551cca5f0f26 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -337,7 +337,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { var partialErrorString string switch parseErrorCount { case 1: - partialErrorString = fmt.Sprintf("%s", firstParseErrorStr) + partialErrorString = firstParseErrorStr case 2: partialErrorString = fmt.Sprintf("%s (and 1 other parse error)", firstParseErrorStr) default: diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index ba47234a751fd..69cc914227fc8 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -245,7 +245,6 @@ func (r *IntelRDT) createArgsAndStartPQOS(ctx context.Context) { args = append(args, processArg) go r.readData(ctx, args, r.processesPIDsMap) } - return } func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAssociation map[string]string) { diff --git a/plugins/inputs/intel_rdt/publisher.go b/plugins/inputs/intel_rdt/publisher.go index ca36e40525c12..a01d730382da9 100644 --- a/plugins/inputs/intel_rdt/publisher.go +++ b/plugins/inputs/intel_rdt/publisher.go @@ -54,7 +54,6 @@ func (p *Publisher) publishCores(measurement string) { p.errChan <- err } p.addToAccumulatorCores(coresString, values, timestamp) - return } func (p *Publisher) publishProcess(measurement processMeasurement) { @@ -63,7 +62,6 @@ func (p *Publisher) publishProcess(measurement processMeasurement) { p.errChan <- err } p.addToAccumulatorProcesses(process, coresString, values, timestamp) - return } func parseCoresMeasurement(measurements string) (string, []float64, time.Time, error) { diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index 859121cf606ce..fa5727ced32bc 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -301,7 +301,7 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { } // filter out not included job. - if j.jobFilterInclude != nil && j.jobFilterInclude.Match(jr.hierarchyName()) == false { + if j.jobFilterInclude != nil && !j.jobFilterInclude.Match(jr.hierarchyName()) { return nil } diff --git a/plugins/inputs/jolokia2/gatherer.go b/plugins/inputs/jolokia2/gatherer.go index 7ee8438dd18e6..99cd2f4b91a13 100644 --- a/plugins/inputs/jolokia2/gatherer.go +++ b/plugins/inputs/jolokia2/gatherer.go @@ -46,7 +46,7 @@ func (g *Gatherer) Gather(client *Client, acc telegraf.Accumulator) error { // gatherResponses adds points to an accumulator from the ReadResponse objects // returned by a Jolokia agent. func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]string, acc telegraf.Accumulator) { - series := make(map[string][]point, 0) + series := make(map[string][]point) for _, metric := range g.metrics { points, ok := series[metric.Name] @@ -55,11 +55,7 @@ func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]str } responsePoints, responseErrors := g.generatePoints(metric, responses) - - for _, responsePoint := range responsePoints { - points = append(points, responsePoint) - } - + points = append(points, responsePoints...) for _, err := range responseErrors { acc.AddError(err) } @@ -88,7 +84,7 @@ func (g *Gatherer) generatePoints(metric Metric, responses []ReadResponse) ([]po case 404: continue default: - errors = append(errors, fmt.Errorf("Unexpected status in response from target %s (%q): %d", + errors = append(errors, fmt.Errorf("unexpected status in response from target %s (%q): %d", response.RequestTarget, response.RequestMbean, response.Status)) continue } diff --git a/plugins/inputs/jolokia2/jolokia_test.go b/plugins/inputs/jolokia2/jolokia_test.go index 4fe8b26290da6..aafac023e5081 100644 --- a/plugins/inputs/jolokia2/jolokia_test.go +++ b/plugins/inputs/jolokia2/jolokia_test.go @@ -750,8 +750,7 @@ func TestJolokia2_ProxyTargets(t *testing.T) { func TestFillFields(t *testing.T) { complex := map[string]interface{}{"Value": []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} - var scalar interface{} - scalar = []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + scalar := []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} results := map[string]interface{}{} newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complex, results) diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index ef8a1400b212b..ca087e12f6904 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -169,25 +169,18 @@ func (m *OpenConfigTelemetry) extractData(r *telemetry.OpenConfigData, grpcServe } else { kv[xmlpath] = v.GetStrValue() } - break case *telemetry.KeyValue_DoubleValue: kv[xmlpath] = v.GetDoubleValue() - break case *telemetry.KeyValue_IntValue: kv[xmlpath] = v.GetIntValue() - break case *telemetry.KeyValue_UintValue: kv[xmlpath] = v.GetUintValue() - break case *telemetry.KeyValue_SintValue: kv[xmlpath] = v.GetSintValue() - break case *telemetry.KeyValue_BoolValue: kv[xmlpath] = v.GetBoolValue() - break case *telemetry.KeyValue_BytesValue: kv[xmlpath] = v.GetBytesValue() - break } // Insert other tags from message diff --git a/plugins/inputs/kapacitor/kapacitor.go b/plugins/inputs/kapacitor/kapacitor.go index ac037a183b667..073344ed41a93 100644 --- a/plugins/inputs/kapacitor/kapacitor.go +++ b/plugins/inputs/kapacitor/kapacitor.go @@ -219,9 +219,7 @@ func (k *Kapacitor) gatherURL( // Strip out high-cardinality or duplicative tags excludeTags := []string{"host", "cluster_id", "server_id"} for _, key := range excludeTags { - if _, ok := obj.Tags[key]; ok { - delete(obj.Tags, key) - } + delete(obj.Tags, key) } // Convert time-related string field to int diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 1ebbc6bf290d1..f4079464fc601 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -185,7 +185,6 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { go func(master *url.URL) { acc.AddError(m.gatherMainMetrics(master, MASTER, acc)) wg.Done() - return }(master) } @@ -194,7 +193,6 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { go func(slave *url.URL) { acc.AddError(m.gatherMainMetrics(slave, SLAVE, acc)) wg.Done() - return }(slave) } @@ -244,9 +242,7 @@ func metricsDiff(role Role, w []string) []string { // masterBlocks serves as kind of metrics registry grouping them in sets func getMetrics(role Role, group string) []string { - var m map[string][]string - - m = make(map[string][]string) + m := make(map[string][]string) if role == MASTER { m["resources"] = []string{ @@ -504,13 +500,13 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) { case "allocator": for m := range *metrics { if strings.HasPrefix(m, "allocator/") { - delete((*metrics), m) + delete(*metrics, m) } } case "framework_offers": for m := range *metrics { if strings.HasPrefix(m, "master/frameworks/") || strings.HasPrefix(m, "frameworks/") { - delete((*metrics), m) + delete(*metrics, m) } } @@ -518,7 +514,7 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) { default: for _, v := range getMetrics(role, k) { if _, ok = (*metrics)[v]; ok { - delete((*metrics), v) + delete(*metrics, v) } } } diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index 7345aef68bebc..46156dc09fecd 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -208,9 +208,7 @@ func (m *Modbus) InitRegister(fields []fieldContainer, name string) error { addrs := []uint16{} for _, field := range fields { - for _, a := range field.Address { - addrs = append(addrs, a) - } + addrs = append(addrs, field.Address...) } addrs = removeDuplicates(addrs) diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 3cfa9e9747bc9..c4cfa45c5c0e7 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -1220,9 +1220,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec // Get the entry with the highest lock highestLocked := lockdiffs[len(lockdiffs)-1] - var timeDiffMillis int64 - timeDiffMillis = newStat.UptimeMillis - oldStat.UptimeMillis - + timeDiffMillis := newStat.UptimeMillis - oldStat.UptimeMillis lockToReport := highestLocked.Writes // if the highest locked namespace is not '.' diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index d5f5616b11e27..f8304be10348b 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -252,7 +252,6 @@ func (m *MQTTConsumer) onConnectionLost(_ mqtt.Client, err error) { m.acc.AddError(fmt.Errorf("connection lost: %v", err)) m.Log.Debugf("Disconnected %v", m.Servers) m.state = Disconnected - return } func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) { diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 30b7b68f21038..3f79b0e2d9346 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -1858,11 +1858,11 @@ func (m *Mysql) parseValue(value sql.RawBytes) (interface{}, bool) { // parseValue can be used to convert values such as "ON","OFF","Yes","No" to 0,1 func parseValue(value sql.RawBytes) (interface{}, bool) { - if bytes.EqualFold(value, []byte("YES")) || bytes.Compare(value, []byte("ON")) == 0 { + if bytes.EqualFold(value, []byte("YES")) || bytes.Equal(value, []byte("ON")) { return 1, true } - if bytes.EqualFold(value, []byte("NO")) || bytes.Compare(value, []byte("OFF")) == 0 { + if bytes.EqualFold(value, []byte("NO")) || bytes.Equal(value, []byte("OFF")) { return 0, true } diff --git a/plugins/inputs/mysql/v1/mysql.go b/plugins/inputs/mysql/v1/mysql.go index 6f6062d14f4db..374782f9cb29a 100644 --- a/plugins/inputs/mysql/v1/mysql.go +++ b/plugins/inputs/mysql/v1/mysql.go @@ -183,11 +183,11 @@ var Mappings = []*Mapping{ } func ParseValue(value sql.RawBytes) (float64, bool) { - if bytes.Compare(value, []byte("Yes")) == 0 || bytes.Compare(value, []byte("ON")) == 0 { + if bytes.Equal(value, []byte("Yes")) || bytes.Equal(value, []byte("ON")) { return 1, true } - if bytes.Compare(value, []byte("No")) == 0 || bytes.Compare(value, []byte("OFF")) == 0 { + if bytes.Equal(value, []byte("No")) || bytes.Equal(value, []byte("OFF")) { return 0, true } n, err := strconv.ParseFloat(string(value), 64) diff --git a/plugins/inputs/mysql/v2/convert.go b/plugins/inputs/mysql/v2/convert.go index a3ac3e976d6a3..78f978fa059ee 100644 --- a/plugins/inputs/mysql/v2/convert.go +++ b/plugins/inputs/mysql/v2/convert.go @@ -47,11 +47,11 @@ func ParseGTIDMode(value sql.RawBytes) (interface{}, error) { } func ParseValue(value sql.RawBytes) (interface{}, error) { - if bytes.EqualFold(value, []byte("YES")) || bytes.Compare(value, []byte("ON")) == 0 { + if bytes.EqualFold(value, []byte("YES")) || bytes.Equal(value, []byte("ON")) { return 1, nil } - if bytes.EqualFold(value, []byte("NO")) || bytes.Compare(value, []byte("OFF")) == 0 { + if bytes.EqualFold(value, []byte("NO")) || bytes.Equal(value, []byte("OFF")) { return 0, nil } diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index 86e794575a669..fc5710e9fbadb 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -437,7 +437,7 @@ func TestSendRequest(t *testing.T) { if test.wantErr { return } - if bytes.Compare(resp, []byte("data")) != 0 { + if !bytes.Equal(resp, []byte("data")) { t.Errorf( "Response data mismatch. got=%q, want=%q", resp, "data") } diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index af9a11e4b24bb..f3f7b47cf597c 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -190,7 +190,6 @@ func gatherSearchResult(sr *ldap.SearchResult, o *Openldap, acc telegraf.Accumul } } acc.AddFields("openldap", fields, tags) - return } // Convert a DN to metric name, eg cn=Read,cn=Waiters,cn=Monitor becomes waiters_read diff --git a/plugins/inputs/postfix/postfix.go b/plugins/inputs/postfix/postfix.go index 87e11a195add0..f72474a114f94 100644 --- a/plugins/inputs/postfix/postfix.go +++ b/plugins/inputs/postfix/postfix.go @@ -61,7 +61,7 @@ func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) { } var age int64 if !oldest.IsZero() { - age = int64(time.Now().Sub(oldest) / time.Second) + age = int64(time.Since(oldest) / time.Second) } else if length != 0 { // system doesn't support ctime age = -1 diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index f78e46199a122..b80965fbcb066 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -289,15 +289,15 @@ type fakeRow struct { func (f fakeRow) Scan(dest ...interface{}) error { if len(f.fields) != len(dest) { - return errors.New("Nada matchy buddy") + return errors.New("nada matchy buddy") } for i, d := range dest { - switch d.(type) { - case (*interface{}): - *d.(*interface{}) = f.fields[i] + switch d := d.(type) { + case *interface{}: + *d = f.fields[i] default: - return fmt.Errorf("Bad type %T", d) + return fmt.Errorf("bad type %T", d) } } return nil diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index f85ec44142019..319f96a69b70c 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -169,8 +169,8 @@ func (p *Prometheus) Init() error { // Check if set as env var and is valid IP address envVarNodeIP := os.Getenv("NODE_IP") if envVarNodeIP == "" || net.ParseIP(envVarNodeIP) == nil { - errorMessage := "The node_ip config and the environment variable NODE_IP are not set or invalid. Cannot get pod list for monitor_kubernetes_pods using node scrape scope" - return errors.New(errorMessage) + return errors.New("the node_ip config and the environment variable NODE_IP are not set or invalid; " + + "cannot get pod list for monitor_kubernetes_pods using node scrape scope") } p.NodeIP = envVarNodeIP @@ -180,15 +180,15 @@ func (p *Prometheus) Init() error { var err error p.podLabelSelector, err = labels.Parse(p.KubernetesLabelSelector) if err != nil { - return fmt.Errorf("Error parsing the specified label selector(s): %s", err.Error()) + return fmt.Errorf("error parsing the specified label selector(s): %s", err.Error()) } p.podFieldSelector, err = fields.ParseSelector(p.KubernetesFieldSelector) if err != nil { - return fmt.Errorf("Error parsing the specified field selector(s): %s", err.Error()) + return fmt.Errorf("error parsing the specified field selector(s): %s", err.Error()) } isValid, invalidSelector := fieldSelectorIsSupported(p.podFieldSelector) if !isValid { - return fmt.Errorf("The field selector %s is not supported for pods", invalidSelector) + return fmt.Errorf("the field selector %s is not supported for pods", invalidSelector) } p.Log.Infof("Using pod scrape scope at node level to get pod list using cAdvisor.") @@ -227,7 +227,7 @@ type URLAndAddress struct { } func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { - allURLs := make(map[string]URLAndAddress, 0) + allURLs := make(map[string]URLAndAddress) for _, u := range p.URLs { URL, err := url.Parse(u) if err != nil { diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index f5b0d19e41a87..3ba4b5f4a1a01 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -261,23 +261,23 @@ func TestInitConfigErrors(t *testing.T) { p.NodeIP = "10.240.0.0.0" os.Setenv("NODE_IP", "10.000.0.0.0") err := p.Init() - expectedMessage := "The node_ip config and the environment variable NODE_IP are not set or invalid. Cannot get pod list for monitor_kubernetes_pods using node scrape scope" - assert.Equal(t, expectedMessage, err.Error()) + expectedMessage := "the node_ip config and the environment variable NODE_IP are not set or invalid. Cannot get pod list for monitor_kubernetes_pods using node scrape scope" + require.Error(t, err, expectedMessage) os.Setenv("NODE_IP", "10.000.0.0") p.KubernetesLabelSelector = "label0==label0, label0 in (=)" err = p.Init() - expectedMessage = "Error parsing the specified label selector(s): unable to parse requirement: found '=', expected: ',', ')' or identifier" - assert.Equal(t, expectedMessage, err.Error()) + expectedMessage = "error parsing the specified label selector(s): unable to parse requirement: found '=', expected: ',', ')' or identifier" + require.Error(t, err, expectedMessage) p.KubernetesLabelSelector = "label0==label" p.KubernetesFieldSelector = "field," err = p.Init() - expectedMessage = "Error parsing the specified field selector(s): invalid selector: 'field,'; can't understand 'field'" - assert.Equal(t, expectedMessage, err.Error()) + expectedMessage = "error parsing the specified field selector(s): invalid selector: 'field,'; can't understand 'field'" + require.Error(t, err, expectedMessage) p.KubernetesFieldSelector = "spec.containerNames=containerNames" err = p.Init() - expectedMessage = "The field selector spec.containerNames is not supported for pods" - assert.Equal(t, expectedMessage, err.Error()) + expectedMessage = "the field selector spec.containerNames is not supported for pods" + require.Error(t, err, expectedMessage) } diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index f3f938657d09a..99ad5d170cb0e 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -234,7 +234,7 @@ func fillnode(parentNode Node, oidName string, ids []string) { // ids = ["1", "3", "6", ...] id, ids := ids[0], ids[1:] node, ok := parentNode.subnodes[id] - if ok == false { + if !ok { node = Node{ id: id, name: "", diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index 424098cfae365..87938e9837790 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -151,11 +151,8 @@ func sendTrap(t *testing.T, goSNMP gosnmp.GoSNMP, trap gosnmp.SnmpTrap) { } func TestReceiveTrap(t *testing.T) { - var now uint32 - now = 123123123 - - var fakeTime time.Time - fakeTime = time.Unix(456456456, 456) + now := uint32(123123123) + fakeTime := time.Unix(456456456, 456) type entry struct { oid string diff --git a/plugins/inputs/statsd/running_stats_test.go b/plugins/inputs/statsd/running_stats_test.go index a52209c5665cb..2cf987a69bbf1 100644 --- a/plugins/inputs/statsd/running_stats_test.go +++ b/plugins/inputs/statsd/running_stats_test.go @@ -162,8 +162,5 @@ func TestRunningStats_PercentileLimit(t *testing.T) { } func fuzzyEqual(a, b, epsilon float64) bool { - if math.Abs(a-b) > epsilon { - return false - } - return true + return math.Abs(a-b) <= epsilon } diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 528f4ec43d0e5..6eb649ebaacdc 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -284,7 +284,7 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, ts time.Time) e tags: make(map[string]string), } } - g, _ := m[device] + g := m[device] if len(g.tags) == 0 { for k, v := range tags { g.tags[k] = v diff --git a/plugins/inputs/tengine/tengine_test.go b/plugins/inputs/tengine/tengine_test.go index 70526826cd0ae..960998e6e16ee 100644 --- a/plugins/inputs/tengine/tengine_test.go +++ b/plugins/inputs/tengine/tengine_test.go @@ -28,8 +28,7 @@ func TestTengineTags(t *testing.T) { func TestTengineGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var rsp string - rsp = tengineSampleResponse + rsp := tengineSampleResponse fmt.Fprintln(w, rsp) })) defer ts.Close() diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index fa669a2a024ed..85fda786b17ba 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -891,7 +891,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim numQs := 0 for _, object := range res.objects { - timeBuckets := make(map[int64]*types.PerfQuerySpec, 0) + timeBuckets := make(map[int64]*types.PerfQuerySpec) for metricIdx, metric := range res.metrics { // Determine time of last successful collection metricName := e.getMetricNameForID(metric.CounterId) diff --git a/plugins/inputs/vsphere/tscache.go b/plugins/inputs/vsphere/tscache.go index 1be75d7605173..c312260c85b9b 100644 --- a/plugins/inputs/vsphere/tscache.go +++ b/plugins/inputs/vsphere/tscache.go @@ -27,7 +27,7 @@ func (t *TSCache) Purge() { defer t.mux.Unlock() n := 0 for k, v := range t.table { - if time.Now().Sub(v) > t.ttl { + if time.Since(v) > t.ttl { delete(t.table, k) n++ } diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go index d54951bca5956..1fedca96ca4a9 100644 --- a/plugins/inputs/webhooks/webhooks.go +++ b/plugins/inputs/webhooks/webhooks.go @@ -109,7 +109,7 @@ func (wb *Webhooks) Start(acc telegraf.Accumulator) error { wb.srv = &http.Server{Handler: r} - ln, err := net.Listen("tcp", fmt.Sprintf("%s", wb.ServiceAddress)) + ln, err := net.Listen("tcp", wb.ServiceAddress) if err != nil { return fmt.Errorf("error starting server: %v", err) } diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 92fbcb4066e61..3128b90686d0c 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -154,7 +154,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica } certs = append(certs, cert) } - if rest == nil || len(rest) == 0 { + if len(rest) == 0 { break } content = rest diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index 96e0970b27c52..8bf469a590248 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -2,7 +2,6 @@ package amqp import ( "bytes" - "fmt" "strings" "time" @@ -29,7 +28,7 @@ func (a *externalAuth) Mechanism() string { } func (a *externalAuth) Response() string { - return fmt.Sprintf("\000") + return "\000" } type AMQP struct { diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 3042a0b89dfe3..9844ab271e205 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -68,10 +68,10 @@ func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { if f.hasAllFields() { // If we have all required fields, we build datum with StatisticValues - min, _ := f.values[statisticTypeMin] - max, _ := f.values[statisticTypeMax] - sum, _ := f.values[statisticTypeSum] - count, _ := f.values[statisticTypeCount] + min := f.values[statisticTypeMin] + max := f.values[statisticTypeMax] + sum := f.values[statisticTypeSum] + count := f.values[statisticTypeCount] datum := &cloudwatch.MetricDatum{ MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), diff --git a/plugins/outputs/prometheus_client/v2/collector.go b/plugins/outputs/prometheus_client/v2/collector.go index 5c569685de5cb..a12c17571124c 100644 --- a/plugins/outputs/prometheus_client/v2/collector.go +++ b/plugins/outputs/prometheus_client/v2/collector.go @@ -63,7 +63,6 @@ func (c *Collector) Describe(_ chan<- *prometheus.Desc) { // Sending no descriptor at all marks the Collector as "unchecked", // i.e. no checks will be performed at registration time, and the // Collector may yield any Metric it sees fit in its Collect method. - return } func (c *Collector) Collect(ch chan<- prometheus.Metric) { diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go index b6882dceaffec..9c202d8cf9af3 100644 --- a/plugins/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -101,7 +101,7 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error { if r.client == nil { if err := r.Connect(); err != nil { - return fmt.Errorf("Failed to (re)connect to Riemann: %s", err.Error()) + return fmt.Errorf("failed to (re)connect to Riemann: %s", err.Error()) } } @@ -109,14 +109,12 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error { var events []*raidman.Event for _, m := range metrics { evs := r.buildRiemannEvents(m) - for _, ev := range evs { - events = append(events, ev) - } + events = append(events, evs...) } if err := r.client.SendMulti(events); err != nil { r.Close() - return fmt.Errorf("Failed to send riemann message: %s", err) + return fmt.Errorf("failed to send riemann message: %s", err) } return nil } @@ -145,14 +143,14 @@ func (r *Riemann) buildRiemannEvents(m telegraf.Metric) []*raidman.Event { Tags: r.tags(m.Tags()), } - switch value.(type) { + switch value := value.(type) { case string: // only send string metrics if explicitly enabled, skip otherwise if !r.StringAsState { r.Log.Debugf("Riemann event states disabled, skipping metric value [%s]", value) continue } - event.State = value.(string) + event.State = value case int, int64, uint64, float32, float64: event.Metric = value default: diff --git a/plugins/outputs/riemann_legacy/riemann.go b/plugins/outputs/riemann_legacy/riemann.go index a123bd7d0578b..7fe80297de4d9 100644 --- a/plugins/outputs/riemann_legacy/riemann.go +++ b/plugins/outputs/riemann_legacy/riemann.go @@ -77,9 +77,7 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error { var events []*raidman.Event for _, p := range metrics { evs := buildEvents(p, r.Separator) - for _, ev := range evs { - events = append(events, ev) - } + events = append(events, evs...) } var senderr = r.client.SendMulti(events) @@ -109,9 +107,9 @@ func buildEvents(p telegraf.Metric, s string) []*raidman.Event { Service: serviceName(s, p.Name(), p.Tags(), fieldName), } - switch value.(type) { + switch value := value.(type) { case string: - event.State = value.(string) + event.State = value default: event.Metric = value } diff --git a/plugins/outputs/signalfx/signalfx.go b/plugins/outputs/signalfx/signalfx.go index b5552ee0e3830..d8452d7b7ffec 100644 --- a/plugins/outputs/signalfx/signalfx.go +++ b/plugins/outputs/signalfx/signalfx.go @@ -130,10 +130,8 @@ func (s *SignalFx) ConvertToSignalFx(metrics []telegraf.Metric) ([]*datapoint.Da for _, metric := range metrics { s.Log.Debugf("Processing the following measurement: %v", metric) var timestamp = metric.Time() - var metricType datapoint.MetricType - - metricType = GetMetricType(metric.Type()) + metricType := GetMetricType(metric.Type()) for field, val := range metric.Fields() { // Copy the metric tags because they are meant to be treated as // immutable diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index 1988bc6e1963f..32018329f0984 100644 --- a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -178,7 +178,7 @@ func buildTags(tags []*telegraf.Tag) []string { indexSource = index } indexSource++ - tagsString[indexSource] = fmt.Sprintf("source=telegraf") + tagsString[indexSource] = "source=telegraf" sort.Strings(tagsString) return tagsString } diff --git a/plugins/outputs/warp10/warp10_test.go b/plugins/outputs/warp10/warp10_test.go index afe931182a1d1..3fd08055fbb02 100644 --- a/plugins/outputs/warp10/warp10_test.go +++ b/plugins/outputs/warp10/warp10_test.go @@ -1,7 +1,6 @@ package warp10 import ( - "fmt" "testing" "github.com/influxdata/telegraf/testutil" @@ -60,7 +59,7 @@ func TestHandleWarp10Error(t *testing.T) { `, - Expected: fmt.Sprintf("Invalid token"), + Expected: "Invalid token", }, { Message: ` @@ -75,7 +74,7 @@ func TestHandleWarp10Error(t *testing.T) { `, - Expected: fmt.Sprintf("Token Expired"), + Expected: "Token Expired", }, { Message: ` @@ -90,7 +89,7 @@ func TestHandleWarp10Error(t *testing.T) { `, - Expected: fmt.Sprintf("Token revoked"), + Expected: "Token revoked", }, { Message: ` diff --git a/plugins/parsers/collectd/parser.go b/plugins/parsers/collectd/parser.go index 6b7fbd7566d12..692307abe868b 100644 --- a/plugins/parsers/collectd/parser.go +++ b/plugins/parsers/collectd/parser.go @@ -76,7 +76,7 @@ func NewCollectdParser( func (p *CollectdParser) Parse(buf []byte) ([]telegraf.Metric, error) { valueLists, err := network.Parse(buf, p.popts) if err != nil { - return nil, fmt.Errorf("Collectd parser error: %s", err) + return nil, fmt.Errorf("collectd parser error: %s", err) } metrics := []telegraf.Metric{} @@ -105,7 +105,7 @@ func (p *CollectdParser) ParseLine(line string) (telegraf.Metric, error) { } if len(metrics) != 1 { - return nil, errors.New("Line contains multiple metrics") + return nil, errors.New("line contains multiple metrics") } return metrics[0], nil @@ -128,8 +128,7 @@ func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric switch multiValue { case "split": for i := range vl.Values { - var name string - name = fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i)) + name := fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i)) tags := make(map[string]string) fields := make(map[string]interface{}) diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go index 528bc4f2072e6..5c0f3a8070452 100644 --- a/plugins/parsers/graphite/parser.go +++ b/plugins/parsers/graphite/parser.go @@ -157,7 +157,7 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) { if len(tagValue) != 2 || len(tagValue[0]) == 0 || len(tagValue[1]) == 0 { continue } - if strings.IndexAny(tagValue[0], "!^") != -1 { + if strings.ContainsAny(tagValue[0], "!^") { continue } if strings.Index(tagValue[1], "~") == 0 { diff --git a/plugins/parsers/xml/parser.go b/plugins/parsers/xml/parser.go index 8ee002ff3b0e7..75c79fbd71bae 100644 --- a/plugins/parsers/xml/parser.go +++ b/plugins/parsers/xml/parser.go @@ -134,14 +134,14 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c if err != nil { return nil, fmt.Errorf("failed to query timestamp: %v", err) } - switch v.(type) { + switch v := v.(type) { case string: // Parse the string with the given format or assume the string to contain // a unix timestamp in seconds if no format is given. if len(config.TimestampFmt) < 1 || strings.HasPrefix(config.TimestampFmt, "unix") { var nanoseconds int64 - t, err := strconv.ParseFloat(v.(string), 64) + t, err := strconv.ParseFloat(v, 64) if err != nil { return nil, fmt.Errorf("failed to parse unix timestamp: %v", err) } @@ -158,14 +158,14 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c } timestamp = time.Unix(0, nanoseconds) } else { - timestamp, err = time.Parse(config.TimestampFmt, v.(string)) + timestamp, err = time.Parse(config.TimestampFmt, v) if err != nil { return nil, fmt.Errorf("failed to query timestamp format: %v", err) } } case float64: // Assume the value to contain a timestamp in seconds and fractions thereof. - timestamp = time.Unix(0, int64(v.(float64)*1e9)) + timestamp = time.Unix(0, int64(v*1e9)) case nil: // No timestamp found. Just ignore the time and use "starttime" default: @@ -181,13 +181,13 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c if err != nil { return nil, fmt.Errorf("failed to query tag '%s': %v", name, err) } - switch v.(type) { + switch v := v.(type) { case string: - tags[name] = v.(string) + tags[name] = v case bool: - tags[name] = strconv.FormatBool(v.(bool)) + tags[name] = strconv.FormatBool(v) case float64: - tags[name] = strconv.FormatFloat(v.(float64), 'G', -1, 64) + tags[name] = strconv.FormatFloat(v, 'G', -1, 64) case nil: continue default: @@ -206,19 +206,19 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c if err != nil { return nil, fmt.Errorf("failed to query field (int) '%s': %v", name, err) } - switch v.(type) { + switch v := v.(type) { case string: - fields[name], err = strconv.ParseInt(v.(string), 10, 54) + fields[name], err = strconv.ParseInt(v, 10, 54) if err != nil { return nil, fmt.Errorf("failed to parse field (int) '%s': %v", name, err) } case bool: fields[name] = int64(0) - if v.(bool) { + if v { fields[name] = int64(1) } case float64: - fields[name] = int64(v.(float64)) + fields[name] = int64(v) case nil: continue default: diff --git a/plugins/processors/dedup/dedup.go b/plugins/processors/dedup/dedup.go index 3dd7516a696c2..3823b393e27fd 100644 --- a/plugins/processors/dedup/dedup.go +++ b/plugins/processors/dedup/dedup.go @@ -40,7 +40,7 @@ func (d *Dedup) cleanup() { return } d.FlushTime = time.Now() - keep := make(map[uint64]telegraf.Metric, 0) + keep := make(map[uint64]telegraf.Metric) for id, metric := range d.Cache { if time.Since(metric.Time()) < d.DedupInterval.Duration { keep[id] = metric diff --git a/plugins/processors/enum/enum.go b/plugins/processors/enum/enum.go index 60a4264528844..6a4a7f67afffd 100644 --- a/plugins/processors/enum/enum.go +++ b/plugins/processors/enum/enum.go @@ -55,14 +55,14 @@ func (mapper *EnumMapper) Init() error { if mapping.Field != "" { fieldFilter, err := filter.NewIncludeExcludeFilter([]string{mapping.Field}, nil) if err != nil { - return fmt.Errorf("Failed to create new field filter: %w", err) + return fmt.Errorf("failed to create new field filter: %w", err) } mapper.FieldFilters[mapping.Field] = fieldFilter } if mapping.Tag != "" { tagFilter, err := filter.NewIncludeExcludeFilter([]string{mapping.Tag}, nil) if err != nil { - return fmt.Errorf("Failed to create new tag filter: %s", err) + return fmt.Errorf("failed to create new tag filter: %s", err) } mapper.TagFilters[mapping.Tag] = tagFilter } @@ -153,7 +153,7 @@ func adjustValue(in interface{}) interface{} { } func (mapping *Mapping) mapValue(original string) (interface{}, bool) { - if mapped, found := mapping.ValueMappings[original]; found == true { + if mapped, found := mapping.ValueMappings[original]; found { return mapped, true } if mapping.Default != nil { diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index 24e1f7c9516a7..28a2cb7fc2498 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -110,10 +110,7 @@ func sortMetrics(metrics []MetricAggregation, field string, reverse bool) { less := func(i, j int) bool { iv := metrics[i].values[field] jv := metrics[j].values[field] - if iv < jv { - return true - } - return false + return iv < jv } if reverse { @@ -276,7 +273,7 @@ func (t *TopK) push() []telegraf.Metric { } // The return value that will hold the returned metrics - var ret = make([]telegraf.Metric, 0, 0) + var ret = make([]telegraf.Metric, 0) // Get the top K metrics for each field and add them to the return value addedKeys := make(map[string]bool) for _, field := range t.Fields { diff --git a/plugins/processors/topk/topk_test.go b/plugins/processors/topk/topk_test.go index 79c6b81db4f38..9df10c761eddf 100644 --- a/plugins/processors/topk/topk_test.go +++ b/plugins/processors/topk/topk_test.go @@ -138,8 +138,7 @@ func runAndCompare(topk *TopK, metrics []telegraf.Metric, answer []telegraf.Metr // Smoke tests func TestTopkAggregatorsSmokeTests(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.Fields = []string{"a"} topk.GroupBy = []string{"tag_name"} @@ -160,8 +159,7 @@ func TestTopkAggregatorsSmokeTests(t *testing.T) { // AddAggregateFields + Mean aggregator func TestTopkMeanAddAggregateFields(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.Aggregation = "mean" topk.AddAggregateFields = []string{"a"} @@ -189,8 +187,7 @@ func TestTopkMeanAddAggregateFields(t *testing.T) { // AddAggregateFields + Sum aggregator func TestTopkSumAddAggregateFields(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.Aggregation = "sum" topk.AddAggregateFields = []string{"a"} @@ -218,8 +215,7 @@ func TestTopkSumAddAggregateFields(t *testing.T) { // AddAggregateFields + Max aggregator func TestTopkMaxAddAggregateFields(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.Aggregation = "max" topk.AddAggregateFields = []string{"a"} @@ -247,8 +243,7 @@ func TestTopkMaxAddAggregateFields(t *testing.T) { // AddAggregateFields + Min aggregator func TestTopkMinAddAggregateFields(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.Aggregation = "min" topk.AddAggregateFields = []string{"a"} @@ -276,8 +271,7 @@ func TestTopkMinAddAggregateFields(t *testing.T) { // GroupBy func TestTopkGroupby1(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.K = 3 topk.Aggregation = "sum" @@ -301,8 +295,7 @@ func TestTopkGroupby1(t *testing.T) { } func TestTopkGroupby2(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.K = 3 topk.Aggregation = "mean" @@ -330,8 +323,7 @@ func TestTopkGroupby2(t *testing.T) { } func TestTopkGroupby3(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.K = 1 topk.Aggregation = "min" @@ -356,8 +348,7 @@ func TestTopkGroupby3(t *testing.T) { // GroupBy + Fields func TestTopkGroupbyFields1(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.K = 4 // This settings generate less than 3 groups topk.Aggregation = "mean" @@ -383,8 +374,7 @@ func TestTopkGroupbyFields1(t *testing.T) { func TestTopkGroupbyFields2(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.K = 2 topk.Aggregation = "sum" @@ -411,8 +401,7 @@ func TestTopkGroupbyFields2(t *testing.T) { // GroupBy metric name func TestTopkGroupbyMetricName1(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.K = 1 topk.Aggregation = "sum" @@ -437,8 +426,7 @@ func TestTopkGroupbyMetricName1(t *testing.T) { func TestTopkGroupbyMetricName2(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.K = 2 topk.Aggregation = "sum" @@ -465,8 +453,7 @@ func TestTopkGroupbyMetricName2(t *testing.T) { // BottomK func TestTopkBottomk(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.K = 3 topk.Aggregation = "sum" @@ -491,8 +478,7 @@ func TestTopkBottomk(t *testing.T) { // GroupByKeyTag func TestTopkGroupByKeyTag(t *testing.T) { // Build the processor - var topk TopK - topk = *New() + topk := *New() topk.Period = oneSecondDuration topk.K = 3 topk.Aggregation = "sum" diff --git a/plugins/serializers/influx/reader_test.go b/plugins/serializers/influx/reader_test.go index 7aaf3fccf41e9..1bb4a3b61cd4e 100644 --- a/plugins/serializers/influx/reader_test.go +++ b/plugins/serializers/influx/reader_test.go @@ -256,14 +256,14 @@ func BenchmarkReader(b *testing.B) { ), ) - metrics := make([]telegraf.Metric, 1000, 1000) + metrics := make([]telegraf.Metric, 1000) for i := range metrics { metrics[i] = m } b.ResetTimer() for i := 0; i < b.N; i++ { - readbuf := make([]byte, 4096, 4096) + readbuf := make([]byte, 4096) serializer := NewSerializer() reader := NewReader(metrics, serializer) for { diff --git a/selfstat/selfstat.go b/selfstat/selfstat.go index f98821fa97c6c..bafd3bd129bd5 100644 --- a/selfstat/selfstat.go +++ b/selfstat/selfstat.go @@ -178,7 +178,6 @@ func (r *Registry) set(key uint64, s Stat) { } r.stats[key][s.FieldName()] = s - return } func key(measurement string, tags map[string]string) uint64 { diff --git a/testutil/accumulator.go b/testutil/accumulator.go index baf09f60f1234..4da3a76fcc8ca 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -235,11 +235,9 @@ func (a *Accumulator) AddError(err error) { } func (a *Accumulator) SetPrecision(_ time.Duration) { - return } func (a *Accumulator) DisablePrecision() { - return } func (a *Accumulator) Debug() bool { @@ -394,7 +392,6 @@ func (a *Accumulator) AssertDoesNotContainsTaggedFields( assert.Fail(t, msg) } } - return } func (a *Accumulator) AssertContainsFields( t *testing.T, diff --git a/testutil/metric.go b/testutil/metric.go index 36ba63af9338f..1fb18991e1558 100644 --- a/testutil/metric.go +++ b/testutil/metric.go @@ -99,16 +99,12 @@ func newMetricDiff(metric telegraf.Metric) *metricDiff { m := &metricDiff{} m.Measurement = metric.Name() - for _, tag := range metric.TagList() { - m.Tags = append(m.Tags, tag) - } + m.Tags = append(m.Tags, metric.TagList()...) sort.Slice(m.Tags, func(i, j int) bool { return m.Tags[i].Key < m.Tags[j].Key }) - for _, field := range metric.FieldList() { - m.Fields = append(m.Fields, field) - } + m.Fields = append(m.Fields, metric.FieldList()...) sort.Slice(m.Fields, func(i, j int) bool { return m.Fields[i].Key < m.Fields[j].Key }) From f26084acf1202f5c186595bc34dade3ece92881b Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 25 Mar 2021 13:15:33 -0700 Subject: [PATCH 340/761] add xpath testers to xml readme (#9049) * add xpath testers to xml readme * fix linking --- plugins/parsers/xml/README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/parsers/xml/README.md b/plugins/parsers/xml/README.md index 93b150703c1bc..02b3c4530ecb6 100644 --- a/plugins/parsers/xml/README.md +++ b/plugins/parsers/xml/README.md @@ -53,7 +53,8 @@ that need to be specified in a `fields_int` section. ``` A configuration can contain muliple *xml* subsections for e.g. the file plugin to process the xml-string multiple times. -Consult the [XPath syntax][xpath] and the [underlying library's functions][xpath lib] for details and help regarding XPath queries. +Consult the [XPath syntax][xpath] and the [underlying library's functions][xpath lib] for details and help regarding XPath queries. Consider using an XPath tester such as [xpather.com][xpather] or [Code Beautify's XPath Tester][xpath tester] for help developing and debugging +your query. Alternatively to the configuration above, fields can also be specified in a batch way. So contrary to specify the fields in a section, you can define a `name` and a `value` selector used to determine the name and value of the fields in the @@ -341,5 +342,7 @@ For each selected *field-node* we use `field_name` and `field_value` to determin [xpath lib]: https://github.com/antchfx/xpath [xml]: https://www.w3.org/XML/ [xpath]: https://www.w3.org/TR/xpath/ +[xpather]: http://xpather.com/ +[xpath tester]: https://codebeautify.org/Xpath-Tester [time const]: https://golang.org/pkg/time/#pkg-constants [time parse]: https://golang.org/pkg/time/#Parse From 565336280ad95969c8431e0b5c34d6ef89c4c58e Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Thu, 25 Mar 2021 18:06:03 -0400 Subject: [PATCH 341/761] moved samara config out of init into connect (#9051) --- plugins/outputs/kafka/kafka.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 5aad62f48e408..d30c730cfac18 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -43,6 +43,7 @@ type Kafka struct { Log telegraf.Logger `toml:"-"` + saramaConfig *sarama.Config producerFunc func(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) producer sarama.SyncProducer @@ -278,6 +279,8 @@ func (k *Kafka) Init() error { return err } + k.saramaConfig = config + // Legacy support ssl config if k.Certificate != "" { k.TLSCert = k.Certificate @@ -285,15 +288,15 @@ func (k *Kafka) Init() error { k.TLSKey = k.Key } - producer, err := k.producerFunc(k.Brokers, config) - if err != nil { - return err - } - k.producer = producer return nil } func (k *Kafka) Connect() error { + producer, err := k.producerFunc(k.Brokers, k.saramaConfig) + if err != nil { + return err + } + k.producer = producer return nil } From fdde9084c106b6dde757edfa3c2cf6e962d782d7 Mon Sep 17 00:00:00 2001 From: i-prudnikov Date: Fri, 26 Mar 2021 18:02:42 +0200 Subject: [PATCH 342/761] AWS Cloudwatch log output (#8639) * Cloudwatch log output * Fixes based on @sspaink review * Make linter happy * iMake LGTM happy, add new tests --- plugins/outputs/all/all.go | 1 + plugins/outputs/cloudwatch_logs/README.md | 78 +++ .../cloudwatch_logs/cloudwatch_logs.go | 440 +++++++++++++++ .../cloudwatch_logs/cloudwatch_logs_test.go | 528 ++++++++++++++++++ 4 files changed, 1047 insertions(+) create mode 100644 plugins/outputs/cloudwatch_logs/README.md create mode 100644 plugins/outputs/cloudwatch_logs/cloudwatch_logs.go create mode 100644 plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index e183242b91343..61270d5ad412e 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -8,6 +8,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/azure_monitor" _ "github.com/influxdata/telegraf/plugins/outputs/cloud_pubsub" _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch" + _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch_logs" _ "github.com/influxdata/telegraf/plugins/outputs/cratedb" _ "github.com/influxdata/telegraf/plugins/outputs/datadog" _ "github.com/influxdata/telegraf/plugins/outputs/discard" diff --git a/plugins/outputs/cloudwatch_logs/README.md b/plugins/outputs/cloudwatch_logs/README.md new file mode 100644 index 0000000000000..26dd3cfafc9a3 --- /dev/null +++ b/plugins/outputs/cloudwatch_logs/README.md @@ -0,0 +1,78 @@ +## Amazon CloudWatch Logs Output for Telegraf + +This plugin will send logs to Amazon CloudWatch. + +## Amazon Authentication + +This plugin uses a credential chain for Authentication with the CloudWatch Logs +API endpoint. In the following order the plugin will attempt to authenticate. +1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules) +2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes +3. Shared profile from `profile` attribute +4. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables) +5. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file) +6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) + +The IAM user needs the following permissions ( https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/permissions-reference-cwl.html): +- `logs:DescribeLogGroups` - required for check if configured log group exist +- `logs:DescribeLogStreams` - required to view all log streams associated with a log group. +- `logs:CreateLogStream` - required to create a new log stream in a log group.) +- `logs:PutLogEvents` - required to upload a batch of log events into log stream. + +## Config +```toml +[[outputs.cloudwatch_logs]] + ## The region is the Amazon region that you wish to connect to. + ## Examples include but are not limited to: + ## - us-west-1 + ## - us-west-2 + ## - us-east-1 + ## - ap-southeast-1 + ## - ap-southeast-2 + ## ... + region = "us-east-1" + + ## Amazon Credentials + ## Credentials are loaded in the following order + ## 1) Assumed credentials via STS if role_arn is specified + ## 2) explicit credentials from 'access_key' and 'secret_key' + ## 3) shared profile from 'profile' + ## 4) environment variables + ## 5) shared credentials file + ## 6) EC2 Instance Profile + #access_key = "" + #secret_key = "" + #token = "" + #role_arn = "" + #profile = "" + #shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + + ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! + ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place + log_group = "my-group-name" + + ## Log stream in log group + ## Either log group name or reference to metric attribute, from which it can be parsed: + ## tag: or field:. If log stream is not exist, it will be created. + ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) + ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) + log_stream = "tag:location" + + ## Source of log data - metric name + ## specify the name of the metric, from which the log data should be retrieved. + ## I.e., if you are using docker_log plugin to stream logs from container, then + ## specify log_data_metric_name = "docker_log" + log_data_metric_name = "docker_log" + + ## Specify from which metric attribute the log data should be retrieved: + ## tag: or field:. + ## I.e., if you are using docker_log plugin to stream logs from container, then + ## specify log_data_source = "field:message" + log_data_source = "field:message" +``` \ No newline at end of file diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go new file mode 100644 index 0000000000000..d1d96b0b33951 --- /dev/null +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go @@ -0,0 +1,440 @@ +package cloudwatch_logs + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/influxdata/telegraf" + internalaws "github.com/influxdata/telegraf/config/aws" + "github.com/influxdata/telegraf/plugins/outputs" +) + +type messageBatch struct { + logEvents []*cloudwatchlogs.InputLogEvent + messageCount int +} +type logStreamContainer struct { + currentBatchSizeBytes int + currentBatchIndex int + messageBatches []messageBatch + sequenceToken string +} + +//Cloudwatch Logs service interface +type cloudWatchLogs interface { + DescribeLogGroups(*cloudwatchlogs.DescribeLogGroupsInput) (*cloudwatchlogs.DescribeLogGroupsOutput, error) + DescribeLogStreams(*cloudwatchlogs.DescribeLogStreamsInput) (*cloudwatchlogs.DescribeLogStreamsOutput, error) + CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) + PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) +} + +// CloudWatchLogs plugin object definition +type CloudWatchLogs struct { + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` + + LogGroup string `toml:"log_group"` + lg *cloudwatchlogs.LogGroup //log group data + + LogStream string `toml:"log_stream"` + lsKey string //log stream source: tag or field + lsSource string //log stream source tag or field name + ls map[string]*logStreamContainer //log stream info + + LDMetricName string `toml:"log_data_metric_name"` + + LDSource string `toml:"log_data_source"` + logDatKey string //log data source (tag or field) + logDataSource string //log data source tag or field name + + svc cloudWatchLogs //cloudwatch logs service + + Log telegraf.Logger `toml:"-"` +} + +const ( + // Log events must comply with the following + // (https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatchlogs/#CloudWatchLogs.PutLogEvents): + maxLogMessageLength = 262144 - awsOverheadPerLogMessageBytes //In bytes + maxBatchSizeBytes = 1048576 // The sum of all event messages in UTF-8, plus 26 bytes for each log event + awsOverheadPerLogMessageBytes = 26 + maxFutureLogEventTimeOffset = time.Hour * 2 // None of the log events in the batch can be more than 2 hours in the future. + + maxPastLogEventTimeOffset = time.Hour * 24 * 14 // None of the log events in the batch can be older than 14 days or older + // than the retention period of the log group. + + maxItemsInBatch = 10000 // The maximum number of log events in a batch is 10,000. + + //maxTimeSpanInBatch = time.Hour * 24 // A batch of log events in a single request cannot span more than 24 hours. + // Otherwise, the operation fails. +) + +var sampleConfig = ` +## The region is the Amazon region that you wish to connect to. +## Examples include but are not limited to: +## - us-west-1 +## - us-west-2 +## - us-east-1 +## - ap-southeast-1 +## - ap-southeast-2 +## ... +region = "us-east-1" + +## Amazon Credentials +## Credentials are loaded in the following order +## 1) Assumed credentials via STS if role_arn is specified +## 2) explicit credentials from 'access_key' and 'secret_key' +## 3) shared profile from 'profile' +## 4) environment variables +## 5) shared credentials file +## 6) EC2 Instance Profile +#access_key = "" +#secret_key = "" +#token = "" +#role_arn = "" +#profile = "" +#shared_credential_file = "" + +## Endpoint to make request against, the correct endpoint is automatically +## determined and this option should only be set if you wish to override the +## default. +## ex: endpoint_url = "http://localhost:8000" +# endpoint_url = "" + +## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! +## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place +log_group = "my-group-name" + +## Log stream in log group +## Either log group name or reference to metric attribute, from which it can be parsed: +## tag: or field:. If log stream is not exist, it will be created. +## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) +## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) +log_stream = "tag:location" + +## Source of log data - metric name +## specify the name of the metric, from which the log data should be retrieved. +## I.e., if you are using docker_log plugin to stream logs from container, then +## specify log_data_metric_name = "docker_log" +log_data_metric_name = "docker_log" + +## Specify from which metric attribute the log data should be retrieved: +## tag: or field:. +## I.e., if you are using docker_log plugin to stream logs from container, then +## specify log_data_source = "field:message" +log_data_source = "field:message" +` + +// SampleConfig returns sample config description for plugin +func (c *CloudWatchLogs) SampleConfig() string { + return sampleConfig +} + +// Description returns one-liner description for plugin +func (c *CloudWatchLogs) Description() string { + return "Configuration for AWS CloudWatchLogs output." +} + +// Init initialize plugin with checking configuration parameters +func (c *CloudWatchLogs) Init() error { + if c.LogGroup == "" { + return fmt.Errorf("log group is not set") + } + + if c.LogStream == "" { + return fmt.Errorf("log stream is not set") + } + + if c.LDMetricName == "" { + return fmt.Errorf("log data metrics name is not set") + } + + if c.LDSource == "" { + return fmt.Errorf("log data source is not set") + } + lsSplitArray := strings.Split(c.LDSource, ":") + if len(lsSplitArray) != 2 { + return fmt.Errorf("log data source is not properly formatted, ':' is missed.\n" + + "Should be 'tag:' or 'field:'") + } + + if lsSplitArray[0] != "tag" && lsSplitArray[0] != "field" { + return fmt.Errorf("log data source is not properly formatted.\n" + + "Should be 'tag:' or 'field:'") + } + + c.logDatKey = lsSplitArray[0] + c.logDataSource = lsSplitArray[1] + c.Log.Debugf("Log data: key '%s', source '%s'...", c.logDatKey, c.logDataSource) + + if c.lsSource == "" { + c.lsSource = c.LogStream + c.Log.Debugf("Log stream '%s'...", c.lsSource) + } + + return nil +} + +// Connect connects plugin with to receiver of metrics +func (c *CloudWatchLogs) Connect() error { + var queryToken *string + var dummyToken = "dummy" + var logGroupsOutput = &cloudwatchlogs.DescribeLogGroupsOutput{NextToken: &dummyToken} + var err error + + credentialConfig := &internalaws.CredentialConfig{ + Region: c.Region, + AccessKey: c.AccessKey, + SecretKey: c.SecretKey, + RoleARN: c.RoleARN, + Profile: c.Profile, + Filename: c.Filename, + Token: c.Token, + EndpointURL: c.EndpointURL, + } + configProvider := credentialConfig.Credentials() + + c.svc = cloudwatchlogs.New(configProvider) + if c.svc == nil { + return fmt.Errorf("can't create cloudwatch logs service endpoint") + } + + //Find log group with name 'c.LogGroup' + if c.lg == nil { //In case connection is not retried, first time + for logGroupsOutput.NextToken != nil { + logGroupsOutput, err = c.svc.DescribeLogGroups( + &cloudwatchlogs.DescribeLogGroupsInput{ + LogGroupNamePrefix: &c.LogGroup, + NextToken: queryToken}) + + if err != nil { + return err + } + queryToken = logGroupsOutput.NextToken + + for _, logGroup := range logGroupsOutput.LogGroups { + if *(logGroup.LogGroupName) == c.LogGroup { + c.Log.Debugf("Found log group %q", c.LogGroup) + c.lg = logGroup + } + } + } + + if c.lg == nil { + return fmt.Errorf("can't find log group %q", c.LogGroup) + } + + lsSplitArray := strings.Split(c.LogStream, ":") + if len(lsSplitArray) > 1 { + if lsSplitArray[0] == "tag" || lsSplitArray[0] == "field" { + c.lsKey = lsSplitArray[0] + c.lsSource = lsSplitArray[1] + c.Log.Debugf("Log stream: key %q, source %q...", c.lsKey, c.lsSource) + } + } + + if c.lsSource == "" { + c.lsSource = c.LogStream + c.Log.Debugf("Log stream %q...", c.lsSource) + } + + c.ls = map[string]*logStreamContainer{} + } + + return nil +} + +// Close closes plugin connection with remote receiver +func (c *CloudWatchLogs) Close() error { + return nil +} + +// Write perform metrics write to receiver of metrics +func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { + minTime := time.Now() + if c.lg.RetentionInDays != nil { + minTime = minTime.Add(-time.Hour * 24 * time.Duration(*c.lg.RetentionInDays)) + } else { + minTime = minTime.Add(-maxPastLogEventTimeOffset) + } + + maxTime := time.Now().Add(maxFutureLogEventTimeOffset) + + for _, m := range metrics { + //Filtering metrics + if m.Name() != c.LDMetricName { + continue + } + + if m.Time().After(maxTime) || m.Time().Before(minTime) { + c.Log.Debugf("Processing metric '%v': Metric is filtered based on TS!", m) + continue + } + + tags := m.Tags() + fields := m.Fields() + + logStream := "" + logData := "" + lsContainer := &logStreamContainer{ + currentBatchSizeBytes: 0, + currentBatchIndex: 0, + messageBatches: []messageBatch{{}}} + + switch c.lsKey { + case "tag": + logStream = tags[c.lsSource] + case "field": + if fields[c.lsSource] != nil { + logStream = fields[c.lsSource].(string) + } + default: + logStream = c.lsSource + } + + if logStream == "" { + c.Log.Errorf("Processing metric '%v': log stream: key %q, source %q, not found!", m, c.lsKey, c.lsSource) + continue + } + + switch c.logDatKey { + case "tag": + logData = tags[c.logDataSource] + case "field": + if fields[c.logDataSource] != nil { + logData = fields[c.logDataSource].(string) + } + } + + if logData == "" { + c.Log.Errorf("Processing metric '%v': log data: key %q, source %q, not found!", m, c.logDatKey, c.logDataSource) + continue + } + + //Check if message size is not fit to batch + if len(logData) > maxLogMessageLength { + metricStr := fmt.Sprintf("%v", m) + c.Log.Errorf("Processing metric '%s...', message is too large to fit to aws max log message size: %d (bytes) !", metricStr[0:maxLogMessageLength/1000], maxLogMessageLength) + continue + } + //Batching log messages + //awsOverheadPerLogMessageBytes - is mandatory aws overhead per each log message + messageSizeInBytesForAWS := len(logData) + awsOverheadPerLogMessageBytes + + //Pick up existing or prepare new log stream container. + //Log stream container stores logs per log stream in + //the AWS Cloudwatch logs API friendly structure + if val, ok := c.ls[logStream]; ok { + lsContainer = val + } else { + lsContainer.messageBatches[0].messageCount = 0 + lsContainer.messageBatches[0].logEvents = []*cloudwatchlogs.InputLogEvent{} + c.ls[logStream] = lsContainer + } + + if lsContainer.currentBatchSizeBytes+messageSizeInBytesForAWS > maxBatchSizeBytes || + lsContainer.messageBatches[lsContainer.currentBatchIndex].messageCount >= maxItemsInBatch { + //Need to start new batch, and reset counters + lsContainer.currentBatchIndex++ + lsContainer.messageBatches = append(lsContainer.messageBatches, + messageBatch{ + logEvents: []*cloudwatchlogs.InputLogEvent{}, + messageCount: 0}) + lsContainer.currentBatchSizeBytes = messageSizeInBytesForAWS + } else { + lsContainer.currentBatchSizeBytes += messageSizeInBytesForAWS + lsContainer.messageBatches[lsContainer.currentBatchIndex].messageCount++ + } + + //AWS need time in milliseconds. time.UnixNano() returns time in nanoseconds since epoch + //we store here TS with nanosec precision iun order to have proper ordering, later ts will be reduced to milliseconds + metricTime := m.Time().UnixNano() + //Adding metring to batch + lsContainer.messageBatches[lsContainer.currentBatchIndex].logEvents = + append(lsContainer.messageBatches[lsContainer.currentBatchIndex].logEvents, + &cloudwatchlogs.InputLogEvent{ + Message: &logData, + Timestamp: &metricTime}) + } + + // Sorting out log events by TS and sending them to cloud watch logs + for logStream, elem := range c.ls { + for index, batch := range elem.messageBatches { + if len(batch.logEvents) == 0 { //can't push empty batch + //c.Log.Warnf("Empty batch detected, skipping...") + continue + } + //Sorting + sort.Slice(batch.logEvents[:], func(i, j int) bool { + return *batch.logEvents[i].Timestamp < *batch.logEvents[j].Timestamp + }) + + putLogEvents := cloudwatchlogs.PutLogEventsInput{LogGroupName: &c.LogGroup, LogStreamName: &logStream} + if elem.sequenceToken == "" { + //This is the first attempt to write to log stream, + //need to check log stream existence and create it if necessary + describeLogStreamOutput, err := c.svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{ + LogGroupName: &c.LogGroup, + LogStreamNamePrefix: &logStream}) + if err == nil && len(describeLogStreamOutput.LogStreams) == 0 { + _, err := c.svc.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: &c.LogGroup, + LogStreamName: &logStream}) + if err != nil { + c.Log.Errorf("Can't create log stream %q in log group. Reason: %v %q.", logStream, c.LogGroup, err) + continue + } + putLogEvents.SequenceToken = nil + } else if err == nil && len(describeLogStreamOutput.LogStreams) == 1 { + putLogEvents.SequenceToken = describeLogStreamOutput.LogStreams[0].UploadSequenceToken + } else if err == nil && len(describeLogStreamOutput.LogStreams) > 1 { //Ambiguity + c.Log.Errorf("More than 1 log stream found with prefix %q in log group %q.", logStream, c.LogGroup) + continue + } else { + c.Log.Errorf("Error describing log streams in log group %q. Reason: %v", c.LogGroup, err) + continue + } + } else { + putLogEvents.SequenceToken = &elem.sequenceToken + } + + //Upload log events + //Adjusting TS to be in align with cloudwatch logs requirements + for _, event := range batch.logEvents { + *event.Timestamp = *event.Timestamp / 1000000 + } + putLogEvents.LogEvents = batch.logEvents + + //There is a quota of 5 requests per second per log stream. Additional + //requests are throttled. This quota can't be changed. + putLogEventsOutput, err := c.svc.PutLogEvents(&putLogEvents) + if err != nil { + c.Log.Errorf("Can't push logs batch to AWS. Reason: %v", err) + continue + } + //Cleanup batch + elem.messageBatches[index] = messageBatch{ + logEvents: []*cloudwatchlogs.InputLogEvent{}, + messageCount: 0} + + elem.sequenceToken = *putLogEventsOutput.NextSequenceToken + } + } + + return nil +} + +func init() { + outputs.Add("cloudwatch_logs", func() telegraf.Output { + return &CloudWatchLogs{} + }) +} diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go new file mode 100644 index 0000000000000..66378969f2ac2 --- /dev/null +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go @@ -0,0 +1,528 @@ +package cloudwatch_logs + +import ( + "fmt" + "math/rand" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +type mockCloudWatchLogs struct { + logStreamName string + pushedLogEvents []cloudwatchlogs.InputLogEvent +} + +func (c *mockCloudWatchLogs) Init(lsName string) { + c.logStreamName = lsName + c.pushedLogEvents = make([]cloudwatchlogs.InputLogEvent, 0) +} + +func (c *mockCloudWatchLogs) DescribeLogGroups(*cloudwatchlogs.DescribeLogGroupsInput) (*cloudwatchlogs.DescribeLogGroupsOutput, error) { + return nil, nil +} + +func (c *mockCloudWatchLogs) DescribeLogStreams(*cloudwatchlogs.DescribeLogStreamsInput) (*cloudwatchlogs.DescribeLogStreamsOutput, error) { + arn := "arn" + creationTime := time.Now().Unix() + sequenceToken := "arbitraryToken" + output := &cloudwatchlogs.DescribeLogStreamsOutput{ + LogStreams: []*cloudwatchlogs.LogStream{ + { + Arn: &arn, + CreationTime: &creationTime, + FirstEventTimestamp: &creationTime, + LastEventTimestamp: &creationTime, + LastIngestionTime: &creationTime, + LogStreamName: &c.logStreamName, + UploadSequenceToken: &sequenceToken, + }}, + NextToken: &sequenceToken, + } + return output, nil +} +func (c *mockCloudWatchLogs) CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { + return nil, nil +} +func (c *mockCloudWatchLogs) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + sequenceToken := "arbitraryToken" + output := &cloudwatchlogs.PutLogEventsOutput{NextSequenceToken: &sequenceToken} + //Saving messages + for _, event := range input.LogEvents { + c.pushedLogEvents = append(c.pushedLogEvents, *event) + } + + return output, nil +} + +//Ensure mockCloudWatchLogs implement cloudWatchLogs interface +var _ cloudWatchLogs = (*mockCloudWatchLogs)(nil) + +func RandStringBytes(n int) string { + const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[rand.Intn(len(letterBytes))] + } + return string(b) +} +func TestInit(t *testing.T) { + tests := []struct { + name string + expectedErrorString string + plugin *CloudWatchLogs + }{ + { + name: "log group is not set", + expectedErrorString: "log group is not set", + plugin: &CloudWatchLogs{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + LogGroup: "", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "field:message", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + { + name: "log stream is not set", + expectedErrorString: "log stream is not set", + plugin: &CloudWatchLogs{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + LogGroup: "TestLogGroup", + LogStream: "", + LDMetricName: "docker_log", + LDSource: "field:message", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + { + name: "log data metrics name is not set", + expectedErrorString: "log data metrics name is not set", + plugin: &CloudWatchLogs{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "", + LDSource: "field:message", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + { + name: "log data source is not set", + expectedErrorString: "log data source is not set", + plugin: &CloudWatchLogs{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + { + name: "log data source is not properly formatted (no divider)", + expectedErrorString: "log data source is not properly formatted, ':' is missed.\n" + + "Should be 'tag:' or 'field:'", + plugin: &CloudWatchLogs{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "field_message", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + { + name: "log data source is not properly formatted (inappropriate fields)", + expectedErrorString: "log data source is not properly formatted.\n" + + "Should be 'tag:' or 'field:'", + plugin: &CloudWatchLogs{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "bla:bla", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + { + name: "valid config", + plugin: &CloudWatchLogs{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "tag:location", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.expectedErrorString != "" { + require.EqualError(t, tt.plugin.Init(), tt.expectedErrorString) + } else { + require.Nil(t, tt.plugin.Init()) + } + }) + } +} + +func TestConnect(t *testing.T) { + //mock cloudwatch logs endpoint that is used only in plugin.Connect + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = fmt.Fprintln(w, + `{ + "logGroups": [ + { + "arn": "string", + "creationTime": 123456789, + "kmsKeyId": "string", + "logGroupName": "TestLogGroup", + "metricFilterCount": 1, + "retentionInDays": 10, + "storedBytes": 0 + } + ] + }`) + })) + defer ts.Close() + + plugin := &CloudWatchLogs{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + EndpointURL: ts.URL, + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "field:message", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + } + + require.Nil(t, plugin.Init()) + require.Nil(t, plugin.Connect()) +} + +func TestWrite(t *testing.T) { + //mock cloudwatch logs endpoint that is used only in plugin.Connect + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = fmt.Fprintln(w, + `{ + "logGroups": [ + { + "arn": "string", + "creationTime": 123456789, + "kmsKeyId": "string", + "logGroupName": "TestLogGroup", + "metricFilterCount": 1, + "retentionInDays": 1, + "storedBytes": 0 + } + ] + }`) + })) + defer ts.Close() + + plugin := &CloudWatchLogs{ + Region: "eu-central-1", + AccessKey: "dummy", + SecretKey: "dummy", + EndpointURL: ts.URL, + LogGroup: "TestLogGroup", + LogStream: "tag:source", + LDMetricName: "docker_log", + LDSource: "field:message", + Log: testutil.Logger{ + Name: "outputs.cloudwatch_logs", + }, + } + require.Nil(t, plugin.Init()) + require.Nil(t, plugin.Connect()) + + tests := []struct { + name string + logStreamName string + metrics []telegraf.Metric + expectedMetricsOrder map[int]int //map[] + expectedMetricsCount int + }{ + { + name: "Sorted by timestamp log entries", + logStreamName: "deadbeef", + expectedMetricsOrder: map[int]int{0: 0, 1: 1}, + expectedMetricsCount: 2, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "Sorted: message #1", + }, + time.Now().Add(-time.Minute), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "Sorted: message #2", + }, + time.Now(), + ), + }, + }, + { + name: "Unsorted log entries", + logStreamName: "deadbeef", + expectedMetricsOrder: map[int]int{0: 1, 1: 0}, + expectedMetricsCount: 2, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "Unsorted: message #1", + }, + time.Now(), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "Unsorted: message #2", + }, + time.Now().Add(-time.Minute), + ), + }, + }, + { + name: "Too old log entry & log entry in the future", + logStreamName: "deadbeef", + expectedMetricsCount: 0, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "message #1", + }, + time.Now().Add(-maxPastLogEventTimeOffset).Add(-time.Hour), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "message #2", + }, + time.Now().Add(maxFutureLogEventTimeOffset).Add(time.Hour), + ), + }, + }, + { + name: "Oversized log entry", + logStreamName: "deadbeef", + expectedMetricsCount: 0, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + //Here comes very long message + "message": RandStringBytes(maxLogMessageLength + 1), + }, + time.Now().Add(-time.Minute), + ), + }, + }, + { + name: "Batching log entries", + logStreamName: "deadbeef", + expectedMetricsOrder: map[int]int{0: 0, 1: 1, 2: 2, 3: 3, 4: 4}, + expectedMetricsCount: 5, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + //Here comes very long message to cause message batching + "message": "batch1 message1:" + RandStringBytes(maxLogMessageLength-16), + }, + time.Now().Add(-4*time.Minute), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + //Here comes very long message to cause message batching + "message": "batch1 message2:" + RandStringBytes(maxLogMessageLength-16), + }, + time.Now().Add(-3*time.Minute), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + //Here comes very long message to cause message batching + "message": "batch1 message3:" + RandStringBytes(maxLogMessageLength-16), + }, + time.Now().Add(-2*time.Minute), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + //Here comes very long message to cause message batching + "message": "batch1 message4:" + RandStringBytes(maxLogMessageLength-16), + }, + time.Now().Add(-time.Minute), + ), + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "batch2 message1", + }, + time.Now(), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + //Overwrite cloud watch log endpoint + mockCwl := &mockCloudWatchLogs{} + mockCwl.Init(tt.logStreamName) + plugin.svc = mockCwl + require.Nil(t, plugin.Write(tt.metrics)) + require.Equal(t, tt.expectedMetricsCount, len(mockCwl.pushedLogEvents)) + + for index, elem := range mockCwl.pushedLogEvents { + require.Equal(t, *elem.Message, tt.metrics[tt.expectedMetricsOrder[index]].Fields()["message"]) + require.Equal(t, *elem.Timestamp, tt.metrics[tt.expectedMetricsOrder[index]].Time().UnixNano()/1000000) + } + }) + } +} From 61ea585533aeba837d22e38714fa853d299917c3 Mon Sep 17 00:00:00 2001 From: peter-volkov Date: Fri, 26 Mar 2021 20:27:53 +0300 Subject: [PATCH 343/761] use correct compute metadata url to get folder-id (#9056) --- .../outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go index 95b0bda0f44ea..b3578be904da0 100644 --- a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go @@ -60,7 +60,7 @@ const ( defaultRequestTimeout = time.Second * 20 defaultEndpointURL = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" defaultMetadataTokenURL = "http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token" - defaultMetadataFolderURL = "http://169.254.169.254/computeMetadata/v1/instance/attributes/folder-id" + defaultMetadataFolderURL = "http://169.254.169.254/computeMetadata/v1/yandex/folder-id" ) var sampleConfig = ` @@ -235,6 +235,7 @@ func (a *YandexCloudMonitoring) send(body []byte) error { req.Header.Set("Authorization", "Bearer "+a.IAMToken) a.Log.Debugf("sending metrics to %s", req.URL.String()) + a.Log.Debugf("body: %s", body) resp, err := a.client.Do(req) if err != nil { return err From e6165ecd182da7e49b3b965724114fb3463a9ac7 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Mon, 29 Mar 2021 10:40:28 -0400 Subject: [PATCH 344/761] readme fix (#9064) closes #9060 --- plugins/processors/execd/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/processors/execd/README.md b/plugins/processors/execd/README.md index 79c354bdd4dec..6f8d376a01171 100644 --- a/plugins/processors/execd/README.md +++ b/plugins/processors/execd/README.md @@ -23,7 +23,7 @@ Telegraf minimum version: Telegraf 1.15.0 ### Configuration: ```toml -[[processor.execd]] +[[processors.execd]] ## One program to run as daemon. ## NOTE: process and each argument should each be their own string ## eg: command = ["/path/to/your_program", "arg1", "arg2"] From 871447b22ce3d076dd20b0ed2ef435adce6f47ae Mon Sep 17 00:00:00 2001 From: Mattias Jiderhamn Date: Mon, 29 Mar 2021 17:22:36 +0200 Subject: [PATCH 345/761] input/sqlserver: Add service and save connection pools (#8596) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 2 +- go.sum | 10 +-- plugins/inputs/sqlserver/sqlserver.go | 73 +++++++++++++--------- plugins/inputs/sqlserver/sqlserver_test.go | 48 ++++++++------ 5 files changed, 80 insertions(+), 54 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 8a05ab298f471..0e2d31cb99ec6 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -79,6 +79,7 @@ following works: - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) - github.com/gogo/googleapis [Apache License 2.0](https://github.com/gogo/googleapis/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang-sql/civil [Apache License 2.0](https://github.com/golang-sql/civil/blob/master/LICENSE) - github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE) - github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE) - github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 5e8c475cc217a..6bf359ff70ae2 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect - github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 + github.com/denisenkom/go-mssqldb v0.9.0 github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible diff --git a/go.sum b/go.sum index 3f8218900e265..2fcbe42fd5fde 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -260,8 +259,8 @@ github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhr github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o= -github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/denisenkom/go-mssqldb v0.9.0 h1:RSohk2RsiZqLZ0zCjtfn3S4Gp4exhpBWHyQ7D0yGjAk= +github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= @@ -483,6 +482,8 @@ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= @@ -1317,6 +1318,7 @@ golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1345,6 +1347,7 @@ golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1462,7 +1465,6 @@ google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuh google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 75e52e6e8ed9f..db499a7472578 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -15,15 +15,15 @@ import ( // SQLServer struct type SQLServer struct { - Servers []string `toml:"servers"` - QueryVersion int `toml:"query_version"` - AzureDB bool `toml:"azuredb"` - DatabaseType string `toml:"database_type"` - IncludeQuery []string `toml:"include_query"` - ExcludeQuery []string `toml:"exclude_query"` - HealthMetric bool `toml:"health_metric"` - queries MapQuery - isInitialized bool + Servers []string `toml:"servers"` + QueryVersion int `toml:"query_version"` + AzureDB bool `toml:"azuredb"` + DatabaseType string `toml:"database_type"` + IncludeQuery []string `toml:"include_query"` + ExcludeQuery []string `toml:"exclude_query"` + HealthMetric bool `toml:"health_metric"` + pools []*sql.DB + queries MapQuery } // Query struct @@ -223,8 +223,6 @@ func initQueries(s *SQLServer) error { } } - // Set a flag so we know that queries have already been initialized - s.isInitialized = true var querylist []string for query := range queries { querylist = append(querylist, query) @@ -236,32 +234,25 @@ func initQueries(s *SQLServer) error { // Gather collect data from SQL Server func (s *SQLServer) Gather(acc telegraf.Accumulator) error { - if !s.isInitialized { - if err := initQueries(s); err != nil { - acc.AddError(err) - return err - } - } - var wg sync.WaitGroup var mutex sync.Mutex var healthMetrics = make(map[string]*HealthMetric) - for _, serv := range s.Servers { + for i, pool := range s.pools { for _, query := range s.queries { wg.Add(1) - go func(serv string, query Query) { + go func(pool *sql.DB, query Query, serverIndex int) { defer wg.Done() - queryError := s.gatherServer(serv, query, acc) + queryError := s.gatherServer(pool, query, acc) if s.HealthMetric { mutex.Lock() - s.gatherHealth(healthMetrics, serv, queryError) + s.gatherHealth(healthMetrics, s.Servers[serverIndex], queryError) mutex.Unlock() } acc.AddError(queryError) - }(serv, query) + }(pool, query, i) } } @@ -274,16 +265,40 @@ func (s *SQLServer) Gather(acc telegraf.Accumulator) error { return nil } -func (s *SQLServer) gatherServer(server string, query Query, acc telegraf.Accumulator) error { - // deferred opening - conn, err := sql.Open("mssql", server) - if err != nil { +// Start initialize a list of connection pools +func (s *SQLServer) Start(acc telegraf.Accumulator) error { + if err := initQueries(s); err != nil { + acc.AddError(err) return err } - defer conn.Close() + if len(s.Servers) == 0 { + s.Servers = append(s.Servers, defaultServer) + } + + for _, serv := range s.Servers { + pool, err := sql.Open("mssql", serv) + if err != nil { + acc.AddError(err) + return err + } + + s.pools = append(s.pools, pool) + } + + return nil +} + +// Stop cleanup server connection pools +func (s *SQLServer) Stop() { + for _, pool := range s.pools { + _ = pool.Close() + } +} + +func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumulator) error { // execute query - rows, err := conn.Query(query.Script) + rows, err := pool.Query(query.Script) if err != nil { return fmt.Errorf("Script %s failed: %w", query.ScriptName, err) //return err diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index 0e23c8635fcaa..d8ab33b71bf4d 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -124,15 +124,13 @@ func TestSqlServer_MultipleInstanceIntegration(t *testing.T) { } var acc, acc2 testutil.Accumulator + require.NoError(t, s.Start(&acc)) err := s.Gather(&acc) require.NoError(t, err) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, false) + require.NoError(t, s2.Start(&acc2)) err = s2.Gather(&acc2) require.NoError(t, err) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, true) // acc includes size metrics, and excludes memory metrics assert.False(t, acc.HasMeasurement("Memory breakdown (%)")) @@ -141,6 +139,9 @@ func TestSqlServer_MultipleInstanceIntegration(t *testing.T) { // acc2 includes memory metrics, and excludes size metrics assert.True(t, acc2.HasMeasurement("Memory breakdown (%)")) assert.False(t, acc2.HasMeasurement("Log size (bytes)")) + + s.Stop() + s2.Stop() } func TestSqlServer_MultipleInstanceWithHealthMetricIntegration(t *testing.T) { @@ -162,15 +163,13 @@ func TestSqlServer_MultipleInstanceWithHealthMetricIntegration(t *testing.T) { } var acc, acc2 testutil.Accumulator + require.NoError(t, s.Start(&acc)) err := s.Gather(&acc) require.NoError(t, err) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, false) + require.NoError(t, s2.Start(&acc)) err = s2.Gather(&acc2) require.NoError(t, err) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, true) // acc includes size metrics, and excludes memory metrics and the health metric assert.False(t, acc.HasMeasurement(healthMetricName)) @@ -186,6 +185,9 @@ func TestSqlServer_MultipleInstanceWithHealthMetricIntegration(t *testing.T) { tags := map[string]string{healthMetricInstanceTag: sqlInstance, healthMetricDatabaseTag: database} assert.True(t, acc2.HasPoint(healthMetricName, tags, healthMetricAttemptedQueries, 9)) assert.True(t, acc2.HasPoint(healthMetricName, tags, healthMetricSuccessfulQueries, 9)) + + s.Stop() + s2.Stop() } func TestSqlServer_HealthMetric(t *testing.T) { @@ -205,6 +207,7 @@ func TestSqlServer_HealthMetric(t *testing.T) { // acc1 should have the health metric because it is specified in the config var acc1 testutil.Accumulator + require.NoError(t, s1.Start(&acc1)) s1.Gather(&acc1) assert.True(t, acc1.HasMeasurement(healthMetricName)) @@ -222,8 +225,12 @@ func TestSqlServer_HealthMetric(t *testing.T) { // acc2 should not have the health metric because it is not specified in the config var acc2 testutil.Accumulator + require.NoError(t, s2.Start(&acc2)) s2.Gather(&acc2) assert.False(t, acc2.HasMeasurement(healthMetricName)) + + s1.Stop() + s2.Stop() } func TestSqlServer_MultipleInit(t *testing.T) { @@ -236,15 +243,14 @@ func TestSqlServer_MultipleInit(t *testing.T) { _, ok := s.queries["DatabaseSize"] // acc includes size metrics assert.True(t, ok) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, false) initQueries(s2) _, ok = s2.queries["DatabaseSize"] // acc2 excludes size metrics assert.False(t, ok) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, true) + + s.Stop() + s2.Stop() } func TestSqlServer_ConnectionString(t *testing.T) { @@ -349,15 +355,13 @@ func TestSqlServer_AGQueriesApplicableForDatabaseTypeSQLServer(t *testing.T) { } var acc, acc2 testutil.Accumulator + require.NoError(t, s.Start(&acc)) err := s.Gather(&acc) require.NoError(t, err) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, false) err = s2.Gather(&acc2) + require.NoError(t, s2.Start(&acc)) require.NoError(t, err) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, true) // acc includes size metrics, and excludes memory metrics assert.True(t, acc.HasMeasurement("sqlserver_hadr_replica_states")) @@ -366,6 +370,9 @@ func TestSqlServer_AGQueriesApplicableForDatabaseTypeSQLServer(t *testing.T) { // acc2 includes memory metrics, and excludes size metrics assert.False(t, acc2.HasMeasurement("sqlserver_hadr_replica_states")) assert.False(t, acc2.HasMeasurement("sqlserver_hadr_dbreplica_states")) + + s.Stop() + s2.Stop() } func TestSqlServer_AGQueryFieldsOutputBasedOnSQLServerVersion(t *testing.T) { @@ -390,15 +397,13 @@ func TestSqlServer_AGQueryFieldsOutputBasedOnSQLServerVersion(t *testing.T) { } var acc2019, acc2012 testutil.Accumulator + require.NoError(t, s2019.Start(&acc2019)) err := s2019.Gather(&acc2019) require.NoError(t, err) - assert.Equal(t, s2019.isInitialized, true) - assert.Equal(t, s2012.isInitialized, false) err = s2012.Gather(&acc2012) + require.NoError(t, s2012.Start(&acc2012)) require.NoError(t, err) - assert.Equal(t, s2019.isInitialized, true) - assert.Equal(t, s2012.isInitialized, true) // acc2019 includes new HADR query fields assert.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "basic_features")) @@ -415,6 +420,9 @@ func TestSqlServer_AGQueryFieldsOutputBasedOnSQLServerVersion(t *testing.T) { assert.False(t, acc2012.HasTag("sqlserver_hadr_replica_states", "seeding_mode_desc")) assert.False(t, acc2012.HasField("sqlserver_hadr_dbreplica_states", "is_primary_replica")) assert.False(t, acc2012.HasField("sqlserver_hadr_dbreplica_states", "secondary_lag_seconds")) + + s2019.Stop() + s2012.Stop() } const mockPerformanceMetrics = `measurement;servername;type;Point In Time Recovery;Available physical memory (bytes);Average pending disk IO;Average runnable tasks;Average tasks;Buffer pool rate (bytes/sec);Connection memory per connection (bytes);Memory grant pending;Page File Usage (%);Page lookup per batch request;Page split per batch request;Readahead per page read;Signal wait (%);Sql compilation per batch request;Sql recompilation per batch request;Total target memory ratio From ffe92a7658d31e99750bcf81e87b94e849ed9364 Mon Sep 17 00:00:00 2001 From: Thomas Casteleyn Date: Tue, 30 Mar 2021 01:05:19 +0200 Subject: [PATCH 346/761] Don't walk the entire interface table to just retrieve one field (#9043) --- plugins/processors/ifname/ifname.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index 10cf38a3cec8a..52a9161b0f6fe 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -201,13 +201,13 @@ func (d *IfName) Start(acc telegraf.Accumulator) error { return fmt.Errorf("parsing SNMP client config: %w", err) } - d.ifTable, err = d.makeTable("IF-MIB::ifTable") + d.ifTable, err = d.makeTable("IF-MIB::ifDescr") if err != nil { - return fmt.Errorf("looking up ifTable in local MIB: %w", err) + return fmt.Errorf("looking up ifDescr in local MIB: %w", err) } - d.ifXTable, err = d.makeTable("IF-MIB::ifXTable") + d.ifXTable, err = d.makeTable("IF-MIB::ifName") if err != nil { - return fmt.Errorf("looking up ifXTable in local MIB: %w", err) + return fmt.Errorf("looking up ifName in local MIB: %w", err) } fn := func(m telegraf.Metric) []telegraf.Metric { @@ -347,11 +347,13 @@ func init() { }) } -func makeTableNoMock(tableName string) (*si.Table, error) { +func makeTableNoMock(fieldName string) (*si.Table, error) { var err error tab := si.Table{ - Oid: tableName, IndexAsTag: true, + Fields: []si.Field{ + {Oid: fieldName}, + }, } err = tab.Init() From 470628e5db5a0597999fb74e2c5cc169b40ad761 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Tue, 30 Mar 2021 11:17:58 -0400 Subject: [PATCH 347/761] Add ~200 more Couchbase metrics via Buckets endpoint (#9032) * Couchbase new metrics for buckets * updated * update to simplify two different modes. * cleanup code * linter telling me to fix things I didn't do * add include/exclude filter * update sample config * appease the linter * Update README.md * make all metrics configurable, defaults as just 'basic bucket metrics' * check error in test --- plugins/inputs/couchbase/README.md | 224 ++++++++++++++- plugins/inputs/couchbase/couchbase.go | 318 +++++++++++++++++++-- plugins/inputs/couchbase/couchbase_data.go | 228 +++++++++++++++ plugins/inputs/couchbase/couchbase_test.go | 66 ++++- 4 files changed, 813 insertions(+), 23 deletions(-) create mode 100644 plugins/inputs/couchbase/couchbase_data.go diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 659b87c3a1fb6..2c777e17a9ed0 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -1,4 +1,6 @@ # Couchbase Input Plugin +Couchbase is a distributed NoSQL database. +This plugin gets metrics for each Couchbase node, as well as detailed metrics for each bucket, for a given couchbase server. ## Configuration: @@ -15,6 +17,9 @@ ## If no protocol is specified, HTTP is used. ## If no port is specified, 8091 is used. servers = ["http://localhost:8091"] + + ## Filter bucket fields to include only here. + # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] ``` ## Measurements: @@ -35,7 +40,7 @@ Tags: - cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/`) - bucket: the name of the couchbase bucket, e.g., `blastro-df` -Fields: +Default bucket fields: - quota_percent_used (unit: percent, example: 68.85424936294555) - ops_per_sec (unit: count, example: 5686.789686789687) - disk_fetches (unit: count, example: 0.0) @@ -44,6 +49,223 @@ Fields: - data_used (unit: bytes, example: 212179309111.0) - mem_used (unit: bytes, example: 202156957464.0) +Additional fields that can be configured with the `bucket_stats_included` option: +- couch_total_disk_size +- couch_docs_fragmentation +- couch_views_fragmentation +- hit_ratio +- ep_cache_miss_rate +- ep_resident_items_rate +- vb_avg_active_queue_age +- vb_avg_replica_queue_age +- vb_avg_pending_queue_age +- vb_avg_total_queue_age +- vb_active_resident_items_ratio +- vb_replica_resident_items_ratio +- vb_pending_resident_items_ratio +- avg_disk_update_time +- avg_disk_commit_time +- avg_bg_wait_time +- avg_active_timestamp_drift +- avg_replica_timestamp_drift +- ep_dcp_views+indexes_count +- ep_dcp_views+indexes_items_remaining +- ep_dcp_views+indexes_producer_count +- ep_dcp_views+indexes_total_backlog_size +- ep_dcp_views+indexes_items_sent +- ep_dcp_views+indexes_total_bytes +- ep_dcp_views+indexes_backoff +- bg_wait_count +- bg_wait_total +- bytes_read +- bytes_written +- cas_badval +- cas_hits +- cas_misses +- cmd_get +- cmd_lookup +- cmd_set +- couch_docs_actual_disk_size +- couch_docs_data_size +- couch_docs_disk_size +- couch_spatial_data_size +- couch_spatial_disk_size +- couch_spatial_ops +- couch_views_actual_disk_size +- couch_views_data_size +- couch_views_disk_size +- couch_views_ops +- curr_connections +- curr_items +- curr_items_tot +- decr_hits +- decr_misses +- delete_hits +- delete_misses +- disk_commit_count +- disk_commit_total +- disk_update_count +- disk_update_total +- disk_write_queue +- ep_active_ahead_exceptions +- ep_active_hlc_drift +- ep_active_hlc_drift_count +- ep_bg_fetched +- ep_clock_cas_drift_threshold_exceeded +- ep_data_read_failed +- ep_data_write_failed +- ep_dcp_2i_backoff +- ep_dcp_2i_count +- ep_dcp_2i_items_remaining +- ep_dcp_2i_items_sent +- ep_dcp_2i_producer_count +- ep_dcp_2i_total_backlog_size +- ep_dcp_2i_total_bytes +- ep_dcp_cbas_backoff +- ep_dcp_cbas_count +- ep_dcp_cbas_items_remaining +- ep_dcp_cbas_items_sent +- ep_dcp_cbas_producer_count +- ep_dcp_cbas_total_backlog_size +- ep_dcp_cbas_total_bytes +- ep_dcp_eventing_backoff +- ep_dcp_eventing_count +- ep_dcp_eventing_items_remaining +- ep_dcp_eventing_items_sent +- ep_dcp_eventing_producer_count +- ep_dcp_eventing_total_backlog_size +- ep_dcp_eventing_total_bytes +- ep_dcp_fts_backoff +- ep_dcp_fts_count +- ep_dcp_fts_items_remaining +- ep_dcp_fts_items_sent +- ep_dcp_fts_producer_count +- ep_dcp_fts_total_backlog_size +- ep_dcp_fts_total_bytes +- ep_dcp_other_backoff +- ep_dcp_other_count +- ep_dcp_other_items_remaining +- ep_dcp_other_items_sent +- ep_dcp_other_producer_count +- ep_dcp_other_total_backlog_size +- ep_dcp_other_total_bytes +- ep_dcp_replica_backoff +- ep_dcp_replica_count +- ep_dcp_replica_items_remaining +- ep_dcp_replica_items_sent +- ep_dcp_replica_producer_count +- ep_dcp_replica_total_backlog_size +- ep_dcp_replica_total_bytes +- ep_dcp_views_backoff +- ep_dcp_views_count +- ep_dcp_views_items_remaining +- ep_dcp_views_items_sent +- ep_dcp_views_producer_count +- ep_dcp_views_total_backlog_size +- ep_dcp_views_total_bytes +- ep_dcp_xdcr_backoff +- ep_dcp_xdcr_count +- ep_dcp_xdcr_items_remaining +- ep_dcp_xdcr_items_sent +- ep_dcp_xdcr_producer_count +- ep_dcp_xdcr_total_backlog_size +- ep_dcp_xdcr_total_bytes +- ep_diskqueue_drain +- ep_diskqueue_fill +- ep_diskqueue_items +- ep_flusher_todo +- ep_item_commit_failed +- ep_kv_size +- ep_max_size +- ep_mem_high_wat +- ep_mem_low_wat +- ep_meta_data_memory +- ep_num_non_resident +- ep_num_ops_del_meta +- ep_num_ops_del_ret_meta +- ep_num_ops_get_meta +- ep_num_ops_set_meta +- ep_num_ops_set_ret_meta +- ep_num_value_ejects +- ep_oom_errors +- ep_ops_create +- ep_ops_update +- ep_overhead +- ep_queue_size +- ep_replica_ahead_exceptions +- ep_replica_hlc_drift +- ep_replica_hlc_drift_count +- ep_tmp_oom_errors +- ep_vb_total +- evictions +- get_hits +- get_misses +- incr_hits +- incr_misses +- mem_used +- misses +- ops +- timestamp +- vb_active_eject +- vb_active_itm_memory +- vb_active_meta_data_memory +- vb_active_num +- vb_active_num_non_resident +- vb_active_ops_create +- vb_active_ops_update +- vb_active_queue_age +- vb_active_queue_drain +- vb_active_queue_fill +- vb_active_queue_size +- vb_active_sync_write_aborted_count +- vb_active_sync_write_accepted_count +- vb_active_sync_write_committed_count +- vb_pending_curr_items +- vb_pending_eject +- vb_pending_itm_memory +- vb_pending_meta_data_memory +- vb_pending_num +- vb_pending_num_non_resident +- vb_pending_ops_create +- vb_pending_ops_update +- vb_pending_queue_age +- vb_pending_queue_drain +- vb_pending_queue_fill +- vb_pending_queue_size +- vb_replica_curr_items +- vb_replica_eject +- vb_replica_itm_memory +- vb_replica_meta_data_memory +- vb_replica_num +- vb_replica_num_non_resident +- vb_replica_ops_create +- vb_replica_ops_update +- vb_replica_queue_age +- vb_replica_queue_drain +- vb_replica_queue_fill +- vb_replica_queue_size +- vb_total_queue_age +- xdc_ops +- allocstall +- cpu_cores_available +- cpu_irq_rate +- cpu_stolen_rate +- cpu_sys_rate +- cpu_user_rate +- cpu_utilization_rate +- hibernated_requests +- hibernated_waked +- mem_actual_free +- mem_actual_used +- mem_free +- mem_limit +- mem_total +- mem_used_sys +- odp_report_failed +- rest_requests +- swap_total +- swap_used + ## Example output diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index de7f0bec0c9fa..ef66cb8d1d053 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -1,16 +1,24 @@ package couchbase import ( + "encoding/json" + "net/http" "regexp" "sync" + "time" couchbase "github.com/couchbase/go-couchbase" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" ) type Couchbase struct { Servers []string + + BucketStatsIncluded []string `toml:"bucket_stats_included"` + + bucketInclude filter.Filter } var sampleConfig = ` @@ -24,33 +32,35 @@ var sampleConfig = ` ## If no protocol is specified, HTTP is used. ## If no port is specified, 8091 is used. servers = ["http://localhost:8091"] + + ## Filter fields to include only here. + # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] ` var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`) +var client = &http.Client{Timeout: 10 * time.Second} -func (r *Couchbase) SampleConfig() string { +func (cb *Couchbase) SampleConfig() string { return sampleConfig } -func (r *Couchbase) Description() string { +func (cb *Couchbase) Description() string { return "Read metrics from one or many couchbase clusters" } // Reads stats from all configured clusters. Accumulates stats. // Returns one of the errors encountered while gathering stats (if any). -func (r *Couchbase) Gather(acc telegraf.Accumulator) error { - if len(r.Servers) == 0 { - r.gatherServer("http://localhost:8091/", acc, nil) - return nil +func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { + if len(cb.Servers) == 0 { + return cb.gatherServer("http://localhost:8091/", acc, nil) } var wg sync.WaitGroup - - for _, serv := range r.Servers { + for _, serv := range cb.Servers { wg.Add(1) go func(serv string) { defer wg.Done() - acc.AddError(r.gatherServer(serv, acc, nil)) + acc.AddError(cb.gatherServer(serv, acc, nil)) }(serv) } @@ -59,7 +69,7 @@ func (r *Couchbase) Gather(acc telegraf.Accumulator) error { return nil } -func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error { +func (cb *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error { if pool == nil { client, err := couchbase.Connect(addr) if err != nil { @@ -89,20 +99,292 @@ func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *co tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "bucket": bucketName} bs := pool.BucketMap[bucketName].BasicStats fields := make(map[string]interface{}) - fields["quota_percent_used"] = bs["quotaPercentUsed"] - fields["ops_per_sec"] = bs["opsPerSec"] - fields["disk_fetches"] = bs["diskFetches"] - fields["item_count"] = bs["itemCount"] - fields["disk_used"] = bs["diskUsed"] - fields["data_used"] = bs["dataUsed"] - fields["mem_used"] = bs["memUsed"] + cb.addBucketField(fields, "quota_percent_used", bs["quotaPercentUsed"]) + cb.addBucketField(fields, "ops_per_sec", bs["opsPerSec"]) + cb.addBucketField(fields, "disk_fetches", bs["diskFetches"]) + cb.addBucketField(fields, "item_count", bs["itemCount"]) + cb.addBucketField(fields, "disk_used", bs["diskUsed"]) + cb.addBucketField(fields, "data_used", bs["dataUsed"]) + cb.addBucketField(fields, "mem_used", bs["memUsed"]) + + err := cb.gatherDetailedBucketStats(addr, bucketName, fields) + if err != nil { + return err + } + acc.AddFields("couchbase_bucket", fields, tags) } + + return nil +} + +func (cb *Couchbase) gatherDetailedBucketStats(server, bucket string, fields map[string]interface{}) error { + extendedBucketStats := &BucketStats{} + err := cb.queryDetailedBucketStats(server, bucket, extendedBucketStats) + if err != nil { + return err + } + + // Use length of any set of metrics, they will all be the same length. + lastEntry := len(extendedBucketStats.Op.Samples.CouchTotalDiskSize) - 1 + cb.addBucketField(fields, "couch_total_disk_size", extendedBucketStats.Op.Samples.CouchTotalDiskSize[lastEntry]) + cb.addBucketField(fields, "couch_docs_fragmentation", extendedBucketStats.Op.Samples.CouchDocsFragmentation[lastEntry]) + cb.addBucketField(fields, "couch_views_fragmentation", extendedBucketStats.Op.Samples.CouchViewsFragmentation[lastEntry]) + cb.addBucketField(fields, "hit_ratio", extendedBucketStats.Op.Samples.HitRatio[lastEntry]) + cb.addBucketField(fields, "ep_cache_miss_rate", extendedBucketStats.Op.Samples.EpCacheMissRate[lastEntry]) + cb.addBucketField(fields, "ep_resident_items_rate", extendedBucketStats.Op.Samples.EpResidentItemsRate[lastEntry]) + cb.addBucketField(fields, "vb_avg_active_queue_age", extendedBucketStats.Op.Samples.VbAvgActiveQueueAge[lastEntry]) + cb.addBucketField(fields, "vb_avg_replica_queue_age", extendedBucketStats.Op.Samples.VbAvgReplicaQueueAge[lastEntry]) + cb.addBucketField(fields, "vb_avg_pending_queue_age", extendedBucketStats.Op.Samples.VbAvgPendingQueueAge[lastEntry]) + cb.addBucketField(fields, "vb_avg_total_queue_age", extendedBucketStats.Op.Samples.VbAvgTotalQueueAge[lastEntry]) + cb.addBucketField(fields, "vb_active_resident_items_ratio", extendedBucketStats.Op.Samples.VbActiveResidentItemsRatio[lastEntry]) + cb.addBucketField(fields, "vb_replica_resident_items_ratio", extendedBucketStats.Op.Samples.VbReplicaResidentItemsRatio[lastEntry]) + cb.addBucketField(fields, "vb_pending_resident_items_ratio", extendedBucketStats.Op.Samples.VbPendingResidentItemsRatio[lastEntry]) + cb.addBucketField(fields, "avg_disk_update_time", extendedBucketStats.Op.Samples.AvgDiskUpdateTime[lastEntry]) + cb.addBucketField(fields, "avg_disk_commit_time", extendedBucketStats.Op.Samples.AvgDiskCommitTime[lastEntry]) + cb.addBucketField(fields, "avg_bg_wait_time", extendedBucketStats.Op.Samples.AvgBgWaitTime[lastEntry]) + cb.addBucketField(fields, "avg_active_timestamp_drift", extendedBucketStats.Op.Samples.AvgActiveTimestampDrift[lastEntry]) + cb.addBucketField(fields, "avg_replica_timestamp_drift", extendedBucketStats.Op.Samples.AvgReplicaTimestampDrift[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views+indexes_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views+indexes_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsRemaining[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views+indexes_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesProducerCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views+indexes_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBacklogSize[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views+indexes_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsSent[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views+indexes_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBytes[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views+indexes_backoff", extendedBucketStats.Op.Samples.EpDcpViewsIndexesBackoff[lastEntry]) + cb.addBucketField(fields, "bg_wait_count", extendedBucketStats.Op.Samples.BgWaitCount[lastEntry]) + cb.addBucketField(fields, "bg_wait_total", extendedBucketStats.Op.Samples.BgWaitTotal[lastEntry]) + cb.addBucketField(fields, "bytes_read", extendedBucketStats.Op.Samples.BytesRead[lastEntry]) + cb.addBucketField(fields, "bytes_written", extendedBucketStats.Op.Samples.BytesWritten[lastEntry]) + cb.addBucketField(fields, "cas_badval", extendedBucketStats.Op.Samples.CasBadval[lastEntry]) + cb.addBucketField(fields, "cas_hits", extendedBucketStats.Op.Samples.CasHits[lastEntry]) + cb.addBucketField(fields, "cas_misses", extendedBucketStats.Op.Samples.CasMisses[lastEntry]) + cb.addBucketField(fields, "cmd_get", extendedBucketStats.Op.Samples.CmdGet[lastEntry]) + cb.addBucketField(fields, "cmd_lookup", extendedBucketStats.Op.Samples.CmdLookup[lastEntry]) + cb.addBucketField(fields, "cmd_set", extendedBucketStats.Op.Samples.CmdSet[lastEntry]) + cb.addBucketField(fields, "couch_docs_actual_disk_size", extendedBucketStats.Op.Samples.CouchDocsActualDiskSize[lastEntry]) + cb.addBucketField(fields, "couch_docs_data_size", extendedBucketStats.Op.Samples.CouchDocsDataSize[lastEntry]) + cb.addBucketField(fields, "couch_docs_disk_size", extendedBucketStats.Op.Samples.CouchDocsDiskSize[lastEntry]) + cb.addBucketField(fields, "couch_spatial_data_size", extendedBucketStats.Op.Samples.CouchSpatialDataSize[lastEntry]) + cb.addBucketField(fields, "couch_spatial_disk_size", extendedBucketStats.Op.Samples.CouchSpatialDiskSize[lastEntry]) + cb.addBucketField(fields, "couch_spatial_ops", extendedBucketStats.Op.Samples.CouchSpatialOps[lastEntry]) + cb.addBucketField(fields, "couch_views_actual_disk_size", extendedBucketStats.Op.Samples.CouchViewsActualDiskSize[lastEntry]) + cb.addBucketField(fields, "couch_views_data_size", extendedBucketStats.Op.Samples.CouchViewsDataSize[lastEntry]) + cb.addBucketField(fields, "couch_views_disk_size", extendedBucketStats.Op.Samples.CouchViewsDiskSize[lastEntry]) + cb.addBucketField(fields, "couch_views_ops", extendedBucketStats.Op.Samples.CouchViewsOps[lastEntry]) + cb.addBucketField(fields, "curr_connections", extendedBucketStats.Op.Samples.CurrConnections[lastEntry]) + cb.addBucketField(fields, "curr_items", extendedBucketStats.Op.Samples.CurrItems[lastEntry]) + cb.addBucketField(fields, "curr_items_tot", extendedBucketStats.Op.Samples.CurrItemsTot[lastEntry]) + cb.addBucketField(fields, "decr_hits", extendedBucketStats.Op.Samples.DecrHits[lastEntry]) + cb.addBucketField(fields, "decr_misses", extendedBucketStats.Op.Samples.DecrMisses[lastEntry]) + cb.addBucketField(fields, "delete_hits", extendedBucketStats.Op.Samples.DeleteHits[lastEntry]) + cb.addBucketField(fields, "delete_misses", extendedBucketStats.Op.Samples.DeleteMisses[lastEntry]) + cb.addBucketField(fields, "disk_commit_count", extendedBucketStats.Op.Samples.DiskCommitCount[lastEntry]) + cb.addBucketField(fields, "disk_commit_total", extendedBucketStats.Op.Samples.DiskCommitTotal[lastEntry]) + cb.addBucketField(fields, "disk_update_count", extendedBucketStats.Op.Samples.DiskUpdateCount[lastEntry]) + cb.addBucketField(fields, "disk_update_total", extendedBucketStats.Op.Samples.DiskUpdateTotal[lastEntry]) + cb.addBucketField(fields, "disk_write_queue", extendedBucketStats.Op.Samples.DiskWriteQueue[lastEntry]) + cb.addBucketField(fields, "ep_active_ahead_exceptions", extendedBucketStats.Op.Samples.EpActiveAheadExceptions[lastEntry]) + cb.addBucketField(fields, "ep_active_hlc_drift", extendedBucketStats.Op.Samples.EpActiveHlcDrift[lastEntry]) + cb.addBucketField(fields, "ep_active_hlc_drift_count", extendedBucketStats.Op.Samples.EpActiveHlcDriftCount[lastEntry]) + cb.addBucketField(fields, "ep_bg_fetched", extendedBucketStats.Op.Samples.EpBgFetched[lastEntry]) + cb.addBucketField(fields, "ep_clock_cas_drift_threshold_exceeded", extendedBucketStats.Op.Samples.EpClockCasDriftThresholdExceeded[lastEntry]) + cb.addBucketField(fields, "ep_data_read_failed", extendedBucketStats.Op.Samples.EpDataReadFailed[lastEntry]) + cb.addBucketField(fields, "ep_data_write_failed", extendedBucketStats.Op.Samples.EpDataWriteFailed[lastEntry]) + cb.addBucketField(fields, "ep_dcp_2i_backoff", extendedBucketStats.Op.Samples.EpDcp2IBackoff[lastEntry]) + cb.addBucketField(fields, "ep_dcp_2i_count", extendedBucketStats.Op.Samples.EpDcp2ICount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_2i_items_remaining", extendedBucketStats.Op.Samples.EpDcp2IItemsRemaining[lastEntry]) + cb.addBucketField(fields, "ep_dcp_2i_items_sent", extendedBucketStats.Op.Samples.EpDcp2IItemsSent[lastEntry]) + cb.addBucketField(fields, "ep_dcp_2i_producer_count", extendedBucketStats.Op.Samples.EpDcp2IProducerCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_2i_total_backlog_size", extendedBucketStats.Op.Samples.EpDcp2ITotalBacklogSize[lastEntry]) + cb.addBucketField(fields, "ep_dcp_2i_total_bytes", extendedBucketStats.Op.Samples.EpDcp2ITotalBytes[lastEntry]) + cb.addBucketField(fields, "ep_dcp_cbas_backoff", extendedBucketStats.Op.Samples.EpDcpCbasBackoff[lastEntry]) + cb.addBucketField(fields, "ep_dcp_cbas_count", extendedBucketStats.Op.Samples.EpDcpCbasCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_cbas_items_remaining", extendedBucketStats.Op.Samples.EpDcpCbasItemsRemaining[lastEntry]) + cb.addBucketField(fields, "ep_dcp_cbas_items_sent", extendedBucketStats.Op.Samples.EpDcpCbasItemsSent[lastEntry]) + cb.addBucketField(fields, "ep_dcp_cbas_producer_count", extendedBucketStats.Op.Samples.EpDcpCbasProducerCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_cbas_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpCbasTotalBacklogSize[lastEntry]) + cb.addBucketField(fields, "ep_dcp_cbas_total_bytes", extendedBucketStats.Op.Samples.EpDcpCbasTotalBytes[lastEntry]) + cb.addBucketField(fields, "ep_dcp_eventing_backoff", extendedBucketStats.Op.Samples.EpDcpEventingBackoff[lastEntry]) + cb.addBucketField(fields, "ep_dcp_eventing_count", extendedBucketStats.Op.Samples.EpDcpEventingCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_eventing_items_remaining", extendedBucketStats.Op.Samples.EpDcpEventingItemsRemaining[lastEntry]) + cb.addBucketField(fields, "ep_dcp_eventing_items_sent", extendedBucketStats.Op.Samples.EpDcpEventingItemsSent[lastEntry]) + cb.addBucketField(fields, "ep_dcp_eventing_producer_count", extendedBucketStats.Op.Samples.EpDcpEventingProducerCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_eventing_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpEventingTotalBacklogSize[lastEntry]) + cb.addBucketField(fields, "ep_dcp_eventing_total_bytes", extendedBucketStats.Op.Samples.EpDcpEventingTotalBytes[lastEntry]) + cb.addBucketField(fields, "ep_dcp_fts_backoff", extendedBucketStats.Op.Samples.EpDcpFtsBackoff[lastEntry]) + cb.addBucketField(fields, "ep_dcp_fts_count", extendedBucketStats.Op.Samples.EpDcpFtsCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_fts_items_remaining", extendedBucketStats.Op.Samples.EpDcpFtsItemsRemaining[lastEntry]) + cb.addBucketField(fields, "ep_dcp_fts_items_sent", extendedBucketStats.Op.Samples.EpDcpFtsItemsSent[lastEntry]) + cb.addBucketField(fields, "ep_dcp_fts_producer_count", extendedBucketStats.Op.Samples.EpDcpFtsProducerCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_fts_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpFtsTotalBacklogSize[lastEntry]) + cb.addBucketField(fields, "ep_dcp_fts_total_bytes", extendedBucketStats.Op.Samples.EpDcpFtsTotalBytes[lastEntry]) + cb.addBucketField(fields, "ep_dcp_other_backoff", extendedBucketStats.Op.Samples.EpDcpOtherBackoff[lastEntry]) + cb.addBucketField(fields, "ep_dcp_other_count", extendedBucketStats.Op.Samples.EpDcpOtherCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_other_items_remaining", extendedBucketStats.Op.Samples.EpDcpOtherItemsRemaining[lastEntry]) + cb.addBucketField(fields, "ep_dcp_other_items_sent", extendedBucketStats.Op.Samples.EpDcpOtherItemsSent[lastEntry]) + cb.addBucketField(fields, "ep_dcp_other_producer_count", extendedBucketStats.Op.Samples.EpDcpOtherProducerCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_other_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpOtherTotalBacklogSize[lastEntry]) + cb.addBucketField(fields, "ep_dcp_other_total_bytes", extendedBucketStats.Op.Samples.EpDcpOtherTotalBytes[lastEntry]) + cb.addBucketField(fields, "ep_dcp_replica_backoff", extendedBucketStats.Op.Samples.EpDcpReplicaBackoff[lastEntry]) + cb.addBucketField(fields, "ep_dcp_replica_count", extendedBucketStats.Op.Samples.EpDcpReplicaCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_replica_items_remaining", extendedBucketStats.Op.Samples.EpDcpReplicaItemsRemaining[lastEntry]) + cb.addBucketField(fields, "ep_dcp_replica_items_sent", extendedBucketStats.Op.Samples.EpDcpReplicaItemsSent[lastEntry]) + cb.addBucketField(fields, "ep_dcp_replica_producer_count", extendedBucketStats.Op.Samples.EpDcpReplicaProducerCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_replica_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBacklogSize[lastEntry]) + cb.addBucketField(fields, "ep_dcp_replica_total_bytes", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBytes[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views_backoff", extendedBucketStats.Op.Samples.EpDcpViewsBackoff[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views_count", extendedBucketStats.Op.Samples.EpDcpViewsCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsItemsRemaining[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsItemsSent[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsProducerCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsTotalBacklogSize[lastEntry]) + cb.addBucketField(fields, "ep_dcp_views_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsTotalBytes[lastEntry]) + cb.addBucketField(fields, "ep_dcp_xdcr_backoff", extendedBucketStats.Op.Samples.EpDcpXdcrBackoff[lastEntry]) + cb.addBucketField(fields, "ep_dcp_xdcr_count", extendedBucketStats.Op.Samples.EpDcpXdcrCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_xdcr_items_remaining", extendedBucketStats.Op.Samples.EpDcpXdcrItemsRemaining[lastEntry]) + cb.addBucketField(fields, "ep_dcp_xdcr_items_sent", extendedBucketStats.Op.Samples.EpDcpXdcrItemsSent[lastEntry]) + cb.addBucketField(fields, "ep_dcp_xdcr_producer_count", extendedBucketStats.Op.Samples.EpDcpXdcrProducerCount[lastEntry]) + cb.addBucketField(fields, "ep_dcp_xdcr_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBacklogSize[lastEntry]) + cb.addBucketField(fields, "ep_dcp_xdcr_total_bytes", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBytes[lastEntry]) + cb.addBucketField(fields, "ep_diskqueue_drain", extendedBucketStats.Op.Samples.EpDiskqueueDrain[lastEntry]) + cb.addBucketField(fields, "ep_diskqueue_fill", extendedBucketStats.Op.Samples.EpDiskqueueFill[lastEntry]) + cb.addBucketField(fields, "ep_diskqueue_items", extendedBucketStats.Op.Samples.EpDiskqueueItems[lastEntry]) + cb.addBucketField(fields, "ep_flusher_todo", extendedBucketStats.Op.Samples.EpFlusherTodo[lastEntry]) + cb.addBucketField(fields, "ep_item_commit_failed", extendedBucketStats.Op.Samples.EpItemCommitFailed[lastEntry]) + cb.addBucketField(fields, "ep_kv_size", extendedBucketStats.Op.Samples.EpKvSize[lastEntry]) + cb.addBucketField(fields, "ep_max_size", extendedBucketStats.Op.Samples.EpMaxSize[lastEntry]) + cb.addBucketField(fields, "ep_mem_high_wat", extendedBucketStats.Op.Samples.EpMemHighWat[lastEntry]) + cb.addBucketField(fields, "ep_mem_low_wat", extendedBucketStats.Op.Samples.EpMemLowWat[lastEntry]) + cb.addBucketField(fields, "ep_meta_data_memory", extendedBucketStats.Op.Samples.EpMetaDataMemory[lastEntry]) + cb.addBucketField(fields, "ep_num_non_resident", extendedBucketStats.Op.Samples.EpNumNonResident[lastEntry]) + cb.addBucketField(fields, "ep_num_ops_del_meta", extendedBucketStats.Op.Samples.EpNumOpsDelMeta[lastEntry]) + cb.addBucketField(fields, "ep_num_ops_del_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsDelRetMeta[lastEntry]) + cb.addBucketField(fields, "ep_num_ops_get_meta", extendedBucketStats.Op.Samples.EpNumOpsGetMeta[lastEntry]) + cb.addBucketField(fields, "ep_num_ops_set_meta", extendedBucketStats.Op.Samples.EpNumOpsSetMeta[lastEntry]) + cb.addBucketField(fields, "ep_num_ops_set_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsSetRetMeta[lastEntry]) + cb.addBucketField(fields, "ep_num_value_ejects", extendedBucketStats.Op.Samples.EpNumValueEjects[lastEntry]) + cb.addBucketField(fields, "ep_oom_errors", extendedBucketStats.Op.Samples.EpOomErrors[lastEntry]) + cb.addBucketField(fields, "ep_ops_create", extendedBucketStats.Op.Samples.EpOpsCreate[lastEntry]) + cb.addBucketField(fields, "ep_ops_update", extendedBucketStats.Op.Samples.EpOpsUpdate[lastEntry]) + cb.addBucketField(fields, "ep_overhead", extendedBucketStats.Op.Samples.EpOverhead[lastEntry]) + cb.addBucketField(fields, "ep_queue_size", extendedBucketStats.Op.Samples.EpQueueSize[lastEntry]) + cb.addBucketField(fields, "ep_replica_ahead_exceptions", extendedBucketStats.Op.Samples.EpReplicaAheadExceptions[lastEntry]) + cb.addBucketField(fields, "ep_replica_hlc_drift", extendedBucketStats.Op.Samples.EpReplicaHlcDrift[lastEntry]) + cb.addBucketField(fields, "ep_replica_hlc_drift_count", extendedBucketStats.Op.Samples.EpReplicaHlcDriftCount[lastEntry]) + cb.addBucketField(fields, "ep_tmp_oom_errors", extendedBucketStats.Op.Samples.EpTmpOomErrors[lastEntry]) + cb.addBucketField(fields, "ep_vb_total", extendedBucketStats.Op.Samples.EpVbTotal[lastEntry]) + cb.addBucketField(fields, "evictions", extendedBucketStats.Op.Samples.Evictions[lastEntry]) + cb.addBucketField(fields, "get_hits", extendedBucketStats.Op.Samples.GetHits[lastEntry]) + cb.addBucketField(fields, "get_misses", extendedBucketStats.Op.Samples.GetMisses[lastEntry]) + cb.addBucketField(fields, "incr_hits", extendedBucketStats.Op.Samples.IncrHits[lastEntry]) + cb.addBucketField(fields, "incr_misses", extendedBucketStats.Op.Samples.IncrMisses[lastEntry]) + cb.addBucketField(fields, "misses", extendedBucketStats.Op.Samples.Misses[lastEntry]) + cb.addBucketField(fields, "ops", extendedBucketStats.Op.Samples.Ops[lastEntry]) + cb.addBucketField(fields, "timestamp", extendedBucketStats.Op.Samples.Timestamp[lastEntry]) + cb.addBucketField(fields, "vb_active_eject", extendedBucketStats.Op.Samples.VbActiveEject[lastEntry]) + cb.addBucketField(fields, "vb_active_itm_memory", extendedBucketStats.Op.Samples.VbActiveItmMemory[lastEntry]) + cb.addBucketField(fields, "vb_active_meta_data_memory", extendedBucketStats.Op.Samples.VbActiveMetaDataMemory[lastEntry]) + cb.addBucketField(fields, "vb_active_num", extendedBucketStats.Op.Samples.VbActiveNum[lastEntry]) + cb.addBucketField(fields, "vb_active_num_non_resident", extendedBucketStats.Op.Samples.VbActiveNumNonResident[lastEntry]) + cb.addBucketField(fields, "vb_active_ops_create", extendedBucketStats.Op.Samples.VbActiveOpsCreate[lastEntry]) + cb.addBucketField(fields, "vb_active_ops_update", extendedBucketStats.Op.Samples.VbActiveOpsUpdate[lastEntry]) + cb.addBucketField(fields, "vb_active_queue_age", extendedBucketStats.Op.Samples.VbActiveQueueAge[lastEntry]) + cb.addBucketField(fields, "vb_active_queue_drain", extendedBucketStats.Op.Samples.VbActiveQueueDrain[lastEntry]) + cb.addBucketField(fields, "vb_active_queue_fill", extendedBucketStats.Op.Samples.VbActiveQueueFill[lastEntry]) + cb.addBucketField(fields, "vb_active_queue_size", extendedBucketStats.Op.Samples.VbActiveQueueSize[lastEntry]) + cb.addBucketField(fields, "vb_active_sync_write_aborted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAbortedCount[lastEntry]) + cb.addBucketField(fields, "vb_active_sync_write_accepted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAcceptedCount[lastEntry]) + cb.addBucketField(fields, "vb_active_sync_write_committed_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteCommittedCount[lastEntry]) + cb.addBucketField(fields, "vb_pending_curr_items", extendedBucketStats.Op.Samples.VbPendingCurrItems[lastEntry]) + cb.addBucketField(fields, "vb_pending_eject", extendedBucketStats.Op.Samples.VbPendingEject[lastEntry]) + cb.addBucketField(fields, "vb_pending_itm_memory", extendedBucketStats.Op.Samples.VbPendingItmMemory[lastEntry]) + cb.addBucketField(fields, "vb_pending_meta_data_memory", extendedBucketStats.Op.Samples.VbPendingMetaDataMemory[lastEntry]) + cb.addBucketField(fields, "vb_pending_num", extendedBucketStats.Op.Samples.VbPendingNum[lastEntry]) + cb.addBucketField(fields, "vb_pending_num_non_resident", extendedBucketStats.Op.Samples.VbPendingNumNonResident[lastEntry]) + cb.addBucketField(fields, "vb_pending_ops_create", extendedBucketStats.Op.Samples.VbPendingOpsCreate[lastEntry]) + cb.addBucketField(fields, "vb_pending_ops_update", extendedBucketStats.Op.Samples.VbPendingOpsUpdate[lastEntry]) + cb.addBucketField(fields, "vb_pending_queue_age", extendedBucketStats.Op.Samples.VbPendingQueueAge[lastEntry]) + cb.addBucketField(fields, "vb_pending_queue_drain", extendedBucketStats.Op.Samples.VbPendingQueueDrain[lastEntry]) + cb.addBucketField(fields, "vb_pending_queue_fill", extendedBucketStats.Op.Samples.VbPendingQueueFill[lastEntry]) + cb.addBucketField(fields, "vb_pending_queue_size", extendedBucketStats.Op.Samples.VbPendingQueueSize[lastEntry]) + cb.addBucketField(fields, "vb_replica_curr_items", extendedBucketStats.Op.Samples.VbReplicaCurrItems[lastEntry]) + cb.addBucketField(fields, "vb_replica_eject", extendedBucketStats.Op.Samples.VbReplicaEject[lastEntry]) + cb.addBucketField(fields, "vb_replica_itm_memory", extendedBucketStats.Op.Samples.VbReplicaItmMemory[lastEntry]) + cb.addBucketField(fields, "vb_replica_meta_data_memory", extendedBucketStats.Op.Samples.VbReplicaMetaDataMemory[lastEntry]) + cb.addBucketField(fields, "vb_replica_num", extendedBucketStats.Op.Samples.VbReplicaNum[lastEntry]) + cb.addBucketField(fields, "vb_replica_num_non_resident", extendedBucketStats.Op.Samples.VbReplicaNumNonResident[lastEntry]) + cb.addBucketField(fields, "vb_replica_ops_create", extendedBucketStats.Op.Samples.VbReplicaOpsCreate[lastEntry]) + cb.addBucketField(fields, "vb_replica_ops_update", extendedBucketStats.Op.Samples.VbReplicaOpsUpdate[lastEntry]) + cb.addBucketField(fields, "vb_replica_queue_age", extendedBucketStats.Op.Samples.VbReplicaQueueAge[lastEntry]) + cb.addBucketField(fields, "vb_replica_queue_drain", extendedBucketStats.Op.Samples.VbReplicaQueueDrain[lastEntry]) + cb.addBucketField(fields, "vb_replica_queue_fill", extendedBucketStats.Op.Samples.VbReplicaQueueFill[lastEntry]) + cb.addBucketField(fields, "vb_replica_queue_size", extendedBucketStats.Op.Samples.VbReplicaQueueSize[lastEntry]) + cb.addBucketField(fields, "vb_total_queue_age", extendedBucketStats.Op.Samples.VbTotalQueueAge[lastEntry]) + cb.addBucketField(fields, "xdc_ops", extendedBucketStats.Op.Samples.XdcOps[lastEntry]) + cb.addBucketField(fields, "allocstall", extendedBucketStats.Op.Samples.Allocstall[lastEntry]) + cb.addBucketField(fields, "cpu_cores_available", extendedBucketStats.Op.Samples.CPUCoresAvailable[lastEntry]) + cb.addBucketField(fields, "cpu_irq_rate", extendedBucketStats.Op.Samples.CPUIrqRate[lastEntry]) + cb.addBucketField(fields, "cpu_stolen_rate", extendedBucketStats.Op.Samples.CPUStolenRate[lastEntry]) + cb.addBucketField(fields, "cpu_sys_rate", extendedBucketStats.Op.Samples.CPUSysRate[lastEntry]) + cb.addBucketField(fields, "cpu_user_rate", extendedBucketStats.Op.Samples.CPUUserRate[lastEntry]) + cb.addBucketField(fields, "cpu_utilization_rate", extendedBucketStats.Op.Samples.CPUUtilizationRate[lastEntry]) + cb.addBucketField(fields, "hibernated_requests", extendedBucketStats.Op.Samples.HibernatedRequests[lastEntry]) + cb.addBucketField(fields, "hibernated_waked", extendedBucketStats.Op.Samples.HibernatedWaked[lastEntry]) + cb.addBucketField(fields, "mem_actual_free", extendedBucketStats.Op.Samples.MemActualFree[lastEntry]) + cb.addBucketField(fields, "mem_actual_used", extendedBucketStats.Op.Samples.MemActualUsed[lastEntry]) + cb.addBucketField(fields, "mem_free", extendedBucketStats.Op.Samples.MemFree[lastEntry]) + cb.addBucketField(fields, "mem_limit", extendedBucketStats.Op.Samples.MemLimit[lastEntry]) + cb.addBucketField(fields, "mem_total", extendedBucketStats.Op.Samples.MemTotal[lastEntry]) + cb.addBucketField(fields, "mem_used_sys", extendedBucketStats.Op.Samples.MemUsedSys[lastEntry]) + cb.addBucketField(fields, "odp_report_failed", extendedBucketStats.Op.Samples.OdpReportFailed[lastEntry]) + cb.addBucketField(fields, "rest_requests", extendedBucketStats.Op.Samples.RestRequests[lastEntry]) + cb.addBucketField(fields, "swap_total", extendedBucketStats.Op.Samples.SwapTotal[lastEntry]) + cb.addBucketField(fields, "swap_used", extendedBucketStats.Op.Samples.SwapUsed[lastEntry]) + + return nil +} + +func (cb *Couchbase) addBucketField(fields map[string]interface{}, fieldKey string, value interface{}) { + if !cb.bucketInclude.Match(fieldKey) { + return + } + + fields[fieldKey] = value +} + +func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, bucketStats *BucketStats) error { + // Set up an HTTP request to get the complete set of bucket stats. + req, err := http.NewRequest("GET", server+"/pools/default/buckets/"+bucket+"/stats?", nil) + if err != nil { + return err + } + + r, err := client.Do(req) + if err != nil { + return err + } + + defer r.Body.Close() + + return json.NewDecoder(r.Body).Decode(bucketStats) +} + +func (cb *Couchbase) Init() error { + f, err := filter.NewIncludeExcludeFilter(cb.BucketStatsIncluded, []string{}) + if err != nil { + return err + } + + cb.bucketInclude = f + return nil } func init() { inputs.Add("couchbase", func() telegraf.Input { - return &Couchbase{} + return &Couchbase{ + BucketStatsIncluded: []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"}, + } }) } diff --git a/plugins/inputs/couchbase/couchbase_data.go b/plugins/inputs/couchbase/couchbase_data.go new file mode 100644 index 0000000000000..2b1227f5c8cdc --- /dev/null +++ b/plugins/inputs/couchbase/couchbase_data.go @@ -0,0 +1,228 @@ +package couchbase + +type BucketStats struct { + Op struct { + Samples struct { + CouchTotalDiskSize []float64 `json:"couch_total_disk_size"` + CouchDocsFragmentation []float64 `json:"couch_docs_fragmentation"` + CouchViewsFragmentation []float64 `json:"couch_views_fragmentation"` + HitRatio []float64 `json:"hit_ratio"` + EpCacheMissRate []float64 `json:"ep_cache_miss_rate"` + EpResidentItemsRate []float64 `json:"ep_resident_items_rate"` + VbAvgActiveQueueAge []float64 `json:"vb_avg_active_queue_age"` + VbAvgReplicaQueueAge []float64 `json:"vb_avg_replica_queue_age"` + VbAvgPendingQueueAge []float64 `json:"vb_avg_pending_queue_age"` + VbAvgTotalQueueAge []float64 `json:"vb_avg_total_queue_age"` + VbActiveResidentItemsRatio []float64 `json:"vb_active_resident_items_ratio"` + VbReplicaResidentItemsRatio []float64 `json:"vb_replica_resident_items_ratio"` + VbPendingResidentItemsRatio []float64 `json:"vb_pending_resident_items_ratio"` + AvgDiskUpdateTime []float64 `json:"avg_disk_update_time"` + AvgDiskCommitTime []float64 `json:"avg_disk_commit_time"` + AvgBgWaitTime []float64 `json:"avg_bg_wait_time"` + AvgActiveTimestampDrift []float64 `json:"avg_active_timestamp_drift"` + AvgReplicaTimestampDrift []float64 `json:"avg_replica_timestamp_drift"` + EpDcpViewsIndexesCount []float64 `json:"ep_dcp_views+indexes_count"` + EpDcpViewsIndexesItemsRemaining []float64 `json:"ep_dcp_views+indexes_items_remaining"` + EpDcpViewsIndexesProducerCount []float64 `json:"ep_dcp_views+indexes_producer_count"` + EpDcpViewsIndexesTotalBacklogSize []float64 `json:"ep_dcp_views+indexes_total_backlog_size"` + EpDcpViewsIndexesItemsSent []float64 `json:"ep_dcp_views+indexes_items_sent"` + EpDcpViewsIndexesTotalBytes []float64 `json:"ep_dcp_views+indexes_total_bytes"` + EpDcpViewsIndexesBackoff []float64 `json:"ep_dcp_views+indexes_backoff"` + BgWaitCount []float64 `json:"bg_wait_count"` + BgWaitTotal []float64 `json:"bg_wait_total"` + BytesRead []float64 `json:"bytes_read"` + BytesWritten []float64 `json:"bytes_written"` + CasBadval []float64 `json:"cas_badval"` + CasHits []float64 `json:"cas_hits"` + CasMisses []float64 `json:"cas_misses"` + CmdGet []float64 `json:"cmd_get"` + CmdLookup []float64 `json:"cmd_lookup"` + CmdSet []float64 `json:"cmd_set"` + CouchDocsActualDiskSize []float64 `json:"couch_docs_actual_disk_size"` + CouchDocsDataSize []float64 `json:"couch_docs_data_size"` + CouchDocsDiskSize []float64 `json:"couch_docs_disk_size"` + CouchSpatialDataSize []float64 `json:"couch_spatial_data_size"` + CouchSpatialDiskSize []float64 `json:"couch_spatial_disk_size"` + CouchSpatialOps []float64 `json:"couch_spatial_ops"` + CouchViewsActualDiskSize []float64 `json:"couch_views_actual_disk_size"` + CouchViewsDataSize []float64 `json:"couch_views_data_size"` + CouchViewsDiskSize []float64 `json:"couch_views_disk_size"` + CouchViewsOps []float64 `json:"couch_views_ops"` + CurrConnections []float64 `json:"curr_connections"` + CurrItems []float64 `json:"curr_items"` + CurrItemsTot []float64 `json:"curr_items_tot"` + DecrHits []float64 `json:"decr_hits"` + DecrMisses []float64 `json:"decr_misses"` + DeleteHits []float64 `json:"delete_hits"` + DeleteMisses []float64 `json:"delete_misses"` + DiskCommitCount []float64 `json:"disk_commit_count"` + DiskCommitTotal []float64 `json:"disk_commit_total"` + DiskUpdateCount []float64 `json:"disk_update_count"` + DiskUpdateTotal []float64 `json:"disk_update_total"` + DiskWriteQueue []float64 `json:"disk_write_queue"` + EpActiveAheadExceptions []float64 `json:"ep_active_ahead_exceptions"` + EpActiveHlcDrift []float64 `json:"ep_active_hlc_drift"` + EpActiveHlcDriftCount []float64 `json:"ep_active_hlc_drift_count"` + EpBgFetched []float64 `json:"ep_bg_fetched"` + EpClockCasDriftThresholdExceeded []float64 `json:"ep_clock_cas_drift_threshold_exceeded"` + EpDataReadFailed []float64 `json:"ep_data_read_failed"` + EpDataWriteFailed []float64 `json:"ep_data_write_failed"` + EpDcp2IBackoff []float64 `json:"ep_dcp_2i_backoff"` + EpDcp2ICount []float64 `json:"ep_dcp_2i_count"` + EpDcp2IItemsRemaining []float64 `json:"ep_dcp_2i_items_remaining"` + EpDcp2IItemsSent []float64 `json:"ep_dcp_2i_items_sent"` + EpDcp2IProducerCount []float64 `json:"ep_dcp_2i_producer_count"` + EpDcp2ITotalBacklogSize []float64 `json:"ep_dcp_2i_total_backlog_size"` + EpDcp2ITotalBytes []float64 `json:"ep_dcp_2i_total_bytes"` + EpDcpCbasBackoff []float64 `json:"ep_dcp_cbas_backoff"` + EpDcpCbasCount []float64 `json:"ep_dcp_cbas_count"` + EpDcpCbasItemsRemaining []float64 `json:"ep_dcp_cbas_items_remaining"` + EpDcpCbasItemsSent []float64 `json:"ep_dcp_cbas_items_sent"` + EpDcpCbasProducerCount []float64 `json:"ep_dcp_cbas_producer_count"` + EpDcpCbasTotalBacklogSize []float64 `json:"ep_dcp_cbas_total_backlog_size"` + EpDcpCbasTotalBytes []float64 `json:"ep_dcp_cbas_total_bytes"` + EpDcpEventingBackoff []float64 `json:"ep_dcp_eventing_backoff"` + EpDcpEventingCount []float64 `json:"ep_dcp_eventing_count"` + EpDcpEventingItemsRemaining []float64 `json:"ep_dcp_eventing_items_remaining"` + EpDcpEventingItemsSent []float64 `json:"ep_dcp_eventing_items_sent"` + EpDcpEventingProducerCount []float64 `json:"ep_dcp_eventing_producer_count"` + EpDcpEventingTotalBacklogSize []float64 `json:"ep_dcp_eventing_total_backlog_size"` + EpDcpEventingTotalBytes []float64 `json:"ep_dcp_eventing_total_bytes"` + EpDcpFtsBackoff []float64 `json:"ep_dcp_fts_backoff"` + EpDcpFtsCount []float64 `json:"ep_dcp_fts_count"` + EpDcpFtsItemsRemaining []float64 `json:"ep_dcp_fts_items_remaining"` + EpDcpFtsItemsSent []float64 `json:"ep_dcp_fts_items_sent"` + EpDcpFtsProducerCount []float64 `json:"ep_dcp_fts_producer_count"` + EpDcpFtsTotalBacklogSize []float64 `json:"ep_dcp_fts_total_backlog_size"` + EpDcpFtsTotalBytes []float64 `json:"ep_dcp_fts_total_bytes"` + EpDcpOtherBackoff []float64 `json:"ep_dcp_other_backoff"` + EpDcpOtherCount []float64 `json:"ep_dcp_other_count"` + EpDcpOtherItemsRemaining []float64 `json:"ep_dcp_other_items_remaining"` + EpDcpOtherItemsSent []float64 `json:"ep_dcp_other_items_sent"` + EpDcpOtherProducerCount []float64 `json:"ep_dcp_other_producer_count"` + EpDcpOtherTotalBacklogSize []float64 `json:"ep_dcp_other_total_backlog_size"` + EpDcpOtherTotalBytes []float64 `json:"ep_dcp_other_total_bytes"` + EpDcpReplicaBackoff []float64 `json:"ep_dcp_replica_backoff"` + EpDcpReplicaCount []float64 `json:"ep_dcp_replica_count"` + EpDcpReplicaItemsRemaining []float64 `json:"ep_dcp_replica_items_remaining"` + EpDcpReplicaItemsSent []float64 `json:"ep_dcp_replica_items_sent"` + EpDcpReplicaProducerCount []float64 `json:"ep_dcp_replica_producer_count"` + EpDcpReplicaTotalBacklogSize []float64 `json:"ep_dcp_replica_total_backlog_size"` + EpDcpReplicaTotalBytes []float64 `json:"ep_dcp_replica_total_bytes"` + EpDcpViewsBackoff []float64 `json:"ep_dcp_views_backoff"` + EpDcpViewsCount []float64 `json:"ep_dcp_views_count"` + EpDcpViewsItemsRemaining []float64 `json:"ep_dcp_views_items_remaining"` + EpDcpViewsItemsSent []float64 `json:"ep_dcp_views_items_sent"` + EpDcpViewsProducerCount []float64 `json:"ep_dcp_views_producer_count"` + EpDcpViewsTotalBacklogSize []float64 `json:"ep_dcp_views_total_backlog_size"` + EpDcpViewsTotalBytes []float64 `json:"ep_dcp_views_total_bytes"` + EpDcpXdcrBackoff []float64 `json:"ep_dcp_xdcr_backoff"` + EpDcpXdcrCount []float64 `json:"ep_dcp_xdcr_count"` + EpDcpXdcrItemsRemaining []float64 `json:"ep_dcp_xdcr_items_remaining"` + EpDcpXdcrItemsSent []float64 `json:"ep_dcp_xdcr_items_sent"` + EpDcpXdcrProducerCount []float64 `json:"ep_dcp_xdcr_producer_count"` + EpDcpXdcrTotalBacklogSize []float64 `json:"ep_dcp_xdcr_total_backlog_size"` + EpDcpXdcrTotalBytes []float64 `json:"ep_dcp_xdcr_total_bytes"` + EpDiskqueueDrain []float64 `json:"ep_diskqueue_drain"` + EpDiskqueueFill []float64 `json:"ep_diskqueue_fill"` + EpDiskqueueItems []float64 `json:"ep_diskqueue_items"` + EpFlusherTodo []float64 `json:"ep_flusher_todo"` + EpItemCommitFailed []float64 `json:"ep_item_commit_failed"` + EpKvSize []float64 `json:"ep_kv_size"` + EpMaxSize []float64 `json:"ep_max_size"` + EpMemHighWat []float64 `json:"ep_mem_high_wat"` + EpMemLowWat []float64 `json:"ep_mem_low_wat"` + EpMetaDataMemory []float64 `json:"ep_meta_data_memory"` + EpNumNonResident []float64 `json:"ep_num_non_resident"` + EpNumOpsDelMeta []float64 `json:"ep_num_ops_del_meta"` + EpNumOpsDelRetMeta []float64 `json:"ep_num_ops_del_ret_meta"` + EpNumOpsGetMeta []float64 `json:"ep_num_ops_get_meta"` + EpNumOpsSetMeta []float64 `json:"ep_num_ops_set_meta"` + EpNumOpsSetRetMeta []float64 `json:"ep_num_ops_set_ret_meta"` + EpNumValueEjects []float64 `json:"ep_num_value_ejects"` + EpOomErrors []float64 `json:"ep_oom_errors"` + EpOpsCreate []float64 `json:"ep_ops_create"` + EpOpsUpdate []float64 `json:"ep_ops_update"` + EpOverhead []float64 `json:"ep_overhead"` + EpQueueSize []float64 `json:"ep_queue_size"` + EpReplicaAheadExceptions []float64 `json:"ep_replica_ahead_exceptions"` + EpReplicaHlcDrift []float64 `json:"ep_replica_hlc_drift"` + EpReplicaHlcDriftCount []float64 `json:"ep_replica_hlc_drift_count"` + EpTmpOomErrors []float64 `json:"ep_tmp_oom_errors"` + EpVbTotal []float64 `json:"ep_vb_total"` + Evictions []float64 `json:"evictions"` + GetHits []float64 `json:"get_hits"` + GetMisses []float64 `json:"get_misses"` + IncrHits []float64 `json:"incr_hits"` + IncrMisses []float64 `json:"incr_misses"` + MemUsed []float64 `json:"mem_used"` + Misses []float64 `json:"misses"` + Ops []float64 `json:"ops"` + Timestamp []float64 `json:"timestamp"` + VbActiveEject []float64 `json:"vb_active_eject"` + VbActiveItmMemory []float64 `json:"vb_active_itm_memory"` + VbActiveMetaDataMemory []float64 `json:"vb_active_meta_data_memory"` + VbActiveNum []float64 `json:"vb_active_num"` + VbActiveNumNonResident []float64 `json:"vb_active_num_non_resident"` + VbActiveOpsCreate []float64 `json:"vb_active_ops_create"` + VbActiveOpsUpdate []float64 `json:"vb_active_ops_update"` + VbActiveQueueAge []float64 `json:"vb_active_queue_age"` + VbActiveQueueDrain []float64 `json:"vb_active_queue_drain"` + VbActiveQueueFill []float64 `json:"vb_active_queue_fill"` + VbActiveQueueSize []float64 `json:"vb_active_queue_size"` + VbActiveSyncWriteAbortedCount []float64 `json:"vb_active_sync_write_aborted_count"` + VbActiveSyncWriteAcceptedCount []float64 `json:"vb_active_sync_write_accepted_count"` + VbActiveSyncWriteCommittedCount []float64 `json:"vb_active_sync_write_committed_count"` + VbPendingCurrItems []float64 `json:"vb_pending_curr_items"` + VbPendingEject []float64 `json:"vb_pending_eject"` + VbPendingItmMemory []float64 `json:"vb_pending_itm_memory"` + VbPendingMetaDataMemory []float64 `json:"vb_pending_meta_data_memory"` + VbPendingNum []float64 `json:"vb_pending_num"` + VbPendingNumNonResident []float64 `json:"vb_pending_num_non_resident"` + VbPendingOpsCreate []float64 `json:"vb_pending_ops_create"` + VbPendingOpsUpdate []float64 `json:"vb_pending_ops_update"` + VbPendingQueueAge []float64 `json:"vb_pending_queue_age"` + VbPendingQueueDrain []float64 `json:"vb_pending_queue_drain"` + VbPendingQueueFill []float64 `json:"vb_pending_queue_fill"` + VbPendingQueueSize []float64 `json:"vb_pending_queue_size"` + VbReplicaCurrItems []float64 `json:"vb_replica_curr_items"` + VbReplicaEject []float64 `json:"vb_replica_eject"` + VbReplicaItmMemory []float64 `json:"vb_replica_itm_memory"` + VbReplicaMetaDataMemory []float64 `json:"vb_replica_meta_data_memory"` + VbReplicaNum []float64 `json:"vb_replica_num"` + VbReplicaNumNonResident []float64 `json:"vb_replica_num_non_resident"` + VbReplicaOpsCreate []float64 `json:"vb_replica_ops_create"` + VbReplicaOpsUpdate []float64 `json:"vb_replica_ops_update"` + VbReplicaQueueAge []float64 `json:"vb_replica_queue_age"` + VbReplicaQueueDrain []float64 `json:"vb_replica_queue_drain"` + VbReplicaQueueFill []float64 `json:"vb_replica_queue_fill"` + VbReplicaQueueSize []float64 `json:"vb_replica_queue_size"` + VbTotalQueueAge []float64 `json:"vb_total_queue_age"` + XdcOps []float64 `json:"xdc_ops"` + Allocstall []float64 `json:"allocstall"` + CPUCoresAvailable []float64 `json:"cpu_cores_available"` + CPUIrqRate []float64 `json:"cpu_irq_rate"` + CPUStolenRate []float64 `json:"cpu_stolen_rate"` + CPUSysRate []float64 `json:"cpu_sys_rate"` + CPUUserRate []float64 `json:"cpu_user_rate"` + CPUUtilizationRate []float64 `json:"cpu_utilization_rate"` + HibernatedRequests []float64 `json:"hibernated_requests"` + HibernatedWaked []float64 `json:"hibernated_waked"` + MemActualFree []float64 `json:"mem_actual_free"` + MemActualUsed []float64 `json:"mem_actual_used"` + MemFree []float64 `json:"mem_free"` + MemLimit []float64 `json:"mem_limit"` + MemTotal []float64 `json:"mem_total"` + MemUsedSys []float64 `json:"mem_used_sys"` + OdpReportFailed []float64 `json:"odp_report_failed"` + RestRequests []float64 `json:"rest_requests"` + SwapTotal []float64 `json:"swap_total"` + SwapUsed []float64 `json:"swap_used"` + } `json:"samples"` + Samplescount int `json:"samplesCount"` + Ispersistent bool `json:"isPersistent"` + Lasttstamp int64 `json:"lastTStamp"` + Interval int `json:"interval"` + } `json:"op"` + HotKeys []interface{} `json:"hot_keys"` +} diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index 66a1d08abad1f..25728544c6a97 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -2,15 +2,28 @@ package couchbase import ( "encoding/json" + "net/http" + "net/http/httptest" "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" "github.com/couchbase/go-couchbase" ) func TestGatherServer(t *testing.T) { + bucket := "blastro-df" + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" { + _, _ = w.Write([]byte(bucketStatsResponse)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + var pool couchbase.Pool + var err error if err := json.Unmarshal([]byte(poolsDefaultResponse), &pool); err != nil { t.Fatal("parse poolsDefaultResponse", err) } @@ -18,15 +31,26 @@ func TestGatherServer(t *testing.T) { if err := json.Unmarshal([]byte(bucketResponse), &pool.BucketMap); err != nil { t.Fatal("parse bucketResponse", err) } + + bucketStats := &BucketStats{} + if err := json.Unmarshal([]byte(bucketStatsResponse), bucketStats); err != nil { + t.Fatal("parse bucketResponse", err) + } + var cb Couchbase + cb.BucketStatsIncluded = []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"} + err = cb.Init() + require.NoError(t, err) + var acc testutil.Accumulator - cb.gatherServer("mycluster", &acc, &pool) + err = cb.gatherServer(fakeServer.URL, &acc, &pool) + require.NoError(t, err) acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23181365248.0, "memory_total": 64424656896.0}, - map[string]string{"cluster": "mycluster", "hostname": "172.16.10.187:8091"}) + map[string]string{"cluster": fakeServer.URL, "hostname": "172.16.10.187:8091"}) acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23665811456.0, "memory_total": 64424656896.0}, - map[string]string{"cluster": "mycluster", "hostname": "172.16.10.65:8091"}) + map[string]string{"cluster": fakeServer.URL, "hostname": "172.16.10.65:8091"}) acc.AssertContainsTaggedFields(t, "couchbase_bucket", map[string]interface{}{ "quota_percent_used": 68.85424936294555, @@ -37,7 +61,7 @@ func TestGatherServer(t *testing.T) { "data_used": 212179309111.0, "mem_used": 202156957464.0, }, - map[string]string{"cluster": "mycluster", "bucket": "blastro-df"}) + map[string]string{"cluster": fakeServer.URL, "bucket": "blastro-df"}) } func TestSanitizeURI(t *testing.T) { @@ -63,8 +87,42 @@ func TestSanitizeURI(t *testing.T) { } } +func TestGatherDetailedBucketMetrics(t *testing.T) { + bucket := "Ducks" + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" { + _, _ = w.Write([]byte(bucketStatsResponse)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + + var err error + var cb Couchbase + cb.BucketStatsIncluded = []string{"couch_total_disk_size"} + err = cb.Init() + require.NoError(t, err) + var acc testutil.Accumulator + bucketStats := &BucketStats{} + if err := json.Unmarshal([]byte(bucketStatsResponse), bucketStats); err != nil { + t.Fatal("parse bucketResponse", err) + } + + fields := make(map[string]interface{}) + err = cb.gatherDetailedBucketStats(fakeServer.URL, bucket, fields) + require.NoError(t, err) + + acc.AddFields("couchbase_bucket", fields, nil) + + // Ensure we gathered only one metric (the one that we configured). + require.Equal(t, len(acc.Metrics), 1) + require.Equal(t, len(acc.Metrics[0].Fields), 1) +} + // From `/pools/default` on a real cluster const poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups?v=98656394","name":"default","alerts":["Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts?token=2814&uuid=2bec87861652b990cf6aa5c7ee58c253","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.878878878879,"vb_replica_curr_items":140153845},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_data_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":140170150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.31034482758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets?v=74117050&uuid=2bec87861652b990cf6aa5c7ee58c253","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/pools/default/remoteClusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"rebalance":{"uri":"/controller/rebalance?uuid=2bec87861652b990cf6aa5c7ee58c253"},"failOver":{"uri":"/controller/failOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reAddNode":{"uri":"/controller/reAddNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reFailOver":{"uri":"/controller/reFailOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"ejectNode":{"uri":"/controller/ejectNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setRecoveryType":{"uri":"/controller/setRecoveryType?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253","cancelURI":"/controller/cancelLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253"},"replication":{"createURI":"/controller/createReplication?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalance?uuid=2bec87861652b990cf6aa5c7ee58c253","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks?v=97479372"},"visualSettingsUri":"/internalSettings/visual?v=7111573","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` // From `/pools/default/buckets/blastro-df` on a real cluster const bucketResponse string = `{"blastro-df": {"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}}` + +const bucketStatsResponse string = `{"op":{"samples":{"couch_total_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341],"couch_docs_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"hit_ratio":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_cache_miss_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_resident_items_rate":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_avg_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_replica_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_pending_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"avg_disk_update_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_disk_commit_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_bg_wait_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_active_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_replica_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bytes_read":[118.1818181818182,142.2805247225025,180.8080808080808,197.7800201816347,141.9939577039275,118.5410334346505,142.4242424242424,148.4848484848485,197.3816717019134,202.4291497975709,118.0625630676085,142.4242424242424,179.6165489404642,197.979797979798,142.4242424242424,118.1818181818182,142.2805247225025,148.4848484848485,197.979797979798,201.816347124117,118.1818181818182,142.4242424242424,148.4848484848485,197.7800201816347,142.4242424242424,118.1818181818182,142.2805247225025,179.7979797979798,197.1830985915493,202.6342451874367,118.1818181818182,142.2805247225025,180.4435483870968,198.3805668016194,142.2805247225025,118.1818181818182,142.2805247225025,148.4848484848485,197.979797979798,202.020202020202,118.0625630676085,118.1818181818182,204.040404040404,197.7800201816347,142.1370967741935,118.4210526315789,118.1818181818182,172.5529767911201,197.5806451612903,202.4291497975709,118.0625630676085,118.1818181818182,172.7272727272727,197.7800201816347,142.4242424242424,118.0625630676085,118.1818181818182,204.040404040404,197.979797979798,201.816347124117],"bytes_written":[36420.20202020202,37762.86579212916,37225.25252525252,50460.14127144299,37686.80765357502,36530.90172239109,37801.0101010101,37111.11111111111,50358.50956696878,60511.13360323886,36383.45105953582,37801.0101010101,37393.54187689203,50511.11111111111,37801.0101010101,36420.20202020202,37762.86579212916,37111.11111111111,50511.11111111111,60327.95156407669,36420.20202020202,37801.0101010101,37111.11111111111,50460.14127144299,37801.0101010101,36420.20202020202,37762.86579212916,37431.31313131313,50307.84708249497,60572.44174265451,36420.20202020202,37762.86579212916,37150.20161290323,50613.36032388664,37762.86579212916,36420.20202020202,37762.86579212916,37111.11111111111,50511.11111111111,60388.88888888889,36383.45105953582,36420.20202020202,38812.12121212122,50460.14127144299,37724.79838709677,36493.92712550607,36420.20202020202,38453.07769929364,50409.27419354839,60511.13360323886,36383.45105953582,36420.20202020202,38491.91919191919,50460.14127144299,37801.0101010101,36383.45105953582,36420.20202020202,38812.12121212122,50511.11111111111,60327.95156407669],"cas_badval":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_get":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_lookup":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_set":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_docs_actual_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341],"couch_docs_data_size":[531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373],"couch_docs_disk_size":[531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373],"couch_spatial_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_actual_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"curr_connections":[14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14],"curr_items":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"curr_items_tot":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"decr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"decr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_write_queue":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_bg_fetched":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_clock_cas_drift_threshold_exceeded":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_read_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_write_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_flusher_todo":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_item_commit_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_kv_size":[10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340],"ep_max_size":[8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032],"ep_mem_high_wat":[7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627],"ep_mem_low_wat":[6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024],"ep_meta_data_memory":[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68],"ep_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_get_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_value_ejects":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_overhead":[403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824],"ep_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_tmp_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_vb_total":[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64],"evictions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_used":[4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016],"misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"timestamp":[1615918120012,1615918121003,1615918121993,1615918122984,1615918123977,1615918124964,1615918125954,1615918126944,1615918127937,1615918128925,1615918129916,1615918130906,1615918131897,1615918132887,1615918133877,1615918134867,1615918135858,1615918136848,1615918137838,1615918138829,1615918139819,1615918140809,1615918141799,1615918142790,1615918143780,1615918144770,1615918145761,1615918146751,1615918147745,1615918148732,1615918149722,1615918150713,1615918151705,1615918152693,1615918153684,1615918154674,1615918155665,1615918156655,1615918157645,1615918158635,1615918159626,1615918160616,1615918161606,1615918162597,1615918163589,1615918164577,1615918165567,1615918166558,1615918167550,1615918168538,1615918169529,1615918170519,1615918171509,1615918172500,1615918173490,1615918174481,1615918175471,1615918176461,1615918177451,1615918178442],"vb_active_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_itm_memory":[88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88],"vb_active_meta_data_memory":[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68],"vb_active_num":[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64],"vb_active_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_aborted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_accepted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_committed_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"xdc_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"allocstall":[18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615],"cpu_cores_available":[12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12],"cpu_irq_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_stolen_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_sys_rate":[4.942965779467681,5.243268776570619,6.823027718550106,4.815073272854153,4.853128991060026,5.068836045056321,4.983108108108108,4.110738255033557,3.201347935973041,3.959561920808762,3.610411418975651,3.459915611814346,3.691275167785235,4.553119730185498,6.470588235294118,4.545454545454546,5.046257359125315,5.976430976430977,5.564924114671164,3.703703703703704,3.529411764705882,3.544303797468354,3.826787512588117,5.118961788031723,7.166947723440135,5.87248322147651,4.289318755256518,5.485232067510548,4.765886287625418,4.672897196261682,4.184100418410042,4.560810810810811,7.02928870292887,6.081081081081081,5.378151260504202,6.239460370994941,8.984047019311502,6.896551724137931,9.636517328825022,9.335576114381833,7.64063811922754,8.684654300168635,6.543624161073826,6.465155331654072,5.961376994122586,3.807106598984772,3.36417157275021,3.700588730025231,3.775167785234899,9.45945945945946,3.114478114478115,3.451178451178451,4.465037910699242,3.852596314907873,3.462837837837838,5.205709487825357,5.218855218855219,6.532663316582915,5.885057471264368,4.030226700251889],"cpu_user_rate":[15.20912547528517,9.58904109589041,10.76759061833689,8.443824145150035,8.301404853128991,10.95118898623279,9.797297297297296,6.879194630872483,6.823925863521483,6.908171861836562,6.54911838790932,6.835443037974684,7.382550335570469,10.28667790893761,16.97478991596639,11.53198653198653,9.75609756097561,11.11111111111111,12.05733558178752,7.154882154882155,6.890756302521009,6.666666666666667,7.150050352467271,10.23792357606345,12.7318718381113,9.479865771812081,7.905803195962994,8.016877637130802,9.19732441471572,9.600679694137638,7.364016736401673,8.108108108108109,15.31380753138075,13.85135135135135,10.58823529411765,12.64755480607083,18.47187237615449,13.28847771236333,19.8647506339814,21.86711522287637,23.5936188077246,22.17537942664418,12.08053691275168,16.96053736356003,32.49370277078086,8.20642978003384,10.17661900756939,7.653490328006728,10.82214765100671,14.27364864864865,6.986531986531986,7.407407407407407,10.02527379949452,11.55778894472362,8.192567567567568,12.34256926952141,14.05723905723906,28.64321608040201,13.14942528735632,7.388748950461797],"cpu_utilization_rate":[20.15209125475285,14.83230987246103,17.59061833688699,13.25889741800419,13.15453384418902,16.02002503128911,14.78040540540541,10.98993288590604,10.02527379949452,10.86773378264532,10.15952980688497,10.29535864978903,11.0738255033557,14.8397976391231,23.4453781512605,16.07744107744108,14.80235492010092,17.08754208754209,17.62225969645868,10.85858585858586,10.42016806722689,10.21097046413502,10.97683786505539,15.35688536409517,19.89881956155143,15.35234899328859,12.19512195121951,13.50210970464135,13.96321070234114,14.27357689039932,11.54811715481171,12.66891891891892,22.34309623430962,19.93243243243243,15.96638655462185,18.88701517706577,27.45591939546599,20.18502943650126,29.50126796280642,31.2026913372582,31.23425692695214,30.86003372681282,18.6241610738255,23.42569269521411,38.45507976490345,12.01353637901861,13.5407905803196,11.35407905803196,14.59731543624161,23.73310810810811,10.1010101010101,10.85858585858586,14.49031171019377,15.41038525963149,11.65540540540541,17.54827875734677,19.27609427609428,35.17587939698493,19.03448275862069,11.41897565071369],"hibernated_requests":[7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7],"hibernated_waked":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_actual_free":[7004864512,6998364160,7056683008,7055605760,7059243008,7078457344,7079067648,7079514112,7078977536,7088099328,7091081216,7091773440,7091589120,7080108032,7073554432,7073914880,7080144896,7065124864,7063183360,7072677888,7073767424,7073542144,7073542144,7074902016,7053836288,7050895360,7055720448,7056822272,7057281024,7053025280,7052763136,7051984896,7049113600,7040618496,7045636096,7050907648,7021027328,7001329664,6985895936,6985895936,6955642880,7059750912,7058616320,7050067968,7047163904,7045873664,7050272768,7068528640,7073677312,7079116800,7078252544,7075880960,7065079808,7066251264,7065726976,7063486464,7064797184,7066206208,7068819456,7071809536],"mem_actual_used":[10175004672,10181505024,10123186176,10124263424,10120626176,10101411840,10100801536,10100355072,10100891648,10091769856,10088787968,10088095744,10088280064,10099761152,10106314752,10105954304,10099724288,10114744320,10116685824,10107191296,10106101760,10106327040,10106327040,10104967168,10126032896,10128973824,10124148736,10123046912,10122588160,10126843904,10127106048,10127884288,10130755584,10139250688,10134233088,10128961536,10158841856,10178539520,10193973248,10193973248,10224226304,10120118272,10121252864,10129801216,10132705280,10133995520,10129596416,10111340544,10106191872,10100752384,10101616640,10103988224,10114789376,10113617920,10114142208,10116382720,10115072000,10113662976,10111049728,10108059648],"mem_free":[7004864512,6998364160,7056683008,7055605760,7059243008,7078457344,7079067648,7079514112,7078977536,7088099328,7091081216,7091773440,7091589120,7080108032,7073554432,7073914880,7080144896,7065124864,7063183360,7072677888,7073767424,7073542144,7073542144,7074902016,7053836288,7050895360,7055720448,7056822272,7057281024,7053025280,7052763136,7051984896,7049113600,7040618496,7045636096,7050907648,7021027328,7001329664,6985895936,6985895936,6955642880,7059750912,7058616320,7050067968,7047163904,7045873664,7050272768,7068528640,7073677312,7079116800,7078252544,7075880960,7065079808,7066251264,7065726976,7063486464,7064797184,7066206208,7068819456,7071809536],"mem_limit":[17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184],"mem_total":[17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184],"mem_used_sys":[16694517760,16707862528,16608030720,16610041856,16604663808,16553811968,16553463808,16553369600,16553861120,16539238400,16536092672,16535760896,16535707648,16553418752,16559439872,16558895104,16554569728,16580468736,16582680576,16565084160,16564649984,16565272576,16565272576,16562823168,16599863296,16602157056,16597528576,16596774912,16595107840,16593002496,16593485824,16596668416,16598691840,16607469568,16599904256,16590753792,16644947968,16684613632,16714768384,16714768384,16781234176,16573353984,16575979520,16593072128,16603037696,16605077504,16599199744,16581554176,16570187776,16560140288,16561221632,16565153792,16577990656,16577200128,16578031616,16582909952,16569671680,16565702656,16560218112,16554315776],"odp_report_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"rest_requests":[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,8,2,2,2,2,2,2,2,2,3,2,2,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,2,2,2,2,2],"swap_total":[1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824],"swap_used":[122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392]},"samplesCount":60,"isPersistent":true,"lastTStamp":1615918178442,"interval":1000},"hot_keys":[{"name":"first-duck","ops":6.003482019571351e-05}]}` From 7d665904fc3300ed8a1e70f551800793c873777d Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 30 Mar 2021 13:08:54 -0500 Subject: [PATCH 348/761] inputs.ping: Always SetPrivileged(true) in native mode (#9072) * Always SetPrivileged(true) * Improve error message --- plugins/inputs/ping/README.md | 13 +++---------- plugins/inputs/ping/ping.go | 14 +++++++++----- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 7293a17081a71..82c0d58480b2a 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -102,7 +102,7 @@ $ systemctl edit telegraf #### Linux Permissions When using `method = "native"`, Telegraf will attempt to use privileged raw -ICMP sockets. On most systems, doing so requires `CAP_NET_RAW` capabilities. +ICMP sockets. On most systems, doing so requires `CAP_NET_RAW` capabilities or for Telegraf to be run as root. With systemd: ```sh @@ -127,16 +127,9 @@ setting capabilities. [man 7 capabilities]: http://man7.org/linux/man-pages/man7/capabilities.7.html -On Linux the default behaviour is to restrict creation of ping sockets for everybody. Execute the below command to enable creation of ping sockets for all possible user groups. The integers provided to ping_group_range defines the range of user groups that are permited to create ping sockets, were 2147483647 (the max of a signed int 2^31) is the max group identifier (GID). +#### Other OS Permissions -```sh -$ sudo sysctl -w net.ipv4.ping_group_range="0 2147483647" -``` - -Reference [`man 7 icmp`][man 7 icmp] for more information about ICMP echo -sockets and the `ping_group_range` setting. - -[man 7 icmp]: http://man7.org/linux/man-pages/man7/icmp.7.html +When using `method = "native"`, you will need permissions similar to the executable ping program for your OS. ### Metrics diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 6249677eab6e2..c8d768c64a385 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -166,10 +166,7 @@ func (p *Ping) nativePing(destination string) (*pingStats, error) { return nil, fmt.Errorf("failed to create new pinger: %w", err) } - // Required for windows. Despite the method name, this should work without the need to elevate privileges and has been tested on Windows 10 - if runtime.GOOS == "windows" { - pinger.SetPrivileged(true) - } + pinger.SetPrivileged(true) if p.IPv6 { pinger.SetNetwork("ip6") @@ -193,7 +190,14 @@ func (p *Ping) nativePing(destination string) (*pingStats, error) { pinger.Count = p.Count err = pinger.Run() if err != nil { - return nil, fmt.Errorf("failed to run pinger: %w", err) + if strings.Contains(err.Error(), "operation not permitted") { + if runtime.GOOS == "linux" { + return nil, fmt.Errorf("permission changes required, enable CAP_NET_RAW capabilities (refer to the ping plugin's README.md for more info)") + } + + return nil, fmt.Errorf("permission changes required, refer to the ping plugin's README.md for more info") + } + return nil, fmt.Errorf("%w", err) } ps.Statistics = *pinger.Statistics() From 9c54c8e233f09c0c011fcfd4a8de67ec0a47a428 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 30 Mar 2021 14:11:31 -0400 Subject: [PATCH 349/761] new linter commands (#9020) --- .golangci.yml | 4 ++-- Makefile | 38 +++++++++++++++++++++++++++----------- 2 files changed, 29 insertions(+), 13 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 9f01ba6881010..ffef28f505900 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -16,7 +16,6 @@ linters: - staticcheck - typecheck - unconvert - - unparam - unused - varcheck @@ -73,7 +72,7 @@ linters-settings: - name: unhandled-error - name: unnecessary-stmt - name: unreachable-code - - name: unused-parameter + # - name: unused-parameter - name: var-declaration - name: var-naming - name: waitgroup-by-value @@ -96,6 +95,7 @@ run: - docs - etc - scripts + # - plugins/parsers/influx/machine.go # which files to skip: they will be analyzed, but issues from them # won't be reported. Default value is empty list, but there is diff --git a/Makefile b/Makefile index 992ddb18993f1..7f090ca574514 100644 --- a/Makefile +++ b/Makefile @@ -69,15 +69,17 @@ all: .PHONY: help help: @echo 'Targets:' - @echo ' all - download dependencies and compile telegraf binary' - @echo ' deps - download dependencies' - @echo ' telegraf - compile telegraf binary' - @echo ' test - run short unit tests' - @echo ' fmt - format source files' - @echo ' tidy - tidy go modules' - @echo ' lint - run linter' - @echo ' check-deps - check docs/LICENSE_OF_DEPENDENCIES.md' - @echo ' clean - delete build artifacts' + @echo ' all - download dependencies and compile telegraf binary' + @echo ' deps - download dependencies' + @echo ' telegraf - compile telegraf binary' + @echo ' test - run short unit tests' + @echo ' fmt - format source files' + @echo ' tidy - tidy go modules' + @echo ' lint - run linter' + @echo ' lint-branch - run linter on changes in current branch since master' + @echo ' lint-install - install linter' + @echo ' check-deps - check docs/LICENSE_OF_DEPENDENCIES.md' + @echo ' clean - delete build artifacts' @echo '' @echo 'Package Targets:' @$(foreach dist,$(dists),echo " $(dist)";) @@ -131,14 +133,28 @@ vet: exit 1; \ fi +.PHONY: lint-install +lint-install: + + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.38.0 + .PHONY: lint lint: ifeq (, $(shell which golangci-lint)) - $(info golangci-lint can't be found, please install it: https://golangci-lint.run/usage/install/) + $(info golangci-lint can't be found, please run: make lint-install) + exit 1 +endif + + golangci-lint run + +.PHONY: lint-branch +lint-branch: +ifeq (, $(shell which golangci-lint)) + $(info golangci-lint can't be found, please run: make lint-install) exit 1 endif - golangci-lint -v run + golangci-lint run --new-from-rev master .PHONY: tidy tidy: From 66c639668c610cc05732da87cb5a0fe502acc6f4 Mon Sep 17 00:00:00 2001 From: Thomas Casteleyn Date: Tue, 30 Mar 2021 21:08:14 +0200 Subject: [PATCH 350/761] Fix inputs.snmp init when no mibs installed (#9050) --- plugins/inputs/snmp/snmp.go | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 35d00629acc4c..8f88211d56924 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -255,20 +255,22 @@ func (f *Field) init() error { return nil } - _, oidNum, oidText, conversion, err := SnmpTranslate(f.Oid) - if err != nil { - return fmt.Errorf("translating: %w", err) - } - f.Oid = oidNum - if f.Name == "" { - f.Name = oidText - } - if f.Conversion == "" { - f.Conversion = conversion + // check if oid needs translation or name is not set + if strings.ContainsAny(f.Oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") || f.Name == "" { + _, oidNum, oidText, conversion, err := SnmpTranslate(f.Oid) + if err != nil { + return fmt.Errorf("translating: %w", err) + } + f.Oid = oidNum + if f.Name == "" { + f.Name = oidText + } + if f.Conversion == "" { + f.Conversion = conversion + } + //TODO use textual convention conversion from the MIB } - //TODO use textual convention conversion from the MIB - f.initialized = true return nil } From 071fef78ef98163903aebfb7d8b1e636ac3a6614 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 30 Mar 2021 21:43:08 -0500 Subject: [PATCH 351/761] inputs.nfsclient: use uint64, also update error handling (#9067) * Use uint64 Fix error handling * update comment * More detail to error --- plugins/inputs/nfsclient/nfsclient.go | 43 +++-- plugins/inputs/nfsclient/nfsclient_test.go | 179 ++++++++++++--------- 2 files changed, 132 insertions(+), 90 deletions(-) diff --git a/plugins/inputs/nfsclient/nfsclient.go b/plugins/inputs/nfsclient/nfsclient.go index c2823dfa598d4..07a8382d9137f 100644 --- a/plugins/inputs/nfsclient/nfsclient.go +++ b/plugins/inputs/nfsclient/nfsclient.go @@ -2,7 +2,7 @@ package nfsclient import ( "bufio" - "log" + "fmt" "os" "regexp" "strconv" @@ -61,45 +61,48 @@ func (n *NFSClient) Description() string { return "Read per-mount NFS client metrics from /proc/self/mountstats" } -func convertToInt64(line []string) []int64 { +func convertToUint64(line []string) ([]uint64, error) { /* A "line" of input data (a pre-split array of strings) is processed one field at a time. Each field is converted to - an int64 value, and appened to an array of return values. - On an error, check for ErrRange, and throw a fatal error + an uint64 value, and appened to an array of return values. + On an error, check for ErrRange, and returns an error if found. This situation indicates a pretty major issue in the /proc/self/mountstats file, and returning faulty data is worse than no data. Other errors are ignored, and append whatever we got in the first place (probably 0). Yes, this is ugly. */ - var nline []int64 + var nline []uint64 if len(line) < 2 { - return nline + return nline, nil } // Skip the first field; it's handled specially as the "first" variable for _, l := range line[1:] { - val, err := strconv.ParseInt(l, 10, 64) + val, err := strconv.ParseUint(l, 10, 64) if err != nil { if numError, ok := err.(*strconv.NumError); ok { if numError.Err == strconv.ErrRange { - log.Fatalf("ErrRange: line:[%v] raw:[%v] -> parsed:[%v]\n", line, l, val) + return nil, fmt.Errorf("errrange: line:[%v] raw:[%v] -> parsed:[%v]", line, l, val) } } } nline = append(nline, val) } - return nline + return nline, nil } -func (n *NFSClient) parseStat(mountpoint string, export string, version string, line []string, acc telegraf.Accumulator) { +func (n *NFSClient) parseStat(mountpoint string, export string, version string, line []string, acc telegraf.Accumulator) error { tags := map[string]string{"mountpoint": mountpoint, "serverexport": export} - nline := convertToInt64(line) + nline, err := convertToUint64(line) + if err != nil { + return err + } if len(nline) == 0 { n.Log.Warnf("Parsing Stat line with one field: %s\n", line) - return + return nil } first := strings.Replace(line[0], ":", "", 1) @@ -240,9 +243,11 @@ func (n *NFSClient) parseStat(mountpoint string, export string, version string, } } } + + return nil } -func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator) { +func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator) error { var mount string var version string var export string @@ -293,9 +298,14 @@ func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator } if !skip { - n.parseStat(mount, export, version, line, acc) + err := n.parseStat(mount, export, version, line, acc) + if err != nil { + return fmt.Errorf("could not parseStat: %w", err) + } } } + + return nil } func (n *NFSClient) getMountStatsPath() string { @@ -316,7 +326,10 @@ func (n *NFSClient) Gather(acc telegraf.Accumulator) error { defer file.Close() scanner := bufio.NewScanner(file) - n.processText(scanner, acc) + err = n.processText(scanner, acc) + if err != nil { + return err + } if err := scanner.Err(); err != nil { n.Log.Errorf("%s", err) diff --git a/plugins/inputs/nfsclient/nfsclient_test.go b/plugins/inputs/nfsclient/nfsclient_test.go index 11a9e4dd37f08..961c0f34c8d75 100644 --- a/plugins/inputs/nfsclient/nfsclient_test.go +++ b/plugins/inputs/nfsclient/nfsclient_test.go @@ -2,10 +2,12 @@ package nfsclient import ( "bufio" - "github.com/influxdata/telegraf/testutil" "os" "strings" "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func getMountStatsPath() string { @@ -24,17 +26,18 @@ func TestNFSClientParsev3(t *testing.T) { nfsclient.nfs3Ops = map[string]bool{"READLINK": true, "GETATTR": false} nfsclient.nfs4Ops = map[string]bool{"READLINK": true, "GETATTR": false} data := strings.Fields(" READLINK: 500 501 502 503 504 505 506 507") - nfsclient.parseStat("1.2.3.4:/storage/NFS", "/A", "3", data, &acc) + err := nfsclient.parseStat("1.2.3.4:/storage/NFS", "/A", "3", data, &acc) + require.NoError(t, err) fieldsOps := map[string]interface{}{ - "ops": int64(500), - "trans": int64(501), - "timeouts": int64(502), - "bytes_sent": int64(503), - "bytes_recv": int64(504), - "queue_time": int64(505), - "response_time": int64(506), - "total_time": int64(507), + "ops": uint64(500), + "trans": uint64(501), + "timeouts": uint64(502), + "bytes_sent": uint64(503), + "bytes_recv": uint64(504), + "queue_time": uint64(505), + "response_time": uint64(506), + "total_time": uint64(507), } acc.AssertContainsFields(t, "nfs_ops", fieldsOps) } @@ -46,17 +49,41 @@ func TestNFSClientParsev4(t *testing.T) { nfsclient.nfs3Ops = map[string]bool{"DESTROY_SESSION": true, "GETATTR": false} nfsclient.nfs4Ops = map[string]bool{"DESTROY_SESSION": true, "GETATTR": false} data := strings.Fields(" DESTROY_SESSION: 500 501 502 503 504 505 506 507") - nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, &acc) + err := nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, &acc) + require.NoError(t, err) + + fieldsOps := map[string]interface{}{ + "ops": uint64(500), + "trans": uint64(501), + "timeouts": uint64(502), + "bytes_sent": uint64(503), + "bytes_recv": uint64(504), + "queue_time": uint64(505), + "response_time": uint64(506), + "total_time": uint64(507), + } + acc.AssertContainsFields(t, "nfs_ops", fieldsOps) +} + +func TestNFSClientParseLargeValue(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{Fullstat: true} + nfsclient.nfs3Ops = map[string]bool{"SETCLIENTID": true, "GETATTR": false} + nfsclient.nfs4Ops = map[string]bool{"SETCLIENTID": true, "GETATTR": false} + data := strings.Fields(" SETCLIENTID: 218 216 0 53568 12960 18446744073709531008 134 197") + err := nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, &acc) + require.NoError(t, err) fieldsOps := map[string]interface{}{ - "ops": int64(500), - "trans": int64(501), - "timeouts": int64(502), - "bytes_sent": int64(503), - "bytes_recv": int64(504), - "queue_time": int64(505), - "response_time": int64(506), - "total_time": int64(507), + "ops": uint64(218), + "trans": uint64(216), + "timeouts": uint64(0), + "bytes_sent": uint64(53568), + "bytes_recv": uint64(12960), + "queue_time": uint64(18446744073709531008), + "response_time": uint64(134), + "total_time": uint64(197), } acc.AssertContainsFields(t, "nfs_ops", fieldsOps) } @@ -72,14 +99,15 @@ func TestNFSClientProcessStat(t *testing.T) { scanner := bufio.NewScanner(file) - nfsclient.processText(scanner, &acc) + err := nfsclient.processText(scanner, &acc) + require.NoError(t, err) fieldsReadstat := map[string]interface{}{ - "ops": int64(600), - "retrans": int64(1), - "bytes": int64(1207), - "rtt": int64(606), - "exe": int64(607), + "ops": uint64(600), + "retrans": uint64(1), + "bytes": uint64(1207), + "rtt": uint64(606), + "exe": uint64(607), } readTags := map[string]string{ @@ -91,11 +119,11 @@ func TestNFSClientProcessStat(t *testing.T) { acc.AssertContainsTaggedFields(t, "nfsstat", fieldsReadstat, readTags) fieldsWritestat := map[string]interface{}{ - "ops": int64(700), - "retrans": int64(1), - "bytes": int64(1407), - "rtt": int64(706), - "exe": int64(707), + "ops": uint64(700), + "retrans": uint64(1), + "bytes": uint64(1407), + "rtt": uint64(706), + "exe": uint64(707), } writeTags := map[string]string{ @@ -117,57 +145,58 @@ func TestNFSClientProcessFull(t *testing.T) { scanner := bufio.NewScanner(file) - nfsclient.processText(scanner, &acc) + err := nfsclient.processText(scanner, &acc) + require.NoError(t, err) fieldsEvents := map[string]interface{}{ - "inoderevalidates": int64(301736), - "dentryrevalidates": int64(22838), - "datainvalidates": int64(410979), - "attrinvalidates": int64(26188427), - "vfsopen": int64(27525), - "vfslookup": int64(9140), - "vfsaccess": int64(114420), - "vfsupdatepage": int64(30785253), - "vfsreadpage": int64(5308856), - "vfsreadpages": int64(5364858), - "vfswritepage": int64(30784819), - "vfswritepages": int64(79832668), - "vfsgetdents": int64(170), - "vfssetattr": int64(64), - "vfsflush": int64(18194), - "vfsfsync": int64(29294718), - "vfslock": int64(0), - "vfsrelease": int64(18279), - "congestionwait": int64(0), - "setattrtrunc": int64(2), - "extendwrite": int64(785551), - "sillyrenames": int64(0), - "shortreads": int64(0), - "shortwrites": int64(0), - "delay": int64(0), - "pnfsreads": int64(0), - "pnfswrites": int64(0), + "inoderevalidates": uint64(301736), + "dentryrevalidates": uint64(22838), + "datainvalidates": uint64(410979), + "attrinvalidates": uint64(26188427), + "vfsopen": uint64(27525), + "vfslookup": uint64(9140), + "vfsaccess": uint64(114420), + "vfsupdatepage": uint64(30785253), + "vfsreadpage": uint64(5308856), + "vfsreadpages": uint64(5364858), + "vfswritepage": uint64(30784819), + "vfswritepages": uint64(79832668), + "vfsgetdents": uint64(170), + "vfssetattr": uint64(64), + "vfsflush": uint64(18194), + "vfsfsync": uint64(29294718), + "vfslock": uint64(0), + "vfsrelease": uint64(18279), + "congestionwait": uint64(0), + "setattrtrunc": uint64(2), + "extendwrite": uint64(785551), + "sillyrenames": uint64(0), + "shortreads": uint64(0), + "shortwrites": uint64(0), + "delay": uint64(0), + "pnfsreads": uint64(0), + "pnfswrites": uint64(0), } fieldsBytes := map[string]interface{}{ - "normalreadbytes": int64(204440464584), - "normalwritebytes": int64(110857586443), - "directreadbytes": int64(783170354688), - "directwritebytes": int64(296174954496), - "serverreadbytes": int64(1134399088816), - "serverwritebytes": int64(407107155723), - "readpages": int64(85749323), - "writepages": int64(30784819), + "normalreadbytes": uint64(204440464584), + "normalwritebytes": uint64(110857586443), + "directreadbytes": uint64(783170354688), + "directwritebytes": uint64(296174954496), + "serverreadbytes": uint64(1134399088816), + "serverwritebytes": uint64(407107155723), + "readpages": uint64(85749323), + "writepages": uint64(30784819), } fieldsXprtTCP := map[string]interface{}{ - "bind_count": int64(1), - "connect_count": int64(1), - "connect_time": int64(0), - "idle_time": int64(0), - "rpcsends": int64(96172963), - "rpcreceives": int64(96172963), - "badxids": int64(0), - "inflightsends": int64(620878754), - "backlogutil": int64(0), + "bind_count": uint64(1), + "connect_count": uint64(1), + "connect_time": uint64(0), + "idle_time": uint64(0), + "rpcsends": uint64(96172963), + "rpcreceives": uint64(96172963), + "badxids": uint64(0), + "inflightsends": uint64(620878754), + "backlogutil": uint64(0), } acc.AssertContainsFields(t, "nfs_events", fieldsEvents) From 78d67ba87b472405b88048e9d86235fcf5abaecc Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Wed, 31 Mar 2021 12:06:13 -0400 Subject: [PATCH 352/761] Add configurable option for the 'path' tag override in the Tail plugin. (#9069) * Add configurable option for the 'path' tag override in the Tail plugin. * get test cases to pass * update default config * convert to configurable string field --- plugins/inputs/tail/README.md | 3 +++ plugins/inputs/tail/tail.go | 11 +++++++++-- plugins/inputs/tail/tail_test.go | 6 ++++-- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/tail/README.md b/plugins/inputs/tail/README.md index 5664f8704eec3..abdf0878aff56 100644 --- a/plugins/inputs/tail/README.md +++ b/plugins/inputs/tail/README.md @@ -64,6 +64,9 @@ The plugin expects messages in one of the ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. + # path_tag = "path" + ## multiline parser/codec ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html #[inputs.tail.multiline] diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index c7b16eb7a4631..84a91635540bf 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -40,6 +40,7 @@ type Tail struct { WatchMethod string `toml:"watch_method"` MaxUndeliveredLines int `toml:"max_undelivered_lines"` CharacterEncoding string `toml:"character_encoding"` + PathTag string `toml:"path_tag"` Log telegraf.Logger `toml:"-"` tailers map[string]*tail.Tail @@ -70,6 +71,7 @@ func NewTail() *Tail { FromBeginning: false, MaxUndeliveredLines: 1000, offsets: offsetsCopy, + PathTag: "path", } } @@ -115,6 +117,9 @@ const sampleConfig = ` ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. + # path_tag = "path" + ## multiline parser/codec ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html #[inputs.tail.multiline] @@ -380,8 +385,10 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { } firstLine = false - for _, metric := range metrics { - metric.AddTag("path", tailer.Filename) + if t.PathTag != "" { + for _, metric := range metrics { + metric.AddTag(t.PathTag, tailer.Filename) + } } // try writing out metric first without blocking diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index f9acdbcdba6d4..99090f70d67a8 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -44,6 +44,7 @@ func NewTestTail() *Tail { MaxUndeliveredLines: 1000, offsets: offsetsCopy, WatchMethod: watchMethod, + PathTag: "path", } } @@ -357,6 +358,7 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) { plugin.Log = testutil.Logger{} plugin.FromBeginning = true plugin.Files = []string{tmpfile.Name()} + plugin.PathTag = "customPathTagMyFile" plugin.SetParserFunc(func() (parsers.Parser, error) { return json.New( &json.Config{ @@ -379,7 +381,7 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) { expected := []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{ - "path": tmpfile.Name(), + "customPathTagMyFile": tmpfile.Name(), }, map[string]interface{}{ "time_idle": 42.0, @@ -387,7 +389,7 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) { time.Unix(0, 0)), testutil.MustMetric("cpu", map[string]string{ - "path": tmpfile.Name(), + "customPathTagMyFile": tmpfile.Name(), }, map[string]interface{}{ "time_idle": 42.0, From 885bf273a929e1f92f242f75dc27749b193e3a90 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 31 Mar 2021 15:08:34 -0400 Subject: [PATCH 353/761] Starlark script for renaming prometheus remote write metrics (#9074) --- plugins/parsers/prometheusremotewrite/README.md | 10 ++++++---- plugins/processors/starlark/README.md | 1 + .../testdata/rename_prometheus_remote_write.star | 16 ++++++++++++++++ 3 files changed, 23 insertions(+), 4 deletions(-) create mode 100644 plugins/processors/starlark/testdata/rename_prometheus_remote_write.star diff --git a/plugins/parsers/prometheusremotewrite/README.md b/plugins/parsers/prometheusremotewrite/README.md index 1bad5bd6004ea..b523174e9184a 100644 --- a/plugins/parsers/prometheusremotewrite/README.md +++ b/plugins/parsers/prometheusremotewrite/README.md @@ -16,9 +16,7 @@ Converts prometheus remote write samples directly into Telegraf metrics. It can data_format = "prometheusremotewrite" ``` -### Example - -**Example Input** +### Example Input ``` prompb.WriteRequest{ Timeseries: []*prompb.TimeSeries{ @@ -38,7 +36,11 @@ prompb.WriteRequest{ ``` -**Example Output** +### Example Output ``` prometheus_remote_write,instance=localhost:9090,job=prometheus,quantile=0.99 go_gc_duration_seconds=4.63 1614889298859000000 ``` + +**For alignment with the [InfluxDB v1.x Prometheus Remote Write Spec](https://docs.influxdata.com/influxdb/v1.8/supported_protocols/prometheus/#how-prometheus-metrics-are-parsed-in-influxdb)** + +- Use the [Starlark processor rename prometheus remote write script](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star) to rename the measurement name to the fieldname and rename the fieldname to value. \ No newline at end of file diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index 62b5b85e766d4..e30ea506c13f7 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -237,6 +237,7 @@ def apply(metric): - [multiple metrics from json array](/plugins/processors/starlark/testdata/multiple_metrics_with_json.star) - Builds a new metric from each element of a json array then returns all the created metrics. - [custom error](/plugins/processors/starlark/testdata/fail.star) - Return a custom error with [fail](https://docs.bazel.build/versions/master/skylark/lib/globals.html#fail). - [compare with previous metric](/plugins/processors/starlark/testdata/compare_metrics.star) - Compare the current metric with the previous one using the shared state. +- [rename prometheus remote write](/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star) - Rename prometheus remote write measurement name with fieldname and rename fieldname to value. [All examples](/plugins/processors/starlark/testdata) are in the testdata folder. diff --git a/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star b/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star new file mode 100644 index 0000000000000..cee49196c48ff --- /dev/null +++ b/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star @@ -0,0 +1,16 @@ +# Specifically for prometheus remote write - renames the measurement name to the fieldname. Renames the fieldname to value. +# Assumes there is only one field as is the case for prometheus remote write. +# +# Example Input: +# prometheus_remote_write,instance=localhost:9090,job=prometheus,quantile=0.99 go_gc_duration_seconds=4.63 1614889298859000000 +# +# Example Output: +# go_gc_duration_seconds,instance=localhost:9090,job=prometheus,quantile=0.99 value=4.63 1614889299000000000 + +def apply(metric): + if metric.name == "prometheus_remote_write": + for k, v in metric.fields.items(): + metric.name = k + metric.fields["value"] = v + metric.fields.pop(k) + return metric \ No newline at end of file From 0b0fc087c4e1b56552ccd85fdb6b7b71ece792b8 Mon Sep 17 00:00:00 2001 From: LEDUNOIS Simon Date: Mon, 5 Apr 2021 21:07:02 +0200 Subject: [PATCH 354/761] feat: Add external Big blue button plugin (#9090) --- EXTERNAL_PLUGINS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index ffefd065c4e11..1bf0d2f1dd371 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -15,6 +15,7 @@ Pull requests welcome. - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. - [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts - [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels +- [Big Blue Button](https://github.com/SLedunois/bigbluebutton-telegraf-plugin) - Gather meetings information from [Big Blue Button](https://bigbluebutton.org/) server ## Outputs - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. From 5524acfb787a85ba0773f9b2070830bb41918690 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 6 Apr 2021 18:14:06 +0200 Subject: [PATCH 355/761] Remove deprecated and unused SetAggregate() and IsAggregate() functions. (#8994) --- metric.go | 10 ---------- metric/metric.go | 33 +++++++++++---------------------- metric/metric_test.go | 7 ------- models/running_aggregator.go | 4 ---- 4 files changed, 11 insertions(+), 43 deletions(-) diff --git a/metric.go b/metric.go index 6c7b1c6c5f75c..32bc69499aedd 100644 --- a/metric.go +++ b/metric.go @@ -122,14 +122,4 @@ type Metric interface { // Drop marks the metric as processed successfully without being written // to any output. Drop() - - // SetAggregate indicates the metric is an aggregated value. - // - // This method may be removed in the future and its use is discouraged. - SetAggregate(bool) - - // IsAggregate returns true if the Metric is an aggregate. - // - // This method may be removed in the future and its use is discouraged. - IsAggregate() bool } diff --git a/metric/metric.go b/metric/metric.go index e3b49c3a287fe..d28503b743f89 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -15,8 +15,7 @@ type metric struct { fields []*telegraf.Field tm time.Time - tp telegraf.ValueType - aggregate bool + tp telegraf.ValueType } func New( @@ -68,12 +67,11 @@ func New( // removed. func FromMetric(other telegraf.Metric) telegraf.Metric { m := &metric{ - name: other.Name(), - tags: make([]*telegraf.Tag, len(other.TagList())), - fields: make([]*telegraf.Field, len(other.FieldList())), - tm: other.Time(), - tp: other.Type(), - aggregate: other.IsAggregate(), + name: other.Name(), + tags: make([]*telegraf.Tag, len(other.TagList())), + fields: make([]*telegraf.Field, len(other.FieldList())), + tm: other.Time(), + tp: other.Type(), } for i, tag := range other.TagList() { @@ -233,12 +231,11 @@ func (m *metric) SetTime(t time.Time) { func (m *metric) Copy() telegraf.Metric { m2 := &metric{ - name: m.name, - tags: make([]*telegraf.Tag, len(m.tags)), - fields: make([]*telegraf.Field, len(m.fields)), - tm: m.tm, - tp: m.tp, - aggregate: m.aggregate, + name: m.name, + tags: make([]*telegraf.Tag, len(m.tags)), + fields: make([]*telegraf.Field, len(m.fields)), + tm: m.tm, + tp: m.tp, } for i, tag := range m.tags { @@ -251,14 +248,6 @@ func (m *metric) Copy() telegraf.Metric { return m2 } -func (m *metric) SetAggregate(aggregate bool) { - m.aggregate = aggregate -} - -func (m *metric) IsAggregate() bool { - return m.aggregate -} - func (m *metric) HashID() uint64 { h := fnv.New64a() h.Write([]byte(m.name)) diff --git a/metric/metric_test.go b/metric/metric_test.go index 7033d32303f16..b85f0c89ffdeb 100644 --- a/metric/metric_test.go +++ b/metric/metric_test.go @@ -333,10 +333,3 @@ func TestValueType(t *testing.T) { assert.Equal(t, telegraf.Gauge, m.Type()) } - -func TestCopyAggregate(t *testing.T) { - m1 := baseMetric() - m1.SetAggregate(true) - m2 := m1.Copy() - assert.True(t, m2.IsAggregate()) -} diff --git a/models/running_aggregator.go b/models/running_aggregator.go index cbfb9889b87e5..5aa3979c36926 100644 --- a/models/running_aggregator.go +++ b/models/running_aggregator.go @@ -117,10 +117,6 @@ func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric { r.Config.Tags, nil) - if m != nil { - m.SetAggregate(true) - } - r.MetricsPushed.Incr(1) return m From 868befcb5f67d5d72b13e1a78aed1009a8bfe2f6 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Tue, 6 Apr 2021 17:33:35 -0400 Subject: [PATCH 356/761] Add support for Logstash 7 'queue' stats from the Pipelines API (#9080) * LAdd support for logstash 7 'queue' stats for its pipelines stats API * appease the linter * Update samples_logstash7.go --- plugins/inputs/logstash/logstash.go | 26 +++- plugins/inputs/logstash/logstash_test.go | 69 ++++++++++ plugins/inputs/logstash/samples_logstash7.go | 137 +++++++++++++++++++ 3 files changed, 227 insertions(+), 5 deletions(-) create mode 100644 plugins/inputs/logstash/samples_logstash7.go diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index 92b392d67c36d..c9833f028654d 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -138,10 +138,13 @@ type PipelinePlugins struct { } type PipelineQueue struct { - Events float64 `json:"events"` - Type string `json:"type"` - Capacity interface{} `json:"capacity"` - Data interface{} `json:"data"` + Events float64 `json:"events"` + EventsCount *float64 `json:"events_count"` + Type string `json:"type"` + Capacity interface{} `json:"capacity"` + Data interface{} `json:"data"` + QueueSizeInBytes *float64 `json:"queue_size_in_bytes"` + MaxQueueSizeInBytes *float64 `json:"max_queue_size_in_bytes"` } const jvmStats = "/_node/stats/jvm" @@ -304,8 +307,13 @@ func (logstash *Logstash) gatherQueueStats( queueTags[tag] = value } + events := queue.Events + if queue.EventsCount != nil { + events = *queue.EventsCount + } + queueFields := map[string]interface{}{ - "events": queue.Events, + "events": events, } if queue.Type != "memory" { @@ -321,6 +329,14 @@ func (logstash *Logstash) gatherQueueStats( for field, value := range flattener.Fields { queueFields[field] = value } + + if queue.MaxQueueSizeInBytes != nil { + queueFields["max_queue_size_in_bytes"] = *queue.MaxQueueSizeInBytes + } + + if queue.QueueSizeInBytes != nil { + queueFields["queue_size_in_bytes"] = *queue.QueueSizeInBytes + } } accumulator.AddFields("logstash_queue", queueFields, queueTags) diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go index b0d020b487003..931af66b23fd6 100644 --- a/plugins/inputs/logstash/logstash_test.go +++ b/plugins/inputs/logstash/logstash_test.go @@ -16,6 +16,7 @@ var logstashTest = NewLogstash() var ( logstash5accPipelineStats testutil.Accumulator logstash6accPipelinesStats testutil.Accumulator + logstash7accPipelinesStats testutil.Accumulator logstash5accProcessStats testutil.Accumulator logstash6accProcessStats testutil.Accumulator logstash5accJVMStats testutil.Accumulator @@ -686,3 +687,71 @@ func Test_Logstash6GatherJVMStats(test *testing.T) { }, ) } + +func Test_Logstash7GatherPipelinesQueueStats(test *testing.T) { + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + _, err := fmt.Fprintf(writer, "%s", string(logstash7PipelinesJSON)) + if err != nil { + test.Logf("Can't print test json") + } + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHTTPClient() + + if err != nil { + test.Logf("Can't createHTTPClient") + } + logstashTest.client = client + } + + if err := logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash7accPipelinesStats); err != nil { + test.Logf("Can't gather Pipeline stats") + } + + fields := make(map[string]interface{}) + fields["duration_in_millis"] = float64(3032875.0) + fields["queue_push_duration_in_millis"] = float64(13300.0) + fields["in"] = float64(2665549.0) + fields["filtered"] = float64(2665549.0) + fields["out"] = float64(2665549.0) + + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_events", + fields, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + }, + ) + + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_queue", + map[string]interface{}{ + "events": float64(0), + "max_queue_size_in_bytes": float64(4294967296), + "queue_size_in_bytes": float64(32028566), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "queue_type": string("persisted"), + }, + ) +} diff --git a/plugins/inputs/logstash/samples_logstash7.go b/plugins/inputs/logstash/samples_logstash7.go new file mode 100644 index 0000000000000..fe05712909c81 --- /dev/null +++ b/plugins/inputs/logstash/samples_logstash7.go @@ -0,0 +1,137 @@ +package logstash + +const logstash7PipelinesJSON = ` +{ + "host" : "HOST01.local", + "version" : "7.4.2", + "http_address" : "127.0.0.1:9600", + "id" : "28580380-ad2c-4032-934b-76359125edca", + "name" : "HOST01.local", + "ephemeral_id" : "bd95ff6b-3fa8-42ae-be32-098a4e4ea1ec", + "status" : "green", + "snapshot" : true, + "pipeline" : { + "workers" : 8, + "batch_size" : 125, + "batch_delay" : 50 + }, + "pipelines" : { + "infra" : { + "events" : { + "in" : 2665549, + "out" : 2665549, + "duration_in_millis" : 3032875, + "filtered" : 2665549, + "queue_push_duration_in_millis" : 13300 + }, + "plugins" : { + "inputs" : [ { + "id" : "8526dc80bc2257ab08f96018f96b0c68dd03abc5695bb22fb9e96339a8dfb4f86", + "events" : { + "out" : 2665549, + "queue_push_duration_in_millis" : 13300 + }, + "peak_connections" : 1, + "name" : "beats", + "current_connections" : 1 + } ], + "codecs" : [ { + "id" : "plain_7312c097-1e7f-41db-983b-4f5a87a9eba2", + "encode" : { + "duration_in_millis" : 0, + "writes_in" : 0 + }, + "name" : "plain", + "decode" : { + "out" : 0, + "duration_in_millis" : 0, + "writes_in" : 0 + } + }, { + "id" : "rubydebug_e958e3dc-10f6-4dd6-b7c5-ae3de2892afb", + "encode" : { + "duration_in_millis" : 0, + "writes_in" : 0 + }, + "name" : "rubydebug", + "decode" : { + "out" : 0, + "duration_in_millis" : 0, + "writes_in" : 0 + } + }, { + "id" : "plain_addb97be-fb77-4cbc-b45c-0424cd5d0ac7", + "encode" : { + "duration_in_millis" : 0, + "writes_in" : 0 + }, + "name" : "plain", + "decode" : { + "out" : 0, + "duration_in_millis" : 0, + "writes_in" : 0 + } + } ], + "filters" : [ { + "id" : "9e8297a6ee7b61864f77853317dccde83d29952ef869010c385dcfc9064ab8b8", + "events" : { + "in" : 2665549, + "out" : 2665549, + "duration_in_millis" : 8648 + }, + "name" : "date", + "matches" : 2665549 + }, { + "id" : "bec0c77b3f53a78c7878449c72ec59f97be31c1f12f9621f61ed2d4563bad869", + "events" : { + "in" : 2665549, + "out" : 2665549, + "duration_in_millis" : 195138 + }, + "name" : "fingerprint" + } ], + "outputs" : [ { + "id" : "df59066a933f038354c1845ba44de692f70dbd0d2009ab07a12b98b776be7e3f", + "events" : { + "in" : 0, + "out" : 0, + "duration_in_millis" : 25 + }, + "name" : "stdout" + }, { + "id" : "38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3", + "events" : { + "in" : 2665549, + "out" : 2665549, + "duration_in_millis" : 2802177 + }, + "name" : "elasticsearch", + "bulk_requests" : { + "successes" : 2870, + "responses" : { + "200" : 2870 + } + }, + "documents" : { + "successes" : 2665549 + } + } ] + }, + "reloads" : { + "successes" : 4, + "last_error" : null, + "failures" : 0, + "last_success_timestamp" : "2020-06-05T08:06:12.538Z", + "last_failure_timestamp" : null + }, + "queue" : { + "type" : "persisted", + "events_count" : 0, + "queue_size_in_bytes" : 32028566, + "max_queue_size_in_bytes" : 4294967296 + }, + "hash" : "5bc589ae4b02cb3e436626429b50928b9d99360639c84dc7fc69268ac01a9fd0", + "ephemeral_id" : "4bcacefa-6cbf-461e-b14e-184edd9ebdf3" + } + } +}` From ef2def20dcb714161df71335e3b4a6afac2e564c Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Tue, 6 Apr 2021 17:35:54 -0400 Subject: [PATCH 357/761] Delete log.Fatal calls and replace with error returns. (#9086) * Delete log.Fatal calls and replace with error returns. * Update opcua_util.go * Update opcua_util.go --- plugins/inputs/opcua/opcua_client.go | 9 +- plugins/inputs/opcua/opcua_util.go | 64 ++++++----- .../webhooks/github/github_webhooks_models.go | 106 ++++-------------- plugins/inputs/win_eventlog/util.go | 1 - 4 files changed, 62 insertions(+), 118 deletions(-) diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index f213826f8fa13..eacfc3d00a8d9 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -440,13 +440,16 @@ func (o *OpcUA) setupOptions() error { if o.Certificate == "" && o.PrivateKey == "" { if o.SecurityPolicy != "None" || o.SecurityMode != "None" { - o.Certificate, o.PrivateKey = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, (365 * 24 * time.Hour)) + o.Certificate, o.PrivateKey, err = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, (365 * 24 * time.Hour)) + if err != nil { + return err + } } } - o.opts = generateClientOpts(endpoints, o.Certificate, o.PrivateKey, o.SecurityPolicy, o.SecurityMode, o.AuthMethod, o.Username, o.Password, time.Duration(o.RequestTimeout)) + o.opts, err = generateClientOpts(endpoints, o.Certificate, o.PrivateKey, o.SecurityPolicy, o.SecurityMode, o.AuthMethod, o.Username, o.Password, time.Duration(o.RequestTimeout)) - return nil + return err } func (o *OpcUA) getData() error { diff --git a/plugins/inputs/opcua/opcua_util.go b/plugins/inputs/opcua/opcua_util.go index a115f8558aad0..2197e8088ab8f 100644 --- a/plugins/inputs/opcua/opcua_util.go +++ b/plugins/inputs/opcua/opcua_util.go @@ -31,11 +31,11 @@ func newTempDir() (string, error) { return dir, err } -func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.Duration) (string, string) { +func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.Duration) (cert string, key string, err error) { dir, _ := newTempDir() if len(host) == 0 { - log.Fatalf("Missing required host parameter") + return "", "", fmt.Errorf("missing required host parameter") } if rsaBits == 0 { rsaBits = 2048 @@ -49,7 +49,7 @@ func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.D priv, err := rsa.GenerateKey(rand.Reader, rsaBits) if err != nil { - log.Fatalf("failed to generate private key: %s", err) + return "", "", fmt.Errorf("failed to generate private key: %s", err) } notBefore := time.Now() @@ -58,7 +58,7 @@ func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.D serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { - log.Fatalf("failed to generate serial number: %s", err) + return "", "", fmt.Errorf("failed to generate serial number: %s", err) } template := x509.Certificate{ @@ -88,33 +88,33 @@ func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.D derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv) if err != nil { - log.Fatalf("Failed to create certificate: %s", err) + return "", "", fmt.Errorf("failed to create certificate: %s", err) } certOut, err := os.Create(certFile) if err != nil { - log.Fatalf("failed to open %s for writing: %s", certFile, err) + return "", "", fmt.Errorf("failed to open %s for writing: %s", certFile, err) } if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { - log.Fatalf("failed to write data to %s: %s", certFile, err) + return "", "", fmt.Errorf("failed to write data to %s: %s", certFile, err) } if err := certOut.Close(); err != nil { - log.Fatalf("error closing %s: %s", certFile, err) + return "", "", fmt.Errorf("error closing %s: %s", certFile, err) } keyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { log.Printf("failed to open %s for writing: %s", keyFile, err) - return "", "" + return "", "", nil } if err := pem.Encode(keyOut, pemBlockForKey(priv)); err != nil { - log.Fatalf("failed to write data to %s: %s", keyFile, err) + return "", "", fmt.Errorf("failed to write data to %s: %s", keyFile, err) } if err := keyOut.Close(); err != nil { - log.Fatalf("error closing %s: %s", keyFile, err) + return "", "", fmt.Errorf("error closing %s: %s", keyFile, err) } - return certFile, keyFile + return certFile, keyFile, nil } func publicKey(priv interface{}) interface{} { @@ -144,9 +144,8 @@ func pemBlockForKey(priv interface{}) *pem.Block { } } -// OPT FUNCTIONS - -func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, policy, mode, auth, username, password string, requestTimeout time.Duration) []opcua.Option { +//revive:disable-next-line +func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, policy, mode, auth, username, password string, requestTimeout time.Duration) ([]opcua.Option, error) { opts := []opcua.Option{} appuri := "urn:telegraf:gopcua:client" appname := "Telegraf" @@ -157,9 +156,13 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, opts = append(opts, opcua.RequestTimeout(requestTimeout)) + var err error if certFile == "" && keyFile == "" { if policy != "None" || mode != "None" { - certFile, keyFile = generateCert(appuri, 2048, certFile, keyFile, (365 * 24 * time.Hour)) + certFile, keyFile, err = generateCert(appuri, 2048, certFile, keyFile, (365 * 24 * time.Hour)) + if err != nil { + return nil, err + } } } @@ -172,7 +175,7 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, } else { pk, ok := c.PrivateKey.(*rsa.PrivateKey) if !ok { - log.Fatalf("Invalid private key") + return nil, fmt.Errorf("invalid private key") } cert = c.Certificate[0] opts = append(opts, opcua.PrivateKey(pk), opcua.Certificate(cert)) @@ -190,11 +193,15 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, secPolicy = ua.SecurityPolicyURIPrefix + policy policy = "" default: - log.Fatalf("Invalid security policy: %s", policy) + return nil, fmt.Errorf("invalid security policy: %s", policy) } // Select the most appropriate authentication mode from server capabilities and user input - authMode, authOption := generateAuth(auth, cert, username, password) + authMode, authOption, err := generateAuth(auth, cert, username, password) + if err != nil { + return nil, err + } + opts = append(opts, authOption) var secMode ua.MessageSecurityMode @@ -210,7 +217,7 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, secMode = ua.MessageSecurityModeSignAndEncrypt mode = "" default: - log.Fatalf("Invalid security mode: %s", mode) + return nil, fmt.Errorf("invalid security mode: %s", mode) } // Allow input of only one of sec-mode,sec-policy when choosing 'None' @@ -252,24 +259,23 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, } if serverEndpoint == nil { // Didn't find an endpoint with matching policy and mode. - log.Printf("unable to find suitable server endpoint with selected sec-policy and sec-mode") - log.Fatalf("quitting") + return nil, fmt.Errorf("unable to find suitable server endpoint with selected sec-policy and sec-mode") } secPolicy = serverEndpoint.SecurityPolicyURI secMode = serverEndpoint.SecurityMode // Check that the selected endpoint is a valid combo - err := validateEndpointConfig(endpoints, secPolicy, secMode, authMode) + err = validateEndpointConfig(endpoints, secPolicy, secMode, authMode) if err != nil { - log.Fatalf("error validating input: %s", err) + return nil, fmt.Errorf("error validating input: %s", err) } opts = append(opts, opcua.SecurityFromEndpoint(serverEndpoint, authMode)) - return opts + return opts, nil } -func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua.Option) { +func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua.Option, error) { var err error var authMode ua.UserTokenType @@ -284,13 +290,13 @@ func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua if un == "" { if err != nil { - log.Fatalf("error reading username input: %s", err) + return 0, nil, fmt.Errorf("error reading the username input: %s", err) } } if pw == "" { if err != nil { - log.Fatalf("error reading username input: %s", err) + return 0, nil, fmt.Errorf("error reading the password input: %s", err) } } @@ -311,7 +317,7 @@ func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua authOption = opcua.AuthAnonymous() } - return authMode, authOption + return authMode, authOption, nil } func validateEndpointConfig(endpoints []*ua.EndpointDescription, secPolicy string, secMode ua.MessageSecurityMode, authMode ua.UserTokenType) error { diff --git a/plugins/inputs/webhooks/github/github_webhooks_models.go b/plugins/inputs/webhooks/github/github_webhooks_models.go index 4c15ac6c2907a..88c75526b28cf 100644 --- a/plugins/inputs/webhooks/github/github_webhooks_models.go +++ b/plugins/inputs/webhooks/github/github_webhooks_models.go @@ -2,7 +2,6 @@ package github import ( "fmt" - "log" "time" "github.com/influxdata/telegraf" @@ -107,10 +106,7 @@ func (s CommitCommentEvent) NewMetric() telegraf.Metric { "commit": s.Comment.Commit, "comment": s.Comment.Body, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -137,10 +133,7 @@ func (s CreateEvent) NewMetric() telegraf.Metric { "ref": s.Ref, "refType": s.RefType, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -167,10 +160,7 @@ func (s DeleteEvent) NewMetric() telegraf.Metric { "ref": s.Ref, "refType": s.RefType, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -198,10 +188,7 @@ func (s DeploymentEvent) NewMetric() telegraf.Metric { "environment": s.Deployment.Environment, "description": s.Deployment.Description, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -232,10 +219,7 @@ func (s DeploymentStatusEvent) NewMetric() telegraf.Metric { "depState": s.DeploymentStatus.State, "depDescription": s.DeploymentStatus.Description, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -260,10 +244,7 @@ func (s ForkEvent) NewMetric() telegraf.Metric { "issues": s.Repository.Issues, "fork": s.Forkee.Repository, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -288,10 +269,7 @@ func (s GollumEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -320,10 +298,7 @@ func (s IssueCommentEvent) NewMetric() telegraf.Metric { "comments": s.Issue.Comments, "body": s.Comment.Body, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -352,10 +327,7 @@ func (s IssuesEvent) NewMetric() telegraf.Metric { "title": s.Issue.Title, "comments": s.Issue.Comments, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -381,10 +353,7 @@ func (s MemberEvent) NewMetric() telegraf.Metric { "newMember": s.Member.User, "newMemberStatus": s.Member.Admin, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -407,10 +376,7 @@ func (s MembershipEvent) NewMetric() telegraf.Metric { "newMember": s.Member.User, "newMemberStatus": s.Member.Admin, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -433,10 +399,7 @@ func (s PageBuildEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -459,10 +422,7 @@ func (s PublicEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -496,10 +456,7 @@ func (s PullRequestEvent) NewMetric() telegraf.Metric { "deletions": s.PullRequest.Deletions, "changedFiles": s.PullRequest.ChangedFiles, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -534,10 +491,7 @@ func (s PullRequestReviewCommentEvent) NewMetric() telegraf.Metric { "commentFile": s.Comment.File, "comment": s.Comment.Comment, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -566,10 +520,7 @@ func (s PushEvent) NewMetric() telegraf.Metric { "before": s.Before, "after": s.After, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -594,10 +545,7 @@ func (s ReleaseEvent) NewMetric() telegraf.Metric { "issues": s.Repository.Issues, "tagName": s.Release.TagName, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -620,10 +568,7 @@ func (s RepositoryEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -650,10 +595,7 @@ func (s StatusEvent) NewMetric() telegraf.Metric { "commit": s.Commit, "state": s.State, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -678,10 +620,7 @@ func (s TeamAddEvent) NewMetric() telegraf.Metric { "issues": s.Repository.Issues, "teamName": s.Team.Name, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } @@ -704,9 +643,6 @@ func (s WatchEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, err := metric.New(meas, t, f, time.Now()) - if err != nil { - log.Fatalf("Failed to create %v event", event) - } + m, _ := metric.New(meas, t, f, time.Now()) return m } diff --git a/plugins/inputs/win_eventlog/util.go b/plugins/inputs/win_eventlog/util.go index f085c3c055f5c..7435cdb09ceaf 100644 --- a/plugins/inputs/win_eventlog/util.go +++ b/plugins/inputs/win_eventlog/util.go @@ -100,7 +100,6 @@ func UnrollXMLFields(data []byte, fieldsUsage map[string]int, separator string) break } if err != nil { - // log.Fatal(err) break } var parents []string From 14f428d2fdc22d967a3714b3288ef47f72dce330 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Tue, 6 Apr 2021 17:45:36 -0400 Subject: [PATCH 358/761] Add ability to handle 'binary logs' mySQL query with 3 columns, in case 3 columns are sent (MySQL 8 and greater) (#9082) * Add ability to handle 'binary logs' mySQL query with 3 columns, in case 3 columns are sent (MySQL 8 and greater) * Update mysql.go * Update mysql.go --- plugins/inputs/mysql/mysql.go | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 3f79b0e2d9346..faec0b73c7078 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -13,8 +13,8 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/mysql/v1" - "github.com/influxdata/telegraf/plugins/inputs/mysql/v2" + v1 "github.com/influxdata/telegraf/plugins/inputs/mysql/v1" + v2 "github.com/influxdata/telegraf/plugins/inputs/mysql/v2" ) type Mysql struct { @@ -711,17 +711,31 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat servtag := getDSNTag(serv) tags := map[string]string{"server": servtag} var ( - size uint64 - count uint64 - fileSize uint64 - fileName string + size uint64 + count uint64 + fileSize uint64 + fileName string + encrypted string ) + columns, err := rows.Columns() + if err != nil { + return err + } + numColumns := len(columns) + // iterate over rows and count the size and count of files for rows.Next() { - if err := rows.Scan(&fileName, &fileSize); err != nil { - return err + if numColumns == 3 { + if err := rows.Scan(&fileName, &fileSize, &encrypted); err != nil { + return err + } + } else { + if err := rows.Scan(&fileName, &fileSize); err != nil { + return err + } } + size += fileSize count++ } @@ -729,6 +743,7 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat "binary_size_bytes": size, "binary_files_count": count, } + acc.AddFields("mysql", fields, tags) return nil } From f0c85492c3666f6e20966d8b8b6bd7c78dae70e8 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 7 Apr 2021 13:19:06 -0600 Subject: [PATCH 359/761] Update changelog (cherry picked from commit dc4fa5dd9aa5876b6ba5022aab3d5453fecc7b2b) --- CHANGELOG.md | 18 ++++++++++++++++++ etc/telegraf.conf | 12 ++++++++---- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ce018f3fbec94..43e4e3fdd5e11 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,21 @@ +## v1.18.1 [2021-04-07] + +#### Bugfixes + + - [#9082](https://github.com/influxdata/telegraf/pull/9082) `inputs.mysql` Fix 'binary logs' query for MySQL 8 + - [#9069](https://github.com/influxdata/telegraf/pull/9069) `inputs.tail` Add configurable option for the 'path' tag override + - [#9067](https://github.com/influxdata/telegraf/pull/9067) `inputs.nfsclient` Fix integer overflow in fields from mountstat + - [#9050](https://github.com/influxdata/telegraf/pull/9050) `inputs.snmp` Fix init when no mibs are installed + - [#9072](https://github.com/influxdata/telegraf/pull/9072) `inputs.ping` Always call SetPrivileged(true) in native mode + - [#9043](https://github.com/influxdata/telegraf/pull/9043) `processors.ifname` Get interface name more effeciently + - [#9056](https://github.com/influxdata/telegraf/pull/9056) `outputs.yandex_cloud_monitoring` Use correct compute metadata URL to get folder-id + - [#9048](https://github.com/influxdata/telegraf/pull/9048) `outputs.azure_monitor` Handle error when initializing the auth object + - [#8549](https://github.com/influxdata/telegraf/pull/8549) `inputs.sqlserver` Fix sqlserver_process_cpu calculation + - [#9035](https://github.com/influxdata/telegraf/pull/9035) `inputs.ipmi_sensor` Fix panic + - [#9009](https://github.com/influxdata/telegraf/pull/9009) `inputs.docker` Fix panic when parsing container stats + - [#8333](https://github.com/influxdata/telegraf/pull/8333) `inputs.exec` Don't truncate messages in debug mode + - [#8769](https://github.com/influxdata/telegraf/pull/8769) `agent` Close running outputs when reloadinlg + ## v1.18.0 [2021-03-17] #### Release Notes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 1389597e07ab6..dae67c3bf6b59 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -5151,7 +5151,7 @@ # # timeout = "5ms" -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false @@ -6984,7 +6984,7 @@ # ## This value is propagated to pqos tool. Interval format is defined by pqos itself. # ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. # # sampling_interval = "10" -# +# # ## Optionally specify the path to pqos executable. # ## If not provided, auto discovery will be performed. # # pqos_path = "/usr/local/bin/pqos" @@ -6992,12 +6992,12 @@ # ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. # ## If not provided, default value is false. # # shortened_metrics = false -# +# # ## Specify the list of groups of CPU core(s) to be provided as pqos input. # ## Mandatory if processes aren't set and forbidden if processes are specified. # ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] # # cores = ["0-3"] -# +# # ## Specify the list of processes for which Metrics will be collected. # ## Mandatory if cores aren't set and forbidden if cores are specified. # ## e.g. ["qemu", "pmd"] @@ -7949,6 +7949,9 @@ # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" # +# ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. +# # path_tag = "path" +# # ## multiline parser/codec # ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html # #[inputs.tail.multiline] @@ -8194,3 +8197,4 @@ # [[inputs.zipkin]] # # path = "/api/v1/spans" # URL path for span data # # port = 9411 # Port on which Telegraf listens + From 2b41a1e1f4b15a658c042472035c4a577cfe5efa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patryk=20Ma=C5=82ek?= <69143962+pmalek-sumo@users.noreply.github.com> Date: Thu, 8 Apr 2021 16:31:31 +0200 Subject: [PATCH 360/761] Carbon2 serializer: sanitize metric name (#9026) --- config/config.go | 7 +- plugins/outputs/sumologic/sumologic_test.go | 16 +-- plugins/serializers/carbon2/README.md | 16 +++ plugins/serializers/carbon2/carbon2.go | 36 +++++- plugins/serializers/carbon2/carbon2_test.go | 124 ++++++++++++++++++-- plugins/serializers/registry.go | 9 +- 6 files changed, 182 insertions(+), 26 deletions(-) diff --git a/config/config.go b/config/config.go index b7c11a95ff8c4..097fff385f531 100644 --- a/config/config.go +++ b/config/config.go @@ -1388,6 +1388,7 @@ func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) c.getFieldString(tbl, "template", &sc.Template) c.getFieldStringSlice(tbl, "templates", &sc.Templates) c.getFieldString(tbl, "carbon2_format", &sc.Carbon2Format) + c.getFieldString(tbl, "carbon2_sanitize_replace_char", &sc.Carbon2SanitizeReplaceChar) c.getFieldInt(tbl, "influx_max_line_bytes", &sc.InfluxMaxLineBytes) c.getFieldBool(tbl, "influx_sort_fields", &sc.InfluxSortFields) @@ -1449,9 +1450,9 @@ func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, func (c *Config) missingTomlField(_ reflect.Type, key string) error { switch key { - case "alias", "carbon2_format", "collectd_auth_file", "collectd_parse_multivalue", - "collectd_security_level", "collectd_typesdb", "collection_jitter", "csv_column_names", - "csv_column_types", "csv_comment", "csv_delimiter", "csv_header_row_count", + case "alias", "carbon2_format", "carbon2_sanitize_replace_char", "collectd_auth_file", + "collectd_parse_multivalue", "collectd_security_level", "collectd_typesdb", "collection_jitter", + "csv_column_names", "csv_column_types", "csv_comment", "csv_delimiter", "csv_header_row_count", "csv_measurement_column", "csv_skip_columns", "csv_skip_rows", "csv_tag_columns", "csv_timestamp_column", "csv_timestamp_format", "csv_timezone", "csv_trim_space", "csv_skip_values", "data_format", "data_type", "delay", "drop", "drop_original", "dropwizard_metric_registry_path", diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index d6fe2731fcd3e..b7fc917b43368 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -96,7 +96,7 @@ func TestMethod(t *testing.T) { w.WriteHeader(http.StatusOK) }) - serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) plugin := tt.plugin() @@ -173,7 +173,7 @@ func TestStatusCode(t *testing.T) { w.WriteHeader(tt.statusCode) }) - serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) tt.plugin.SetSerializer(serializer) @@ -199,7 +199,7 @@ func TestContentType(t *testing.T) { s.headers = map[string]string{ contentTypeHeader: carbon2ContentType, } - sr, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + sr, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) s.SetSerializer(sr) return s @@ -213,7 +213,7 @@ func TestContentType(t *testing.T) { s.headers = map[string]string{ contentTypeHeader: carbon2ContentType, } - sr, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatMetricIncludesField)) + sr, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatMetricIncludesField), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) s.SetSerializer(sr) return s @@ -310,7 +310,7 @@ func TestContentEncodingGzip(t *testing.T) { w.WriteHeader(http.StatusNoContent) }) - serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) plugin := tt.plugin() @@ -345,7 +345,7 @@ func TestDefaultUserAgent(t *testing.T) { MaxRequstBodySize: Default().MaxRequstBodySize, } - serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) plugin.SetSerializer(serializer) @@ -594,7 +594,7 @@ func TestMaxRequestBodySize(t *testing.T) { w.WriteHeader(http.StatusOK) }) - serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) plugin := tt.plugin() @@ -626,7 +626,7 @@ func TestTryingToSendEmptyMetricsDoesntFail(t *testing.T) { plugin := Default() plugin.URL = u.String() - serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate)) + serializer, err := carbon2.NewSerializer(string(carbon2.Carbon2FormatFieldSeparate), carbon2.DefaultSanitizeReplaceChar) require.NoError(t, err) plugin.SetSerializer(serializer) diff --git a/plugins/serializers/carbon2/README.md b/plugins/serializers/carbon2/README.md index e32a420aec0af..3ad54a1699d3a 100644 --- a/plugins/serializers/carbon2/README.md +++ b/plugins/serializers/carbon2/README.md @@ -21,6 +21,11 @@ The `carbon2` serializer translates the Telegraf metric format to the [Carbon2 f ## * "metric_includes_field" ## * "" - defaults to "field_separate" # carbon2_format = "field_separate" + + ## Character used for replacing sanitized characters. By default ":" is used. + ## The following character set is being replaced with sanitize replace char: + ## !@#$%^&*()+`'\"[]{};<>,?/\\|= + # carbon2_sanitize_replace_char = ":" ``` Standard form: @@ -52,6 +57,17 @@ metric=name_field_2 host=foo 4 1234567890 metric=name_field_N host=foo 59 1234567890 ``` +### Metric name sanitization + +In order to sanitize the metric name one can specify `carbon2_sanitize_replace_char` +in order to replace the following characters in the metric name: + +``` +!@#$%^&*()+`'\"[]{};<>,?/\\|= +``` + +By default they will be replaced with `:`. + ## Metrics The serializer converts the metrics by creating `intrinsic_tags` using the combination of metric name and fields. diff --git a/plugins/serializers/carbon2/carbon2.go b/plugins/serializers/carbon2/carbon2.go index 1b05d4cb2d4c7..4eb5798d64a69 100644 --- a/plugins/serializers/carbon2/carbon2.go +++ b/plugins/serializers/carbon2/carbon2.go @@ -2,6 +2,7 @@ package carbon2 import ( "bytes" + "errors" "fmt" "strconv" "strings" @@ -23,11 +24,23 @@ var formats = map[format]struct{}{ Carbon2FormatMetricIncludesField: {}, } +const ( + DefaultSanitizeReplaceChar = ":" + sanitizedChars = "!@#$%^&*()+`'\"[]{};<>,?/\\|=" +) + type Serializer struct { - metricsFormat format + metricsFormat format + sanitizeReplacer *strings.Replacer } -func NewSerializer(metricsFormat string) (*Serializer, error) { +func NewSerializer(metricsFormat string, sanitizeReplaceChar string) (*Serializer, error) { + if sanitizeReplaceChar == "" { + sanitizeReplaceChar = DefaultSanitizeReplaceChar + } else if len(sanitizeReplaceChar) > 1 { + return nil, errors.New("sanitize replace char has to be a singular character") + } + var f = format(metricsFormat) if _, ok := formats[f]; !ok { @@ -40,7 +53,8 @@ func NewSerializer(metricsFormat string) (*Serializer, error) { } return &Serializer{ - metricsFormat: f, + metricsFormat: f, + sanitizeReplacer: createSanitizeReplacer(sanitizedChars, rune(sanitizeReplaceChar[0])), }, nil } @@ -65,15 +79,17 @@ func (s *Serializer) createObject(metric telegraf.Metric) []byte { continue } + name := s.sanitizeReplacer.Replace(metric.Name()) + switch metricsFormat { case Carbon2FormatFieldSeparate: m.WriteString(serializeMetricFieldSeparate( - metric.Name(), fieldName, + name, fieldName, )) case Carbon2FormatMetricIncludesField: m.WriteString(serializeMetricIncludeField( - metric.Name(), fieldName, + name, fieldName, )) } @@ -152,3 +168,13 @@ func bool2int(b bool) int { } return i } + +// createSanitizeReplacer creates string replacer replacing all provided +// characters with the replaceChar. +func createSanitizeReplacer(sanitizedChars string, replaceChar rune) *strings.Replacer { + sanitizeCharPairs := make([]string, 0, 2*len(sanitizedChars)) + for _, c := range sanitizedChars { + sanitizeCharPairs = append(sanitizeCharPairs, string(c), string(replaceChar)) + } + return strings.NewReplacer(sanitizeCharPairs...) +} diff --git a/plugins/serializers/carbon2/carbon2_test.go b/plugins/serializers/carbon2/carbon2_test.go index 1d6359858dd9e..4afc0932cc7ba 100644 --- a/plugins/serializers/carbon2/carbon2_test.go +++ b/plugins/serializers/carbon2/carbon2_test.go @@ -46,7 +46,7 @@ func TestSerializeMetricFloat(t *testing.T) { for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.Serialize(m) @@ -84,7 +84,7 @@ func TestSerializeMetricWithEmptyStringTag(t *testing.T) { for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.Serialize(m) @@ -122,7 +122,7 @@ func TestSerializeWithSpaces(t *testing.T) { for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.Serialize(m) @@ -160,7 +160,7 @@ func TestSerializeMetricInt(t *testing.T) { for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.Serialize(m) @@ -198,7 +198,7 @@ func TestSerializeMetricString(t *testing.T) { for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.Serialize(m) @@ -255,7 +255,7 @@ func TestSerializeMetricBool(t *testing.T) { for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.Serialize(tc.metric) @@ -300,7 +300,7 @@ metric=cpu_value 42 0 for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - s, err := NewSerializer(string(tc.format)) + s, err := NewSerializer(string(tc.format), DefaultSanitizeReplaceChar) require.NoError(t, err) buf, err := s.SerializeBatch(metrics) @@ -310,3 +310,113 @@ metric=cpu_value 42 0 }) } } + +func TestSerializeMetricIsProperlySanitized(t *testing.T) { + now := time.Now() + + testcases := []struct { + metricFunc func() (telegraf.Metric, error) + format format + expected string + replaceChar string + expectedErr bool + }{ + { + metricFunc: func() (telegraf.Metric, error) { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1", nil, fields, now) + }, + format: Carbon2FormatFieldSeparate, + expected: fmt.Sprintf("metric=cpu:1 field=usage_idle 91.5 %d\n", now.Unix()), + replaceChar: DefaultSanitizeReplaceChar, + }, + { + metricFunc: func() (telegraf.Metric, error) { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1", nil, fields, now) + }, + format: Carbon2FormatFieldSeparate, + expected: fmt.Sprintf("metric=cpu_1 field=usage_idle 91.5 %d\n", now.Unix()), + replaceChar: "_", + }, + { + metricFunc: func() (telegraf.Metric, error) { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1=tmp$custom", nil, fields, now) + }, + format: Carbon2FormatFieldSeparate, + expected: fmt.Sprintf("metric=cpu:1:tmp:custom field=usage_idle 91.5 %d\n", now.Unix()), + replaceChar: DefaultSanitizeReplaceChar, + }, + { + metricFunc: func() (telegraf.Metric, error) { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1=tmp$custom%namespace", nil, fields, now) + }, + format: Carbon2FormatFieldSeparate, + expected: fmt.Sprintf("metric=cpu:1:tmp:custom:namespace field=usage_idle 91.5 %d\n", now.Unix()), + replaceChar: DefaultSanitizeReplaceChar, + }, + { + metricFunc: func() (telegraf.Metric, error) { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1=tmp$custom%namespace", nil, fields, now) + }, + format: Carbon2FormatMetricIncludesField, + expected: fmt.Sprintf("metric=cpu:1:tmp:custom:namespace_usage_idle 91.5 %d\n", now.Unix()), + replaceChar: DefaultSanitizeReplaceChar, + }, + { + metricFunc: func() (telegraf.Metric, error) { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1=tmp$custom%namespace", nil, fields, now) + }, + format: Carbon2FormatMetricIncludesField, + expected: fmt.Sprintf("metric=cpu_1_tmp_custom_namespace_usage_idle 91.5 %d\n", now.Unix()), + replaceChar: "_", + }, + { + metricFunc: func() (telegraf.Metric, error) { + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + return metric.New("cpu=1=tmp$custom%namespace", nil, fields, now) + }, + format: Carbon2FormatMetricIncludesField, + expectedErr: true, + replaceChar: "___", + }, + } + + for _, tc := range testcases { + t.Run(string(tc.format), func(t *testing.T) { + m, err := tc.metricFunc() + require.NoError(t, err) + + s, err := NewSerializer(string(tc.format), tc.replaceChar) + if tc.expectedErr { + require.Error(t, err) + return + } + + require.NoError(t, err) + + buf, err := s.Serialize(m) + require.NoError(t, err) + + assert.Equal(t, tc.expected, string(buf)) + }) + } +} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index f6c62fc12cbda..247324d4ab4f5 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -53,6 +53,9 @@ type Config struct { // Carbon2 metric format. Carbon2Format string `toml:"carbon2_format"` + // Character used for metric name sanitization in Carbon2. + Carbon2SanitizeReplaceChar string `toml:"carbon2_sanitize_replace_char"` + // Support tags in graphite protocol GraphiteTagSupport bool `toml:"graphite_tag_support"` @@ -123,7 +126,7 @@ func NewSerializer(config *Config) (Serializer, error) { case "nowmetric": serializer, err = NewNowSerializer() case "carbon2": - serializer, err = NewCarbon2Serializer(config.Carbon2Format) + serializer, err = NewCarbon2Serializer(config.Carbon2Format, config.Carbon2SanitizeReplaceChar) case "wavefront": serializer, err = NewWavefrontSerializer(config.Prefix, config.WavefrontUseStrict, config.WavefrontSourceOverride) case "prometheus": @@ -186,8 +189,8 @@ func NewJSONSerializer(timestampUnits time.Duration) (Serializer, error) { return json.NewSerializer(timestampUnits) } -func NewCarbon2Serializer(carbon2format string) (Serializer, error) { - return carbon2.NewSerializer(carbon2format) +func NewCarbon2Serializer(carbon2format string, carbon2SanitizeReplaceChar string) (Serializer, error) { + return carbon2.NewSerializer(carbon2format, carbon2SanitizeReplaceChar) } func NewSplunkmetricSerializer(splunkmetricHecRouting bool, splunkmetricMultimetric bool) (Serializer, error) { From 8e7da355b31801283578a01ed82f9d2bc76c72cd Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 8 Apr 2021 18:43:39 +0200 Subject: [PATCH 361/761] Linter fixes (unhandled errors) -- Part 1 (#8992) --- plugins/inputs/burrow/burrow_test.go | 22 +- .../inputs/hddtemp/go-hddtemp/hddtemp_test.go | 47 +- plugins/inputs/http/http_test.go | 12 +- .../http_listener_v2/http_listener_v2.go | 57 ++- .../http_listener_v2/http_listener_v2_test.go | 53 +-- plugins/inputs/http_response/http_response.go | 10 +- .../http_response/http_response_test.go | 11 +- plugins/inputs/httpjson/httpjson_test.go | 9 +- plugins/inputs/icinga2/icinga2.go | 4 +- plugins/inputs/icinga2/icinga2_test.go | 5 +- plugins/inputs/influxdb/influxdb_test.go | 18 +- .../influxdb_listener/influxdb_listener.go | 40 +- .../influxdb_listener_test.go | 54 ++- .../influxdb_v2_listener.go | 30 +- .../influxdb_v2_listener_test.go | 48 +- plugins/inputs/ipmi_sensor/ipmi_test.go | 6 + plugins/inputs/jenkins/client.go | 2 + plugins/inputs/jenkins/jenkins_test.go | 2 + plugins/inputs/jolokia2/client_test.go | 79 ++-- plugins/inputs/jolokia2/jolokia_test.go | 7 +- .../openconfig_telemetry_test.go | 19 +- .../inputs/kafka_consumer/kafka_consumer.go | 6 +- .../kafka_consumer/kafka_consumer_test.go | 43 +- .../kafka_consumer_legacy.go | 5 +- .../kafka_consumer_legacy_test.go | 23 +- plugins/inputs/kapacitor/kapacitor_test.go | 12 +- plugins/inputs/kernel/kernel_test.go | 52 +-- .../kernel_vmstat/kernel_vmstat_test.go | 40 +- .../kinesis_consumer/kinesis_consumer.go | 4 +- plugins/inputs/kube_inventory/client_test.go | 5 +- .../inputs/kube_inventory/daemonset_test.go | 85 ++-- .../inputs/kube_inventory/deployment_test.go | 76 ++-- .../inputs/kube_inventory/endpoint_test.go | 112 +++-- plugins/inputs/kube_inventory/ingress_test.go | 74 ++-- plugins/inputs/kube_inventory/node_test.go | 80 ++-- .../kube_inventory/persistentvolume_test.go | 57 +-- .../persistentvolumeclaim_test.go | 73 ++-- plugins/inputs/kube_inventory/pod_test.go | 285 ++++++------ plugins/inputs/kube_inventory/service_test.go | 88 ++-- .../inputs/kube_inventory/statefulset_test.go | 85 ++-- plugins/inputs/kubernetes/kubernetes_test.go | 6 +- plugins/inputs/leofs/leofs.go | 9 +- plugins/inputs/leofs/leofs_test.go | 33 +- .../inputs/linux_sysctl_fs/linux_sysctl_fs.go | 28 +- plugins/inputs/logstash/logstash_test.go | 103 ++--- plugins/inputs/mailchimp/chimp_api.go | 4 +- plugins/inputs/mailchimp/mailchimp_test.go | 9 +- plugins/inputs/marklogic/marklogic_test.go | 3 +- plugins/inputs/mcrouter/mcrouter.go | 4 +- plugins/inputs/memcached/memcached.go | 4 +- plugins/inputs/mesos/mesos.go | 2 + plugins/inputs/mesos/mesos_test.go | 73 +--- .../inputs/minecraft/internal/rcon/rcon.go | 85 ++-- plugins/inputs/modbus/modbus.go | 6 +- plugins/inputs/modbus/modbus_test.go | 2 +- plugins/inputs/mongodb/mongodb.go | 9 +- plugins/inputs/monit/monit_test.go | 65 +-- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 6 +- plugins/inputs/mysql/mysql.go | 6 +- plugins/inputs/nats/nats_test.go | 26 +- .../inputs/neptune_apex/neptune_apex_test.go | 271 +++++------- plugins/inputs/net_response/net_response.go | 56 ++- .../inputs/net_response/net_response_test.go | 106 ++--- plugins/inputs/nfsclient/nfsclient.go | 3 +- plugins/inputs/nginx/nginx_test.go | 16 +- plugins/inputs/nginx_plus/nginx_plus_test.go | 18 +- .../nginx_plus_api_metrics_test.go | 41 +- plugins/inputs/nginx_sts/nginx_sts_test.go | 18 +- .../nginx_upstream_check_test.go | 28 +- plugins/inputs/nginx_vts/nginx_vts_test.go | 18 +- plugins/inputs/nsq/nsq_test.go | 6 +- plugins/inputs/nsq_consumer/nsq_consumer.go | 14 +- .../inputs/nsq_consumer/nsq_consumer_test.go | 65 +-- plugins/inputs/opcua/opcua_client.go | 14 +- plugins/inputs/opcua/opcua_util.go | 20 +- .../openweathermap/openweathermap_test.go | 52 +-- plugins/inputs/passenger/passenger_test.go | 26 +- plugins/inputs/pgbouncer/pgbouncer.go | 8 +- plugins/inputs/phpfpm/child.go | 41 +- plugins/inputs/phpfpm/fcgi.go | 5 +- plugins/inputs/phpfpm/phpfpm_test.go | 86 ++-- plugins/inputs/ping/ping_test.go | 31 +- plugins/inputs/postgresql/postgresql.go | 12 +- plugins/inputs/postgresql/service.go | 2 + .../postgresql_extensible.go | 18 +- .../postgresql_extensible_test.go | 8 +- plugins/inputs/powerdns/powerdns.go | 6 +- plugins/inputs/powerdns/powerdns_test.go | 4 + .../powerdns_recursor/powerdns_recursor.go | 10 +- .../powerdns_recursor_test.go | 49 +-- plugins/inputs/procstat/procstat_test.go | 2 + plugins/inputs/prometheus/kubernetes.go | 4 +- plugins/inputs/prometheus/parser.go | 5 +- plugins/inputs/prometheus/prometheus_test.go | 27 +- .../inputs/puppetagent/puppetagent_test.go | 6 +- plugins/inputs/rabbitmq/rabbitmq.go | 4 +- plugins/inputs/rabbitmq/rabbitmq_test.go | 11 +- plugins/inputs/raindrops/raindrops_test.go | 10 +- plugins/inputs/ravendb/ravendb_test.go | 25 +- plugins/inputs/redfish/redfish_test.go | 10 +- plugins/inputs/rethinkdb/rethinkdb.go | 3 +- plugins/inputs/rethinkdb/rethinkdb_server.go | 1 - plugins/inputs/riak/riak_test.go | 6 +- .../riemann_listener/riemann_listener.go | 18 +- plugins/inputs/sensors/sensors_test.go | 14 +- plugins/inputs/sflow/packetdecoder_test.go | 3 +- plugins/inputs/sflow/sflow.go | 6 +- plugins/inputs/sflow/sflow_test.go | 6 +- plugins/inputs/snmp/snmp_mocks_test.go | 3 + plugins/inputs/snmp/snmp_test.go | 22 +- .../inputs/socket_listener/socket_listener.go | 33 +- .../socket_listener/socket_listener_test.go | 12 +- plugins/inputs/solr/solr_test.go | 30 ++ plugins/inputs/sqlserver/sqlserver_test.go | 164 ++++--- plugins/inputs/stackdriver/stackdriver.go | 36 +- plugins/inputs/statsd/statsd.go | 52 ++- plugins/inputs/statsd/statsd_test.go | 409 +++++------------- plugins/inputs/suricata/suricata.go | 2 + plugins/inputs/suricata/suricata_test.go | 62 ++- plugins/inputs/synproxy/synproxy_test.go | 2 + plugins/inputs/syslog/nontransparent_test.go | 6 +- plugins/inputs/syslog/octetcounting_test.go | 6 +- plugins/inputs/syslog/rfc5426_test.go | 6 +- plugins/inputs/syslog/syslog.go | 26 +- plugins/inputs/sysstat/sysstat_test.go | 6 +- plugins/inputs/system/system.go | 6 + plugins/inputs/tail/multiline.go | 10 +- plugins/inputs/tail/multiline_test.go | 18 +- plugins/inputs/tail/tail_test.go | 8 +- plugins/inputs/tcp_listener/tcp_listener.go | 17 +- .../inputs/tcp_listener/tcp_listener_test.go | 62 +-- plugins/inputs/teamspeak/teamspeak.go | 5 +- plugins/inputs/teamspeak/teamspeak_test.go | 33 +- plugins/inputs/tengine/tengine.go | 3 +- plugins/inputs/tengine/tengine_test.go | 4 +- plugins/inputs/tomcat/tomcat.go | 4 +- plugins/inputs/tomcat/tomcat_test.go | 12 +- plugins/inputs/trig/trig_test.go | 3 +- plugins/inputs/twemproxy/twemproxy_test.go | 8 +- plugins/inputs/udp_listener/udp_listener.go | 10 +- .../inputs/udp_listener/udp_listener_test.go | 12 +- plugins/inputs/uwsgi/uwsgi_test.go | 10 +- plugins/inputs/varnish/varnish_test.go | 16 +- plugins/inputs/vsphere/finder.go | 4 + plugins/inputs/vsphere/vsphere_test.go | 5 +- .../inputs/webhooks/github/github_webhooks.go | 4 +- plugins/inputs/webhooks/webhooks.go | 2 + plugins/inputs/x509_cert/x509_cert_test.go | 109 ++--- .../stress_test_write/stress_test_write.go | 2 +- .../cmd/thrift_serialize/thrift_serialize.go | 2 - plugins/inputs/zipkin/zipkin.go | 2 + plugins/inputs/zookeeper/zookeeper.go | 8 +- 152 files changed, 2267 insertions(+), 2425 deletions(-) diff --git a/plugins/inputs/burrow/burrow_test.go b/plugins/inputs/burrow/burrow_test.go index cafbcb9408775..de0b56692e11a 100644 --- a/plugins/inputs/burrow/burrow_test.go +++ b/plugins/inputs/burrow/burrow_test.go @@ -37,6 +37,8 @@ func getHTTPServer() *httptest.Server { body, code := getResponseJSON(r.RequestURI) w.WriteHeader(code) w.Header().Set("Content-Type", "application/json") + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive w.Write(body) })) } @@ -61,6 +63,8 @@ func getHTTPServerBasicAuth() *httptest.Server { body, code := getResponseJSON(r.RequestURI) w.WriteHeader(code) w.Header().Set("Content-Type", "application/json") + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive w.Write(body) })) } @@ -72,7 +76,7 @@ func TestBurrowTopic(t *testing.T) { plugin := &burrow{Servers: []string{s.URL}} acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) fields := []map[string]interface{}{ // topicA @@ -103,7 +107,7 @@ func TestBurrowPartition(t *testing.T) { Servers: []string{s.URL}, } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) fields := []map[string]interface{}{ { @@ -151,7 +155,7 @@ func TestBurrowGroup(t *testing.T) { Servers: []string{s.URL}, } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) fields := []map[string]interface{}{ { @@ -189,7 +193,7 @@ func TestMultipleServers(t *testing.T) { Servers: []string{s1.URL, s2.URL}, } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 14, len(acc.Metrics)) require.Empty(t, acc.Errors) @@ -205,7 +209,7 @@ func TestMultipleRuns(t *testing.T) { } for i := 0; i < 4; i++ { acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 7, len(acc.Metrics)) require.Empty(t, acc.Errors) @@ -224,7 +228,7 @@ func TestBasicAuthConfig(t *testing.T) { } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 7, len(acc.Metrics)) require.Empty(t, acc.Errors) @@ -241,7 +245,7 @@ func TestFilterClusters(t *testing.T) { } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) // no match by cluster require.Exactly(t, 0, len(acc.Metrics)) @@ -260,7 +264,7 @@ func TestFilterGroups(t *testing.T) { } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 1, len(acc.Metrics)) require.Empty(t, acc.Errors) @@ -278,7 +282,7 @@ func TestFilterTopics(t *testing.T) { } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 3, len(acc.Metrics)) require.Empty(t, acc.Errors) diff --git a/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go index 57d53270b44c1..41d513e4011e3 100644 --- a/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go @@ -2,7 +2,6 @@ package hddtemp import ( "net" - "reflect" "testing" "github.com/stretchr/testify/require" @@ -13,10 +12,7 @@ func TestFetch(t *testing.T) { defer l.Close() disks, err := New().Fetch(l.Addr().String()) - - if err != nil { - t.Error("expecting err to be nil") - } + require.NoError(t, err) expected := []Disk{ { @@ -26,18 +22,12 @@ func TestFetch(t *testing.T) { Unit: "C", }, } - - if !reflect.DeepEqual(expected, disks) { - t.Error("disks' slice is different from expected") - } + require.Equal(t, expected, disks, "disks' slice is different from expected") } func TestFetchWrongAddress(t *testing.T) { _, err := New().Fetch("127.0.0.1:1") - - if err == nil { - t.Error("expecting err to be non-nil") - } + require.Error(t, err) } func TestFetchStatus(t *testing.T) { @@ -45,10 +35,7 @@ func TestFetchStatus(t *testing.T) { defer l.Close() disks, err := New().Fetch(l.Addr().String()) - - if err != nil { - t.Error("expecting err to be nil") - } + require.NoError(t, err) expected := []Disk{ { @@ -59,10 +46,7 @@ func TestFetchStatus(t *testing.T) { Status: "SLP", }, } - - if !reflect.DeepEqual(expected, disks) { - t.Error("disks' slice is different from expected") - } + require.Equal(t, expected, disks, "disks' slice is different from expected") } func TestFetchTwoDisks(t *testing.T) { @@ -70,10 +54,7 @@ func TestFetchTwoDisks(t *testing.T) { defer l.Close() disks, err := New().Fetch(l.Addr().String()) - - if err != nil { - t.Error("expecting err to be nil") - } + require.NoError(t, err) expected := []Disk{ { @@ -90,26 +71,20 @@ func TestFetchTwoDisks(t *testing.T) { Status: "SLP", }, } - - if !reflect.DeepEqual(expected, disks) { - t.Error("disks' slice is different from expected") - } + require.Equal(t, expected, disks, "disks' slice is different from expected") } func serve(t *testing.T, data []byte) net.Listener { l, err := net.Listen("tcp", "127.0.0.1:0") - - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) go func(t *testing.T) { conn, err := l.Accept() - require.NoError(t, err) - conn.Write(data) - conn.Close() + _, err = conn.Write(data) + require.NoError(t, err) + require.NoError(t, conn.Close()) }(t) return l diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 993eda7321c0f..edd0b2004a0d1 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -37,7 +37,7 @@ func TestHTTPwithJSONFormat(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.NoError(t, acc.GatherError(plugin.Gather)) require.Len(t, acc.Metrics, 1) @@ -79,7 +79,7 @@ func TestHTTPHeaders(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.NoError(t, acc.GatherError(plugin.Gather)) } @@ -102,7 +102,7 @@ func TestInvalidStatusCode(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.Error(t, acc.GatherError(plugin.Gather)) } @@ -126,7 +126,7 @@ func TestSuccessStatusCodes(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.NoError(t, acc.GatherError(plugin.Gather)) } @@ -152,7 +152,7 @@ func TestMethod(t *testing.T) { plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.NoError(t, acc.GatherError(plugin.Gather)) } @@ -246,7 +246,7 @@ func TestBodyAndContentEncoding(t *testing.T) { tt.plugin.SetParser(parser) var acc testutil.Accumulator - tt.plugin.Init() + require.NoError(t, tt.plugin.Init()) err = tt.plugin.Gather(&acc) require.NoError(t, err) }) diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 0c94437354feb..6a6d45592033d 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -166,7 +166,9 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { h.wg.Add(1) go func() { defer h.wg.Done() - server.Serve(h.listener) + if err := server.Serve(h.listener); err != nil { + h.Log.Errorf("Serve failed: %v", err) + } }() h.Log.Infof("Listening on %s", listener.Addr().String()) @@ -177,6 +179,8 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { // Stop cleans up all resources func (h *HTTPListenerV2) Stop() { if h.listener != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive h.listener.Close() } h.wg.Wait() @@ -195,7 +199,9 @@ func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) { // Check that the content length is not too large for us to handle. if req.ContentLength > h.MaxBodySize.Size { - tooLarge(res) + if err := tooLarge(res); err != nil { + h.Log.Debugf("error in too-large: %v", err) + } return } @@ -208,7 +214,9 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) } } if !isAcceptedMethod { - methodNotAllowed(res) + if err := methodNotAllowed(res); err != nil { + h.Log.Debugf("error in method-not-allowed: %v", err) + } return } @@ -229,7 +237,9 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) metrics, err := h.Parse(bytes) if err != nil { h.Log.Debugf("Parse error: %s", err.Error()) - badRequest(res) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } @@ -255,14 +265,18 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) r, err := gzip.NewReader(req.Body) if err != nil { h.Log.Debug(err.Error()) - badRequest(res) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return nil, false } defer r.Close() maxReader := http.MaxBytesReader(res, r, h.MaxBodySize.Size) bytes, err := ioutil.ReadAll(maxReader) if err != nil { - tooLarge(res) + if err := tooLarge(res); err != nil { + h.Log.Debugf("error in too-large: %v", err) + } return nil, false } return bytes, true @@ -271,14 +285,18 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) bytes, err := ioutil.ReadAll(req.Body) if err != nil { h.Log.Debug(err.Error()) - badRequest(res) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return nil, false } // snappy block format is only supported by decode/encode not snappy reader/writer bytes, err = snappy.Decode(nil, bytes) if err != nil { h.Log.Debug(err.Error()) - badRequest(res) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return nil, false } return bytes, true @@ -287,7 +305,9 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) bytes, err := ioutil.ReadAll(req.Body) if err != nil { h.Log.Debug(err.Error()) - badRequest(res) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return nil, false } return bytes, true @@ -300,29 +320,34 @@ func (h *HTTPListenerV2) collectQuery(res http.ResponseWriter, req *http.Request query, err := url.QueryUnescape(rawQuery) if err != nil { h.Log.Debugf("Error parsing query: %s", err.Error()) - badRequest(res) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return nil, false } return []byte(query), true } -func tooLarge(res http.ResponseWriter) { +func tooLarge(res http.ResponseWriter) error { res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusRequestEntityTooLarge) - res.Write([]byte(`{"error":"http: request body too large"}`)) + _, err := res.Write([]byte(`{"error":"http: request body too large"}`)) + return err } -func methodNotAllowed(res http.ResponseWriter) { +func methodNotAllowed(res http.ResponseWriter) error { res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusMethodNotAllowed) - res.Write([]byte(`{"error":"http: method not allowed"}`)) + _, err := res.Write([]byte(`{"error":"http: method not allowed"}`)) + return err } -func badRequest(res http.ResponseWriter) { +func badRequest(res http.ResponseWriter) error { res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusBadRequest) - res.Write([]byte(`{"error":"http: bad request"}`)) + _, err := res.Write([]byte(`{"error":"http: bad request"}`)) + return err } func (h *HTTPListenerV2) authenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) { diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index 05eb437429248..e4507984c3394 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -146,7 +146,7 @@ func TestWriteHTTPSNoClientAuth(t *testing.T) { // post single message to listener resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -160,7 +160,7 @@ func TestWriteHTTPSWithClientAuth(t *testing.T) { // post single message to listener resp, err := getHTTPSClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -178,7 +178,7 @@ func TestWriteHTTPBasicAuth(t *testing.T) { req.SetBasicAuth(basicUsername, basicPassword) resp, err := client.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, http.StatusNoContent, resp.StatusCode) } @@ -192,7 +192,7 @@ func TestWriteHTTP(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -204,7 +204,7 @@ func TestWriteHTTP(t *testing.T) { // post multiple message to listener resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(2) @@ -220,7 +220,7 @@ func TestWriteHTTP(t *testing.T) { // Post a gigantic metric to the listener and verify that an error is returned: resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 413, resp.StatusCode) acc.Wait(3) @@ -241,7 +241,7 @@ func TestWriteHTTPNoNewline(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -270,7 +270,7 @@ func TestWriteHTTPExactMaxBodySize(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -293,7 +293,7 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 413, resp.StatusCode) } @@ -348,10 +348,7 @@ func TestWriteHTTPSnappyData(t *testing.T) { if err != nil { t.Log("Test client request failed. Error: ", err) } - err = resp.Body.Close() - if err != nil { - t.Log("Test client close failed. Error: ", err) - } + require.NoErrorf(t, resp.Body.Close(), "Test client close failed. Error: %v", err) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) @@ -385,15 +382,21 @@ func TestWriteHTTPHighTraffic(t *testing.T) { defer innerwg.Done() for i := 0; i < 500; i++ { resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + if err != nil { + return + } + if err := resp.Body.Close(); err != nil { + return + } + if resp.StatusCode != 204 { + return + } } }(&wg) } wg.Wait() - listener.Gather(acc) + require.NoError(t, listener.Gather(acc)) acc.Wait(25000) require.Equal(t, int64(25000), int64(acc.NMetrics())) @@ -409,7 +412,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 404, resp.StatusCode) } @@ -423,7 +426,7 @@ func TestWriteHTTPInvalid(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) } @@ -437,7 +440,7 @@ func TestWriteHTTPEmpty(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -457,7 +460,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsSingleWrite(t *testing.T) { resp, err := http.DefaultClient.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -469,7 +472,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsSingleWrite(t *testing.T) { // post single message to listener resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -495,7 +498,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsBulkWrite(t *testing.T) { resp, err := http.DefaultClient.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(2) @@ -520,7 +523,7 @@ func TestWriteHTTPQueryParams(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/write", "tagKey=tagValue&fieldKey=42"), "", bytes.NewBuffer([]byte(emptyMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -544,7 +547,7 @@ func TestWriteHTTPFormData(t *testing.T) { "fieldKey": {"42"}, }) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 50315fceee5b0..7ec46ea486ab0 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -308,15 +308,11 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] h.Log.Debugf("Network error while polling %s: %s", u, err.Error()) // Get error details - netErr := setError(err, fields, tags) - - // If recognize the returned error, get out - if netErr != nil { - return fields, tags, nil + if setError(err, fields, tags) == nil { + // Any error not recognized by `set_error` is considered a "connection_failed" + setResult("connection_failed", fields, tags) } - // Any error not recognized by `set_error` is considered a "connection_failed" - setResult("connection_failed", fields, tags) return fields, tags, nil } diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 73ef9b0197160..4772024c569d1 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -88,21 +88,26 @@ func checkTags(t *testing.T, tags map[string]interface{}, acc *testutil.Accumula func setUpTestMux() http.Handler { mux := http.NewServeMux() + // Ignore all returned errors below as the tests will fail anyway mux.HandleFunc("/redirect", func(w http.ResponseWriter, req *http.Request) { http.Redirect(w, req, "/good", http.StatusMovedPermanently) }) mux.HandleFunc("/good", func(w http.ResponseWriter, req *http.Request) { w.Header().Set("Server", "MyTestServer") w.Header().Set("Content-Type", "application/json; charset=utf-8") + //nolint:errcheck,revive fmt.Fprintf(w, "hit the good page!") }) mux.HandleFunc("/invalidUTF8", func(w http.ResponseWriter, req *http.Request) { + //nolint:errcheck,revive w.Write([]byte{0xff, 0xfe, 0xfd}) }) mux.HandleFunc("/noheader", func(w http.ResponseWriter, req *http.Request) { + //nolint:errcheck,revive fmt.Fprintf(w, "hit the good page!") }) mux.HandleFunc("/jsonresponse", func(w http.ResponseWriter, req *http.Request) { + //nolint:errcheck,revive fmt.Fprintf(w, "\"service_status\": \"up\", \"healthy\" : \"true\"") }) mux.HandleFunc("/badredirect", func(w http.ResponseWriter, req *http.Request) { @@ -113,10 +118,12 @@ func setUpTestMux() http.Handler { http.Error(w, "method wasn't post", http.StatusMethodNotAllowed) return } + //nolint:errcheck,revive fmt.Fprintf(w, "used post correctly!") }) mux.HandleFunc("/musthaveabody", func(w http.ResponseWriter, req *http.Request) { body, err := ioutil.ReadAll(req.Body) + //nolint:errcheck,revive req.Body.Close() if err != nil { http.Error(w, "couldn't read request body", http.StatusBadRequest) @@ -126,6 +133,7 @@ func setUpTestMux() http.Handler { http.Error(w, "body was empty", http.StatusBadRequest) return } + //nolint:errcheck,revive fmt.Fprintf(w, "sent a body!") }) mux.HandleFunc("/twosecondnap", func(w http.ResponseWriter, req *http.Request) { @@ -1047,7 +1055,8 @@ func TestRedirect(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Location", "http://example.org") w.WriteHeader(http.StatusMovedPermanently) - w.Write([]byte("test")) + _, err := w.Write([]byte("test")) + require.NoError(t, err) }) plugin := &HTTPResponse{ diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 9e3e95aeaa71d..9f6292cba722d 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -233,7 +233,8 @@ func TestHttpJsonGET_URL(t *testing.T) { key := r.FormValue("api_key") assert.Equal(t, "mykey", key) w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, validJSON2) + _, err := fmt.Fprintln(w, validJSON2) + require.NoError(t, err) })) defer ts.Close() @@ -305,7 +306,8 @@ func TestHttpJsonGET(t *testing.T) { key := r.FormValue("api_key") assert.Equal(t, "mykey", key) w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, validJSON2) + _, err := fmt.Fprintln(w, validJSON2) + require.NoError(t, err) })) defer ts.Close() @@ -379,7 +381,8 @@ func TestHttpJsonPOST(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "api_key=mykey", string(body)) w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, validJSON2) + _, err = fmt.Fprintln(w, validJSON2) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go index 3392300f9a44a..9dbf52f243e3f 100644 --- a/plugins/inputs/icinga2/icinga2.go +++ b/plugins/inputs/icinga2/icinga2.go @@ -53,7 +53,7 @@ type ObjectType string var sampleConfig = ` ## Required Icinga2 server address # server = "https://localhost:5665" - + ## Required Icinga2 object type ("services" or "hosts") # object_type = "services" @@ -171,7 +171,7 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error { defer resp.Body.Close() result := Result{} - json.NewDecoder(resp.Body).Decode(&result) + err = json.NewDecoder(resp.Body).Decode(&result) if err != nil { return err } diff --git a/plugins/inputs/icinga2/icinga2_test.go b/plugins/inputs/icinga2/icinga2_test.go index 13055ed8c2d16..2a965877aeada 100644 --- a/plugins/inputs/icinga2/icinga2_test.go +++ b/plugins/inputs/icinga2/icinga2_test.go @@ -7,6 +7,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestGatherServicesStatus(t *testing.T) { @@ -30,7 +31,7 @@ func TestGatherServicesStatus(t *testing.T) { ` checks := Result{} - json.Unmarshal([]byte(s), &checks) + require.NoError(t, json.Unmarshal([]byte(s), &checks)) icinga2 := new(Icinga2) icinga2.Log = testutil.Logger{} @@ -84,7 +85,7 @@ func TestGatherHostsStatus(t *testing.T) { ` checks := Result{} - json.Unmarshal([]byte(s), &checks) + require.NoError(t, json.Unmarshal([]byte(s), &checks)) var acc testutil.Accumulator diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go index 27ea81b6d7dd6..93a02a19e56a7 100644 --- a/plugins/inputs/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -14,7 +14,8 @@ import ( func TestBasic(t *testing.T) { fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(basicJSON)) + _, err := w.Write([]byte(basicJSON)) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -61,7 +62,8 @@ func TestBasic(t *testing.T) { func TestInfluxDB(t *testing.T) { fakeInfluxServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(influxReturn)) + _, err := w.Write([]byte(influxReturn)) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -121,7 +123,8 @@ func TestInfluxDB(t *testing.T) { func TestInfluxDB2(t *testing.T) { fakeInfluxServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(influxReturn2)) + _, err := w.Write([]byte(influxReturn2)) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -146,7 +149,8 @@ func TestInfluxDB2(t *testing.T) { func TestErrorHandling(t *testing.T) { badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte("not json")) + _, err := w.Write([]byte("not json")) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -164,7 +168,8 @@ func TestErrorHandling(t *testing.T) { func TestErrorHandling404(t *testing.T) { badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(basicJSON)) + _, err := w.Write([]byte(basicJSON)) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -182,7 +187,8 @@ func TestErrorHandling404(t *testing.T) { func TestErrorResponse(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"error": "unable to parse authentication credentials"}`)) + _, err := w.Write([]byte(`{"error": "unable to parse authentication credentials"}`)) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go index d551cca5f0f26..269ba17d6fa67 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -221,7 +221,10 @@ func (h *InfluxDBListener) handleQuery() http.HandlerFunc { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") res.WriteHeader(http.StatusOK) - res.Write([]byte("{\"results\":[]}")) + _, err := res.Write([]byte("{\"results\":[]}")) + if err != nil { + h.Log.Debugf("error writing result in handleQuery: %v", err) + } } } @@ -236,7 +239,9 @@ func (h *InfluxDBListener) handlePing() http.HandlerFunc { res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusOK) b, _ := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above - res.Write(b) + if _, err := res.Write(b); err != nil { + h.Log.Debugf("error writing result in handlePing: %v", err) + } } else { res.WriteHeader(http.StatusNoContent) } @@ -255,7 +260,9 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { defer h.writesServed.Incr(1) // Check that the content length is not too large for us to handle. if req.ContentLength > h.MaxBodySize.Size { - tooLarge(res) + if err := tooLarge(res); err != nil { + h.Log.Debugf("error in too-large: %v", err) + } return } @@ -270,7 +277,9 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { body, err = gzip.NewReader(body) if err != nil { h.Log.Debugf("Error decompressing request body: %v", err.Error()) - badRequest(res, err.Error()) + if err := badRequest(res, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } defer body.Close() @@ -330,7 +339,9 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { } if err != influx.EOF { h.Log.Debugf("Error parsing the request body: %v", err.Error()) - badRequest(res, err.Error()) + if err := badRequest(res, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } if parseErrorCount > 0 { @@ -343,7 +354,9 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { default: partialErrorString = fmt.Sprintf("%s (and %d other parse errors)", firstParseErrorStr, parseErrorCount-1) } - partialWrite(res, partialErrorString) + if err := partialWrite(res, partialErrorString); err != nil { + h.Log.Debugf("error in partial-write: %v", err) + } return } @@ -352,15 +365,16 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { } } -func tooLarge(res http.ResponseWriter) { +func tooLarge(res http.ResponseWriter) error { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") res.Header().Set("X-Influxdb-Error", "http: request body too large") res.WriteHeader(http.StatusRequestEntityTooLarge) - res.Write([]byte(`{"error":"http: request body too large"}`)) + _, err := res.Write([]byte(`{"error":"http: request body too large"}`)) + return err } -func badRequest(res http.ResponseWriter, errString string) { +func badRequest(res http.ResponseWriter, errString string) error { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") if errString == "" { @@ -368,15 +382,17 @@ func badRequest(res http.ResponseWriter, errString string) { } res.Header().Set("X-Influxdb-Error", errString) res.WriteHeader(http.StatusBadRequest) - res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) + _, err := res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) + return err } -func partialWrite(res http.ResponseWriter, errString string) { +func partialWrite(res http.ResponseWriter, errString string) error { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") res.Header().Set("X-Influxdb-Error", errString) res.WriteHeader(http.StatusBadRequest) - res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) + _, err := res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) + return err } func getPrecisionMultiplier(precision string) time.Duration { diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go index 5c934e371bfc7..09c02fb7b0662 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -117,7 +117,7 @@ func TestWriteSecureNoClientAuth(t *testing.T) { // post single message to listener resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -132,7 +132,7 @@ func TestWriteSecureWithClientAuth(t *testing.T) { // post single message to listener resp, err := getSecureClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -151,7 +151,7 @@ func TestWriteBasicAuth(t *testing.T) { req.SetBasicAuth(basicUsername, basicPassword) resp, err := client.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, http.StatusNoContent, resp.StatusCode) } @@ -169,7 +169,7 @@ func TestWriteKeepDatabase(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -181,7 +181,7 @@ func TestWriteKeepDatabase(t *testing.T) { // post single message to listener with a database tag in it already. It should be clobbered. resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgWithDB))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -193,7 +193,7 @@ func TestWriteKeepDatabase(t *testing.T) { // post multiple message to listener resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(2) @@ -218,7 +218,7 @@ func TestWriteRetentionPolicyTag(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/write", "rp=myrp"), "", bytes.NewBuffer([]byte("cpu time_idle=42"))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.Equal(t, 204, resp.StatusCode) expected := []telegraf.Metric{ @@ -250,7 +250,7 @@ func TestWriteNoNewline(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -271,7 +271,7 @@ func TestPartialWrite(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testPartial))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) acc.Wait(1) @@ -300,7 +300,7 @@ func TestWriteMaxLineSizeIncrease(t *testing.T) { // Post a gigantic metric to the listener and verify that it writes OK this time: resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -319,7 +319,7 @@ func TestWriteVerySmallMaxBody(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 413, resp.StatusCode) } @@ -339,7 +339,7 @@ func TestWriteLargeLine(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) //todo: with the new parser, long lines aren't a problem. Do we need to skip them? //require.EqualValues(t, 400, resp.StatusCode) @@ -449,15 +449,21 @@ func TestWriteHighTraffic(t *testing.T) { defer innerwg.Done() for i := 0; i < 500; i++ { resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + if err != nil { + return + } + if err := resp.Body.Close(); err != nil { + return + } + if resp.StatusCode != 204 { + return + } } }(&wg) } wg.Wait() - listener.Gather(acc) + require.NoError(t, listener.Gather(acc)) acc.Wait(25000) require.Equal(t, int64(25000), int64(acc.NMetrics())) @@ -474,7 +480,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 404, resp.StatusCode) } @@ -489,7 +495,7 @@ func TestWriteInvalid(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) } @@ -504,7 +510,7 @@ func TestWriteEmpty(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -535,7 +541,7 @@ func TestPing(t *testing.T) { require.NoError(t, err) require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0]) require.Len(t, resp.Header["Content-Type"], 0) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -551,7 +557,7 @@ func TestPingVerbose(t *testing.T) { require.NoError(t, err) require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0]) require.Equal(t, "application/json", resp.Header["Content-Type"][0]) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 200, resp.StatusCode) } @@ -567,7 +573,7 @@ func TestWriteWithPrecision(t *testing.T) { resp, err := http.Post( createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -592,7 +598,7 @@ func TestWriteWithPrecisionNoTimestamp(t *testing.T) { resp, err := http.Post( createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -638,7 +644,7 @@ func TestWriteParseErrors(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(tt.input))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) require.Equal(t, tt.expected, resp.Header["X-Influxdb-Error"][0]) }) diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go index 30c449f7dd910..ab1d83732c96a 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go @@ -210,7 +210,9 @@ func (h *InfluxDBV2Listener) handleReady() http.HandlerFunc { "started": h.startTime.Format(time.RFC3339Nano), "status": "ready", "up": h.timeFunc().Sub(h.startTime).String()}) - res.Write(b) + if _, err := res.Write(b); err != nil { + h.Log.Debugf("error writing in handle-ready: %v", err) + } } } @@ -226,7 +228,9 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { defer h.writesServed.Incr(1) // Check that the content length is not too large for us to handle. if req.ContentLength > h.MaxBodySize.Size { - tooLarge(res, h.MaxBodySize.Size) + if err := tooLarge(res, h.MaxBodySize.Size); err != nil { + h.Log.Debugf("error in too-large: %v", err) + } return } @@ -240,7 +244,9 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { body, err = gzip.NewReader(body) if err != nil { h.Log.Debugf("Error decompressing request body: %v", err.Error()) - badRequest(res, Invalid, err.Error()) + if err := badRequest(res, Invalid, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } defer body.Close() @@ -252,7 +258,9 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { bytes, readErr = ioutil.ReadAll(body) if readErr != nil { h.Log.Debugf("Error parsing the request body: %v", readErr.Error()) - badRequest(res, InternalError, readErr.Error()) + if err := badRequest(res, InternalError, readErr.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } metricHandler := influx.NewMetricHandler() @@ -272,7 +280,9 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { if err != influx.EOF && err != nil { h.Log.Debugf("Error parsing the request body: %v", err.Error()) - badRequest(res, Invalid, err.Error()) + if err := badRequest(res, Invalid, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } @@ -290,7 +300,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { } } -func tooLarge(res http.ResponseWriter, maxLength int64) { +func tooLarge(res http.ResponseWriter, maxLength int64) error { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Error", "http: request body too large") res.WriteHeader(http.StatusRequestEntityTooLarge) @@ -298,10 +308,11 @@ func tooLarge(res http.ResponseWriter, maxLength int64) { "code": fmt.Sprint(Invalid), "message": "http: request body too large", "maxLength": fmt.Sprint(maxLength)}) - res.Write(b) + _, err := res.Write(b) + return err } -func badRequest(res http.ResponseWriter, code BadRequestCode, errString string) { +func badRequest(res http.ResponseWriter, code BadRequestCode, errString string) error { res.Header().Set("Content-Type", "application/json") if errString == "" { errString = "http: bad request" @@ -314,7 +325,8 @@ func badRequest(res http.ResponseWriter, code BadRequestCode, errString string) "op": "", "err": errString, }) - res.Write(b) + _, err := res.Write(b) + return err } func getPrecisionMultiplier(precision string) time.Duration { diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go index 2a80bb4d351e6..9d327b41bc377 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go @@ -115,7 +115,7 @@ func TestWriteSecureNoClientAuth(t *testing.T) { // post single message to listener resp, err := noClientAuthClient.Post(createURL(listener, "https", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -130,7 +130,7 @@ func TestWriteSecureWithClientAuth(t *testing.T) { // post single message to listener resp, err := getSecureClient().Post(createURL(listener, "https", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -149,7 +149,7 @@ func TestWriteTokenAuth(t *testing.T) { req.Header.Set("Authorization", fmt.Sprintf("Token %s", token)) resp, err := client.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, http.StatusNoContent, resp.StatusCode) } @@ -167,7 +167,7 @@ func TestWriteKeepBucket(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -179,7 +179,7 @@ func TestWriteKeepBucket(t *testing.T) { // post single message to listener with a database tag in it already. It should be clobbered. resp, err = http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgWithDB))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -191,7 +191,7 @@ func TestWriteKeepBucket(t *testing.T) { // post multiple message to listener resp, err = http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(2) @@ -217,7 +217,7 @@ func TestWriteNoNewline(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -238,7 +238,7 @@ func TestAllOrNothing(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testPartial))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) } @@ -257,7 +257,7 @@ func TestWriteMaxLineSizeIncrease(t *testing.T) { // Post a gigantic metric to the listener and verify that it writes OK this time: resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -276,7 +276,7 @@ func TestWriteVerySmallMaxBody(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 413, resp.StatusCode) } @@ -296,7 +296,7 @@ func TestWriteLargeLine(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) //todo: with the new parser, long lines aren't a problem. Do we need to skip them? //require.EqualValues(t, 400, resp.StatusCode) @@ -406,15 +406,21 @@ func TestWriteHighTraffic(t *testing.T) { defer innerwg.Done() for i := 0; i < 500; i++ { resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgs))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + if err != nil { + return + } + if err := resp.Body.Close(); err != nil { + return + } + if resp.StatusCode != 204 { + return + } } }(&wg) } wg.Wait() - listener.Gather(acc) + require.NoError(t, listener.Gather(acc)) acc.Wait(25000) require.Equal(t, int64(25000), int64(acc.NMetrics())) @@ -431,7 +437,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 404, resp.StatusCode) } @@ -446,7 +452,7 @@ func TestWriteInvalid(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(badMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) } @@ -461,7 +467,7 @@ func TestWriteEmpty(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(emptyMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -482,7 +488,7 @@ func TestReady(t *testing.T) { bodyBytes, err := ioutil.ReadAll(resp.Body) require.NoError(t, err) require.Contains(t, string(bodyBytes), "\"status\":\"ready\"") - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 200, resp.StatusCode) } @@ -498,7 +504,7 @@ func TestWriteWithPrecision(t *testing.T) { resp, err := http.Post( createURL(listener, "http", "/api/v2/write", "bucket=mybucket&precision=s"), "", bytes.NewBuffer([]byte(msg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -523,7 +529,7 @@ func TestWriteWithPrecisionNoTimestamp(t *testing.T) { resp, err := http.Post( createURL(listener, "http", "/api/v2/write", "bucket=mybucket&precision=s"), "", bytes.NewBuffer([]byte(msg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index cb85d8fbc419b..80332abc0d924 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -373,9 +373,12 @@ OS RealTime Mod | 0x00 | ok // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- cmd, args := args[3], args[4:] + // Ignore the returned errors for the mocked interface as tests will fail anyway if cmd == "ipmitool" { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, mockData) } else { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") os.Exit(1) } @@ -567,9 +570,12 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- cmd, args := args[3], args[4:] + // Ignore the returned errors for the mocked interface as tests will fail anyway if cmd == "ipmitool" { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, mockData) } else { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") os.Exit(1) } diff --git a/plugins/inputs/jenkins/client.go b/plugins/inputs/jenkins/client.go index 9cc8e073bfa48..00c9bb54251f4 100644 --- a/plugins/inputs/jenkins/client.go +++ b/plugins/inputs/jenkins/client.go @@ -69,6 +69,8 @@ func (c *client) doGet(ctx context.Context, url string, v interface{}) error { return err } defer func() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive resp.Body.Close() <-c.semaphore }() diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index f877c700da77c..833b36fcbd60d 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -97,6 +97,8 @@ func (h mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) return } + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive w.Write(b) } diff --git a/plugins/inputs/jolokia2/client_test.go b/plugins/inputs/jolokia2/client_test.go index 0c7cd4c010d50..7ec65d27a0ebf 100644 --- a/plugins/inputs/jolokia2/client_test.go +++ b/plugins/inputs/jolokia2/client_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestJolokia2_ClientAuthRequest(t *testing.T) { @@ -20,10 +21,7 @@ func TestJolokia2_ClientAuthRequest(t *testing.T) { username, password, _ = r.BasicAuth() body, _ := ioutil.ReadAll(r.Body) - err := json.Unmarshal(body, &requests) - if err != nil { - t.Error(err) - } + require.NoError(t, json.Unmarshal(body, &requests)) w.WriteHeader(http.StatusOK) })) @@ -40,22 +38,14 @@ func TestJolokia2_ClientAuthRequest(t *testing.T) { `, server.URL)) var acc testutil.Accumulator - plugin.Gather(&acc) - - if username != "sally" { - t.Errorf("Expected to post with username %s, but was %s", "sally", username) - } - if password != "seashore" { - t.Errorf("Expected to post with password %s, but was %s", "seashore", password) - } - if len(requests) == 0 { - t.Fatal("Expected to post a request body, but was empty.") - } + require.NoError(t, plugin.Gather(&acc)) - request := requests[0] - if expect := "hello:foo=bar"; request["mbean"] != expect { - t.Errorf("Expected to query mbean %s, but was %s", expect, request["mbean"]) - } + require.EqualValuesf(t, "sally", username, "Expected to post with username %s, but was %s", "sally", username) + require.EqualValuesf(t, "seashore", password, "Expected to post with password %s, but was %s", "seashore", password) + require.NotZero(t, len(requests), "Expected to post a request body, but was empty.") + + request := requests[0]["mbean"] + require.EqualValuesf(t, "hello:foo=bar", request, "Expected to query mbean %s, but was %s", "hello:foo=bar", request) } func TestJolokia2_ClientProxyAuthRequest(t *testing.T) { @@ -67,12 +57,10 @@ func TestJolokia2_ClientProxyAuthRequest(t *testing.T) { username, password, _ = r.BasicAuth() body, _ := ioutil.ReadAll(r.Body) - err := json.Unmarshal(body, &requests) - if err != nil { - t.Error(err) - } - + require.NoError(t, json.Unmarshal(body, &requests)) w.WriteHeader(http.StatusOK) + _, err := fmt.Fprintf(w, "[]") + require.NoError(t, err) })) defer server.Close() @@ -93,37 +81,22 @@ func TestJolokia2_ClientProxyAuthRequest(t *testing.T) { `, server.URL)) var acc testutil.Accumulator - plugin.Gather(&acc) - - if username != "sally" { - t.Errorf("Expected to post with username %s, but was %s", "sally", username) - } - if password != "seashore" { - t.Errorf("Expected to post with password %s, but was %s", "seashore", password) - } - if len(requests) == 0 { - t.Fatal("Expected to post a request body, but was empty.") - } + require.NoError(t, plugin.Gather(&acc)) + require.EqualValuesf(t, "sally", username, "Expected to post with username %s, but was %s", "sally", username) + require.EqualValuesf(t, "seashore", password, "Expected to post with password %s, but was %s", "seashore", password) + require.NotZero(t, len(requests), "Expected to post a request body, but was empty.") request := requests[0] - if expect := "hello:foo=bar"; request["mbean"] != expect { - t.Errorf("Expected to query mbean %s, but was %s", expect, request["mbean"]) - } + expected := "hello:foo=bar" + require.EqualValuesf(t, expected, request["mbean"], "Expected to query mbean %s, but was %s", expected, request["mbean"]) target, ok := request["target"].(map[string]interface{}) - if !ok { - t.Fatal("Expected a proxy target, but was empty.") - } - - if expect := "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi"; target["url"] != expect { - t.Errorf("Expected proxy target url %s, but was %s", expect, target["url"]) - } - - if expect := "jack"; target["user"] != expect { - t.Errorf("Expected proxy target username %s, but was %s", expect, target["user"]) - } - - if expect := "benimble"; target["password"] != expect { - t.Errorf("Expected proxy target password %s, but was %s", expect, target["password"]) - } + require.True(t, ok, "Expected a proxy target, but was empty.") + + expected = "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi" + require.Equalf(t, expected, target["url"], "Expected proxy target url %s, but was %s", expected, target["url"]) + expected = "jack" + require.Equalf(t, expected, target["user"], "Expected proxy target username %s, but was %s", expected, target["user"]) + expected = "benimble" + require.Equalf(t, expected, target["password"], "Expected proxy target username %s, but was %s", expected, target["password"]) } diff --git a/plugins/inputs/jolokia2/jolokia_test.go b/plugins/inputs/jolokia2/jolokia_test.go index aafac023e5081..eddcebfce0892 100644 --- a/plugins/inputs/jolokia2/jolokia_test.go +++ b/plugins/inputs/jolokia2/jolokia_test.go @@ -764,11 +764,8 @@ func TestFillFields(t *testing.T) { func setupServer(resp string) *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - //body, err := ioutil.ReadAll(r.Body) - //if err == nil { - // fmt.Println(string(body)) - //} - + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, resp) })) } diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go index d32866f2efbe6..99185e53d015d 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go @@ -51,14 +51,15 @@ type openConfigTelemetryServer struct { func (s *openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.SubscriptionRequest, stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer) error { path := req.PathList[0].Path - if path == "/sensor" { - stream.Send(data) - } else if path == "/sensor_with_prefix" { - stream.Send(dataWithPrefix) - } else if path == "/sensor_with_multiple_tags" { - stream.Send(dataWithMultipleTags) - } else if path == "/sensor_with_string_values" { - stream.Send(dataWithStringValues) + switch path { + case "/sensor": + return stream.Send(data) + case "/sensor_with_prefix": + return stream.Send(dataWithPrefix) + case "/sensor_with_multiple_tags": + return stream.Send(dataWithMultipleTags) + case "/sensor_with_string_values": + return stream.Send(dataWithStringValues) } return nil } @@ -219,6 +220,8 @@ func TestMain(m *testing.M) { grpcServer := grpc.NewServer(opts...) telemetry.RegisterOpenConfigTelemetryServer(grpcServer, newServer()) go func() { + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive grpcServer.Serve(lis) }() defer grpcServer.Stop() diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index fe24f51724dad..c6894fd74ae21 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -77,7 +77,7 @@ const sampleConfig = ` ## 3 : LZ4 ## 4 : ZSTD # compression_codec = 0 - + ## Initial offset position; one of "oldest" or "newest". # offset = "oldest" @@ -235,6 +235,8 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { err := k.consumer.Consume(ctx, k.Topics, handler) if err != nil { acc.AddError(err) + // Ignore returned error as we cannot do anything about it anyway + //nolint:errcheck,revive internal.SleepContext(ctx, reconnectDelay) } } @@ -393,7 +395,7 @@ func (h *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, for { err := h.Reserve(ctx) if err != nil { - return nil + return err } select { diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index f6aca25c7ed9a..c73104278338e 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -25,8 +25,7 @@ type FakeConsumerGroup struct { func (g *FakeConsumerGroup) Consume(_ context.Context, _ []string, handler sarama.ConsumerGroupHandler) error { g.handler = handler - g.handler.Setup(nil) - return nil + return g.handler.Setup(nil) } func (g *FakeConsumerGroup) Errors() <-chan error { @@ -175,6 +174,8 @@ func TestInit(t *testing.T) { require.Error(t, err) return } + // No error path + require.NoError(t, err) tt.check(t, tt.plugin) }) @@ -273,8 +274,12 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) { require.NoError(t, err) cancel() - err = cg.ConsumeClaim(session, &claim) - require.NoError(t, err) + // This produces a flappy testcase probably due to a race between context cancelation and consumption. + // Furthermore, it is not clear what the outcome of this test should be... + // err = cg.ConsumeClaim(session, &claim) + //require.NoError(t, err) + // So stick with the line below for now. + cg.ConsumeClaim(session, &claim) err = cg.Cleanup(session) require.NoError(t, err) @@ -303,7 +308,8 @@ func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { go func() { err := cg.ConsumeClaim(session, claim) - require.NoError(t, err) + require.Error(t, err) + require.EqualValues(t, "context canceled", err.Error()) }() acc.Wait(1) @@ -328,11 +334,12 @@ func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { func TestConsumerGroupHandler_Handle(t *testing.T) { tests := []struct { - name string - maxMessageLen int - topicTag string - msg *sarama.ConsumerMessage - expected []telegraf.Metric + name string + maxMessageLen int + topicTag string + msg *sarama.ConsumerMessage + expected []telegraf.Metric + expectedHandleError string }{ { name: "happy path", @@ -358,7 +365,8 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { Topic: "telegraf", Value: []byte("12345"), }, - expected: []telegraf.Metric{}, + expected: []telegraf.Metric{}, + expectedHandleError: "message exceeds max_message_len (actual 5, max 4)", }, { name: "parse error", @@ -366,7 +374,8 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { Topic: "telegraf", Value: []byte("not an integer"), }, - expected: []telegraf.Metric{}, + expected: []telegraf.Metric{}, + expectedHandleError: "strconv.Atoi: parsing \"integer\": invalid syntax", }, { name: "add topic tag", @@ -400,8 +409,14 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { ctx := context.Background() session := &FakeConsumerGroupSession{ctx: ctx} - cg.Reserve(ctx) - cg.Handle(session, tt.msg) + require.NoError(t, cg.Reserve(ctx)) + err := cg.Handle(session, tt.msg) + if tt.expectedHandleError != "" { + require.Error(t, err) + require.EqualValues(t, tt.expectedHandleError, err.Error()) + } else { + require.NoError(t, err) + } testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go index 8690b1637bac1..ab19e0875820a 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go @@ -161,8 +161,11 @@ func (k *Kafka) receiver() { // TODO(cam) this locking can be removed if this PR gets merged: // https://github.com/wvanbergen/kafka/pull/84 k.Lock() - k.Consumer.CommitUpto(msg) + err := k.Consumer.CommitUpto(msg) k.Unlock() + if err != nil { + k.acc.AddError(fmt.Errorf("committing to consumer failed: %v", err)) + } } } } diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go index 8037f49a053b5..ad8e372941ebb 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go @@ -4,11 +4,12 @@ import ( "strings" "testing" + "github.com/Shopify/sarama" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/Shopify/sarama" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( @@ -46,7 +47,7 @@ func TestRunParser(t *testing.T) { in <- saramaMsg(testMsg) acc.Wait(1) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) } // Test that the parser ignores invalid messages @@ -61,7 +62,7 @@ func TestRunParserInvalidMsg(t *testing.T) { in <- saramaMsg(invalidMsg) acc.WaitError(1) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) } // Test that overlong messages are dropped @@ -78,7 +79,7 @@ func TestDropOverlongMsg(t *testing.T) { in <- saramaMsg(overlongMsg) acc.WaitError(1) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) } // Test that the parser parses kafka messages into points @@ -93,9 +94,9 @@ func TestRunParserAndGather(t *testing.T) { in <- saramaMsg(testMsg) acc.Wait(1) - acc.GatherError(k.Gather) + require.NoError(t, acc.GatherError(k.Gather)) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) acc.AssertContainsFields(t, "cpu_load_short", map[string]interface{}{"value": float64(23422)}) } @@ -112,9 +113,9 @@ func TestRunParserAndGatherGraphite(t *testing.T) { in <- saramaMsg(testMsgGraphite) acc.Wait(1) - acc.GatherError(k.Gather) + require.NoError(t, acc.GatherError(k.Gather)) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) acc.AssertContainsFields(t, "cpu_load_short_graphite", map[string]interface{}{"value": float64(23422)}) } @@ -134,9 +135,9 @@ func TestRunParserAndGatherJSON(t *testing.T) { in <- saramaMsg(testMsgJSON) acc.Wait(1) - acc.GatherError(k.Gather) + require.NoError(t, acc.GatherError(k.Gather)) - assert.Equal(t, acc.NFields(), 2) + require.Equal(t, acc.NFields(), 2) acc.AssertContainsFields(t, "kafka_json_test", map[string]interface{}{ "a": float64(5), diff --git a/plugins/inputs/kapacitor/kapacitor_test.go b/plugins/inputs/kapacitor/kapacitor_test.go index cae1f9ce30e77..163af10601f0a 100644 --- a/plugins/inputs/kapacitor/kapacitor_test.go +++ b/plugins/inputs/kapacitor/kapacitor_test.go @@ -74,7 +74,8 @@ func TestKapacitor(t *testing.T) { func TestMissingStats(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(`{}`)) + _, err := w.Write([]byte(`{}`)) + require.NoError(t, err) })) defer server.Close() @@ -83,7 +84,7 @@ func TestMissingStats(t *testing.T) { } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.False(t, acc.HasField("kapacitor_memstats", "alloc_bytes")) require.True(t, acc.HasField("kapacitor", "num_tasks")) @@ -92,7 +93,8 @@ func TestMissingStats(t *testing.T) { func TestErrorHandling(t *testing.T) { badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte("not json")) + _, err := w.Write([]byte("not json")) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -104,7 +106,7 @@ func TestErrorHandling(t *testing.T) { } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) acc.WaitError(1) require.Equal(t, uint64(0), acc.NMetrics()) } @@ -120,7 +122,7 @@ func TestErrorHandling404(t *testing.T) { } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) acc.WaitError(1) require.Equal(t, uint64(0), acc.NMetrics()) } diff --git a/plugins/inputs/kernel/kernel_test.go b/plugins/inputs/kernel/kernel_test.go index e844d24322490..2068237d5b60f 100644 --- a/plugins/inputs/kernel/kernel_test.go +++ b/plugins/inputs/kernel/kernel_test.go @@ -9,12 +9,12 @@ import ( "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestFullProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFileFull)) - tmpfile2 := makeFakeStatFile([]byte(entropyStatFileFull)) + tmpfile := makeFakeStatFile(t, []byte(statFileFull)) + tmpfile2 := makeFakeStatFile(t, []byte(entropyStatFileFull)) defer os.Remove(tmpfile) defer os.Remove(tmpfile2) @@ -24,8 +24,7 @@ func TestFullProcFile(t *testing.T) { } acc := testutil.Accumulator{} - err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, k.Gather(&acc)) fields := map[string]interface{}{ "boot_time": int64(1457505775), @@ -40,8 +39,8 @@ func TestFullProcFile(t *testing.T) { } func TestPartialProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFilePartial)) - tmpfile2 := makeFakeStatFile([]byte(entropyStatFilePartial)) + tmpfile := makeFakeStatFile(t, []byte(statFilePartial)) + tmpfile2 := makeFakeStatFile(t, []byte(entropyStatFilePartial)) defer os.Remove(tmpfile) defer os.Remove(tmpfile2) @@ -51,8 +50,7 @@ func TestPartialProcFile(t *testing.T) { } acc := testutil.Accumulator{} - err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, k.Gather(&acc)) fields := map[string]interface{}{ "boot_time": int64(1457505775), @@ -66,8 +64,8 @@ func TestPartialProcFile(t *testing.T) { } func TestInvalidProcFile1(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFileInvalid)) - tmpfile2 := makeFakeStatFile([]byte(entropyStatFileInvalid)) + tmpfile := makeFakeStatFile(t, []byte(statFileInvalid)) + tmpfile2 := makeFakeStatFile(t, []byte(entropyStatFileInvalid)) defer os.Remove(tmpfile) defer os.Remove(tmpfile2) @@ -78,11 +76,12 @@ func TestInvalidProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid syntax") } func TestInvalidProcFile2(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFileInvalid2)) + tmpfile := makeFakeStatFile(t, []byte(statFileInvalid2)) defer os.Remove(tmpfile) k := Kernel{ @@ -91,12 +90,13 @@ func TestInvalidProcFile2(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) + require.Contains(t, err.Error(), "no such file") } func TestNoProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFileInvalid2)) - os.Remove(tmpfile) + tmpfile := makeFakeStatFile(t, []byte(statFileInvalid2)) + require.NoError(t, os.Remove(tmpfile)) k := Kernel{ statFile: tmpfile, @@ -104,8 +104,8 @@ func TestNoProcFile(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) - assert.Contains(t, err.Error(), "does not exist") + require.Error(t, err) + require.Contains(t, err.Error(), "does not exist") } const statFileFull = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 @@ -167,18 +167,14 @@ const entropyStatFilePartial = `1024` const entropyStatFileInvalid = `` -func makeFakeStatFile(content []byte) string { +func makeFakeStatFile(t *testing.T, content []byte) string { tmpfile, err := ioutil.TempFile("", "kernel_test") - if err != nil { - panic(err) - } + require.NoError(t, err) - if _, err := tmpfile.Write(content); err != nil { - panic(err) - } - if err := tmpfile.Close(); err != nil { - panic(err) - } + _, err = tmpfile.Write(content) + require.NoError(t, err) + + require.NoError(t, tmpfile.Close()) return tmpfile.Name() } diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go index cb571e8a320c6..eca873ff71896 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go @@ -9,11 +9,11 @@ import ( "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestFullVmStatProcFile(t *testing.T) { - tmpfile := makeFakeVMStatFile([]byte(vmStatFileFull)) + tmpfile := makeFakeVMStatFile(t, []byte(vmStatFileFull)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -21,8 +21,7 @@ func TestFullVmStatProcFile(t *testing.T) { } acc := testutil.Accumulator{} - err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, k.Gather(&acc)) fields := map[string]interface{}{ "nr_free_pages": int64(78730), @@ -121,7 +120,7 @@ func TestFullVmStatProcFile(t *testing.T) { } func TestPartialVmStatProcFile(t *testing.T) { - tmpfile := makeFakeVMStatFile([]byte(vmStatFilePartial)) + tmpfile := makeFakeVMStatFile(t, []byte(vmStatFilePartial)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -130,7 +129,7 @@ func TestPartialVmStatProcFile(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) fields := map[string]interface{}{ "unevictable_pgs_culled": int64(1531), @@ -151,7 +150,7 @@ func TestPartialVmStatProcFile(t *testing.T) { } func TestInvalidVmStatProcFile1(t *testing.T) { - tmpfile := makeFakeVMStatFile([]byte(vmStatFileInvalid)) + tmpfile := makeFakeVMStatFile(t, []byte(vmStatFileInvalid)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -160,12 +159,13 @@ func TestInvalidVmStatProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid syntax") } func TestNoVmStatProcFile(t *testing.T) { - tmpfile := makeFakeVMStatFile([]byte(vmStatFileInvalid)) - os.Remove(tmpfile) + tmpfile := makeFakeVMStatFile(t, []byte(vmStatFileInvalid)) + require.NoError(t, os.Remove(tmpfile)) k := KernelVmstat{ statFile: tmpfile, @@ -173,8 +173,8 @@ func TestNoVmStatProcFile(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) - assert.Contains(t, err.Error(), "does not exist") + require.Error(t, err) + require.Contains(t, err.Error(), "does not exist") } const vmStatFileFull = `nr_free_pages 78730 @@ -298,18 +298,14 @@ thp_collapse_alloc 24857 thp_collapse_alloc_failed 102214 thp_split abcd` -func makeFakeVMStatFile(content []byte) string { +func makeFakeVMStatFile(t *testing.T, content []byte) string { tmpfile, err := ioutil.TempFile("", "kernel_vmstat_test") - if err != nil { - panic(err) - } + require.NoError(t, err) - if _, err := tmpfile.Write(content); err != nil { - panic(err) - } - if err := tmpfile.Close(); err != nil { - panic(err) - } + _, err = tmpfile.Write(content) + require.NoError(t, err) + + require.NoError(t, tmpfile.Close()) return tmpfile.Name() } diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index bf63795f553b9..64822c2d75453 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -305,7 +305,9 @@ func (k *KinesisConsumer) onDelivery(ctx context.Context) { } k.lastSeqNum = strToBint(sequenceNum) - k.checkpoint.Set(chk.streamName, chk.shardID, sequenceNum) + if err := k.checkpoint.Set(chk.streamName, chk.shardID, sequenceNum); err != nil { + k.Log.Debug("Setting checkpoint failed: %v", err) + } } else { k.Log.Debug("Metric group failed to process") } diff --git a/plugins/inputs/kube_inventory/client_test.go b/plugins/inputs/kube_inventory/client_test.go index 622e35c65c57f..0462c0222d527 100644 --- a/plugins/inputs/kube_inventory/client_test.go +++ b/plugins/inputs/kube_inventory/client_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/stretchr/testify/require" ) type mockHandler struct { @@ -25,7 +26,5 @@ func toBoolPtr(b bool) *bool { func TestNewClient(t *testing.T) { _, err := newClient("https://127.0.0.1:443/", "default", "abc123", time.Second, tls.ClientConfig{}) - if err != nil { - t.Errorf("Failed to create new client - %s", err.Error()) - } + require.NoErrorf(t, err, "Failed to create new client - %v", err) } diff --git a/plugins/inputs/kube_inventory/daemonset_test.go b/plugins/inputs/kube_inventory/daemonset_test.go index f67707d2c3d21..5c67f39432dae 100644 --- a/plugins/inputs/kube_inventory/daemonset_test.go +++ b/plugins/inputs/kube_inventory/daemonset_test.go @@ -1,7 +1,6 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" @@ -9,7 +8,9 @@ import ( v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestDaemonSet(t *testing.T) { @@ -21,7 +22,7 @@ func TestDaemonSet(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -72,28 +73,28 @@ func TestDaemonSet(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "generation": int64(11221), - "current_number_scheduled": int32(3), - "desired_number_scheduled": int32(5), - "number_available": int32(2), - "number_misscheduled": int32(2), - "number_ready": int32(1), - "number_unavailable": int32(1), - "updated_number_scheduled": int32(2), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "daemonset_name": "daemon1", - "namespace": "ns1", - "selector_select1": "s1", - "selector_select2": "s2", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_daemonset", + map[string]string{ + "daemonset_name": "daemon1", + "namespace": "ns1", + "selector_select1": "s1", + "selector_select2": "s2", }, - }, + map[string]interface{}{ + "generation": int64(11221), + "current_number_scheduled": int32(3), + "desired_number_scheduled": int32(5), + "number_available": int32(2), + "number_misscheduled": int32(2), + "number_ready": int32(1), + "number_unavailable": int32(1), + "updated_number_scheduled": int32(2), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -105,34 +106,23 @@ func TestDaemonSet(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { ks.gatherDaemonSet(dset, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -278,7 +268,7 @@ func TestDaemonSetSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { ks.gatherDaemonSet(dset, acc) @@ -294,8 +284,7 @@ func TestDaemonSetSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kube_inventory/deployment_test.go b/plugins/inputs/kube_inventory/deployment_test.go index 9b4c74c9ad856..277377619fe84 100644 --- a/plugins/inputs/kube_inventory/deployment_test.go +++ b/plugins/inputs/kube_inventory/deployment_test.go @@ -1,7 +1,6 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" @@ -10,7 +9,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestDeployment(t *testing.T) { @@ -19,24 +20,11 @@ func TestDeployment(t *testing.T) { selectExclude := []string{} now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) - outputMetric := &testutil.Metric{ - Fields: map[string]interface{}{ - "replicas_available": int32(1), - "replicas_unavailable": int32(4), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "namespace": "ns1", - "deployment_name": "deploy1", - "selector_select1": "s1", - "selector_select2": "s2", - }, - } tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -96,10 +84,22 @@ func TestDeployment(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - outputMetric, - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_deployment", + map[string]string{ + "namespace": "ns1", + "deployment_name": "deploy1", + "selector_select1": "s1", + "selector_select2": "s2", + }, + map[string]interface{}{ + "replicas_available": int32(1), + "replicas_unavailable": int32(4), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -111,34 +111,23 @@ func TestDeployment(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { ks.gatherDeployment(deployment, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -293,7 +282,7 @@ func TestDeploymentSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { ks.gatherDeployment(deployment, acc) @@ -309,8 +298,7 @@ func TestDeploymentSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kube_inventory/endpoint_test.go b/plugins/inputs/kube_inventory/endpoint_test.go index 0e3203912c1f1..6feb262cbcee7 100644 --- a/plugins/inputs/kube_inventory/endpoint_test.go +++ b/plugins/inputs/kube_inventory/endpoint_test.go @@ -4,9 +4,12 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestEndpoint(t *testing.T) { @@ -18,7 +21,7 @@ func TestEndpoint(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -69,26 +72,26 @@ func TestEndpoint(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "ready": true, - "port": int32(8080), - "generation": int64(12), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "endpoint_name": "storage", - "namespace": "ns1", - "hostname": "storage-6", - "node_name": "b.storage.internal", - "port_name": "server", - "port_protocol": "TCP", - "pod": "storage-6", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_endpoint", + map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "node_name": "b.storage.internal", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", }, - }, + map[string]interface{}{ + "ready": true, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -131,26 +134,26 @@ func TestEndpoint(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "ready": false, - "port": int32(8080), - "generation": int64(12), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "endpoint_name": "storage", - "namespace": "ns1", - "hostname": "storage-6", - "node_name": "b.storage.internal", - "port_name": "server", - "port_protocol": "TCP", - "pod": "storage-6", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_endpoint", + map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "node_name": "b.storage.internal", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", }, - }, + map[string]interface{}{ + "ready": false, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -166,26 +169,15 @@ func TestEndpoint(t *testing.T) { } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/ingress_test.go b/plugins/inputs/kube_inventory/ingress_test.go index 0d8fefcd93144..cd2af76d34045 100644 --- a/plugins/inputs/kube_inventory/ingress_test.go +++ b/plugins/inputs/kube_inventory/ingress_test.go @@ -4,10 +4,13 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" v1 "k8s.io/api/core/v1" netv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestIngress(t *testing.T) { @@ -19,7 +22,7 @@ func TestIngress(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -83,26 +86,26 @@ func TestIngress(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "tls": false, - "backend_service_port": int32(8080), - "generation": int64(12), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "ingress_name": "ui-lb", - "namespace": "ns1", - "ip": "1.0.0.127", - "hostname": "chron-1", - "backend_service_name": "chronografd", - "host": "ui.internal", - "path": "/", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_ingress", + map[string]string{ + "ingress_name": "ui-lb", + "namespace": "ns1", + "ip": "1.0.0.127", + "hostname": "chron-1", + "backend_service_name": "chronografd", + "host": "ui.internal", + "path": "/", }, - }, + map[string]interface{}{ + "tls": false, + "backend_service_port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -118,26 +121,15 @@ func TestIngress(t *testing.T) { } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/node_test.go b/plugins/inputs/kube_inventory/node_test.go index d2bf07aeb3c65..560e662bcdd5c 100644 --- a/plugins/inputs/kube_inventory/node_test.go +++ b/plugins/inputs/kube_inventory/node_test.go @@ -8,7 +8,9 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestNode(t *testing.T) { @@ -19,7 +21,7 @@ func TestNode(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -98,25 +100,24 @@ func TestNode(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Measurement: nodeMeasurement, - Fields: map[string]interface{}{ - "capacity_cpu_cores": int64(16), - "capacity_millicpu_cores": int64(16000), - "capacity_memory_bytes": int64(1.28837533696e+11), - "capacity_pods": int64(110), - "allocatable_cpu_cores": int64(1), - "allocatable_millicpu_cores": int64(1000), - "allocatable_memory_bytes": int64(1.28732676096e+11), - "allocatable_pods": int64(110), - }, - Tags: map[string]string{ - "node_name": "node1", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + nodeMeasurement, + map[string]string{ + "node_name": "node1", }, - }, + map[string]interface{}{ + "capacity_cpu_cores": int64(16), + "capacity_millicpu_cores": int64(16000), + "capacity_memory_bytes": int64(1.28837533696e+11), + "capacity_pods": int64(110), + "allocatable_cpu_cores": int64(1), + "allocatable_millicpu_cores": int64(1000), + "allocatable_memory_bytes": int64(1.28732676096e+11), + "allocatable_pods": int64(110), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -132,40 +133,15 @@ func TestNode(t *testing.T) { } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - measurement := v.output.Metrics[i].Measurement - var keyTag string - switch measurement { - case nodeMeasurement: - keyTag = "node" - } - var j int - for j = range acc.Metrics { - if acc.Metrics[j].Measurement == measurement && - acc.Metrics[j].Tags[keyTag] == v.output.Metrics[i].Tags[keyTag] { - break - } - } - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[j].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s, measurement %s, j %d\n", v.name, k, m, acc.Metrics[j].Tags[k], measurement, j) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[j].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T), measurement %s, j %d\n", v.name, k, m, m, acc.Metrics[j].Fields[k], acc.Metrics[i].Fields[k], measurement, j) - } - } - } - } + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/persistentvolume_test.go b/plugins/inputs/kube_inventory/persistentvolume_test.go index 80e68605a60a1..2f62081afb7f6 100644 --- a/plugins/inputs/kube_inventory/persistentvolume_test.go +++ b/plugins/inputs/kube_inventory/persistentvolume_test.go @@ -7,7 +7,9 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestPersistentVolume(t *testing.T) { @@ -18,7 +20,7 @@ func TestPersistentVolume(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -56,19 +58,19 @@ func TestPersistentVolume(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "phase_type": 2, - }, - Tags: map[string]string{ - "pv_name": "pv1", - "storageclass": "ebs-1", - "phase": "pending", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_persistentvolume", + map[string]string{ + "pv_name": "pv1", + "storageclass": "ebs-1", + "phase": "pending", }, - }, + map[string]interface{}{ + "phase_type": 2, + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -84,26 +86,15 @@ func TestPersistentVolume(t *testing.T) { } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go index 42aec57a76368..796b055f90d9c 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go @@ -1,7 +1,6 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" @@ -9,7 +8,9 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestPersistentVolumeClaim(t *testing.T) { @@ -22,7 +23,7 @@ func TestPersistentVolumeClaim(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -68,22 +69,22 @@ func TestPersistentVolumeClaim(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "phase_type": 0, - }, - Tags: map[string]string{ - "pvc_name": "pc1", - "namespace": "ns1", - "storageclass": "ebs-1", - "phase": "bound", - "selector_select1": "s1", - "selector_select2": "s2", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_persistentvolumeclaim", + map[string]string{ + "pvc_name": "pc1", + "namespace": "ns1", + "storageclass": "ebs-1", + "phase": "bound", + "selector_select1": "s1", + "selector_select2": "s2", }, - }, + map[string]interface{}{ + "phase_type": 0, + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -95,34 +96,23 @@ func TestPersistentVolumeClaim(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*corev1.PersistentVolumeClaimList)).Items { ks.gatherPersistentVolumeClaim(pvc, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -263,7 +253,7 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*corev1.PersistentVolumeClaimList)).Items { ks.gatherPersistentVolumeClaim(pvc, acc) @@ -279,8 +269,7 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kube_inventory/pod_test.go b/plugins/inputs/kube_inventory/pod_test.go index 482331aaff026..777e15a017c7c 100644 --- a/plugins/inputs/kube_inventory/pod_test.go +++ b/plugins/inputs/kube_inventory/pod_test.go @@ -1,15 +1,17 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" - "github.com/influxdata/telegraf/testutil" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestPod(t *testing.T) { @@ -25,7 +27,7 @@ func TestPod(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -210,67 +212,73 @@ func TestPod(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Measurement: podContainerMeasurement, - Fields: map[string]interface{}{ - "restarts_total": int32(3), - "state_code": 0, - "resource_requests_millicpu_units": int64(100), - "resource_limits_millicpu_units": int64(100), - }, - Tags: map[string]string{ - "namespace": "ns1", - "container_name": "running", - "node_name": "node1", - "pod_name": "pod1", - "phase": "Running", - "state": "running", - "readiness": "ready", - "node_selector_select1": "s1", - "node_selector_select2": "s2", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + podContainerMeasurement, + map[string]string{ + "namespace": "ns1", + "container_name": "running", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Running", + "state": "running", + "readiness": "ready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", }, - { - Measurement: podContainerMeasurement, - Fields: map[string]interface{}{ - "restarts_total": int32(3), - "state_code": 1, - "state_reason": "Completed", - "resource_requests_millicpu_units": int64(100), - "resource_limits_millicpu_units": int64(100), - }, - Tags: map[string]string{ - "namespace": "ns1", - "container_name": "completed", - "node_name": "node1", - "pod_name": "pod1", - "phase": "Running", - "state": "terminated", - "readiness": "unready", - }, + map[string]interface{}{ + "restarts_total": int32(3), + "state_code": 0, + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), }, - { - Measurement: podContainerMeasurement, - Fields: map[string]interface{}{ - "restarts_total": int32(3), - "state_code": 2, - "state_reason": "PodUninitialized", - "resource_requests_millicpu_units": int64(100), - "resource_limits_millicpu_units": int64(100), - }, - Tags: map[string]string{ - "namespace": "ns1", - "container_name": "waiting", - "node_name": "node1", - "pod_name": "pod1", - "phase": "Running", - "state": "waiting", - "readiness": "unready", - }, + time.Unix(0, 0), + ), + testutil.MustMetric( + podContainerMeasurement, + map[string]string{ + "namespace": "ns1", + "container_name": "completed", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Running", + "state": "terminated", + "readiness": "unready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", }, - }, + map[string]interface{}{ + "restarts_total": int32(3), + "state_code": 1, + "state_reason": "Completed", + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), + "terminated_reason": "Completed", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + podContainerMeasurement, + map[string]string{ + "namespace": "ns1", + "container_name": "waiting", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Running", + "state": "waiting", + "readiness": "unready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", + }, + map[string]interface{}{ + "restarts_total": int32(3), + "state_code": 2, + "state_reason": "PodUninitialized", + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -281,34 +289,23 @@ func TestPod(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items { ks.gatherPod(pod, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s, i %d\n", v.name, k, m, acc.Metrics[i].Tags[k], i) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T), i %d\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k], i) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -527,7 +524,7 @@ func TestPodSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items { ks.gatherPod(pod, acc) @@ -543,9 +540,8 @@ func TestPodSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } @@ -562,7 +558,7 @@ func TestPodPendingContainers(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -679,49 +675,51 @@ func TestPodPendingContainers(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Measurement: podContainerMeasurement, - Fields: map[string]interface{}{ - "phase_reason": "NetworkNotReady", - "restarts_total": int32(0), - "state_code": 3, - "resource_requests_millicpu_units": int64(100), - "resource_limits_millicpu_units": int64(100), - }, - Tags: map[string]string{ - "namespace": "ns1", - "container_name": "waiting", - "node_name": "node1", - "pod_name": "pod1", - "phase": "Pending", - "state": "unknown", - "readiness": "unready", - "node_selector_select1": "s1", - "node_selector_select2": "s2", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + podContainerMeasurement, + map[string]string{ + "namespace": "ns1", + "container_name": "waiting", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Pending", + "state": "unknown", + "readiness": "unready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", }, - { - Measurement: podContainerMeasurement, - Fields: map[string]interface{}{ - "phase_reason": "NetworkNotReady", - "restarts_total": int32(0), - "state_code": 3, - "resource_requests_millicpu_units": int64(100), - "resource_limits_millicpu_units": int64(100), - }, - Tags: map[string]string{ - "namespace": "ns1", - "container_name": "terminated", - "node_name": "node1", - "pod_name": "pod1", - "phase": "Pending", - "state": "unknown", - "readiness": "unready", - }, + map[string]interface{}{ + "phase_reason": "NetworkNotReady", + "restarts_total": int32(0), + "state_code": 3, + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), }, - }, + time.Unix(0, 0), + ), + testutil.MustMetric( + podContainerMeasurement, + map[string]string{ + "namespace": "ns1", + "container_name": "terminated", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Pending", + "state": "unknown", + "readiness": "unready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", + }, + map[string]interface{}{ + "phase_reason": "NetworkNotReady", + "restarts_total": int32(0), + "state_code": 3, + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -732,33 +730,22 @@ func TestPodPendingContainers(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items { ks.gatherPod(pod, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s, i %d\n", v.name, k, m, acc.Metrics[i].Tags[k], i) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T), i %d\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k], i) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/service_test.go b/plugins/inputs/kube_inventory/service_test.go index 293152074789a..b89a45a45dd5c 100644 --- a/plugins/inputs/kube_inventory/service_test.go +++ b/plugins/inputs/kube_inventory/service_test.go @@ -1,17 +1,17 @@ package kube_inventory import ( - "reflect" - + "strings" "testing" "time" - "github.com/influxdata/telegraf/testutil" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "strings" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestService(t *testing.T) { @@ -22,7 +22,7 @@ func TestService(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool include []string exclude []string @@ -73,27 +73,27 @@ func TestService(t *testing.T) { }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "port": int32(8080), - "target_port": int32(1234), - "generation": int64(12), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "service_name": "checker", - "namespace": "ns1", - "port_name": "diagnostic", - "port_protocol": "TCP", - "cluster_ip": "127.0.0.1", - "ip": "1.0.0.127", - "selector_select1": "s1", - "selector_select2": "s2", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_service", + map[string]string{ + "service_name": "checker", + "namespace": "ns1", + "port_name": "diagnostic", + "port_protocol": "TCP", + "cluster_ip": "127.0.0.1", + "ip": "1.0.0.127", + "selector_select1": "s1", + "selector_select2": "s2", }, - }, + map[string]interface{}{ + "port": int32(8080), + "target_port": int32(1234), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -105,34 +105,23 @@ func TestService(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, service := range ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items { ks.gatherService(service, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -275,7 +264,7 @@ func TestServiceSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, service := range ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items { ks.gatherService(service, acc) @@ -291,8 +280,7 @@ func TestServiceSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kube_inventory/statefulset_test.go b/plugins/inputs/kube_inventory/statefulset_test.go index a6d703c205acf..cbbc453f58f35 100644 --- a/plugins/inputs/kube_inventory/statefulset_test.go +++ b/plugins/inputs/kube_inventory/statefulset_test.go @@ -1,7 +1,6 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" @@ -9,7 +8,9 @@ import ( v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestStatefulSet(t *testing.T) { @@ -21,7 +22,7 @@ func TestStatefulSet(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -67,27 +68,27 @@ func TestStatefulSet(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "generation": int64(332), - "observed_generation": int64(119), - "created": now.UnixNano(), - "spec_replicas": int32(3), - "replicas": int32(2), - "replicas_current": int32(4), - "replicas_ready": int32(1), - "replicas_updated": int32(3), - }, - Tags: map[string]string{ - "namespace": "ns1", - "statefulset_name": "sts1", - "selector_select1": "s1", - "selector_select2": "s2", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_statefulset", + map[string]string{ + "namespace": "ns1", + "statefulset_name": "sts1", + "selector_select1": "s1", + "selector_select2": "s2", }, - }, + map[string]interface{}{ + "generation": int64(332), + "observed_generation": int64(119), + "created": now.UnixNano(), + "spec_replicas": int32(3), + "replicas": int32(2), + "replicas_current": int32(4), + "replicas_ready": int32(1), + "replicas_updated": int32(3), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -99,34 +100,23 @@ func TestStatefulSet(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() - acc := new(testutil.Accumulator) + require.NoError(t, ks.createSelectorFilters()) + acc := &testutil.Accumulator{} for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { ks.gatherStatefulSet(ss, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -267,7 +257,7 @@ func TestStatefulSetSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { ks.gatherStatefulSet(ss, acc) @@ -283,8 +273,7 @@ func TestStatefulSetSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kubernetes/kubernetes_test.go b/plugins/inputs/kubernetes/kubernetes_test.go index eb6d285525eb3..531dd13f950c8 100644 --- a/plugins/inputs/kubernetes/kubernetes_test.go +++ b/plugins/inputs/kubernetes/kubernetes_test.go @@ -15,11 +15,13 @@ func TestKubernetesStats(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.RequestURI == "/stats/summary" { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, responseStatsSummery) + _, err := fmt.Fprintln(w, responseStatsSummery) + require.NoError(t, err) } if r.RequestURI == "/pods" { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, responsePods) + _, err := fmt.Fprintln(w, responsePods) + require.NoError(t, err) } })) defer ts.Close() diff --git a/plugins/inputs/leofs/leofs.go b/plugins/inputs/leofs/leofs.go index 7e5ae25d4743d..bcb992b6fb6f7 100644 --- a/plugins/inputs/leofs/leofs.go +++ b/plugins/inputs/leofs/leofs.go @@ -162,8 +162,7 @@ func (l *LeoFS) Description() string { func (l *LeoFS) Gather(acc telegraf.Accumulator) error { if len(l.Servers) == 0 { - l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc) - return nil + return l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc) } var wg sync.WaitGroup for _, endpoint := range l.Servers { @@ -206,7 +205,11 @@ func (l *LeoFS) gatherServer( if err != nil { return err } - cmd.Start() + if err := cmd.Start(); err != nil { + return err + } + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive defer internal.WaitTimeout(cmd, time.Second*5) scanner := bufio.NewScanner(stdout) if !scanner.Scan() { diff --git a/plugins/inputs/leofs/leofs_test.go b/plugins/inputs/leofs/leofs_test.go index 6d7799d0b8cdc..513d2f5ed7de7 100644 --- a/plugins/inputs/leofs/leofs_test.go +++ b/plugins/inputs/leofs/leofs_test.go @@ -1,15 +1,14 @@ package leofs import ( - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "io/ioutil" - "log" "os" "os/exec" "runtime" "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var fakeSNMP4Manager = ` @@ -125,22 +124,6 @@ func main() { } ` -func makeFakeSNMPSrc(code string) string { - path := os.TempDir() + "/test.go" - err := ioutil.WriteFile(path, []byte(code), 0600) - if err != nil { - log.Fatalln(err) - } - return path -} - -func buildFakeSNMPCmd(src string, executable string) { - err := exec.Command("go", "build", "-o", executable, src).Run() - if err != nil { - log.Fatalln(err) - } -} - func testMain(t *testing.T, code string, endpoint string, serverType ServerType) { executable := "snmpwalk" if runtime.GOOS == "windows" { @@ -148,14 +131,16 @@ func testMain(t *testing.T, code string, endpoint string, serverType ServerType) } // Build the fake snmpwalk for test - src := makeFakeSNMPSrc(code) + src := os.TempDir() + "/test.go" + require.NoError(t, ioutil.WriteFile(src, []byte(code), 0600)) defer os.Remove(src) - buildFakeSNMPCmd(src, executable) + + require.NoError(t, exec.Command("go", "build", "-o", executable, src).Run()) defer os.Remove("./" + executable) envPathOrigin := os.Getenv("PATH") // Refer to the fake snmpwalk - os.Setenv("PATH", ".") + require.NoError(t, os.Setenv("PATH", ".")) defer os.Setenv("PATH", envPathOrigin) l := &LeoFS{ @@ -171,7 +156,7 @@ func testMain(t *testing.T, code string, endpoint string, serverType ServerType) floatMetrics := KeyMapping[serverType] for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("leofs", metric), metric) + require.True(t, acc.HasFloatField("leofs", metric), metric) } } diff --git a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go index 5aa75f07514e7..55cb22292105a 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go @@ -2,6 +2,7 @@ package linux_sysctl_fs import ( "bytes" + "errors" "io/ioutil" "os" "strconv" @@ -30,6 +31,10 @@ func (sfs SysctlFS) SampleConfig() string { func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fieldNames ...string) error { bs, err := ioutil.ReadFile(sfs.path + "/" + file) if err != nil { + // Ignore non-existing entries + if errors.Is(err, os.ErrNotExist) { + return nil + } return err } @@ -55,6 +60,10 @@ func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fiel func (sfs *SysctlFS) gatherOne(name string, fields map[string]interface{}) error { bs, err := ioutil.ReadFile(sfs.path + "/" + name) if err != nil { + // Ignore non-existing entries + if errors.Is(err, os.ErrNotExist) { + return nil + } return err } @@ -71,12 +80,23 @@ func (sfs *SysctlFS) Gather(acc telegraf.Accumulator) error { fields := map[string]interface{}{} for _, n := range []string{"aio-nr", "aio-max-nr", "dquot-nr", "dquot-max", "super-nr", "super-max"} { - sfs.gatherOne(n, fields) + if err := sfs.gatherOne(n, fields); err != nil { + return err + } } - sfs.gatherList("inode-state", fields, "inode-nr", "inode-free-nr", "inode-preshrink-nr") - sfs.gatherList("dentry-state", fields, "dentry-nr", "dentry-unused-nr", "dentry-age-limit", "dentry-want-pages") - sfs.gatherList("file-nr", fields, "file-nr", "", "file-max") + err := sfs.gatherList("inode-state", fields, "inode-nr", "inode-free-nr", "inode-preshrink-nr") + if err != nil { + return err + } + err = sfs.gatherList("dentry-state", fields, "dentry-nr", "dentry-unused-nr", "dentry-age-limit", "dentry-want-pages") + if err != nil { + return err + } + err = sfs.gatherList("file-nr", fields, "file-nr", "", "file-max") + if err != nil { + return err + } acc.AddFields("linux_sysctl_fs", fields, nil) return nil diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go index 931af66b23fd6..d8db3475a1e95 100644 --- a/plugins/inputs/logstash/logstash_test.go +++ b/plugins/inputs/logstash/logstash_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var logstashTest = NewLogstash() @@ -26,28 +27,23 @@ var ( func Test_Logstash5GatherProcessStats(test *testing.T) { fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash5ProcessJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash5ProcessJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { client, err := logstashTest.createHTTPClient() - - if err != nil { - test.Logf("Can't createHTTPClient") - } + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash5accProcessStats); err != nil { - test.Logf("Can't gather Process stats") - } + err = logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash5accProcessStats) + require.NoError(test, err, "Can't gather Process stats") logstash5accProcessStats.AssertContainsTaggedFields( test, @@ -75,28 +71,23 @@ func Test_Logstash5GatherProcessStats(test *testing.T) { func Test_Logstash6GatherProcessStats(test *testing.T) { fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash6ProcessJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash6ProcessJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { client, err := logstashTest.createHTTPClient() - - if err != nil { - test.Logf("Can't createHTTPClient") - } + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash6accProcessStats); err != nil { - test.Logf("Can't gather Process stats") - } + err = logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash6accProcessStats) + require.NoError(test, err, "Can't gather Process stats") logstash6accProcessStats.AssertContainsTaggedFields( test, @@ -125,28 +116,23 @@ func Test_Logstash5GatherPipelineStats(test *testing.T) { //logstash5accPipelineStats.SetDebug(true) fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash5PipelineJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash5PipelineJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { client, err := logstashTest.createHTTPClient() - - if err != nil { - test.Logf("Can't createHTTPClient") - } + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherPipelineStats(logstashTest.URL+pipelineStats, &logstash5accPipelineStats); err != nil { - test.Logf("Can't gather Pipeline stats") - } + err = logstashTest.gatherPipelineStats(logstashTest.URL+pipelineStats, &logstash5accPipelineStats) + require.NoError(test, err, "Can't gather Pipeline stats") logstash5accPipelineStats.AssertContainsTaggedFields( test, @@ -227,28 +213,23 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { //logstash6accPipelinesStats.SetDebug(true) fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash6PipelinesJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash6PipelinesJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { client, err := logstashTest.createHTTPClient() - - if err != nil { - test.Logf("Can't createHTTPClient") - } + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash6accPipelinesStats); err != nil { - test.Logf("Can't gather Pipeline stats") - } + err = logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash6accPipelinesStats) + require.NoError(test, err, "Can't gather Pipeline stats") fields := make(map[string]interface{}) fields["duration_in_millis"] = float64(8540751.0) @@ -555,28 +536,23 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { func Test_Logstash5GatherJVMStats(test *testing.T) { fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash5JvmJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash5JvmJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { client, err := logstashTest.createHTTPClient() - - if err != nil { - test.Logf("Can't createHTTPClient") - } + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash5accJVMStats); err != nil { - test.Logf("Can't gather JVM stats") - } + err = logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash5accJVMStats) + require.NoError(test, err, "Can't gather JVM stats") logstash5accJVMStats.AssertContainsTaggedFields( test, @@ -623,28 +599,23 @@ func Test_Logstash5GatherJVMStats(test *testing.T) { func Test_Logstash6GatherJVMStats(test *testing.T) { fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash6JvmJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash6JvmJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { client, err := logstashTest.createHTTPClient() - - if err != nil { - test.Logf("Can't createHTTPClient") - } + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash6accJVMStats); err != nil { - test.Logf("Can't gather JVM stats") - } + err = logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash6accJVMStats) + require.NoError(test, err, "Can't gather JVM stats") logstash6accJVMStats.AssertContainsTaggedFields( test, diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index 0e62fccd6d5dd..259e64a0e3104 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -76,7 +76,9 @@ func (e APIError) Error() string { func chimpErrorCheck(body []byte) error { var e APIError - json.Unmarshal(body, &e) + if err := json.Unmarshal(body, &e); err != nil { + return err + } if e.Title != "" || e.Status != 0 { return e } diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go index c35a706600742..1366d8859df5d 100644 --- a/plugins/inputs/mailchimp/mailchimp_test.go +++ b/plugins/inputs/mailchimp/mailchimp_test.go @@ -17,7 +17,8 @@ func TestMailChimpGatherReports(t *testing.T) { http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, sampleReports) + _, err := fmt.Fprintln(w, sampleReports) + require.NoError(t, err) }, )) defer ts.Close() @@ -80,7 +81,8 @@ func TestMailChimpGatherReport(t *testing.T) { http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, sampleReport) + _, err := fmt.Fprintln(w, sampleReport) + require.NoError(t, err) }, )) defer ts.Close() @@ -144,7 +146,8 @@ func TestMailChimpGatherError(t *testing.T) { http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, sampleError) + _, err := fmt.Fprintln(w, sampleError) + require.NoError(t, err) }, )) defer ts.Close() diff --git a/plugins/inputs/marklogic/marklogic_test.go b/plugins/inputs/marklogic/marklogic_test.go index a809f850ff3b4..5c39fac19051d 100644 --- a/plugins/inputs/marklogic/marklogic_test.go +++ b/plugins/inputs/marklogic/marklogic_test.go @@ -15,7 +15,8 @@ func TestMarklogic(t *testing.T) { // Create a test server with the const response JSON ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, response) + _, err := fmt.Fprintln(w, response) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/mcrouter/mcrouter.go b/plugins/inputs/mcrouter/mcrouter.go index d6303c87758e4..b93044f1c1e6c 100644 --- a/plugins/inputs/mcrouter/mcrouter.go +++ b/plugins/inputs/mcrouter/mcrouter.go @@ -213,7 +213,9 @@ func (m *Mcrouter) gatherServer(ctx context.Context, address string, acc telegra deadline, ok := ctx.Deadline() if ok { - conn.SetDeadline(deadline) + if err := conn.SetDeadline(deadline); err != nil { + return err + } } // Read and write buffer diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go index 99128263ade10..eefb3f85441ea 100644 --- a/plugins/inputs/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -129,7 +129,9 @@ func (m *Memcached) gatherServer( } // Extend connection - conn.SetDeadline(time.Now().Add(defaultTimeout)) + if err := conn.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return err + } // Read and write buffer rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index f4079464fc601..acb79ce5724e5 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -559,6 +559,8 @@ func (m *Mesos) gatherMainMetrics(u *url.URL, role Role, acc telegraf.Accumulato } data, err := ioutil.ReadAll(resp.Body) + // Ignore the returned error to not shadow the initial one + //nolint:errcheck,revive resp.Body.Close() if err != nil { return err diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index f06052c07f469..4b6d5ab74d371 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -278,31 +278,6 @@ func generateMetrics() { for _, k := range slaveMetricNames { slaveMetrics[k] = rand.Float64() } - // slaveTaskMetrics = map[string]interface{}{ - // "executor_id": fmt.Sprintf("task_name.%s", randUUID()), - // "executor_name": "Some task description", - // "framework_id": randUUID(), - // "source": fmt.Sprintf("task_source.%s", randUUID()), - // "statistics": map[string]interface{}{ - // "cpus_limit": rand.Float64(), - // "cpus_system_time_secs": rand.Float64(), - // "cpus_user_time_secs": rand.Float64(), - // "mem_anon_bytes": float64(rand.Int63()), - // "mem_cache_bytes": float64(rand.Int63()), - // "mem_critical_pressure_counter": float64(rand.Int63()), - // "mem_file_bytes": float64(rand.Int63()), - // "mem_limit_bytes": float64(rand.Int63()), - // "mem_low_pressure_counter": float64(rand.Int63()), - // "mem_mapped_file_bytes": float64(rand.Int63()), - // "mem_medium_pressure_counter": float64(rand.Int63()), - // "mem_rss_bytes": float64(rand.Int63()), - // "mem_swap_bytes": float64(rand.Int63()), - // "mem_total_bytes": float64(rand.Int63()), - // "mem_total_memsw_bytes": float64(rand.Int63()), - // "mem_unevictable_bytes": float64(rand.Int63()), - // "timestamp": rand.Float64(), - // }, - // } } func TestMain(m *testing.M) { @@ -312,6 +287,8 @@ func TestMain(m *testing.M) { masterRouter.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/json") + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive json.NewEncoder(w).Encode(masterMetrics) }) masterTestServer = httptest.NewServer(masterRouter) @@ -320,13 +297,10 @@ func TestMain(m *testing.M) { slaveRouter.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/json") + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive json.NewEncoder(w).Encode(slaveMetrics) }) - // slaveRouter.HandleFunc("/monitor/statistics", func(w http.ResponseWriter, r *http.Request) { - // w.WriteHeader(http.StatusOK) - // w.Header().Set("Content-Type", "application/json") - // json.NewEncoder(w).Encode([]map[string]interface{}{slaveTaskMetrics}) - // }) slaveTestServer = httptest.NewServer(slaveRouter) rc := m.Run() @@ -345,11 +319,7 @@ func TestMesosMaster(t *testing.T) { Timeout: 10, } - err := acc.GatherError(m.Gather) - - if err != nil { - t.Errorf(err.Error()) - } + require.NoError(t, acc.GatherError(m.Gather)) acc.AssertContainsFields(t, "mesos", masterMetrics) } @@ -371,9 +341,8 @@ func TestMasterFilter(t *testing.T) { // Assert expected metrics are present. for _, v := range m.MasterCols { for _, x := range getMetrics(MASTER, v) { - if _, ok := masterMetrics[x]; !ok { - t.Errorf("Didn't find key %s, it should present.", x) - } + _, ok := masterMetrics[x] + require.Truef(t, ok, "Didn't find key %s, it should present.", x) } } // m.MasterCols includes "allocator", so allocator metrics should be present. @@ -381,18 +350,16 @@ func TestMasterFilter(t *testing.T) { // getMetrics(). We have to find them by checking name prefixes. for _, x := range masterMetricNames { if strings.HasPrefix(x, "allocator/") { - if _, ok := masterMetrics[x]; !ok { - t.Errorf("Didn't find key %s, it should be present.", x) - } + _, ok := masterMetrics[x] + require.Truef(t, ok, "Didn't find key %s, it should present.", x) } } // Assert unexpected metrics are not present. for _, v := range b { for _, x := range getMetrics(MASTER, v) { - if _, ok := masterMetrics[x]; ok { - t.Errorf("Found key %s, it should be gone.", x) - } + _, ok := masterMetrics[x] + require.Falsef(t, ok, "Found key %s, it should be gone.", x) } } // m.MasterCols does not include "framework_offers", so framework_offers metrics should not be present. @@ -400,7 +367,7 @@ func TestMasterFilter(t *testing.T) { // getMetrics(). We have to find them by checking name prefixes. for k := range masterMetrics { if strings.HasPrefix(k, "master/frameworks/") || strings.HasPrefix(k, "frameworks/") { - t.Errorf("Found key %s, it should be gone.", k) + require.Failf(t, "Found key %s, it should be gone.", k) } } } @@ -416,11 +383,7 @@ func TestMesosSlave(t *testing.T) { Timeout: 10, } - err := acc.GatherError(m.Gather) - - if err != nil { - t.Errorf(err.Error()) - } + require.NoError(t, acc.GatherError(m.Gather)) acc.AssertContainsFields(t, "mesos", slaveMetrics) } @@ -440,16 +403,14 @@ func TestSlaveFilter(t *testing.T) { for _, v := range b { for _, x := range getMetrics(SLAVE, v) { - if _, ok := slaveMetrics[x]; ok { - t.Errorf("Found key %s, it should be gone.", x) - } + _, ok := slaveMetrics[x] + require.Falsef(t, ok, "Found key %s, it should be gone.", x) } } for _, v := range m.MasterCols { for _, x := range getMetrics(SLAVE, v) { - if _, ok := slaveMetrics[x]; !ok { - t.Errorf("Didn't find key %s, it should present.", x) - } + _, ok := slaveMetrics[x] + require.Truef(t, ok, "Didn't find key %s, it should present.", x) } } } diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go index 12d76a366c231..ccc020edb4fb6 100644 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ b/plugins/inputs/minecraft/internal/rcon/rcon.go @@ -74,8 +74,12 @@ func (p Packet) Compile() (payload []byte, err error) { return } - buffer.WriteString(p.Body) - buffer.Write(padding[:]) + if _, err := buffer.WriteString(p.Body); err != nil { + return nil, err + } + if _, err := buffer.Write(padding[:]); err != nil { + return nil, err + } return buffer.Bytes(), nil } @@ -115,85 +119,90 @@ func (c *Client) Execute(command string) (response *Packet, err error) { // and compiling its payload bytes in the appropriate order. The response is // decompiled from its bytes into a Packet type for return. An error is returned // if send fails. -func (c *Client) Send(typ int32, command string) (response *Packet, err error) { +func (c *Client) Send(typ int32, command string) (*Packet, error) { if typ != Auth && !c.Authorized { - err = ErrUnauthorizedRequest - return + return nil, ErrUnauthorizedRequest } // Create a random challenge for the server to mirror in its response. var challenge int32 - binary.Read(rand.Reader, binary.LittleEndian, &challenge) + if err := binary.Read(rand.Reader, binary.LittleEndian, &challenge); nil != err { + return nil, err + } // Create the packet from the challenge, typ and command // and compile it to its byte payload packet := NewPacket(challenge, typ, command) payload, err := packet.Compile() + if nil != err { + return nil, err + } - var n int - + n, err := c.Connection.Write(payload) if nil != err { - return - } else if n, err = c.Connection.Write(payload); nil != err { - return - } else if n != len(payload) { - err = ErrInvalidWrite - return + return nil, err + } + if n != len(payload) { + return nil, ErrInvalidWrite } var header Header - - if err = binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { - return - } else if err = binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { - return - } else if err = binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { - return + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { + return nil, err + } + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { + return nil, err + } + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { + return nil, err } if packet.Header.Type == Auth && header.Type == ResponseValue { // Discard, empty SERVERDATA_RESPONSE_VALUE from authorization. - c.Connection.Read(make([]byte, header.Size-int32(PacketHeaderSize))) + if _, err := c.Connection.Read(make([]byte, header.Size-int32(PacketHeaderSize))); nil != err { + return nil, err + } // Reread the packet header. - if err = binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { - return - } else if err = binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { - return - } else if err = binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { - return + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { + return nil, err + } + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { + return nil, err + } + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { + return nil, err } } if header.Challenge != packet.Header.Challenge { - err = ErrInvalidChallenge - return + return nil, ErrInvalidChallenge } body := make([]byte, header.Size-int32(PacketHeaderSize)) n, err = c.Connection.Read(body) - for n < len(body) { var nBytes int nBytes, err = c.Connection.Read(body[n:]) if err != nil { - return + return nil, err } n += nBytes } + // Shouldn't this be moved up to the first read? if nil != err { - return - } else if n != len(body) { - err = ErrInvalidRead - return + return nil, err + } + if n != len(body) { + return nil, ErrInvalidRead } - response = new(Packet) + response := new(Packet) response.Header = header response.Body = strings.TrimRight(string(body), TerminationSequence) - return + return response, nil } // NewClient creates a new Client type, creating the connection diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index 46156dc09fecd..d7c5b1d92f0c5 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -682,6 +682,8 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error { time.Sleep(m.RetriesWaitTime.Duration) continue } + // Ignore return error to not shadow the initial error + //nolint:errcheck,revive disconnect(m) m.isConnected = false return err @@ -705,7 +707,9 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error { } // Group the data by series - grouper.Add(measurement, tags, timestamp, field.Name, field.value) + if err := grouper.Add(measurement, tags, timestamp, field.Name, field.value); err != nil { + return err + } } // Add the metrics grouped by series to the accumulator diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index 397e6da463335..d506562106da2 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -648,7 +648,7 @@ func TestHoldingRegisters(t *testing.T) { err = modbus.Init() assert.NoError(t, err) var acc testutil.Accumulator - modbus.Gather(&acc) + assert.NoError(t, modbus.Gather(&acc)) assert.NotEmpty(t, modbus.registers) for _, coil := range modbus.registers { diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 355c12caffef6..82a1b75c4e4fb 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -84,8 +84,7 @@ var localhost = &url.URL{Host: "mongodb://127.0.0.1:27017"} // Returns one of the errors encountered while gather stats (if any). func (m *MongoDB) Gather(acc telegraf.Accumulator) error { if len(m.Servers) == 0 { - m.gatherServer(m.getMongoServer(localhost), acc) - return nil + return m.gatherServer(m.getMongoServer(localhost), acc) } var wg sync.WaitGroup @@ -174,11 +173,7 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { // If configured to use TLS, add a dial function if tlsConfig != nil { dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) { - conn, err := tls.Dial("tcp", addr.String(), tlsConfig) - if err != nil { - fmt.Printf("error in Dial, %s\n", err.Error()) - } - return conn, err + return tls.Dial("tcp", addr.String(), tlsConfig) } } diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index 0381998d13ba0..b3bbed79f68e1 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -335,14 +335,12 @@ func TestServiceType(t *testing.T) { Address: ts.URL, } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator - err := plugin.Gather(&acc) - require.NoError(t, err) + require.NoError(t, plugin.Gather(&acc)) - testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), - testutil.IgnoreTime()) + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } @@ -534,14 +532,12 @@ func TestMonitFailure(t *testing.T) { Address: ts.URL, } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator - err := plugin.Gather(&acc) - require.NoError(t, err) + require.NoError(t, plugin.Gather(&acc)) - testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), - testutil.IgnoreTime()) + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } @@ -566,10 +562,8 @@ func TestAllowHosts(t *testing.T) { r.client.Transport = &transportMock{} err := r.Gather(&acc) - - if assert.Error(t, err) { - assert.Contains(t, err.Error(), "read: connection reset by peer") - } + require.Error(t, err) + require.Contains(t, err.Error(), "read: connection reset by peer") } func TestConnection(t *testing.T) { @@ -579,14 +573,14 @@ func TestConnection(t *testing.T) { Password: "test", } - r.Init() + require.NoError(t, r.Init()) var acc testutil.Accumulator + err := r.Gather(&acc) - if assert.Error(t, err) { - _, ok := err.(*url.Error) - assert.True(t, ok) - } + require.Error(t, err) + _, ok := err.(*url.Error) + require.True(t, ok) } func TestInvalidUsernameOrPassword(t *testing.T) { @@ -596,12 +590,8 @@ func TestInvalidUsernameOrPassword(t *testing.T) { return } - switch r.URL.Path { - case "/_status": - http.ServeFile(w, r, "testdata/response_servicetype_0.xml") - default: - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/_status", "Cannot handle request") + http.ServeFile(w, r, "testdata/response_servicetype_0.xml") })) defer ts.Close() @@ -614,11 +604,10 @@ func TestInvalidUsernameOrPassword(t *testing.T) { var acc testutil.Accumulator - r.Init() + require.NoError(t, r.Init()) err := r.Gather(&acc) - - assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") + require.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") } func TestNoUsernameOrPasswordConfiguration(t *testing.T) { @@ -628,12 +617,8 @@ func TestNoUsernameOrPasswordConfiguration(t *testing.T) { return } - switch r.URL.Path { - case "/_status": - http.ServeFile(w, r, "testdata/response_servicetype_0.xml") - default: - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/_status", "Cannot handle request") + http.ServeFile(w, r, "testdata/response_servicetype_0.xml") })) defer ts.Close() @@ -644,10 +629,9 @@ func TestNoUsernameOrPasswordConfiguration(t *testing.T) { var acc testutil.Accumulator - r.Init() + require.NoError(t, r.Init()) err := r.Gather(&acc) - assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") } @@ -685,14 +669,13 @@ func TestInvalidXMLAndInvalidTypes(t *testing.T) { Address: ts.URL, } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator - err := plugin.Gather(&acc) - if assert.Error(t, err) { - assert.Contains(t, err.Error(), "error parsing input:") - } + err := plugin.Gather(&acc) + require.Error(t, err) + require.Contains(t, err.Error(), "error parsing input:") }) } } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index f8304be10348b..603b4228db5d1 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -207,9 +207,7 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { } m.state = Connecting - m.connect() - - return nil + return m.connect() } func (m *MQTTConsumer) connect() error { @@ -313,7 +311,7 @@ func (m *MQTTConsumer) Gather(_ telegraf.Accumulator) error { if m.state == Disconnected { m.state = Connecting m.Log.Debugf("Connecting %v", m.Servers) - m.connect() + return m.connect() } return nil diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index faec0b73c7078..5f8c2918abdd6 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -185,7 +185,9 @@ func (m *Mysql) Gather(acc telegraf.Accumulator) error { } if tlsConfig != nil { - mysql.RegisterTLSConfig("custom", tlsConfig) + if err := mysql.RegisterTLSConfig("custom", tlsConfig); err != nil { + return err + } } var wg sync.WaitGroup @@ -453,7 +455,7 @@ const ( sum_sort_rows, sum_sort_scan, sum_no_index_used, - sum_no_good_index_used + sum_no_good_index_used FROM performance_schema.events_statements_summary_by_account_by_event_name ` ) diff --git a/plugins/inputs/nats/nats_test.go b/plugins/inputs/nats/nats_test.go index ece22288ff9af..7207df94cfd02 100644 --- a/plugins/inputs/nats/nats_test.go +++ b/plugins/inputs/nats/nats_test.go @@ -69,12 +69,17 @@ var sampleVarz = ` func TestMetricsCorrect(t *testing.T) { var acc testutil.Accumulator - srv := newTestNatsServer() + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/varz", "Cannot handle request") + + rsp := sampleVarz + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) + })) defer srv.Close() n := &Nats{Server: srv.URL} - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) fields := map[string]interface{}{ "in_msgs": int64(74148556), @@ -97,18 +102,3 @@ func TestMetricsCorrect(t *testing.T) { } acc.AssertContainsTaggedFields(t, "nats", fields, tags) } - -func newTestNatsServer() *httptest.Server { - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var rsp string - - switch r.URL.Path { - case "/varz": - rsp = sampleVarz - default: - panic("Cannot handle request") - } - - fmt.Fprintln(w, rsp) - })) -} diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index fc5710e9fbadb..dd2bbeb3d9227 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -1,22 +1,23 @@ package neptuneapex import ( - "bytes" "context" "net" "net/http" "net/http/httptest" - "reflect" "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestGather(t *testing.T) { h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotFound) - w.Write([]byte("data")) + _, err := w.Write([]byte("data")) + require.NoError(t, err) }) c, destroy := fakeHTTPClient(h) defer destroy() @@ -46,11 +47,9 @@ func TestGather(t *testing.T) { t.Run(test.name, func(t *testing.T) { var acc testutil.Accumulator n.Servers = test.servers - n.Gather(&acc) - if len(acc.Errors) != len(test.servers) { - t.Errorf("Number of servers mismatch. got=%d, want=%d", - len(acc.Errors), len(test.servers)) - } + require.NoError(t, n.Gather(&acc)) + require.Lenf(t, acc.Errors, len(test.servers), + "Number of servers mismatch. got=%d, want=%d", len(acc.Errors), len(test.servers)) }) } } @@ -62,33 +61,32 @@ func TestParseXML(t *testing.T) { tests := []struct { name string xmlResponse []byte - wantMetrics []*testutil.Metric + wantMetrics []telegraf.Metric wantAccErr bool wantErr bool }{ { name: "Good test", xmlResponse: []byte(APEX2016), - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "type": "controller", "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "AC5:12345", "power_failed": int64(1544814000000000000), "power_restored": int64(1544833875000000000), }, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "0", "device_id": "base_Var1", @@ -98,12 +96,12 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"state": "PF1"}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"state": "PF1"}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "6", "device_id": "base_email", @@ -113,12 +111,12 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"state": "AOF"}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"state": "AOF"}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "8", "device_id": "2_1", @@ -128,16 +126,16 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "state": "AON", "watt": 35.0, "amp": 0.3, }, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "18", "device_id": "3_1", @@ -147,15 +145,15 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "state": "TBL", "xstatus": "OK", }, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "28", "device_id": "4_9", @@ -165,12 +163,12 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"state": "AOF"}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"state": "AOF"}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "32", "device_id": "Cntl_A2", @@ -180,12 +178,12 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"state": "AOF"}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"state": "AOF"}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "name": "Salt", "type": "probe", @@ -193,20 +191,21 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"value": 30.1}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"value": 30.1}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "name": "Volt_2", "type": "probe", "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"value": 115.0}, - }, + map[string]interface{}{"value": 115.0}, + goodTime, + ), }, }, { @@ -225,21 +224,21 @@ func TestParseXML(t *testing.T) { `12/22/2018 21:55:37 -8.0a 12/22/2018 22:55:37`), - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_restored": int64(1545548137000000000), }, - }, + goodTime, + ), }, }, { @@ -248,21 +247,21 @@ func TestParseXML(t *testing.T) { `12/22/2018 21:55:37 -8.0a 12/22/2018 22:55:37`), - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_failed": int64(1545548137000000000), }, - }, + goodTime, + ), }, }, { @@ -282,22 +281,22 @@ func TestParseXML(t *testing.T) { o1Wabc `), wantAccErr: true, - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_failed": int64(1545544537000000000), "power_restored": int64(1545544537000000000), }, - }, + goodTime, + ), }, }, { @@ -311,22 +310,22 @@ func TestParseXML(t *testing.T) { o1Aabc `), wantAccErr: true, - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_failed": int64(1545544537000000000), "power_restored": int64(1545544537000000000), }, - }, + goodTime, + ), }, }, { @@ -339,22 +338,22 @@ func TestParseXML(t *testing.T) { p1abc `), wantAccErr: true, - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_failed": int64(1545544537000000000), "power_restored": int64(1545544537000000000), }, - }, + goodTime, + ), }, }, } @@ -364,32 +363,16 @@ func TestParseXML(t *testing.T) { t.Run(test.name, func(t *testing.T) { var acc testutil.Accumulator err := n.parseXML(&acc, test.xmlResponse) - if (err != nil) != test.wantErr { - t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) - } if test.wantErr { + require.Error(t, err, "expected error but got ") return } - if len(acc.Errors) > 0 != test.wantAccErr { - t.Errorf("Accumulator errors. got=%v, want=none", acc.Errors) - } - if len(acc.Metrics) != len(test.wantMetrics) { - t.Fatalf("Invalid number of metrics received. got=%d, want=%d", len(acc.Metrics), len(test.wantMetrics)) - } - for i, m := range acc.Metrics { - if m.Measurement != test.wantMetrics[i].Measurement { - t.Errorf("Metric measurement mismatch at position %d:\ngot=\n%s\nWant=\n%s", i, m.Measurement, test.wantMetrics[i].Measurement) - } - if !reflect.DeepEqual(m.Tags, test.wantMetrics[i].Tags) { - t.Errorf("Metric tags mismatch at position %d:\ngot=\n%v\nwant=\n%v", i, m.Tags, test.wantMetrics[i].Tags) - } - if !reflect.DeepEqual(m.Fields, test.wantMetrics[i].Fields) { - t.Errorf("Metric fields mismatch at position %d:\ngot=\n%#v\nwant=:\n%#v", i, m.Fields, test.wantMetrics[i].Fields) - } - if !m.Time.Equal(test.wantMetrics[i].Time) { - t.Errorf("Metric time mismatch at position %d:\ngot=\n%s\nwant=\n%s", i, m.Time, test.wantMetrics[i].Time) - } - } + // No error case + require.NoErrorf(t, err, "expected no error but got: %v", err) + require.Equalf(t, len(acc.Errors) > 0, test.wantAccErr, + "Accumulator errors. got=%v, want=%t", acc.Errors, test.wantAccErr) + + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), test.wantMetrics) }) } } @@ -423,7 +406,8 @@ func TestSendRequest(t *testing.T) { h := http.HandlerFunc(func( w http.ResponseWriter, r *http.Request) { w.WriteHeader(test.statusCode) - w.Write([]byte("data")) + _, err := w.Write([]byte("data")) + require.NoError(t, err) }) c, destroy := fakeHTTPClient(h) defer destroy() @@ -431,16 +415,14 @@ func TestSendRequest(t *testing.T) { httpClient: c, } resp, err := n.sendRequest("http://abc") - if (err != nil) != test.wantErr { - t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) - } if test.wantErr { + require.Error(t, err, "expected error but got ") return } - if !bytes.Equal(resp, []byte("data")) { - t.Errorf( - "Response data mismatch. got=%q, want=%q", resp, "data") - } + + // No error case + require.NoErrorf(t, err, "expected no error but got: %v", err) + require.Equalf(t, resp, []byte("data"), "Response data mismatch. got=%q, want=%q", resp, "data") }) } } @@ -479,15 +461,14 @@ func TestParseTime(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() res, err := parseTime(test.input, test.timeZone) - if (err != nil) != test.wantErr { - t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) - } if test.wantErr { + require.Error(t, err, "expected error but got ") return } - if !test.wantTime.Equal(res) { - t.Errorf("err mismatch. got=%s, want=%s", res, test.wantTime) - } + + // No error case + require.NoErrorf(t, err, "expected no error but got: %v", err) + require.Truef(t, test.wantTime.Equal(res), "time mismatch. got=%q, want=%q", res, test.wantTime) }) } } @@ -523,27 +504,11 @@ func TestFindProbe(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() index := findProbe(test.probeName, fakeProbes) - if index != test.wantIndex { - t.Errorf("probe index mismatch; got=%d, want %d", index, test.wantIndex) - } + require.Equalf(t, index, test.wantIndex, "probe index mismatch; got=%d, want %d", index, test.wantIndex) }) } } -func TestDescription(t *testing.T) { - n := &NeptuneApex{} - if n.Description() == "" { - t.Errorf("Empty description") - } -} - -func TestSampleConfig(t *testing.T) { - n := &NeptuneApex{} - if n.SampleConfig() == "" { - t.Errorf("Empty sample config") - } -} - // This fakeHttpClient creates a server and binds a client to it. // That way, it is possible to control the http // output from within the test without changes to the main code. diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index 0b092c36d1d73..cb0e008f3d7c0 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -73,10 +73,10 @@ func (*NetResponse) SampleConfig() string { // TCPGather will execute if there are TCP tests defined in the configuration. // It will return a map[string]interface{} for fields and a map[string]string for tags -func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]interface{}) { +func (n *NetResponse) TCPGather() (map[string]string, map[string]interface{}, error) { // Prepare returns - tags = make(map[string]string) - fields = make(map[string]interface{}) + tags := make(map[string]string) + fields := make(map[string]interface{}) // Start Timer start := time.Now() // Connecting @@ -90,20 +90,24 @@ func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]int } else { setResult(ConnectionFailed, fields, tags, n.Expect) } - return tags, fields + return tags, fields, nil } defer conn.Close() // Send string if needed if n.Send != "" { msg := []byte(n.Send) - conn.Write(msg) + if _, gerr := conn.Write(msg); gerr != nil { + return nil, nil, gerr + } // Stop timer responseTime = time.Since(start).Seconds() } // Read string if needed if n.Expect != "" { // Set read timeout - conn.SetReadDeadline(time.Now().Add(n.ReadTimeout.Duration)) + if gerr := conn.SetReadDeadline(time.Now().Add(n.ReadTimeout.Duration)); gerr != nil { + return nil, nil, gerr + } // Prepare reader reader := bufio.NewReader(conn) tp := textproto.NewReader(reader) @@ -128,15 +132,15 @@ func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]int setResult(Success, fields, tags, n.Expect) } fields["response_time"] = responseTime - return tags, fields + return tags, fields, nil } // UDPGather will execute if there are UDP tests defined in the configuration. // It will return a map[string]interface{} for fields and a map[string]string for tags -func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]interface{}) { +func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, error) { // Prepare returns - tags = make(map[string]string) - fields = make(map[string]interface{}) + tags := make(map[string]string) + fields := make(map[string]interface{}) // Start Timer start := time.Now() // Resolving @@ -144,22 +148,30 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int // Handle error if err != nil { setResult(ConnectionFailed, fields, tags, n.Expect) - return tags, fields + // Error encoded in result + //nolint:nilerr + return tags, fields, nil } // Connecting conn, err := net.DialUDP("udp", nil, udpAddr) // Handle error if err != nil { setResult(ConnectionFailed, fields, tags, n.Expect) - return tags, fields + // Error encoded in result + //nolint:nilerr + return tags, fields, nil } defer conn.Close() // Send string msg := []byte(n.Send) - conn.Write(msg) + if _, gerr := conn.Write(msg); gerr != nil { + return nil, nil, gerr + } // Read string // Set read timeout - conn.SetReadDeadline(time.Now().Add(n.ReadTimeout.Duration)) + if gerr := conn.SetReadDeadline(time.Now().Add(n.ReadTimeout.Duration)); gerr != nil { + return nil, nil, gerr + } // Read buf := make([]byte, 1024) _, _, err = conn.ReadFromUDP(buf) @@ -168,7 +180,9 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int // Handle error if err != nil { setResult(ReadFailed, fields, tags, n.Expect) - return tags, fields + // Error encoded in result + //nolint:nilerr + return tags, fields, nil } // Looking for string in answer @@ -182,7 +196,7 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int fields["response_time"] = responseTime - return tags, fields + return tags, fields, nil } // Gather is called by telegraf when the plugin is executed on its interval. @@ -220,10 +234,16 @@ func (n *NetResponse) Gather(acc telegraf.Accumulator) error { var returnTags map[string]string // Gather data if n.Protocol == "tcp" { - returnTags, fields = n.TCPGather() + returnTags, fields, err = n.TCPGather() + if err != nil { + return err + } tags["protocol"] = "tcp" } else if n.Protocol == "udp" { - returnTags, fields = n.UDPGather() + returnTags, fields, err = n.UDPGather() + if err != nil { + return err + } tags["protocol"] = "udp" } else { return errors.New("bad protocol") diff --git a/plugins/inputs/net_response/net_response_test.go b/plugins/inputs/net_response/net_response_test.go index 3bb78b35121a3..48e3d80dc23ef 100644 --- a/plugins/inputs/net_response/net_response_test.go +++ b/plugins/inputs/net_response/net_response_test.go @@ -9,24 +9,19 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestSample(t *testing.T) { c := &NetResponse{} output := c.SampleConfig() - if output != sampleConfig { - t.Error("Sample config doesn't match") - } + require.Equal(t, output, sampleConfig, "Sample config doesn't match") } func TestDescription(t *testing.T) { c := &NetResponse{} output := c.Description() - if output != description { - t.Error("Description output is not correct") - } + require.Equal(t, output, description, "Description output is not correct") } func TestBadProtocol(t *testing.T) { var acc testutil.Accumulator @@ -36,9 +31,9 @@ func TestBadProtocol(t *testing.T) { Address: ":9999", } // Error - err1 := c.Gather(&acc) - require.Error(t, err1) - assert.Equal(t, "bad protocol", err1.Error()) + err := c.Gather(&acc) + require.Error(t, err) + require.Equal(t, "bad protocol", err.Error()) } func TestNoPort(t *testing.T) { @@ -47,9 +42,9 @@ func TestNoPort(t *testing.T) { Protocol: "tcp", Address: ":", } - err1 := c.Gather(&acc) - require.Error(t, err1) - assert.Equal(t, "bad port", err1.Error()) + err := c.Gather(&acc) + require.Error(t, err) + require.Equal(t, "bad port", err.Error()) } func TestAddressOnly(t *testing.T) { @@ -58,9 +53,9 @@ func TestAddressOnly(t *testing.T) { Protocol: "tcp", Address: "127.0.0.1", } - err1 := c.Gather(&acc) - require.Error(t, err1) - assert.Equal(t, "address 127.0.0.1: missing port in address", err1.Error()) + err := c.Gather(&acc) + require.Error(t, err) + require.Equal(t, "address 127.0.0.1: missing port in address", err.Error()) } func TestSendExpectStrings(t *testing.T) { @@ -77,12 +72,12 @@ func TestSendExpectStrings(t *testing.T) { Send: "toast", Expect: "", } - err1 := tc.Gather(&acc) - require.Error(t, err1) - assert.Equal(t, "send string cannot be empty", err1.Error()) - err2 := uc.Gather(&acc) - require.Error(t, err2) - assert.Equal(t, "expected string cannot be empty", err2.Error()) + err := tc.Gather(&acc) + require.Error(t, err) + require.Equal(t, "send string cannot be empty", err.Error()) + err = uc.Gather(&acc) + require.Error(t, err) + require.Equal(t, "expected string cannot be empty", err.Error()) } func TestTCPError(t *testing.T) { @@ -93,9 +88,8 @@ func TestTCPError(t *testing.T) { Address: ":9999", Timeout: internal.Duration{Duration: time.Second * 30}, } - // Error - err1 := c.Gather(&acc) - require.NoError(t, err1) + // Gather + require.NoError(t, c.Gather(&acc)) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -125,17 +119,17 @@ func TestTCPOK1(t *testing.T) { } // Start TCP server wg.Add(1) - go TCPServer(&wg) - wg.Wait() - // Connect + go TCPServer(t, &wg) + wg.Wait() // Wait for the server to spin up wg.Add(1) - err1 := c.Gather(&acc) - wg.Wait() + // Connect + require.NoError(t, c.Gather(&acc)) + acc.Wait(1) + // Override response time for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } - require.NoError(t, err1) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -169,17 +163,18 @@ func TestTCPOK2(t *testing.T) { } // Start TCP server wg.Add(1) - go TCPServer(&wg) + go TCPServer(t, &wg) wg.Wait() - // Connect wg.Add(1) - err1 := c.Gather(&acc) - wg.Wait() + + // Connect + require.NoError(t, c.Gather(&acc)) + acc.Wait(1) + // Override response time for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } - require.NoError(t, err1) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -209,13 +204,14 @@ func TestUDPError(t *testing.T) { Protocol: "udp", } // Gather - err1 := c.Gather(&acc) + require.NoError(t, c.Gather(&acc)) + acc.Wait(1) + // Override response time for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } // Error - require.NoError(t, err1) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -247,17 +243,18 @@ func TestUDPOK1(t *testing.T) { } // Start UDP server wg.Add(1) - go UDPServer(&wg) + go UDPServer(t, &wg) wg.Wait() - // Connect wg.Add(1) - err1 := c.Gather(&acc) - wg.Wait() + + // Connect + require.NoError(t, c.Gather(&acc)) + acc.Wait(1) + // Override response time for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } - require.NoError(t, err1) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -277,26 +274,29 @@ func TestUDPOK1(t *testing.T) { wg.Wait() } -func UDPServer(wg *sync.WaitGroup) { +func UDPServer(t *testing.T, wg *sync.WaitGroup) { + defer wg.Done() udpAddr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:2004") conn, _ := net.ListenUDP("udp", udpAddr) wg.Done() buf := make([]byte, 1024) _, remoteaddr, _ := conn.ReadFromUDP(buf) - conn.WriteToUDP(buf, remoteaddr) - conn.Close() - wg.Done() + _, err := conn.WriteToUDP(buf, remoteaddr) + require.NoError(t, err) + require.NoError(t, conn.Close()) } -func TCPServer(wg *sync.WaitGroup) { +func TCPServer(t *testing.T, wg *sync.WaitGroup) { + defer wg.Done() tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2004") tcpServer, _ := net.ListenTCP("tcp", tcpAddr) wg.Done() conn, _ := tcpServer.AcceptTCP() buf := make([]byte, 1024) - conn.Read(buf) - conn.Write(buf) - conn.CloseWrite() - tcpServer.Close() - wg.Done() + _, err := conn.Read(buf) + require.NoError(t, err) + _, err = conn.Write(buf) + require.NoError(t, err) + require.NoError(t, conn.CloseWrite()) + require.NoError(t, tcpServer.Close()) } diff --git a/plugins/inputs/nfsclient/nfsclient.go b/plugins/inputs/nfsclient/nfsclient.go index 07a8382d9137f..6b621e4bd2265 100644 --- a/plugins/inputs/nfsclient/nfsclient.go +++ b/plugins/inputs/nfsclient/nfsclient.go @@ -326,8 +326,7 @@ func (n *NFSClient) Gather(acc telegraf.Accumulator) error { defer file.Close() scanner := bufio.NewScanner(file) - err = n.processText(scanner, acc) - if err != nil { + if err := n.processText(scanner, acc); err != nil { return err } diff --git a/plugins/inputs/nginx/nginx_test.go b/plugins/inputs/nginx/nginx_test.go index 8d9f047f50c8c..db30304dcc15a 100644 --- a/plugins/inputs/nginx/nginx_test.go +++ b/plugins/inputs/nginx/nginx_test.go @@ -46,10 +46,11 @@ func TestNginxGeneratesMetrics(t *testing.T) { } else if r.URL.Path == "/tengine_status" { rsp = tengineSampleResponse } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -64,11 +65,8 @@ func TestNginxGeneratesMetrics(t *testing.T) { var accNginx testutil.Accumulator var accTengine testutil.Accumulator - errNginx := accNginx.GatherError(n.Gather) - errTengine := accTengine.GatherError(nt.Gather) - - require.NoError(t, errNginx) - require.NoError(t, errTengine) + require.NoError(t, accNginx.GatherError(n.Gather)) + require.NoError(t, accTengine.GatherError(nt.Gather)) fieldsNginx := map[string]interface{}{ "active": uint64(585), @@ -91,9 +89,7 @@ func TestNginxGeneratesMetrics(t *testing.T) { } addr, err := url.Parse(ts.URL) - if err != nil { - panic(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) if err != nil { diff --git a/plugins/inputs/nginx_plus/nginx_plus_test.go b/plugins/inputs/nginx_plus/nginx_plus_test.go index caaea7dcb59d4..36fe5a2dce8f6 100644 --- a/plugins/inputs/nginx_plus/nginx_plus_test.go +++ b/plugins/inputs/nginx_plus/nginx_plus_test.go @@ -253,14 +253,13 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - if r.URL.Path == "/status" { - rsp = sampleStatusResponse - w.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/status", "Cannot handle request") + + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -271,13 +270,10 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator errNginx := n.Gather(&acc) - require.NoError(t, errNginx) addr, err := url.Parse(ts.URL) - if err != nil { - panic(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) if err != nil { diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go index 9ae9e43c29f7a..8f28772537288 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -1212,9 +1212,7 @@ func TestUnavailableEndpoints(t *testing.T) { } addr, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var acc testutil.Accumulator n.gatherMetrics(addr, &acc) @@ -1232,9 +1230,7 @@ func TestServerError(t *testing.T) { } addr, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var acc testutil.Accumulator n.gatherMetrics(addr, &acc) @@ -1244,7 +1240,8 @@ func TestServerError(t *testing.T) { func TestMalformedJSON(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintln(w, "this is not JSON") + _, err := fmt.Fprintln(w, "this is not JSON") + require.NoError(t, err) })) defer ts.Close() @@ -1253,9 +1250,7 @@ func TestMalformedJSON(t *testing.T) { } addr, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var acc testutil.Accumulator n.gatherMetrics(addr, &acc) @@ -1273,9 +1268,7 @@ func TestUnknownContentType(t *testing.T) { } addr, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var acc testutil.Accumulator n.gatherMetrics(addr, &acc) @@ -1285,9 +1278,7 @@ func TestUnknownContentType(t *testing.T) { func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) { t.Helper() addr, err := url.Parse(fmt.Sprintf("%s/api", ts.URL)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) @@ -1307,16 +1298,11 @@ func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) { func prepareEndpoint(t *testing.T, path string, payload string) (*httptest.Server, *NginxPlusAPI) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var rsp string - - if r.URL.Path == fmt.Sprintf("/api/%d/%s", defaultAPIVersion, path) { - rsp = payload - w.Header()["Content-Type"] = []string{"application/json"} - } else { - t.Errorf("unknown request path") - } + require.Equal(t, r.URL.Path, fmt.Sprintf("/api/%d/%s", defaultAPIVersion, path), "unknown request path") - fmt.Fprintln(w, rsp) + w.Header()["Content-Type"] = []string{"application/json"} + _, err := fmt.Fprintln(w, payload) + require.NoError(t, err) })) n := &NginxPlusAPI{ @@ -1325,9 +1311,8 @@ func prepareEndpoint(t *testing.T, path string, payload string) (*httptest.Serve } client, err := n.createHTTPClient() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + n.client = client return ts, n diff --git a/plugins/inputs/nginx_sts/nginx_sts_test.go b/plugins/inputs/nginx_sts/nginx_sts_test.go index 18081eadf7f43..9ebb5f91ad9d8 100644 --- a/plugins/inputs/nginx_sts/nginx_sts_test.go +++ b/plugins/inputs/nginx_sts/nginx_sts_test.go @@ -166,14 +166,13 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - if r.URL.Path == "/status" { - rsp = sampleStatusResponse - w.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/status", "Cannot handle request") - fmt.Fprintln(w, rsp) + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -184,13 +183,10 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator err := n.Gather(&acc) - require.NoError(t, err) addr, err := url.Parse(ts.URL) - if err != nil { - panic(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) if err != nil { diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go index df6b08b09fb12..353619b362228 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go @@ -45,14 +45,13 @@ func TestNginxUpstreamCheckData(test *testing.T) { testServer := httptest.NewServer(http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) { var response string - if request.URL.Path == "/status" { - response = sampleStatusResponse - responseWriter.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } - - fmt.Fprintln(responseWriter, response) + require.Equal(test, request.URL.Path, "/status", "Cannot handle request") + + response = sampleStatusResponse + responseWriter.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(responseWriter, response) + require.NoError(test, err) })) defer testServer.Close() @@ -103,14 +102,13 @@ func TestNginxUpstreamCheckRequest(test *testing.T) { testServer := httptest.NewServer(http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) { var response string - if request.URL.Path == "/status" { - response = sampleStatusResponse - responseWriter.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } + require.Equal(test, request.URL.Path, "/status", "Cannot handle request") + + response = sampleStatusResponse + responseWriter.Header()["Content-Type"] = []string{"application/json"} - fmt.Fprintln(responseWriter, response) + _, err := fmt.Fprintln(responseWriter, response) + require.NoError(test, err) require.Equal(test, request.Method, "POST") require.Equal(test, request.Header.Get("X-Test"), "test-value") diff --git a/plugins/inputs/nginx_vts/nginx_vts_test.go b/plugins/inputs/nginx_vts/nginx_vts_test.go index 085fc38433dff..589bc634f9358 100644 --- a/plugins/inputs/nginx_vts/nginx_vts_test.go +++ b/plugins/inputs/nginx_vts/nginx_vts_test.go @@ -203,14 +203,13 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - if r.URL.Path == "/status" { - rsp = sampleStatusResponse - w.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/status", "Cannot handle request") - fmt.Fprintln(w, rsp) + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -221,13 +220,10 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator err := n.Gather(&acc) - require.NoError(t, err) addr, err := url.Parse(ts.URL) - if err != nil { - panic(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) if err != nil { diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go index 23af13a4c82bc..03ebeaed65382 100644 --- a/plugins/inputs/nsq/nsq_test.go +++ b/plugins/inputs/nsq/nsq_test.go @@ -15,7 +15,8 @@ import ( func TestNSQStatsV1(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, responseV1) + _, err := fmt.Fprintln(w, responseV1) + require.NoError(t, err) })) defer ts.Close() @@ -271,7 +272,8 @@ var responseV1 = ` func TestNSQStatsPreV1(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, responsePreV1) + _, err := fmt.Fprintln(w, responsePreV1) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index 1b731a07b3fa0..718a2ed3e321c 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -102,7 +102,9 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { ctx, cancel := context.WithCancel(context.Background()) n.cancel = cancel - n.connect() + if err := n.connect(); err != nil { + return err + } n.consumer.SetLogger(&logger{log: n.Log}, nsq.LogLevelInfo) n.consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error { metrics, err := n.parser.Parse(message.Body) @@ -133,9 +135,15 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { })) if len(n.Nsqlookupd) > 0 { - n.consumer.ConnectToNSQLookupds(n.Nsqlookupd) + err := n.consumer.ConnectToNSQLookupds(n.Nsqlookupd) + if err != nil && err != nsq.ErrAlreadyConnected { + return err + } + } + err := n.consumer.ConnectToNSQDs(append(n.Nsqd, n.Server)) + if err != nil && err != nsq.ErrAlreadyConnected { + return err } - n.consumer.ConnectToNSQDs(append(n.Nsqd, n.Server)) n.wg.Add(1) go func() { diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go index bcc1fdf321129..d5086862bbf7e 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go @@ -14,7 +14,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" "github.com/nsqio/go-nsq" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // This test is modeled after the kafka consumer integration test @@ -22,12 +22,15 @@ func TestReadsMetricsFromNSQ(t *testing.T) { msgID := nsq.MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} msg := nsq.NewMessage(msgID, []byte("cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257\n")) + frameMsg, err := frameMessage(msg) + require.NoError(t, err) + script := []instruction{ // SUB {0, nsq.FrameTypeResponse, []byte("OK")}, // IDENTIFY {0, nsq.FrameTypeResponse, []byte("OK")}, - {20 * time.Millisecond, nsq.FrameTypeMessage, frameMessage(msg)}, + {20 * time.Millisecond, nsq.FrameTypeMessage, frameMsg}, // needed to exit test {100 * time.Millisecond, -1, []byte("exit")}, } @@ -48,26 +51,22 @@ func TestReadsMetricsFromNSQ(t *testing.T) { p, _ := parsers.NewInfluxParser() consumer.SetParser(p) var acc testutil.Accumulator - assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") - if err := consumer.Start(&acc); err != nil { - t.Fatal(err.Error()) - } + require.Len(t, acc.Metrics, 0, "There should not be any points") + require.NoError(t, consumer.Start(&acc)) waitForPoint(&acc, t) - if len(acc.Metrics) == 1 { - point := acc.Metrics[0] - assert.Equal(t, "cpu_load_short", point.Measurement) - assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) - assert.Equal(t, map[string]string{ - "host": "server01", - "direction": "in", - "region": "us-west", - }, point.Tags) - assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) - } else { - t.Errorf("No points found in accumulator, expected 1") - } + require.Len(t, acc.Metrics, 1, "No points found in accumulator, expected 1") + + point := acc.Metrics[0] + require.Equal(t, "cpu_load_short", point.Measurement) + require.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) + require.Equal(t, map[string]string{ + "host": "server01", + "direction": "in", + "region": "us-west", + }, point.Tags) + require.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) } // Waits for the metric that was sent to the kafka broker to arrive at the kafka @@ -201,9 +200,14 @@ func (n *mockNSQD) handle(conn net.Conn) { } rdyCount-- } - _, err := conn.Write(framedResponse(inst.frameType, inst.body)) + buf, err := framedResponse(inst.frameType, inst.body) + if err != nil { + log.Print(err.Error()) + goto exit + } + _, err = conn.Write(buf) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } scriptTime = time.After(n.script[idx+1].delay) @@ -212,11 +216,14 @@ func (n *mockNSQD) handle(conn net.Conn) { } exit: + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive n.tcpListener.Close() + //nolint:errcheck,revive conn.Close() } -func framedResponse(frameType int32, data []byte) []byte { +func framedResponse(frameType int32, data []byte) ([]byte, error) { var w bytes.Buffer beBuf := make([]byte, 4) @@ -225,21 +232,21 @@ func framedResponse(frameType int32, data []byte) []byte { binary.BigEndian.PutUint32(beBuf, size) _, err := w.Write(beBuf) if err != nil { - return nil + return nil, err } binary.BigEndian.PutUint32(beBuf, uint32(frameType)) _, err = w.Write(beBuf) if err != nil { - return nil + return nil, err } - w.Write(data) - return w.Bytes() + _, err = w.Write(data) + return w.Bytes(), err } -func frameMessage(m *nsq.Message) []byte { +func frameMessage(m *nsq.Message) ([]byte, error) { var b bytes.Buffer - m.WriteTo(&b) - return b.Bytes() + _, err := m.WriteTo(&b) + return b.Bytes(), err } diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index eacfc3d00a8d9..ac7becbe09e4d 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -328,10 +328,18 @@ func newMP(n *Node) metricParts { var sb strings.Builder for i, key := range keys { if i != 0 { + // Writes to a string-builder will always succeed + //nolint:errcheck,revive sb.WriteString(", ") } + // Writes to a string-builder will always succeed + //nolint:errcheck,revive sb.WriteString(key) + // Writes to a string-builder will always succeed + //nolint:errcheck,revive sb.WriteString("=") + // Writes to a string-builder will always succeed + //nolint:errcheck,revive sb.WriteString(n.metricTags[key]) } x := metricParts{ @@ -397,7 +405,9 @@ func Connect(o *OpcUA) error { o.state = Connecting if o.client != nil { - o.client.CloseSession() + if err := o.client.CloseSession(); err != nil { + return err + } } o.client = opcua.NewClient(o.Endpoint, o.opts...) @@ -515,6 +525,8 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { err := o.getData() if err != nil && o.state == Connected { o.state = Disconnected + // Ignore returned error to not mask the original problem + //nolint:errcheck,revive disconnect(o) return err } diff --git a/plugins/inputs/opcua/opcua_util.go b/plugins/inputs/opcua/opcua_util.go index 2197e8088ab8f..bb7ca56200954 100644 --- a/plugins/inputs/opcua/opcua_util.go +++ b/plugins/inputs/opcua/opcua_util.go @@ -104,10 +104,13 @@ func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.D keyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { - log.Printf("failed to open %s for writing: %s", keyFile, err) - return "", "", nil + return "", "", fmt.Errorf("failed to open %s for writing: %s", keyFile, err) } - if err := pem.Encode(keyOut, pemBlockForKey(priv)); err != nil { + keyBlock, err := pemBlockForKey(priv) + if err != nil { + return "", "", fmt.Errorf("error generating block: %v", err) + } + if err := pem.Encode(keyOut, keyBlock); err != nil { return "", "", fmt.Errorf("failed to write data to %s: %s", keyFile, err) } if err := keyOut.Close(); err != nil { @@ -128,19 +131,18 @@ func publicKey(priv interface{}) interface{} { } } -func pemBlockForKey(priv interface{}) *pem.Block { +func pemBlockForKey(priv interface{}) (*pem.Block, error) { switch k := priv.(type) { case *rsa.PrivateKey: - return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}, nil case *ecdsa.PrivateKey: b, err := x509.MarshalECPrivateKey(k) if err != nil { - fmt.Fprintf(os.Stderr, "Unable to marshal ECDSA private key: %v", err) - os.Exit(2) + return nil, fmt.Errorf("unable to marshal ECDSA private key: %v", err) } - return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}, nil default: - return nil + return nil, nil } } diff --git a/plugins/inputs/openweathermap/openweathermap_test.go b/plugins/inputs/openweathermap/openweathermap_test.go index d513f6273d07f..0e86646a27594 100644 --- a/plugins/inputs/openweathermap/openweathermap_test.go +++ b/plugins/inputs/openweathermap/openweathermap_test.go @@ -408,10 +408,11 @@ func TestForecastGeneratesMetrics(t *testing.T) { } else if r.URL.Path == "/data/2.5/group" { rsp = sampleNoContent } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -422,12 +423,11 @@ func TestForecastGeneratesMetrics(t *testing.T) { Fetch: []string{"weather", "forecast"}, Units: "metric", } - n.Init() + require.NoError(t, n.Init()) var acc testutil.Accumulator - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) expected := []telegraf.Metric{ testutil.MustMetric( @@ -492,10 +492,11 @@ func TestWeatherGeneratesMetrics(t *testing.T) { } else if r.URL.Path == "/data/2.5/forecast" { rsp = sampleNoContent } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -506,12 +507,11 @@ func TestWeatherGeneratesMetrics(t *testing.T) { Fetch: []string{"weather"}, Units: "metric", } - n.Init() + require.NoError(t, n.Init()) var acc testutil.Accumulator - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) expected := []telegraf.Metric{ testutil.MustMetric( @@ -552,10 +552,11 @@ func TestRainMetrics(t *testing.T) { rsp = rainWeatherResponse w.Header()["Content-Type"] = []string{"application/json"} } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -566,12 +567,11 @@ func TestRainMetrics(t *testing.T) { Fetch: []string{"weather"}, Units: "metric", } - n.Init() + require.NoError(t, n.Init()) var acc testutil.Accumulator - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) expected := []telegraf.Metric{ // City with 1h rain value @@ -695,10 +695,11 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { } else if r.URL.Path == "/data/2.5/forecast" { rsp = sampleNoContent } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -709,12 +710,11 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { Fetch: []string{"weather"}, Units: "metric", } - n.Init() + require.NoError(t, n.Init()) var acc testutil.Accumulator - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) expected := []telegraf.Metric{ testutil.MustMetric( @@ -804,27 +804,27 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { func TestFormatURL(t *testing.T) { n := &OpenWeatherMap{ AppID: "appid", - Units: "units", - Lang: "lang", + Units: "metric", + Lang: "de", BaseURL: "http://foo.com", } - n.Init() + require.NoError(t, n.Init()) require.Equal(t, - "http://foo.com/data/2.5/forecast?APPID=appid&id=12345&lang=lang&units=units", + "http://foo.com/data/2.5/forecast?APPID=appid&id=12345&lang=de&units=metric", n.formatURL("/data/2.5/forecast", "12345")) } func TestDefaultUnits(t *testing.T) { n := &OpenWeatherMap{} - n.Init() + require.NoError(t, n.Init()) require.Equal(t, "metric", n.Units) } func TestDefaultLang(t *testing.T) { n := &OpenWeatherMap{} - n.Init() + require.NoError(t, n.Init()) require.Equal(t, "en", n.Lang) } diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index fc03f235b8082..dbee336ba1040 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -func fakePassengerStatus(stat string) string { +func fakePassengerStatus(stat string) (string, error) { var fileExtension, content string if runtime.GOOS == "windows" { fileExtension = ".bat" @@ -28,12 +28,16 @@ func fakePassengerStatus(stat string) string { } tempFilePath := filepath.Join(os.TempDir(), "passenger-status"+fileExtension) - ioutil.WriteFile(tempFilePath, []byte(content), 0700) + if err := ioutil.WriteFile(tempFilePath, []byte(content), 0700); err != nil { + return "", err + } - return tempFilePath + return tempFilePath, nil } func teardown(tempFilePath string) { + // Ignore the returned error as we want to remove the file and ignore missing file errors + //nolint:errcheck,revive os.Remove(tempFilePath) } @@ -50,7 +54,8 @@ func Test_Invalid_Passenger_Status_Cli(t *testing.T) { } func Test_Invalid_Xml(t *testing.T) { - tempFilePath := fakePassengerStatus("invalid xml") + tempFilePath, err := fakePassengerStatus("invalid xml") + require.NoError(t, err) defer teardown(tempFilePath) r := &passenger{ @@ -59,27 +64,29 @@ func Test_Invalid_Xml(t *testing.T) { var acc testutil.Accumulator - err := r.Gather(&acc) + err = r.Gather(&acc) require.Error(t, err) assert.Equal(t, "cannot parse input with error: EOF", err.Error()) } // We test this by ensure that the error message match the path of default cli func Test_Default_Config_Load_Default_Command(t *testing.T) { - tempFilePath := fakePassengerStatus("invalid xml") + tempFilePath, err := fakePassengerStatus("invalid xml") + require.NoError(t, err) defer teardown(tempFilePath) r := &passenger{} var acc testutil.Accumulator - err := r.Gather(&acc) + err = r.Gather(&acc) require.Error(t, err) assert.Contains(t, err.Error(), "exec: \"passenger-status\": executable file not found in ") } func TestPassengerGenerateMetric(t *testing.T) { - tempFilePath := fakePassengerStatus(sampleStat) + tempFilePath, err := fakePassengerStatus(sampleStat) + require.NoError(t, err) defer teardown(tempFilePath) //Now we tested again above server, with our authentication data @@ -89,8 +96,7 @@ func TestPassengerGenerateMetric(t *testing.T) { var acc testutil.Accumulator - err := r.Gather(&acc) - require.NoError(t, err) + require.NoError(t, r.Gather(&acc)) tags := map[string]string{ "passenger_version": "5.0.17", diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index 17cdaea6966d3..24a7f1e8fb7d2 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -170,9 +170,13 @@ func (p *PgBouncer) accRow(row scanner, columns []string) (map[string]string, } if columnMap["database"] != nil { // extract the database name from the column map - dbname.WriteString((*columnMap["database"]).(string)) + if _, err := dbname.WriteString((*columnMap["database"]).(string)); err != nil { + return nil, nil, err + } } else { - dbname.WriteString("postgres") + if _, err := dbname.WriteString("postgres"); err != nil { + return nil, nil, err + } } var tagAddress string diff --git a/plugins/inputs/phpfpm/child.go b/plugins/inputs/phpfpm/child.go index a90cf093bd8e6..9ac7e60715856 100644 --- a/plugins/inputs/phpfpm/child.go +++ b/plugins/inputs/phpfpm/child.go @@ -193,8 +193,7 @@ func (c *child) handleRecord(rec *record) error { return err } if br.role != roleResponder { - c.conn.writeEndRequest(rec.h.ID, 0, statusUnknownRole) - return nil + return c.conn.writeEndRequest(rec.h.ID, 0, statusUnknownRole) } req = newRequest(rec.h.ID, br.flags) c.mu.Lock() @@ -226,15 +225,18 @@ func (c *child) handleRecord(rec *record) error { if len(content) > 0 { // TODO(eds): This blocks until the handler reads from the pipe. // If the handler takes a long time, it might be a problem. - req.pw.Write(content) + if _, err := req.pw.Write(content); err != nil { + return err + } } else if req.pw != nil { - req.pw.Close() + if err := req.pw.Close(); err != nil { + return err + } } return nil case typeGetValues: values := map[string]string{"FCGI_MPXS_CONNS": "1"} - c.conn.writePairs(typeGetValuesResult, 0, values) - return nil + return c.conn.writePairs(typeGetValuesResult, 0, values) case typeData: // If the filter role is implemented, read the data stream here. return nil @@ -242,9 +244,13 @@ func (c *child) handleRecord(rec *record) error { c.mu.Lock() delete(c.requests, rec.h.ID) c.mu.Unlock() - c.conn.writeEndRequest(rec.h.ID, 0, statusRequestComplete) + if err := c.conn.writeEndRequest(rec.h.ID, 0, statusRequestComplete); err != nil { + return err + } if req.pw != nil { - req.pw.CloseWithError(ErrRequestAborted) + if err := req.pw.CloseWithError(ErrRequestAborted); err != nil { + return err + } } if !req.keepConn { // connection will close upon return @@ -254,8 +260,7 @@ func (c *child) handleRecord(rec *record) error { default: b := make([]byte, 8) b[0] = byte(rec.h.Type) - c.conn.writeRecord(typeUnknownType, 0, b) - return nil + return c.conn.writeRecord(typeUnknownType, 0, b) } } @@ -265,16 +270,22 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) { if err != nil { // there was an error reading the request r.WriteHeader(http.StatusInternalServerError) - c.conn.writeRecord(typeStderr, req.reqID, []byte(err.Error())) + if err := c.conn.writeRecord(typeStderr, req.reqID, []byte(err.Error())); err != nil { + return + } } else { httpReq.Body = body c.handler.ServeHTTP(r, httpReq) } + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive r.Close() c.mu.Lock() delete(c.requests, req.reqID) c.mu.Unlock() - c.conn.writeEndRequest(req.reqID, 0, statusRequestComplete) + if err := c.conn.writeEndRequest(req.reqID, 0, statusRequestComplete); err != nil { + return + } // Consume the entire body, so the host isn't still writing to // us when we close the socket below in the !keepConn case, @@ -283,10 +294,14 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) { // some sort of abort request to the host, so the host // can properly cut off the client sending all the data. // For now just bound it a little and + //nolint:errcheck,revive io.CopyN(ioutil.Discard, body, 100<<20) + //nolint:errcheck,revive body.Close() if !req.keepConn { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive c.conn.Close() } } @@ -298,6 +313,8 @@ func (c *child) cleanUp() { if req.pw != nil { // race with call to Close in c.serveRequest doesn't matter because // Pipe(Reader|Writer).Close are idempotent + // Ignore the returned error as we continue in the loop anyway + //nolint:errcheck,revive req.pw.CloseWithError(ErrConnClosed) } } diff --git a/plugins/inputs/phpfpm/fcgi.go b/plugins/inputs/phpfpm/fcgi.go index b3ee3f475248b..45248329efda6 100644 --- a/plugins/inputs/phpfpm/fcgi.go +++ b/plugins/inputs/phpfpm/fcgi.go @@ -186,8 +186,7 @@ func (c *conn) writePairs(recType recType, reqID uint16, pairs map[string]string return err } } - w.Close() - return nil + return w.Close() } func readSize(s []byte) (uint32, int) { @@ -232,6 +231,8 @@ type bufWriter struct { func (w *bufWriter) Close() error { if err := w.Writer.Flush(); err != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive w.closer.Close() return err } diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 5f0be8999e81c..c3a3f29f570f5 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -26,6 +26,8 @@ type statServer struct{} func (s statServer) ServeHTTP(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "text/plain") w.Header().Set("Content-Length", fmt.Sprint(len(outputSample))) + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive fmt.Fprint(w, outputSample) } @@ -34,7 +36,8 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { require.Equal(t, "ok", r.URL.Query().Get("test")) w.Header().Set("Content-Type", "text/plain") w.Header().Set("Content-Length", fmt.Sprint(len(outputSample))) - fmt.Fprint(w, outputSample) + _, err := fmt.Fprint(w, outputSample) + require.NoError(t, err) })) defer ts.Close() @@ -43,13 +46,11 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { Urls: []string{url}, } - err := r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ "pool": "www", @@ -76,12 +77,11 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { // Let OS find an available port tcp, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal("Cannot initialize test server") - } + require.NoError(t, err, "Cannot initialize test server") defer tcp.Close() s := statServer{} + //nolint:errcheck,revive go fcgi.Serve(tcp, s) //Now we tested again above server @@ -89,12 +89,10 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"}, } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ "pool": "www", @@ -123,27 +121,24 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { // removing of socket fail when system restart /tmp is clear so // we don't have junk files around var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)) - if err != nil { - t.Fatal("Cannot initialize server on port ") - } + require.NoError(t, err, "Cannot initialize server on port ") defer tcp.Close() s := statServer{} + //nolint:errcheck,revive go fcgi.Serve(tcp, s) r := &phpfpm{ Urls: []string{tcp.Addr().String()}, } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ "pool": "www", @@ -172,40 +167,35 @@ func TestPhpFpmGeneratesMetrics_From_Multiple_Sockets_With_Glob(t *testing.T) { // removing of socket fail when system restart /tmp is clear so // we don't have junk files around var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) socket1 := fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber) tcp1, err := net.Listen("unix", socket1) - if err != nil { - t.Fatal("Cannot initialize server on port ") - } + require.NoError(t, err, "Cannot initialize server on port ") defer tcp1.Close() - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) socket2 := fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber) tcp2, err := net.Listen("unix", socket2) - if err != nil { - t.Fatal("Cannot initialize server on port ") - } + require.NoError(t, err, "Cannot initialize server on port ") defer tcp2.Close() s := statServer{} + //nolint:errcheck,revive go fcgi.Serve(tcp1, s) + //nolint:errcheck,revive go fcgi.Serve(tcp2, s) r := &phpfpm{ Urls: []string{"/tmp/test-fpm[\\-0-9]*.sock"}, } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc1, acc2 testutil.Accumulator - err = acc1.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc1.GatherError(r.Gather)) - err = acc2.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc2.GatherError(r.Gather)) tags1 := map[string]string{ "pool": "www", @@ -240,27 +230,24 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { // removing of socket fail we won't have junk files around. Cuz when system // restart, it clears out /tmp var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)) - if err != nil { - t.Fatal("Cannot initialize server on port ") - } + require.NoError(t, err, "Cannot initialize server on port ") defer tcp.Close() s := statServer{} + //nolint:errcheck,revive go fcgi.Serve(tcp, s) r := &phpfpm{ Urls: []string{tcp.Addr().String() + ":custom-status-path"}, } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ "pool": "www", @@ -289,12 +276,11 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { r := &phpfpm{} - err := r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) + err := acc.GatherError(r.Gather) require.Error(t, err) assert.Contains(t, err.Error(), "127.0.0.1/status") } @@ -304,12 +290,11 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t Urls: []string{"http://aninvalidone"}, } - err := r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) + err := acc.GatherError(r.Gather) require.Error(t, err) assert.Contains(t, err.Error(), `unable to connect to phpfpm status page 'http://aninvalidone'`) assert.Contains(t, err.Error(), `lookup aninvalidone`) @@ -320,12 +305,11 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testi Urls: []string{"/tmp/invalid.sock"}, } - err := r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) + err := acc.GatherError(r.Gather) require.Error(t, err) assert.Equal(t, `socket doesn't exist "/tmp/invalid.sock"`, err.Error()) } diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 6d06988dbd6a0..895b9c1fdf5b9 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -241,7 +241,7 @@ func TestPingGather(t *testing.T) { pingHost: mockHostPinger, } - acc.GatherError(p.Gather) + require.NoError(t, acc.GatherError(p.Gather)) tags := map[string]string{"url": "localhost"} fields := map[string]interface{}{ "packets_transmitted": 5, @@ -270,8 +270,8 @@ func TestPingGatherIntegration(t *testing.T) { p.Log = testutil.Logger{} require.True(t, ok) p.Urls = []string{"localhost", "influxdata.com"} - err := acc.GatherError(p.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(p.Gather)) + require.Equal(t, 0, acc.Metrics[0].Fields["result_code"]) require.Equal(t, 0, acc.Metrics[1].Fields["result_code"]) } @@ -299,7 +299,7 @@ func TestLossyPingGather(t *testing.T) { pingHost: mockLossyHostPinger, } - acc.GatherError(p.Gather) + require.NoError(t, acc.GatherError(p.Gather)) tags := map[string]string{"url": "www.google.com"} fields := map[string]interface{}{ "packets_transmitted": 5, @@ -337,7 +337,7 @@ func TestBadPingGather(t *testing.T) { pingHost: mockErrorHostPinger, } - acc.GatherError(p.Gather) + require.NoError(t, acc.GatherError(p.Gather)) tags := map[string]string{"url": "www.amazon.com"} fields := map[string]interface{}{ "packets_transmitted": 2, @@ -360,7 +360,9 @@ func TestFatalPingGather(t *testing.T) { pingHost: mockFatalHostPinger, } - acc.GatherError(p.Gather) + err := acc.GatherError(p.Gather) + require.Error(t, err) + require.EqualValues(t, err.Error(), "host www.amazon.com: ping: -i interval too short: Operation not permitted, So very bad") assert.False(t, acc.HasMeasurement("packets_transmitted"), "Fatal ping should not have packet measurements") assert.False(t, acc.HasMeasurement("packets_received"), @@ -394,7 +396,7 @@ func TestErrorWithHostNamePingGather(t *testing.T) { return param.out, errors.New("So very bad") }, } - acc.GatherError(p.Gather) + require.Error(t, acc.GatherError(p.Gather)) assert.True(t, len(acc.Errors) > 0) assert.Contains(t, acc.Errors, param.error) } @@ -410,7 +412,9 @@ func TestPingBinary(t *testing.T) { return "", nil }, } - acc.GatherError(p.Gather) + err := acc.GatherError(p.Gather) + require.Error(t, err) + require.EqualValues(t, err.Error(), "Fatal error processing ping output: www.google.com") } // Test that Gather function works using native ping @@ -462,8 +466,7 @@ func TestPingGatherNative(t *testing.T) { for _, tc := range tests { var acc testutil.Accumulator - err := tc.P.Init() - require.NoError(t, err) + require.NoError(t, tc.P.Init()) require.NoError(t, acc.GatherError(tc.P.Gather)) assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) @@ -501,8 +504,8 @@ func TestNoPacketsSent(t *testing.T) { } var testAcc testutil.Accumulator - err := p.Init() - require.NoError(t, err) + require.NoError(t, p.Init()) + p.pingToURLNative("localhost", &testAcc) require.Zero(t, testAcc.Errors) require.True(t, testAcc.HasField("ping", "result_code")) @@ -523,8 +526,8 @@ func TestDNSLookupError(t *testing.T) { } var testAcc testutil.Accumulator - err := p.Init() - require.NoError(t, err) + require.NoError(t, p.Init()) + p.pingToURLNative("localhost", &testAcc) require.Zero(t, testAcc.Errors) require.True(t, testAcc.HasField("ping", "result_code")) diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 231e864c1e2d0..3ce8963e90c3e 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -156,13 +156,19 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator, columns []str if columnMap["datname"] != nil { // extract the database name from the column map if dbNameStr, ok := (*columnMap["datname"]).(string); ok { - dbname.WriteString(dbNameStr) + if _, err := dbname.WriteString(dbNameStr); err != nil { + return err + } } else { // PG 12 adds tracking of global objects to pg_stat_database - dbname.WriteString("postgres_global") + if _, err := dbname.WriteString("postgres_global"); err != nil { + return err + } } } else { - dbname.WriteString("postgres") + if _, err := dbname.WriteString("postgres"); err != nil { + return err + } } var tagAddress string diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index 580ae20e50f07..d4be13ee7bca2 100644 --- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -152,6 +152,8 @@ func (p *Service) Start(telegraf.Accumulator) (err error) { // Stop stops the services and closes any necessary channels and connections func (p *Service) Stop() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive p.DB.Close() } diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index e1ad27086b312..ceb6c0be5fe9c 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -83,16 +83,16 @@ var sampleConfig = ` ## output measurement name ("postgresql"). ## ## The script option can be used to specify the .sql file path. - ## If script and sqlquery options specified at same time, sqlquery will be used + ## If script and sqlquery options specified at same time, sqlquery will be used ## ## the tagvalue field is used to define custom tags (separated by comas). ## the query is expected to return columns which match the names of the ## defined tags. The values in these columns must be of a string-type, ## a number-type or a blob-type. - ## + ## ## The timestamp field is used to override the data points timestamp value. By ## default, all rows inserted with current time. By setting a timestamp column, - ## the row will be inserted with that column's value. + ## the row will be inserted with that column's value. ## ## Structure : ## [[inputs.postgresql_extensible.query]] @@ -268,12 +268,18 @@ func (p *Postgresql) accRow(measName string, row scanner, acc telegraf.Accumulat // extract the database name from the column map switch datname := (*c).(type) { case string: - dbname.WriteString(datname) + if _, err := dbname.WriteString(datname); err != nil { + return err + } default: - dbname.WriteString("postgres") + if _, err := dbname.WriteString("postgres"); err != nil { + return err + } } } else { - dbname.WriteString("postgres") + if _, err := dbname.WriteString("postgres"); err != nil { + return err + } } if tagAddress, err = p.SanitizedAddress(); err != nil { diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index b80965fbcb066..399c236bffcea 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -26,8 +26,8 @@ func queryRunner(t *testing.T, q query) *testutil.Accumulator { Query: q, } var acc testutil.Accumulator - p.Start(&acc) - p.Init() + require.NoError(t, p.Init()) + require.NoError(t, p.Start(&acc)) require.NoError(t, acc.GatherError(p.Gather)) return &acc } @@ -231,8 +231,8 @@ func TestPostgresqlSqlScript(t *testing.T) { Query: q, } var acc testutil.Accumulator - p.Start(&acc) - p.Init() + require.NoError(t, p.Init()) + require.NoError(t, p.Start(&acc)) require.NoError(t, acc.GatherError(p.Gather)) } diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index 3c661990cee4c..5421c926a7745 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -56,14 +56,16 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error defer conn.Close() - conn.SetDeadline(time.Now().Add(defaultTimeout)) + if err := conn.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return err + } // Read and write buffer rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) // Send command if _, err := fmt.Fprint(conn, "show * \n"); err != nil { - return nil + return err } if err := rw.Flush(); err != nil { return err diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index 19be4a7dfb825..bf7d3845f7dc9 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -63,7 +63,11 @@ func (s statServer) serverSocket(l net.Listener) { data := buf[:n] if string(data) == "show * \n" { + // Ignore the returned error as we need to close the socket anyway + //nolint:errcheck,revive c.Write([]byte(metrics)) + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive c.Close() } }(conn) diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go index d040d8355329d..190297f9f58a1 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -97,14 +97,16 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator } defer conn.Close() - conn.SetDeadline(time.Now().Add(defaultTimeout)) + if err := conn.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return err + } // Read and write buffer rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) // Send command if _, err := fmt.Fprint(rw, "get-all\n"); err != nil { - return nil + return err } if err := rw.Flush(); err != nil { return err @@ -130,9 +132,7 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator acc.AddFields("powerdns_recursor", fields, tags) - conn.Close() - - return nil + return conn.Close() } func parseResponse(metrics string) map[string]interface{} { diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go index ad0d9ab941ded..e715fe4e2d165 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go @@ -103,19 +103,20 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { // We create a fake server to return test data controlSocket := "/tmp/pdns5724354148158589552.controlsocket" addr, err := net.ResolveUnixAddr("unixgram", controlSocket) - if err != nil { - t.Fatal("Cannot parse unix socket") - } + require.NoError(t, err, "Cannot parse unix socket") socket, err := net.ListenUnixgram("unixgram", addr) - if err != nil { - t.Fatal("Cannot initialize server on port") - } + require.NoError(t, err, "Cannot initialize server on port") var wg sync.WaitGroup wg.Add(1) go func() { defer func() { + // Ignore the returned error as we need to remove the socket file anyway + //nolint:errcheck,revive socket.Close() + // Ignore the returned error as we want to remove the file and ignore + // no-such-file errors + //nolint:errcheck,revive os.Remove(controlSocket) wg.Done() }() @@ -124,13 +125,19 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { buf := make([]byte, 1024) n, remote, err := socket.ReadFromUnix(buf) if err != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive socket.Close() return } data := buf[:n] if string(data) == "get-all\n" { + // Ignore the returned error as we need to close the socket anyway + //nolint:errcheck,revive socket.WriteToUnix([]byte(metrics), remote) + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive socket.Close() } @@ -143,13 +150,11 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { SocketDir: "/tmp", SocketMode: "0666", } - err = p.Init() - require.NoError(t, err) + require.NoError(t, p.Init()) var acc testutil.Accumulator - err = acc.GatherError(p.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(p.Gather)) wg.Wait() @@ -297,14 +302,10 @@ func TestPowerdnsRecursorParseMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !ok { - t.Errorf("Did not find key for metric %s in values", test.key) + if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) { continue } - if value != test.value { - t.Errorf("Metric: %s, Expected: %d, actual: %d", - test.key, test.value, value) - } + require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } @@ -422,14 +423,10 @@ func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !ok { - t.Errorf("Did not find key for metric %s in values", test.key) + if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) { continue } - if value != test.value { - t.Errorf("Metric: %s, Expected: %d, actual: %d", - test.key, test.value, value) - } + require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } @@ -547,13 +544,9 @@ func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !ok { - t.Errorf("Did not find key for metric %s in values", test.key) + if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) { continue } - if value != test.value { - t.Errorf("Metric: %s, Expected: %d, actual: %d", - test.key, test.value, value) - } + require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index d59e327027cff..e9289493b2c58 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -45,6 +45,7 @@ func TestMockExecCommand(_ *testing.T) { cmdline := strings.Join(cmd, " ") if cmdline == "systemctl show TestGather_systemdUnitPIDs" { + //nolint:errcheck,revive fmt.Printf(`PIDFile= GuessMainPID=yes MainPID=11408 @@ -54,6 +55,7 @@ ExecMainPID=11408 os.Exit(0) } + //nolint:errcheck,revive fmt.Printf("command not found\n") os.Exit(1) } diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index f3fe461450fd0..7a85d88e2c59b 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -197,7 +197,9 @@ func updateCadvisorPodList(p *Prometheus, req *http.Request) error { // Will have expected type errors for some parts of corev1.Pod struct for some unused fields // Instead have nil checks for every used field in case of incorrect decoding - json.NewDecoder(resp.Body).Decode(&cadvisorPodsResponse) + if err := json.NewDecoder(resp.Body).Decode(&cadvisorPodsResponse); err != nil { + return fmt.Errorf("decoding response failed: %v", err) + } pods := cadvisorPodsResponse.Items // Updating pod list to be latest cadvisor response diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index d62602dc169c1..9a4b5a4837643 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -93,9 +93,8 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { } func isProtobuf(header http.Header) bool { - mediatype, params, error := mime.ParseMediaType(header.Get("Content-Type")) - - if error != nil { + mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type")) + if err != nil { return false } diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index 3ba4b5f4a1a01..ea8ca0e9346ab 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -51,7 +51,8 @@ go_goroutines 15 1490802350000 func TestPrometheusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleTextFormat) + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -76,7 +77,8 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleTextFormat) + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -107,7 +109,8 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFailsIntegration(t *testing.T } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleTextFormat) + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -130,7 +133,8 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFailsIntegration(t *testing.T func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleSummaryTextFormat) + _, err := fmt.Fprintln(w, sampleSummaryTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -160,7 +164,8 @@ go_gc_duration_seconds_sum 42.0 go_gc_duration_seconds_count 42 ` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, data) + _, err := fmt.Fprintln(w, data) + require.NoError(t, err) })) defer ts.Close() @@ -216,7 +221,8 @@ go_gc_duration_seconds_count 42 func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleGaugeTextFormat) + _, err := fmt.Fprintln(w, sampleGaugeTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -259,11 +265,12 @@ func TestInitConfigErrors(t *testing.T) { // Both invalid IP addresses p.NodeIP = "10.240.0.0.0" - os.Setenv("NODE_IP", "10.000.0.0.0") + require.NoError(t, os.Setenv("NODE_IP", "10.000.0.0.0")) err := p.Init() - expectedMessage := "the node_ip config and the environment variable NODE_IP are not set or invalid. Cannot get pod list for monitor_kubernetes_pods using node scrape scope" - require.Error(t, err, expectedMessage) - os.Setenv("NODE_IP", "10.000.0.0") + require.Error(t, err) + expectedMessage := "the node_ip config and the environment variable NODE_IP are not set or invalid; cannot get pod list for monitor_kubernetes_pods using node scrape scope" + require.Equal(t, expectedMessage, err.Error()) + require.NoError(t, os.Setenv("NODE_IP", "10.000.0.0")) p.KubernetesLabelSelector = "label0==label0, label0 in (=)" err = p.Init() diff --git a/plugins/inputs/puppetagent/puppetagent_test.go b/plugins/inputs/puppetagent/puppetagent_test.go index b1c447887f23c..6ba769ac5dd37 100644 --- a/plugins/inputs/puppetagent/puppetagent_test.go +++ b/plugins/inputs/puppetagent/puppetagent_test.go @@ -1,8 +1,10 @@ package puppetagent import ( - "github.com/influxdata/telegraf/testutil" "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestGather(t *testing.T) { @@ -11,7 +13,7 @@ func TestGather(t *testing.T) { pa := PuppetAgent{ Location: "last_run_summary.yaml", } - pa.Gather(&acc) + require.NoError(t, pa.Gather(&acc)) tags := map[string]string{"location": "last_run_summary.yaml"} fields := map[string]interface{}{ diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 29e2864399c08..fa92fc744f97f 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -396,9 +396,7 @@ func (r *RabbitMQ) requestJSON(u string, target interface{}) error { defer resp.Body.Close() - json.NewDecoder(resp.Body).Decode(target) - - return nil + return json.NewDecoder(resp.Body).Decode(target) } func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator) { diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 869e8036d157d..b65585b8f0a57 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -1,7 +1,6 @@ package rabbitmq import ( - "fmt" "net/http" "net/http/httptest" "testing" @@ -31,16 +30,14 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { case "/api/nodes/rabbit@vagrant-ubuntu-trusty-64/memory": jsonFilePath = "testdata/memory.json" default: - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } data, err := ioutil.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) - if err != nil { - panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) - } - - w.Write(data) + _, err = w.Write(data) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/raindrops/raindrops_test.go b/plugins/inputs/raindrops/raindrops_test.go index f8b766101b189..591dd624a10ea 100644 --- a/plugins/inputs/raindrops/raindrops_test.go +++ b/plugins/inputs/raindrops/raindrops_test.go @@ -49,13 +49,11 @@ func TestRaindropsGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - if r.URL.Path == "/_raindrops" { - rsp = sampleResponse - } else { - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/_raindrops", "Cannot handle request") + rsp = sampleResponse - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/ravendb/ravendb_test.go b/plugins/inputs/ravendb/ravendb_test.go index 754ece88fd01d..42eaea3fb3e3b 100644 --- a/plugins/inputs/ravendb/ravendb_test.go +++ b/plugins/inputs/ravendb/ravendb_test.go @@ -1,7 +1,6 @@ package ravendb import ( - "fmt" "io/ioutil" "net/http" "net/http/httptest" @@ -28,16 +27,14 @@ func TestRavenDBGeneratesMetricsFull(t *testing.T) { jsonFilePath = "testdata/collections_full.json" default: - panic(fmt.Sprintf("Cannot handle request for uri %s", r.URL.Path)) + require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) } data, err := ioutil.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) - if err != nil { - panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) - } - - w.Write(data) + _, err = w.Write(data) + require.NoError(t, err) })) defer ts.Close() @@ -47,7 +44,7 @@ func TestRavenDBGeneratesMetricsFull(t *testing.T) { Log: testutil.Logger{}, } - r.Init() + require.NoError(t, r.Init()) acc := &testutil.Accumulator{} @@ -225,16 +222,14 @@ func TestRavenDBGeneratesMetricsMin(t *testing.T) { case "/admin/monitoring/v1/collections": jsonFilePath = "testdata/collections_min.json" default: - panic(fmt.Sprintf("Cannot handle request for uri %s", r.URL.Path)) + require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) } data, err := ioutil.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) - if err != nil { - panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) - } - - w.Write(data) + _, err = w.Write(data) + require.NoError(t, err) })) defer ts.Close() @@ -244,7 +239,7 @@ func TestRavenDBGeneratesMetricsMin(t *testing.T) { Log: testutil.Logger{}, } - r.Init() + require.NoError(t, r.Init()) acc := &testutil.Accumulator{} diff --git a/plugins/inputs/redfish/redfish_test.go b/plugins/inputs/redfish/redfish_test.go index 568db00092e2e..4cbbb045302c1 100644 --- a/plugins/inputs/redfish/redfish_test.go +++ b/plugins/inputs/redfish/redfish_test.go @@ -489,7 +489,7 @@ func TestDellApis(t *testing.T) { Password: "test", ComputerSystemID: "System.Embedded.1", } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator err = plugin.Gather(&acc) @@ -649,7 +649,7 @@ func TestHPApis(t *testing.T) { Password: "test", ComputerSystemID: "1", } - hpPlugin.Init() + require.NoError(t, hpPlugin.Init()) var hpAcc testutil.Accumulator err = hpPlugin.Gather(&hpAcc) @@ -691,7 +691,7 @@ func TestInvalidUsernameorPassword(t *testing.T) { } var acc testutil.Accumulator - r.Init() + require.NoError(t, r.Init()) u, err := url.Parse(ts.URL) require.NoError(t, err) err = r.Gather(&acc) @@ -789,7 +789,7 @@ func TestInvalidDellJSON(t *testing.T) { ComputerSystemID: "System.Embedded.1", } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator err := plugin.Gather(&acc) @@ -858,7 +858,7 @@ func TestInvalidHPJSON(t *testing.T) { ComputerSystemID: "System.Embedded.2", } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator err := plugin.Gather(&acc) diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go index 35994cea65f40..a0108acf64df5 100644 --- a/plugins/inputs/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -46,8 +46,7 @@ var localhost = &Server{URL: &url.URL{Host: "127.0.0.1:28015"}} // Returns one of the errors encountered while gather stats (if any). func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { if len(r.Servers) == 0 { - r.gatherServer(localhost, acc) - return nil + return r.gatherServer(localhost, acc) } var wg sync.WaitGroup diff --git a/plugins/inputs/rethinkdb/rethinkdb_server.go b/plugins/inputs/rethinkdb/rethinkdb_server.go index ca12a224356d1..ffb63e64106e2 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server.go @@ -30,7 +30,6 @@ func (s *Server) gatherData(acc telegraf.Accumulator) error { } if err := s.addClusterStats(acc); err != nil { - fmt.Printf("error adding cluster stats, %s\n", err.Error()) return fmt.Errorf("error adding cluster stats, %s", err.Error()) } diff --git a/plugins/inputs/riak/riak_test.go b/plugins/inputs/riak/riak_test.go index 09f9a961f4d76..90688b17827b0 100644 --- a/plugins/inputs/riak/riak_test.go +++ b/plugins/inputs/riak/riak_test.go @@ -15,7 +15,8 @@ func TestRiak(t *testing.T) { // Create a test server with the const response JSON ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, response) + _, err := fmt.Fprintln(w, response) + require.NoError(t, err) })) defer ts.Close() @@ -31,8 +32,7 @@ func TestRiak(t *testing.T) { acc := &testutil.Accumulator{} // Gather data from the test server - err = riak.Gather(acc) - require.NoError(t, err) + require.NoError(t, riak.Gather(acc)) // Expect the correct values for all known keys expectFields := map[string]interface{}{ diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index de872e948722c..5c075017a8430 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -77,7 +77,9 @@ func (rsl *riemannListener) listen(ctx context.Context) { if rsl.ReadBufferSize.Size > 0 { if srb, ok := c.(setReadBufferer); ok { - srb.SetReadBuffer(int(rsl.ReadBufferSize.Size)) + if err := srb.SetReadBuffer(int(rsl.ReadBufferSize.Size)); err != nil { + rsl.Log.Warnf("Setting read buffer failed: %v", err) + } } else { rsl.Log.Warnf("Unable to set read buffer on a %s socket", rsl.sockType) } @@ -86,7 +88,9 @@ func (rsl *riemannListener) listen(ctx context.Context) { rsl.connectionsMtx.Lock() if rsl.MaxConnections > 0 && len(rsl.connections) >= rsl.MaxConnections { rsl.connectionsMtx.Unlock() - c.Close() + if err := c.Close(); err != nil { + rsl.Log.Warnf("Closing the connection failed: %v", err) + } continue } rsl.connections[c.RemoteAddr().String()] = c @@ -110,7 +114,9 @@ func (rsl *riemannListener) listen(ctx context.Context) { func (rsl *riemannListener) closeAllConnections() { rsl.connectionsMtx.Lock() for _, c := range rsl.connections { - c.Close() + if err := c.Close(); err != nil { + rsl.Log.Warnf("Closing the connection failed: %v", err.Error()) + } } rsl.connectionsMtx.Unlock() } @@ -170,7 +176,9 @@ func (rsl *riemannListener) read(conn net.Conn) { for { if rsl.ReadTimeout != nil && rsl.ReadTimeout.Duration > 0 { - conn.SetDeadline(time.Now().Add(rsl.ReadTimeout.Duration)) + if err := conn.SetDeadline(time.Now().Add(rsl.ReadTimeout.Duration)); err != nil { + rsl.Log.Warnf("Setting deadline failed: %v", err) + } } messagePb := &riemangoProto.Msg{} @@ -278,7 +286,7 @@ func (rsl *RiemannSocketListener) Description() string { func (rsl *RiemannSocketListener) SampleConfig() string { return ` - ## URL to listen on. + ## URL to listen on. ## Default is "tcp://:5555" # service_address = "tcp://:8094" # service_address = "tcp://127.0.0.1:http" diff --git a/plugins/inputs/sensors/sensors_test.go b/plugins/inputs/sensors/sensors_test.go index e5105caa3f787..6bf1b616cb985 100644 --- a/plugins/inputs/sensors/sensors_test.go +++ b/plugins/inputs/sensors/sensors_test.go @@ -8,6 +8,8 @@ import ( "os/exec" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" ) @@ -22,10 +24,7 @@ func TestGatherDefault(t *testing.T) { defer func() { execCommand = exec.Command }() var acc testutil.Accumulator - err := s.Gather(&acc) - if err != nil { - t.Fatal(err) - } + require.NoError(t, s.Gather(&acc)) var tests = []struct { tags map[string]string @@ -163,10 +162,7 @@ func TestGatherNotRemoveNumbers(t *testing.T) { defer func() { execCommand = exec.Command }() var acc testutil.Accumulator - err := s.Gather(&acc) - if err != nil { - t.Fatal(err) - } + require.NoError(t, s.Gather(&acc)) var tests = []struct { tags map[string]string @@ -373,8 +369,10 @@ Vcore Voltage: cmd, args := args[3], args[4:] if cmd == "sensors" { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, mockData) } else { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") os.Exit(1) } diff --git a/plugins/inputs/sflow/packetdecoder_test.go b/plugins/inputs/sflow/packetdecoder_test.go index f078eaf310e8b..bb318a86a1932 100644 --- a/plugins/inputs/sflow/packetdecoder_test.go +++ b/plugins/inputs/sflow/packetdecoder_test.go @@ -40,7 +40,8 @@ func BenchmarkUDPHeader(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - dc.decodeUDPHeader(octets) + _, err := dc.decodeUDPHeader(octets) + require.NoError(b, err) } } diff --git a/plugins/inputs/sflow/sflow.go b/plugins/inputs/sflow/sflow.go index 2876cebe3dc0f..45578d5396cc3 100644 --- a/plugins/inputs/sflow/sflow.go +++ b/plugins/inputs/sflow/sflow.go @@ -84,7 +84,9 @@ func (s *SFlow) Start(acc telegraf.Accumulator) error { s.addr = conn.LocalAddr() if s.ReadBufferSize.Size > 0 { - conn.SetReadBuffer(int(s.ReadBufferSize.Size)) + if err := conn.SetReadBuffer(int(s.ReadBufferSize.Size)); err != nil { + return err + } } s.Log.Infof("Listening on %s://%s", s.addr.Network(), s.addr.String()) @@ -105,6 +107,8 @@ func (s *SFlow) Gather(_ telegraf.Accumulator) error { func (s *SFlow) Stop() { if s.closer != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.closer.Close() } s.wg.Wait() diff --git a/plugins/inputs/sflow/sflow_test.go b/plugins/inputs/sflow/sflow_test.go index 2df56c2ae97cd..6129c2d95c079 100644 --- a/plugins/inputs/sflow/sflow_test.go +++ b/plugins/inputs/sflow/sflow_test.go @@ -29,7 +29,8 @@ func TestSFlow(t *testing.T) { packetBytes, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000") require.NoError(t, err) - client.Write(packetBytes) + _, err = client.Write(packetBytes) + require.NoError(t, err) acc.Wait(2) @@ -129,7 +130,8 @@ func BenchmarkSFlow(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - client.Write(packetBytes) + _, err := client.Write(packetBytes) + require.NoError(b, err) acc.Wait(2) } } diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go index 80d6e2cbf74ce..1927db23246b4 100644 --- a/plugins/inputs/snmp/snmp_mocks_test.go +++ b/plugins/inputs/snmp/snmp_mocks_test.go @@ -44,10 +44,13 @@ func TestMockExecCommand(_ *testing.T) { mcr, ok := mockedCommandResults[cmd0] if !ok { cv := fmt.Sprintf("%#v", cmd)[8:] // trim `[]string` prefix + //nolint:errcheck,revive fmt.Fprintf(os.Stderr, "Unmocked command. Please add the following to `mockedCommands` in snmp_mocks_generate.go, and then run `go generate`:\n\t%s,\n", cv) os.Exit(1) } + //nolint:errcheck,revive fmt.Printf("%s", mcr.stdout) + //nolint:errcheck,revive fmt.Fprintf(os.Stderr, "%s", mcr.stderr) if mcr.exitError { os.Exit(1) diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index e14305d087144..b589a60f72969 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -488,8 +488,8 @@ func TestGosnmpWrapper_walk_retry(t *testing.T) { t.Skip("Skipping test due to random failures.") } srvr, err := net.ListenUDP("udp4", &net.UDPAddr{}) - defer srvr.Close() require.NoError(t, err) + defer srvr.Close() reqCount := 0 // Set up a WaitGroup to wait for the server goroutine to exit and protect // reqCount. @@ -507,7 +507,10 @@ func TestGosnmpWrapper_walk_retry(t *testing.T) { } reqCount++ - srvr.WriteTo([]byte{'X'}, addr) // will cause decoding error + // will cause decoding error + if _, err := srvr.WriteTo([]byte{'X'}, addr); err != nil { + return + } } }() @@ -527,7 +530,7 @@ func TestGosnmpWrapper_walk_retry(t *testing.T) { GoSNMP: gs, } err = gsw.Walk(".1.0.0", func(_ gosnmp.SnmpPDU) error { return nil }) - srvr.Close() + assert.NoError(t, srvr.Close()) wg.Wait() assert.Error(t, err) assert.False(t, gs.Conn == conn) @@ -538,8 +541,8 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { // TODO: Fix this test t.Skip("Test failing too often, skip for now and revisit later.") srvr, err := net.ListenUDP("udp4", &net.UDPAddr{}) - defer srvr.Close() require.NoError(t, err) + defer srvr.Close() reqCount := 0 // Set up a WaitGroup to wait for the server goroutine to exit and protect // reqCount. @@ -557,7 +560,10 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { } reqCount++ - srvr.WriteTo([]byte{'X'}, addr) // will cause decoding error + // will cause decoding error + if _, err := srvr.WriteTo([]byte{'X'}, addr); err != nil { + return + } } }() @@ -577,7 +583,7 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { GoSNMP: gs, } _, err = gsw.Get([]string{".1.0.0"}) - srvr.Close() + require.NoError(t, srvr.Close()) wg.Wait() assert.Error(t, err) assert.False(t, gs.Conn == conn) @@ -760,7 +766,7 @@ func TestGather(t *testing.T) { acc := &testutil.Accumulator{} tstart := time.Now() - s.Gather(acc) + require.NoError(t, s.Gather(acc)) tstop := time.Now() require.Len(t, acc.Metrics, 2) @@ -807,7 +813,7 @@ func TestGather_host(t *testing.T) { acc := &testutil.Accumulator{} - s.Gather(acc) + require.NoError(t, s.Gather(acc)) require.Len(t, acc.Metrics, 1) m := acc.Metrics[0] diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index 15c6f18e1e1dc..4ffa01a3440a8 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -49,7 +49,10 @@ func (ssl *streamSocketListener) listen() { if ssl.ReadBufferSize.Size > 0 { if srb, ok := c.(setReadBufferer); ok { - srb.SetReadBuffer(int(ssl.ReadBufferSize.Size)) + if err := srb.SetReadBuffer(int(ssl.ReadBufferSize.Size)); err != nil { + ssl.Log.Error(err.Error()) + break + } } else { ssl.Log.Warnf("Unable to set read buffer on a %s socket", ssl.sockType) } @@ -58,6 +61,8 @@ func (ssl *streamSocketListener) listen() { ssl.connectionsMtx.Lock() if ssl.MaxConnections > 0 && len(ssl.connections) >= ssl.MaxConnections { ssl.connectionsMtx.Unlock() + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive c.Close() continue } @@ -77,6 +82,8 @@ func (ssl *streamSocketListener) listen() { ssl.connectionsMtx.Lock() for _, c := range ssl.connections { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive c.Close() } ssl.connectionsMtx.Unlock() @@ -120,7 +127,10 @@ func (ssl *streamSocketListener) read(c net.Conn) { scnr := bufio.NewScanner(decoder) for { if ssl.ReadTimeout != nil && ssl.ReadTimeout.Duration > 0 { - c.SetReadDeadline(time.Now().Add(ssl.ReadTimeout.Duration)) + if err := c.SetReadDeadline(time.Now().Add(ssl.ReadTimeout.Duration)); err != nil { + ssl.Log.Error("setting read deadline failed: %v", err) + return + } } if !scnr.Scan() { break @@ -289,6 +299,7 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { // no good way of testing for "file does not exist". // Instead just ignore error and blow up when we try to listen, which will // indicate "address already in use" if file existed and we couldn't remove. + //nolint:errcheck,revive os.Remove(addr) } @@ -319,7 +330,9 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return err } - os.Chmod(spl[1], os.FileMode(uint32(i))) + if err := os.Chmod(spl[1], os.FileMode(uint32(i))); err != nil { + return err + } } ssl := &streamSocketListener{ @@ -354,12 +367,16 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return err } - os.Chmod(spl[1], os.FileMode(uint32(i))) + if err := os.Chmod(spl[1], os.FileMode(uint32(i))); err != nil { + return err + } } if sl.ReadBufferSize.Size > 0 { if srb, ok := pc.(setReadBufferer); ok { - srb.SetReadBuffer(int(sl.ReadBufferSize.Size)) + if err := srb.SetReadBuffer(int(sl.ReadBufferSize.Size)); err != nil { + sl.Log.Warnf("Setting read buffer on a %s socket failed: %v", protocol, err) + } } else { sl.Log.Warnf("Unable to set read buffer on a %s socket", protocol) } @@ -418,6 +435,8 @@ func udpListen(network string, address string) (net.PacketConn, error) { func (sl *SocketListener) Stop() { if sl.Closer != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive sl.Close() sl.Closer = nil } @@ -439,7 +458,9 @@ type unixCloser struct { func (uc unixCloser) Close() error { err := uc.closer.Close() - os.Remove(uc.path) // ignore error + // Ignore the error if e.g. the file does not exist + //nolint:errcheck,revive + os.Remove(uc.path) return err } diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index 03d0c045307c9..2a24850eaf889 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -140,7 +140,7 @@ func TestSocketListener_unix(t *testing.T) { defer testEmptyLog(t)() f, _ := os.Create(sock) - f.Close() + require.NoError(t, f.Close()) sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "unix://" + sock @@ -169,7 +169,8 @@ func TestSocketListener_unixgram(t *testing.T) { defer testEmptyLog(t)() - os.Create(sock) + _, err = os.Create(sock) + require.NoError(t, err) sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "unixgram://" + sock @@ -242,9 +243,10 @@ func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) { require.NoError(t, err) } - client.Write(mstr12) - client.Write(mstr3) - + _, err := client.Write(mstr12) + require.NoError(t, err) + _, err = client.Write(mstr3) + require.NoError(t, err) acc := sl.Accumulator.(*testutil.Accumulator) acc.Wait(3) diff --git a/plugins/inputs/solr/solr_test.go b/plugins/inputs/solr/solr_test.go index f4451ccc4c2f3..42a6753c9b999 100644 --- a/plugins/inputs/solr/solr_test.go +++ b/plugins/inputs/solr/solr_test.go @@ -110,15 +110,23 @@ func createMockServer() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/solr/admin/cores") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, statusResponse) } else if strings.Contains(r.URL.Path, "solr/main/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansMainResponse) } else if strings.Contains(r.URL.Path, "solr/core1/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansCore1Response) } else { w.WriteHeader(http.StatusNotFound) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, "nope") } })) @@ -129,15 +137,23 @@ func createMockNoCoreDataServer() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/solr/admin/cores") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, statusResponse) } else if strings.Contains(r.URL.Path, "solr/main/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, nodata) } else if strings.Contains(r.URL.Path, "solr/core1/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, nodata) } else { w.WriteHeader(http.StatusNotFound) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, "nope") } })) @@ -147,15 +163,23 @@ func createMockSolr3Server() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/solr/admin/cores") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, statusResponse) } else if strings.Contains(r.URL.Path, "solr/main/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansSolr3MainResponse) } else if strings.Contains(r.URL.Path, "solr/core1/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansSolr3MainResponse) } else { w.WriteHeader(http.StatusNotFound) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, "nope") } })) @@ -165,12 +189,18 @@ func createMockSolr7Server() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/solr/admin/cores") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, statusResponse) } else if strings.Contains(r.URL.Path, "solr/main/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansSolr7Response) } else { w.WriteHeader(http.StatusNotFound) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, "nope") } })) diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index d8ab33b71bf4d..580bfe5ee9e9d 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -6,10 +6,9 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestSqlServer_QueriesInclusionExclusion(t *testing.T) { @@ -34,10 +33,10 @@ func TestSqlServer_QueriesInclusionExclusion(t *testing.T) { IncludeQuery: test["IncludeQuery"].([]string), ExcludeQuery: test["ExcludeQuery"].([]string), } - initQueries(&s) - assert.Equal(t, len(s.queries), test["queriesTotal"].(int)) + require.NoError(t, initQueries(&s)) + require.Equal(t, len(s.queries), test["queriesTotal"].(int)) for _, query := range test["queries"].([]string) { - assert.Contains(t, s.queries, query) + require.Contains(t, s.queries, query) } } } @@ -133,15 +132,12 @@ func TestSqlServer_MultipleInstanceIntegration(t *testing.T) { require.NoError(t, err) // acc includes size metrics, and excludes memory metrics - assert.False(t, acc.HasMeasurement("Memory breakdown (%)")) - assert.True(t, acc.HasMeasurement("Log size (bytes)")) + require.False(t, acc.HasMeasurement("Memory breakdown (%)")) + require.True(t, acc.HasMeasurement("Log size (bytes)")) // acc2 includes memory metrics, and excludes size metrics - assert.True(t, acc2.HasMeasurement("Memory breakdown (%)")) - assert.False(t, acc2.HasMeasurement("Log size (bytes)")) - - s.Stop() - s2.Stop() + require.True(t, acc2.HasMeasurement("Memory breakdown (%)")) + require.False(t, acc2.HasMeasurement("Log size (bytes)")) } func TestSqlServer_MultipleInstanceWithHealthMetricIntegration(t *testing.T) { @@ -172,22 +168,19 @@ func TestSqlServer_MultipleInstanceWithHealthMetricIntegration(t *testing.T) { require.NoError(t, err) // acc includes size metrics, and excludes memory metrics and the health metric - assert.False(t, acc.HasMeasurement(healthMetricName)) - assert.False(t, acc.HasMeasurement("Memory breakdown (%)")) - assert.True(t, acc.HasMeasurement("Log size (bytes)")) + require.False(t, acc.HasMeasurement(healthMetricName)) + require.False(t, acc.HasMeasurement("Memory breakdown (%)")) + require.True(t, acc.HasMeasurement("Log size (bytes)")) // acc2 includes memory metrics and the health metric, and excludes size metrics - assert.True(t, acc2.HasMeasurement(healthMetricName)) - assert.True(t, acc2.HasMeasurement("Memory breakdown (%)")) - assert.False(t, acc2.HasMeasurement("Log size (bytes)")) + require.True(t, acc2.HasMeasurement(healthMetricName)) + require.True(t, acc2.HasMeasurement("Memory breakdown (%)")) + require.False(t, acc2.HasMeasurement("Log size (bytes)")) sqlInstance, database := getConnectionIdentifiers(testServer) tags := map[string]string{healthMetricInstanceTag: sqlInstance, healthMetricDatabaseTag: database} - assert.True(t, acc2.HasPoint(healthMetricName, tags, healthMetricAttemptedQueries, 9)) - assert.True(t, acc2.HasPoint(healthMetricName, tags, healthMetricSuccessfulQueries, 9)) - - s.Stop() - s2.Stop() + require.True(t, acc2.HasPoint(healthMetricName, tags, healthMetricAttemptedQueries, 9)) + require.True(t, acc2.HasPoint(healthMetricName, tags, healthMetricSuccessfulQueries, 9)) } func TestSqlServer_HealthMetric(t *testing.T) { @@ -208,29 +201,25 @@ func TestSqlServer_HealthMetric(t *testing.T) { // acc1 should have the health metric because it is specified in the config var acc1 testutil.Accumulator require.NoError(t, s1.Start(&acc1)) - s1.Gather(&acc1) - assert.True(t, acc1.HasMeasurement(healthMetricName)) + require.NoError(t, s1.Gather(&acc1)) + require.True(t, acc1.HasMeasurement(healthMetricName)) // There will be 2 attempted queries (because we specified 2 queries in IncludeQuery) // Both queries should fail because the specified SQL instances do not exist sqlInstance1, database1 := getConnectionIdentifiers(fakeServer1) tags1 := map[string]string{healthMetricInstanceTag: sqlInstance1, healthMetricDatabaseTag: database1} - assert.True(t, acc1.HasPoint(healthMetricName, tags1, healthMetricAttemptedQueries, 2)) - assert.True(t, acc1.HasPoint(healthMetricName, tags1, healthMetricSuccessfulQueries, 0)) + require.True(t, acc1.HasPoint(healthMetricName, tags1, healthMetricAttemptedQueries, 2)) + require.True(t, acc1.HasPoint(healthMetricName, tags1, healthMetricSuccessfulQueries, 0)) sqlInstance2, database2 := getConnectionIdentifiers(fakeServer2) tags2 := map[string]string{healthMetricInstanceTag: sqlInstance2, healthMetricDatabaseTag: database2} - assert.True(t, acc1.HasPoint(healthMetricName, tags2, healthMetricAttemptedQueries, 2)) - assert.True(t, acc1.HasPoint(healthMetricName, tags2, healthMetricSuccessfulQueries, 0)) + require.True(t, acc1.HasPoint(healthMetricName, tags2, healthMetricAttemptedQueries, 2)) + require.True(t, acc1.HasPoint(healthMetricName, tags2, healthMetricSuccessfulQueries, 0)) // acc2 should not have the health metric because it is not specified in the config var acc2 testutil.Accumulator - require.NoError(t, s2.Start(&acc2)) - s2.Gather(&acc2) - assert.False(t, acc2.HasMeasurement(healthMetricName)) - - s1.Stop() - s2.Stop() + require.NoError(t, s2.Gather(&acc2)) + require.False(t, acc2.HasMeasurement(healthMetricName)) } func TestSqlServer_MultipleInit(t *testing.T) { @@ -239,16 +228,13 @@ func TestSqlServer_MultipleInit(t *testing.T) { ExcludeQuery: []string{"DatabaseSize"}, } - initQueries(s) + require.NoError(t, initQueries(s)) _, ok := s.queries["DatabaseSize"] - // acc includes size metrics - assert.True(t, ok) + require.True(t, ok) - initQueries(s2) + require.NoError(t, initQueries(s2)) _, ok = s2.queries["DatabaseSize"] - // acc2 excludes size metrics - assert.False(t, ok) - + require.False(t, ok) s.Stop() s2.Stop() } @@ -257,80 +243,80 @@ func TestSqlServer_ConnectionString(t *testing.T) { // URL format connectionString := "sqlserver://username:password@hostname.database.windows.net?database=databasename&connection+timeout=30" sqlInstance, database := getConnectionIdentifiers(connectionString) - assert.Equal(t, "hostname.database.windows.net", sqlInstance) - assert.Equal(t, "databasename", database) + require.Equal(t, "hostname.database.windows.net", sqlInstance) + require.Equal(t, "databasename", database) connectionString = " sqlserver://hostname2.somethingelse.net:1433?database=databasename2" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, "hostname2.somethingelse.net", sqlInstance) - assert.Equal(t, "databasename2", database) + require.Equal(t, "hostname2.somethingelse.net", sqlInstance) + require.Equal(t, "databasename2", database) connectionString = "sqlserver://hostname3:1433/SqlInstanceName3?database=databasename3" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, "hostname3\\SqlInstanceName3", sqlInstance) - assert.Equal(t, "databasename3", database) + require.Equal(t, "hostname3\\SqlInstanceName3", sqlInstance) + require.Equal(t, "databasename3", database) connectionString = " sqlserver://hostname4/SqlInstanceName4?database=databasename4&connection%20timeout=30" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, "hostname4\\SqlInstanceName4", sqlInstance) - assert.Equal(t, "databasename4", database) + require.Equal(t, "hostname4\\SqlInstanceName4", sqlInstance) + require.Equal(t, "databasename4", database) connectionString = " sqlserver://username:password@hostname5?connection%20timeout=30" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, "hostname5", sqlInstance) - assert.Equal(t, emptyDatabaseName, database) + require.Equal(t, "hostname5", sqlInstance) + require.Equal(t, emptyDatabaseName, database) // odbc format connectionString = "odbc:server=hostname.database.windows.net;user id=sa;database=master;Trusted_Connection=Yes;Integrated Security=true;" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, "hostname.database.windows.net", sqlInstance) - assert.Equal(t, "master", database) + require.Equal(t, "hostname.database.windows.net", sqlInstance) + require.Equal(t, "master", database) connectionString = " odbc:server=192.168.0.1;user id=somethingelse;Integrated Security=true;Database=mydb " sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, "192.168.0.1", sqlInstance) - assert.Equal(t, "mydb", database) + require.Equal(t, "192.168.0.1", sqlInstance) + require.Equal(t, "mydb", database) connectionString = " odbc:Server=servername\\instancename;Database=dbname;" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, "servername\\instancename", sqlInstance) - assert.Equal(t, "dbname", database) + require.Equal(t, "servername\\instancename", sqlInstance) + require.Equal(t, "dbname", database) connectionString = "server=hostname2.database.windows.net;user id=sa;Trusted_Connection=Yes;Integrated Security=true;" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, "hostname2.database.windows.net", sqlInstance) - assert.Equal(t, emptyDatabaseName, database) + require.Equal(t, "hostname2.database.windows.net", sqlInstance) + require.Equal(t, emptyDatabaseName, database) connectionString = "invalid connection string" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, emptySQLInstance, sqlInstance) - assert.Equal(t, emptyDatabaseName, database) + require.Equal(t, emptySQLInstance, sqlInstance) + require.Equal(t, emptyDatabaseName, database) // Key/value format connectionString = " server=hostname.database.windows.net;user id=sa;database=master;Trusted_Connection=Yes;Integrated Security=true" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, "hostname.database.windows.net", sqlInstance) - assert.Equal(t, "master", database) + require.Equal(t, "hostname.database.windows.net", sqlInstance) + require.Equal(t, "master", database) connectionString = " server=192.168.0.1;user id=somethingelse;Integrated Security=true;Database=mydb;" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, "192.168.0.1", sqlInstance) - assert.Equal(t, "mydb", database) + require.Equal(t, "192.168.0.1", sqlInstance) + require.Equal(t, "mydb", database) connectionString = "Server=servername\\instancename;Database=dbname; " sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, "servername\\instancename", sqlInstance) - assert.Equal(t, "dbname", database) + require.Equal(t, "servername\\instancename", sqlInstance) + require.Equal(t, "dbname", database) connectionString = "server=hostname2.database.windows.net;user id=sa;Trusted_Connection=Yes;Integrated Security=true " sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, "hostname2.database.windows.net", sqlInstance) - assert.Equal(t, emptyDatabaseName, database) + require.Equal(t, "hostname2.database.windows.net", sqlInstance) + require.Equal(t, emptyDatabaseName, database) connectionString = "invalid connection string" sqlInstance, database = getConnectionIdentifiers(connectionString) - assert.Equal(t, emptySQLInstance, sqlInstance) - assert.Equal(t, emptyDatabaseName, database) + require.Equal(t, emptySQLInstance, sqlInstance) + require.Equal(t, emptyDatabaseName, database) } func TestSqlServer_AGQueriesApplicableForDatabaseTypeSQLServer(t *testing.T) { @@ -364,13 +350,12 @@ func TestSqlServer_AGQueriesApplicableForDatabaseTypeSQLServer(t *testing.T) { require.NoError(t, err) // acc includes size metrics, and excludes memory metrics - assert.True(t, acc.HasMeasurement("sqlserver_hadr_replica_states")) - assert.True(t, acc.HasMeasurement("sqlserver_hadr_dbreplica_states")) + require.True(t, acc.HasMeasurement("sqlserver_hadr_replica_states")) + require.True(t, acc.HasMeasurement("sqlserver_hadr_dbreplica_states")) // acc2 includes memory metrics, and excludes size metrics - assert.False(t, acc2.HasMeasurement("sqlserver_hadr_replica_states")) - assert.False(t, acc2.HasMeasurement("sqlserver_hadr_dbreplica_states")) - + require.False(t, acc2.HasMeasurement("sqlserver_hadr_replica_states")) + require.False(t, acc2.HasMeasurement("sqlserver_hadr_dbreplica_states")) s.Stop() s2.Stop() } @@ -406,21 +391,20 @@ func TestSqlServer_AGQueryFieldsOutputBasedOnSQLServerVersion(t *testing.T) { require.NoError(t, err) // acc2019 includes new HADR query fields - assert.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "basic_features")) - assert.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "is_distributed")) - assert.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "seeding_mode")) - assert.True(t, acc2019.HasTag("sqlserver_hadr_replica_states", "seeding_mode_desc")) - assert.True(t, acc2019.HasField("sqlserver_hadr_dbreplica_states", "is_primary_replica")) - assert.True(t, acc2019.HasField("sqlserver_hadr_dbreplica_states", "secondary_lag_seconds")) + require.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "basic_features")) + require.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "is_distributed")) + require.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "seeding_mode")) + require.True(t, acc2019.HasTag("sqlserver_hadr_replica_states", "seeding_mode_desc")) + require.True(t, acc2019.HasField("sqlserver_hadr_dbreplica_states", "is_primary_replica")) + require.True(t, acc2019.HasField("sqlserver_hadr_dbreplica_states", "secondary_lag_seconds")) // acc2012 does not include new HADR query fields - assert.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "basic_features")) - assert.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "is_distributed")) - assert.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "seeding_mode")) - assert.False(t, acc2012.HasTag("sqlserver_hadr_replica_states", "seeding_mode_desc")) - assert.False(t, acc2012.HasField("sqlserver_hadr_dbreplica_states", "is_primary_replica")) - assert.False(t, acc2012.HasField("sqlserver_hadr_dbreplica_states", "secondary_lag_seconds")) - + require.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "basic_features")) + require.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "is_distributed")) + require.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "seeding_mode")) + require.False(t, acc2012.HasTag("sqlserver_hadr_replica_states", "seeding_mode_desc")) + require.False(t, acc2012.HasField("sqlserver_hadr_dbreplica_states", "is_primary_replica")) + require.False(t, acc2012.HasField("sqlserver_hadr_dbreplica_states", "secondary_lag_seconds")) s2019.Stop() s2012.Stop() } diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index 5e652148d3a27..bcb3052756a43 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -613,7 +613,9 @@ func (s *Stackdriver) gatherTimeSeries( if tsDesc.ValueType == metricpb.MetricDescriptor_DISTRIBUTION { dist := p.Value.GetDistributionValue() - s.addDistribution(dist, tags, ts, grouper, tsConf) + if err := s.addDistribution(dist, tags, ts, grouper, tsConf); err != nil { + return err + } } else { var value interface{} @@ -630,7 +632,9 @@ func (s *Stackdriver) gatherTimeSeries( value = p.Value.GetStringValue() } - grouper.Add(tsConf.measurement, tags, ts, tsConf.fieldKey, value) + if err := grouper.Add(tsConf.measurement, tags, ts, tsConf.fieldKey, value); err != nil { + return err + } } } } @@ -642,17 +646,27 @@ func (s *Stackdriver) gatherTimeSeries( func (s *Stackdriver) addDistribution( metric *distributionpb.Distribution, tags map[string]string, ts time.Time, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf, -) { +) error { field := tsConf.fieldKey name := tsConf.measurement - grouper.Add(name, tags, ts, field+"_count", metric.Count) - grouper.Add(name, tags, ts, field+"_mean", metric.Mean) - grouper.Add(name, tags, ts, field+"_sum_of_squared_deviation", metric.SumOfSquaredDeviation) + if err := grouper.Add(name, tags, ts, field+"_count", metric.Count); err != nil { + return err + } + if err := grouper.Add(name, tags, ts, field+"_mean", metric.Mean); err != nil { + return err + } + if err := grouper.Add(name, tags, ts, field+"_sum_of_squared_deviation", metric.SumOfSquaredDeviation); err != nil { + return err + } if metric.Range != nil { - grouper.Add(name, tags, ts, field+"_range_min", metric.Range.Min) - grouper.Add(name, tags, ts, field+"_range_max", metric.Range.Max) + if err := grouper.Add(name, tags, ts, field+"_range_min", metric.Range.Min); err != nil { + return err + } + if err := grouper.Add(name, tags, ts, field+"_range_max", metric.Range.Max); err != nil { + return err + } } linearBuckets := metric.BucketOptions.GetLinearBuckets() @@ -693,8 +707,12 @@ func (s *Stackdriver) addDistribution( if i < int32(len(metric.BucketCounts)) { count += metric.BucketCounts[i] } - grouper.Add(name, tags, ts, field+"_bucket", count) + if err := grouper.Add(name, tags, ts, field+"_bucket", count); err != nil { + return err + } } + + return nil } func init() { diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index bf63b6ee41a4d..f47e3e16ec687 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -415,7 +415,9 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.wg.Add(1) go func() { defer s.wg.Done() - s.udpListen(conn) + if err := s.udpListen(conn); err != nil { + ac.AddError(err) + } }() } else { address, err := net.ResolveTCPAddr("tcp", s.ServiceAddress) @@ -433,7 +435,9 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.wg.Add(1) go func() { defer s.wg.Done() - s.tcpListen(listener) + if err := s.tcpListen(listener); err != nil { + ac.AddError(err) + } }() } @@ -442,7 +446,9 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.wg.Add(1) go func() { defer s.wg.Done() - s.parser() + if err := s.parser(); err != nil { + ac.AddError(err) + } }() } s.Log.Infof("Started the statsd service on %q", s.ServiceAddress) @@ -493,7 +499,9 @@ func (s *Statsd) tcpListen(listener *net.TCPListener) error { // udpListen starts listening for udp packets on the configured port. func (s *Statsd) udpListen(conn *net.UDPConn) error { if s.ReadBufferSize > 0 { - s.UDPlistener.SetReadBuffer(s.ReadBufferSize) + if err := s.UDPlistener.SetReadBuffer(s.ReadBufferSize); err != nil { + return err + } } buf := make([]byte, UDPMaxPacketSize) @@ -512,9 +520,14 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { } s.UDPPacketsRecv.Incr(1) s.UDPBytesRecv.Incr(int64(n)) - b := s.bufPool.Get().(*bytes.Buffer) + b, ok := s.bufPool.Get().(*bytes.Buffer) + if !ok { + return fmt.Errorf("bufPool is not a bytes buffer") + } b.Reset() - b.Write(buf[:n]) + if _, err := b.Write(buf[:n]); err != nil { + return err + } select { case s.in <- input{ Buffer: b, @@ -536,11 +549,11 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { // parser monitors the s.in channel, if there is a packet ready, it parses the // packet into statsd strings and then calls parseStatsdLine, which parses a // single statsd metric into a struct. -func (s *Statsd) parser() { +func (s *Statsd) parser() error { for { select { case <-s.done: - return + return nil case in := <-s.in: start := time.Now() lines := strings.Split(in.Buffer.String(), "\n") @@ -550,9 +563,13 @@ func (s *Statsd) parser() { switch { case line == "": case s.DataDogExtensions && strings.HasPrefix(line, "_e"): - s.parseEventMessage(in.Time, line, in.Addr) + if err := s.parseEventMessage(in.Time, line, in.Addr); err != nil { + return err + } default: - s.parseStatsdLine(line) + if err := s.parseStatsdLine(line); err != nil { + return err + } } } elapsed := time.Since(start) @@ -882,7 +899,11 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { // connection cleanup function defer func() { s.wg.Done() + + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() + // Add one connection potential back to channel when this one closes s.accept <- true s.forget(id) @@ -913,7 +934,10 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { b := s.bufPool.Get().(*bytes.Buffer) b.Reset() + // Writes to a bytes buffer always succeed, so do not check the errors here + //nolint:errcheck,revive b.Write(scanner.Bytes()) + //nolint:errcheck,revive b.WriteByte('\n') select { @@ -932,6 +956,8 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { // refuser refuses a TCP connection func (s *Statsd) refuser(conn *net.TCPConn) { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() s.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr()) s.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections") @@ -956,8 +982,12 @@ func (s *Statsd) Stop() { s.Log.Infof("Stopping the statsd service") close(s.done) if s.isUDP() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.UDPlistener.Close() } else { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.TCPlistener.Close() // Close all open TCP connections // - get all conns from the s.conns map and put into slice @@ -970,6 +1000,8 @@ func (s *Statsd) Stop() { } s.cleanup.Unlock() for _, conn := range conns { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() } } diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 7e6a7822359e5..3e91d4f960402 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -61,7 +61,7 @@ func TestConcurrentConns(t *testing.T) { // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8125") assert.NoError(t, err) - net.Dial("tcp", "127.0.0.1:8125") + _, err = net.Dial("tcp", "127.0.0.1:8125") assert.NoError(t, err) _, err = conn.Write([]byte(testMsg)) assert.NoError(t, err) @@ -90,7 +90,7 @@ func TestConcurrentConns1(t *testing.T) { // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8125") assert.NoError(t, err) - net.Dial("tcp", "127.0.0.1:8125") + _, err = net.Dial("tcp", "127.0.0.1:8125") assert.NoError(t, err) _, err = conn.Write([]byte(testMsg)) assert.NoError(t, err) @@ -132,16 +132,11 @@ func BenchmarkUDP(b *testing.B) { // send multiple messages to socket for n := 0; n < b.N; n++ { - err := listener.Start(acc) - if err != nil { - panic(err) - } + require.NoError(b, listener.Start(acc)) time.Sleep(time.Millisecond * 250) conn, err := net.Dial("udp", "127.0.0.1:8125") - if err != nil { - panic(err) - } + require.NoError(b, err) var wg sync.WaitGroup for i := 1; i <= producerThreads; i++ { @@ -152,7 +147,6 @@ func BenchmarkUDP(b *testing.B) { // wait for 250,000 metrics to get added to accumulator for len(listener.in) > 0 { - fmt.Printf("Left in buffer: %v \n", len(listener.in)) time.Sleep(time.Millisecond) } listener.Stop() @@ -162,6 +156,7 @@ func BenchmarkUDP(b *testing.B) { func sendRequests(conn net.Conn, wg *sync.WaitGroup) { defer wg.Done() for i := 0; i < 25000; i++ { + //nolint:errcheck,revive fmt.Fprintf(conn, testMsg) } } @@ -179,16 +174,12 @@ func BenchmarkTCP(b *testing.B) { // send multiple messages to socket for n := 0; n < b.N; n++ { - err := listener.Start(acc) - if err != nil { - panic(err) - } + require.NoError(b, listener.Start(acc)) time.Sleep(time.Millisecond * 250) conn, err := net.Dial("tcp", "127.0.0.1:8125") - if err != nil { - panic(err) - } + require.NoError(b, err) + var wg sync.WaitGroup for i := 1; i <= producerThreads; i++ { wg.Add(1) @@ -215,10 +206,7 @@ func TestParse_ValidLines(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } } @@ -246,10 +234,7 @@ func TestParse_Gauges(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -291,10 +276,7 @@ func TestParse_Gauges(t *testing.T) { } for _, test := range validations { - err := testValidateGauge(test.name, test.value, s.gauges) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge(test.name, test.value, s.gauges)) } } @@ -324,10 +306,7 @@ func TestParse_Sets(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -353,10 +332,7 @@ func TestParse_Sets(t *testing.T) { } for _, test := range validations { - err := testValidateSet(test.name, test.value, s.sets) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet(test.name, test.value, s.sets)) } } @@ -381,10 +357,7 @@ func TestParse_Counters(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -418,10 +391,7 @@ func TestParse_Counters(t *testing.T) { } for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -441,13 +411,10 @@ func TestParse_Timings(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - s.Gather(acc) + require.NoError(t, s.Gather(acc)) valid := map[string]interface{}{ "90_percentile": float64(11), @@ -478,13 +445,10 @@ func TestParse_Distributions(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - s.Gather(acc) + require.NoError(t, s.Gather(acc)) } validMeasurementMap := map[string]float64{ @@ -528,10 +492,7 @@ func TestParseScientificNotation(t *testing.T) { "scientific.notation:4.6968460083008E-5|h", } for _, line := range sciNotationLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line [%s] should not have resulted in error: %s\n", line, err) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line [%s] should not have resulted in error", line) } } @@ -550,10 +511,7 @@ func TestParse_InvalidLines(t *testing.T) { "invalid.value:1d1|c", } for _, line := range invalidLines { - err := s.parseStatsdLine(line) - if err == nil { - t.Errorf("Parsing line %s should have resulted in an error\n", line) - } + require.Errorf(t, s.parseStatsdLine(line), "Parsing line %s should have resulted in an error", line) } } @@ -568,10 +526,7 @@ func TestParse_InvalidSampleRate(t *testing.T) { } for _, line := range invalidLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } counterValidations := []struct { @@ -592,21 +547,12 @@ func TestParse_InvalidSampleRate(t *testing.T) { } for _, test := range counterValidations { - err := testValidateCounter(test.name, test.value, test.cache) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, test.cache)) } - err := testValidateGauge("invalid_sample_rate", 45, s.gauges) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("invalid_sample_rate", 45, s.gauges)) - err = testValidateSet("invalid_sample_rate", 1, s.sets) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("invalid_sample_rate", 1, s.sets)) } // Names should be parsed like . -> _ @@ -618,10 +564,7 @@ func TestParse_DefaultNameParsing(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -639,10 +582,7 @@ func TestParse_DefaultNameParsing(t *testing.T) { } for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -659,10 +599,7 @@ func TestParse_Template(t *testing.T) { } for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -681,10 +618,7 @@ func TestParse_Template(t *testing.T) { // Validate counters for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -701,10 +635,7 @@ func TestParse_TemplateFilter(t *testing.T) { } for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -723,10 +654,7 @@ func TestParse_TemplateFilter(t *testing.T) { // Validate counters for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -743,10 +671,7 @@ func TestParse_TemplateSpecificity(t *testing.T) { } for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -761,10 +686,7 @@ func TestParse_TemplateSpecificity(t *testing.T) { // Validate counters for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -791,10 +713,7 @@ func TestParse_TemplateFields(t *testing.T) { } for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } counterTests := []struct { @@ -820,10 +739,7 @@ func TestParse_TemplateFields(t *testing.T) { } // Validate counters for _, test := range counterTests { - err := testValidateCounter(test.name, test.value, s.counters, test.field) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters, test.field)) } gaugeTests := []struct { @@ -844,10 +760,7 @@ func TestParse_TemplateFields(t *testing.T) { } // Validate gauges for _, test := range gaugeTests { - err := testValidateGauge(test.name, test.value, s.gauges, test.field) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge(test.name, test.value, s.gauges, test.field)) } setTests := []struct { @@ -868,10 +781,7 @@ func TestParse_TemplateFields(t *testing.T) { } // Validate sets for _, test := range setTests { - err := testValidateSet(test.name, test.value, s.sets, test.field) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet(test.name, test.value, s.sets, test.field)) } } @@ -919,18 +829,12 @@ func TestParse_Tags(t *testing.T) { for _, test := range tests { name, _, tags := s.parseName(test.bucket) - if name != test.name { - t.Errorf("Expected: %s, got %s", test.name, name) - } + require.Equalf(t, name, test.name, "Expected: %s, got %s", test.name, name) for k, v := range test.tags { actual, ok := tags[k] - if !ok { - t.Errorf("Expected key: %s not found", k) - } - if actual != v { - t.Errorf("Expected %s, got %s", v, actual) - } + require.Truef(t, ok, "Expected key: %s not found", k) + require.Equalf(t, actual, v, "Expected %s, got %s", v, actual) } } } @@ -1045,10 +949,8 @@ func TestParse_DataDogTags(t *testing.T) { s := NewTestStatsd() s.DataDogExtensions = true - err := s.parseStatsdLine(tt.line) - require.NoError(t, err) - err = s.Gather(&acc) - require.NoError(t, err) + require.NoError(t, s.parseStatsdLine(tt.line)) + require.NoError(t, s.Gather(&acc)) testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) @@ -1080,9 +982,7 @@ func TestParseName(t *testing.T) { for _, test := range tests { name, _, _ := s.parseName(test.inName) - if name != test.outName { - t.Errorf("Expected: %s, got %s", test.outName, name) - } + require.Equalf(t, name, test.outName, "Expected: %s, got %s", test.outName, name) } // Test with separator == "." @@ -1108,9 +1008,7 @@ func TestParseName(t *testing.T) { for _, test := range tests { name, _, _ := s.parseName(test.inName) - if name != test.outName { - t.Errorf("Expected: %s, got %s", test.outName, name) - } + require.Equalf(t, name, test.outName, "Expected: %s, got %s", test.outName, name) } } @@ -1126,15 +1024,10 @@ func TestParse_MeasurementsWithSameName(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - if len(s.counters) != 2 { - t.Errorf("Expected 2 separate measurements, found %d", len(s.counters)) - } + require.Lenf(t, s.counters, 2, "Expected 2 separate measurements, found %d", len(s.counters)) } // Test that the metric caches expire (clear) an entry after the entry hasn't been updated for the configurable MaxTTL duration. @@ -1143,8 +1036,8 @@ func TestCachesExpireAfterMaxTTL(t *testing.T) { s.MaxTTL = config.Duration(100 * time.Microsecond) acc := &testutil.Accumulator{} - s.parseStatsdLine("valid:45|c") - s.parseStatsdLine("valid:45|c") + require.NoError(t, s.parseStatsdLine("valid:45|c")) + require.NoError(t, s.parseStatsdLine("valid:45|c")) require.NoError(t, s.Gather(acc)) // Max TTL goes by, our 'valid' entry is cleared. @@ -1152,9 +1045,12 @@ func TestCachesExpireAfterMaxTTL(t *testing.T) { require.NoError(t, s.Gather(acc)) // Now when we gather, we should have a counter that is reset to zero. - s.parseStatsdLine("valid:45|c") + require.NoError(t, s.parseStatsdLine("valid:45|c")) require.NoError(t, s.Gather(acc)) + // Wait for the metrics to arrive + acc.Wait(3) + testutil.RequireMetricsEqual(t, []telegraf.Metric{ testutil.MustMetric( @@ -1238,92 +1134,52 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { sMultiple := NewTestStatsd() for _, line := range singleLines { - err := sSingle.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, sSingle.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } for _, line := range multipleLines { - err := sMultiple.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, sMultiple.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - if len(sSingle.timings) != 3 { - t.Errorf("Expected 3 measurement, found %d", len(sSingle.timings)) - } + require.Lenf(t, sSingle.timings, 3, "Expected 3 measurement, found %d", len(sSingle.timings)) - if cachedtiming, ok := sSingle.timings["metric_type=timingvalid_multiple"]; !ok { - t.Errorf("Expected cached measurement with hash 'metric_type=timingvalid_multiple' not found") - } else { - if cachedtiming.name != "valid_multiple" { - t.Errorf("Expected the name to be 'valid_multiple', got %s", cachedtiming.name) - } + cachedtiming, ok := sSingle.timings["metric_type=timingvalid_multiple"] + require.Truef(t, ok, "Expected cached measurement with hash 'metric_type=timingvalid_multiple' not found") + require.Equalf(t, cachedtiming.name, "valid_multiple", "Expected the name to be 'valid_multiple', got %s", cachedtiming.name) - // A 0 at samplerate 0.1 will add 10 values of 0, - // A 0 with invalid samplerate will add a single 0, - // plus the last bit of value 1 - // which adds up to 12 individual datapoints to be cached - if cachedtiming.fields[defaultFieldName].n != 12 { - t.Errorf("Expected 12 additions, got %d", cachedtiming.fields[defaultFieldName].n) - } + // A 0 at samplerate 0.1 will add 10 values of 0, + // A 0 with invalid samplerate will add a single 0, + // plus the last bit of value 1 + // which adds up to 12 individual datapoints to be cached + require.EqualValuesf(t, cachedtiming.fields[defaultFieldName].n, 12, "Expected 12 additions, got %d", cachedtiming.fields[defaultFieldName].n) - if cachedtiming.fields[defaultFieldName].upper != 1 { - t.Errorf("Expected max input to be 1, got %f", cachedtiming.fields[defaultFieldName].upper) - } - } + require.EqualValuesf(t, cachedtiming.fields[defaultFieldName].upper, 1, "Expected max input to be 1, got %f", cachedtiming.fields[defaultFieldName].upper) // test if sSingle and sMultiple did compute the same stats for valid.multiple.duplicate - if err := testValidateSet("valid_multiple_duplicate", 2, sSingle.sets); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("valid_multiple_duplicate", 2, sSingle.sets)) - if err := testValidateSet("valid_multiple_duplicate", 2, sMultiple.sets); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("valid_multiple_duplicate", 2, sMultiple.sets)) - if err := testValidateCounter("valid_multiple_duplicate", 5, sSingle.counters); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("valid_multiple_duplicate", 5, sSingle.counters)) - if err := testValidateCounter("valid_multiple_duplicate", 5, sMultiple.counters); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("valid_multiple_duplicate", 5, sMultiple.counters)) - if err := testValidateGauge("valid_multiple_duplicate", 1, sSingle.gauges); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("valid_multiple_duplicate", 1, sSingle.gauges)) - if err := testValidateGauge("valid_multiple_duplicate", 1, sMultiple.gauges); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("valid_multiple_duplicate", 1, sMultiple.gauges)) // test if sSingle and sMultiple did compute the same stats for valid.multiple.mixed - if err := testValidateSet("valid_multiple_mixed", 1, sSingle.sets); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("valid_multiple_mixed", 1, sSingle.sets)) - if err := testValidateSet("valid_multiple_mixed", 1, sMultiple.sets); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("valid_multiple_mixed", 1, sMultiple.sets)) - if err := testValidateCounter("valid_multiple_mixed", 1, sSingle.counters); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("valid_multiple_mixed", 1, sSingle.counters)) - if err := testValidateCounter("valid_multiple_mixed", 1, sMultiple.counters); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("valid_multiple_mixed", 1, sMultiple.counters)) - if err := testValidateGauge("valid_multiple_mixed", 1, sSingle.gauges); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("valid_multiple_mixed", 1, sSingle.gauges)) - if err := testValidateGauge("valid_multiple_mixed", 1, sMultiple.gauges); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("valid_multiple_mixed", 1, sMultiple.gauges)) } // Tests low-level functionality of timings when multiple fields is enabled @@ -1348,12 +1204,9 @@ func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - s.Gather(acc) + require.NoError(t, s.Gather(acc)) valid := map[string]interface{}{ "success_90_percentile": float64(11), @@ -1399,12 +1252,9 @@ func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - s.Gather(acc) + require.NoError(t, s.Gather(acc)) expectedSuccess := map[string]interface{}{ "90_percentile": float64(11), @@ -1563,23 +1413,15 @@ func TestParse_Timings_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteTimings = true fakeacc := &testutil.Accumulator{} - var err error line := "timing:100|ms" - err = s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) - if len(s.timings) != 1 { - t.Errorf("Should be 1 timing, found %d", len(s.timings)) - } + require.Lenf(t, s.timings, 1, "Should be 1 timing, found %d", len(s.timings)) - s.Gather(fakeacc) + require.NoError(t, s.Gather(fakeacc)) - if len(s.timings) != 0 { - t.Errorf("All timings should have been deleted, found %d", len(s.timings)) - } + require.Lenf(t, s.timings, 0, "All timings should have been deleted, found %d", len(s.timings)) } // Tests the delete_gauges option @@ -1587,25 +1429,15 @@ func TestParse_Gauges_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteGauges = true fakeacc := &testutil.Accumulator{} - var err error line := "current.users:100|g" - err = s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) - err = testValidateGauge("current_users", 100, s.gauges) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("current_users", 100, s.gauges)) - s.Gather(fakeacc) + require.NoError(t, s.Gather(fakeacc)) - err = testValidateGauge("current_users", 100, s.gauges) - if err == nil { - t.Error("current_users_gauge metric should have been deleted") - } + require.Error(t, testValidateGauge("current_users", 100, s.gauges), "current_users_gauge metric should have been deleted") } // Tests the delete_sets option @@ -1613,25 +1445,15 @@ func TestParse_Sets_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteSets = true fakeacc := &testutil.Accumulator{} - var err error line := "unique.user.ids:100|s" - err = s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) - err = testValidateSet("unique_user_ids", 1, s.sets) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("unique_user_ids", 1, s.sets)) - s.Gather(fakeacc) + require.NoError(t, s.Gather(fakeacc)) - err = testValidateSet("unique_user_ids", 1, s.sets) - if err == nil { - t.Error("unique_user_ids_set metric should have been deleted") - } + require.Error(t, testValidateSet("unique_user_ids", 1, s.sets), "unique_user_ids_set metric should have been deleted") } // Tests the delete_counters option @@ -1639,43 +1461,25 @@ func TestParse_Counters_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteCounters = true fakeacc := &testutil.Accumulator{} - var err error line := "total.users:100|c" - err = s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error\n", line) - err = testValidateCounter("total_users", 100, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("total_users", 100, s.counters)) - s.Gather(fakeacc) + require.NoError(t, s.Gather(fakeacc)) - err = testValidateCounter("total_users", 100, s.counters) - if err == nil { - t.Error("total_users_counter metric should have been deleted") - } + require.Error(t, testValidateCounter("total_users", 100, s.counters), "total_users_counter metric should have been deleted") } func TestParseKeyValue(t *testing.T) { k, v := parseKeyValue("foo=bar") - if k != "foo" { - t.Errorf("Expected %s, got %s", "foo", k) - } - if v != "bar" { - t.Errorf("Expected %s, got %s", "bar", v) - } + require.Equalf(t, k, "foo", "Expected %s, got %s", "foo", k) + require.Equalf(t, v, "bar", "Expected %s, got %s", "bar", v) k2, v2 := parseKeyValue("baz") - if k2 != "" { - t.Errorf("Expected %s, got %s", "", k2) - } - if v2 != "baz" { - t.Errorf("Expected %s, got %s", "baz", v2) - } + require.Equalf(t, k2, "", "Expected %s, got %s", "", k2) + require.Equalf(t, v2, "baz", "Expected %s, got %s", "baz", v2) } // Test utility functions @@ -1789,12 +1593,10 @@ func TestTCP(t *testing.T) { conn, err := net.Dial("tcp", addr) _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) require.NoError(t, err) - err = conn.Close() - require.NoError(t, err) + require.NoError(t, conn.Close()) for { - err = statsd.Gather(&acc) - require.NoError(t, err) + require.NoError(t, statsd.Gather(&acc)) if len(acc.Metrics) > 0 { break @@ -1832,14 +1634,13 @@ func TestUdp(t *testing.T) { defer statsd.Stop() conn, err := net.Dial("udp", "127.0.0.1:14223") - _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) require.NoError(t, err) - err = conn.Close() + _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) require.NoError(t, err) + require.NoError(t, conn.Close()) for { - err = statsd.Gather(&acc) - require.NoError(t, err) + require.NoError(t, statsd.Gather(&acc)) if len(acc.Metrics) > 0 { break diff --git a/plugins/inputs/suricata/suricata.go b/plugins/inputs/suricata/suricata.go index 98ca348dce711..631c6af0a05b2 100644 --- a/plugins/inputs/suricata/suricata.go +++ b/plugins/inputs/suricata/suricata.go @@ -81,6 +81,8 @@ func (s *Suricata) Start(acc telegraf.Accumulator) error { // Stop causes the plugin to cease collecting JSON data from the socket provided // to Suricata. func (s *Suricata) Stop() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.inputListener.Close() if s.cancel != nil { s.cancel() diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index 0570c8135a418..f3204f29e5631 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -70,9 +70,11 @@ func TestSuricata(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(ex2)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte(ex2)) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.Wait(1) @@ -115,12 +117,17 @@ func TestThreadStats(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte("")) - c.Write([]byte("\n")) - c.Write([]byte("foobard}\n")) - c.Write([]byte(ex3)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte("")) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + _, err = c.Write([]byte("foobard}\n")) + require.NoError(t, err) + _, err = c.Write([]byte(ex3)) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.Wait(2) expected := []telegraf.Metric{ @@ -160,9 +167,11 @@ func TestSuricataInvalid(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte("sfjiowef")) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte("sfjiowef")) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.WaitError(1) } @@ -199,9 +208,11 @@ func TestSuricataTooLongLine(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(strings.Repeat("X", 20000000))) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte(strings.Repeat("X", 20000000))) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.WaitError(1) } @@ -226,8 +237,9 @@ func TestSuricataEmptyJSON(t *testing.T) { if err != nil { log.Println(err) } - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.WaitError(1) } @@ -251,15 +263,19 @@ func TestSuricataDisconnectSocket(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(ex2)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte(ex2)) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) c, err = net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(ex3)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte(ex3)) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.Wait(2) } diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go index 83d752ff16f8c..fc5d67d6a064a 100644 --- a/plugins/inputs/synproxy/synproxy_test.go +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -59,6 +59,8 @@ func TestSynproxyFileInvalidHex(t *testing.T) { func TestNoSynproxyFile(t *testing.T) { tmpfile := makeFakeSynproxyFile([]byte(synproxyFileNormal)) // Remove file to generate "no such file" error + // Ignore errors if file does not yet exist + //nolint:errcheck,revive os.Remove(tmpfile) k := Synproxy{ diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go index b71ddfee1a762..9ec62238a17b0 100644 --- a/plugins/inputs/syslog/nontransparent_test.go +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -158,12 +158,14 @@ func testStrictNonTransparent(t *testing.T, protocol string, address string, wan require.NoError(t, e) config.ServerName = "localhost" conn, err = tls.Dial(protocol, address, config) + require.NotNil(t, conn) + require.NoError(t, err) } else { conn, err = net.Dial(protocol, address) + require.NotNil(t, conn) + require.NoError(t, err) defer conn.Close() } - require.NotNil(t, conn) - require.NoError(t, err) // Clear acc.ClearMetrics() diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index 199c380601955..2f09822156a08 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -358,12 +358,14 @@ func testStrictOctetCounting(t *testing.T, protocol string, address string, want require.NoError(t, e) config.ServerName = "localhost" conn, err = tls.Dial(protocol, address, config) + require.NotNil(t, conn) + require.NoError(t, err) } else { conn, err = net.Dial(protocol, address) + require.NotNil(t, conn) + require.NoError(t, err) defer conn.Close() } - require.NotNil(t, conn) - require.NoError(t, err) // Clear acc.ClearMetrics() diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index 2a6d937fb288e..4e4a5a2528834 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -294,7 +294,8 @@ func TestBestEffort_unixgram(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unixgram.sock") - os.Create(sock) + _, err = os.Create(sock) + require.NoError(t, err) testRFC5426(t, "unixgram", sock, true) } @@ -307,7 +308,8 @@ func TestStrict_unixgram(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unixgram.sock") - os.Create(sock) + _, err = os.Create(sock) + require.NoError(t, err) testRFC5426(t, "unixgram", sock, false) } diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 17b9b77a52c4f..2bae730fb6e08 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -141,6 +141,8 @@ func (s *Syslog) Start(acc telegraf.Accumulator) error { } if scheme == "unix" || scheme == "unixpacket" || scheme == "unixgram" { + // Accept success and failure in case the file does not exist + //nolint:errcheck,revive os.Remove(s.Address) } @@ -183,6 +185,8 @@ func (s *Syslog) Stop() { defer s.mu.Unlock() if s.Closer != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.Close() } s.wg.Wait() @@ -269,7 +273,9 @@ func (s *Syslog) listenStream(acc telegraf.Accumulator) { s.connectionsMu.Lock() if s.MaxConnections > 0 && len(s.connections) >= s.MaxConnections { s.connectionsMu.Unlock() - conn.Close() + if err := conn.Close(); err != nil { + acc.AddError(err) + } continue } s.connections[conn.RemoteAddr().String()] = conn @@ -284,7 +290,9 @@ func (s *Syslog) listenStream(acc telegraf.Accumulator) { s.connectionsMu.Lock() for _, c := range s.connections { - c.Close() + if err := c.Close(); err != nil { + acc.AddError(err) + } } s.connectionsMu.Unlock() } @@ -298,6 +306,8 @@ func (s *Syslog) removeConnection(c net.Conn) { func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { defer func() { s.removeConnection(conn) + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() }() @@ -306,7 +316,9 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { emit := func(r *syslog.Result) { s.store(*r, acc) if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { - conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) + if err := conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)); err != nil { + acc.AddError(fmt.Errorf("setting read deadline failed: %v", err)) + } } } @@ -331,7 +343,9 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { p.Parse(conn) if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { - conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) + if err := conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)); err != nil { + acc.AddError(fmt.Errorf("setting read deadline failed: %v", err)) + } } } @@ -426,7 +440,9 @@ type unixCloser struct { func (uc unixCloser) Close() error { err := uc.closer.Close() - os.Remove(uc.path) // ignore error + // Accept success and failure in case the file does not exist + //nolint:errcheck,revive + os.Remove(uc.path) return err } diff --git a/plugins/inputs/sysstat/sysstat_test.go b/plugins/inputs/sysstat/sysstat_test.go index 0ef97f0e7c999..1766130391bbb 100644 --- a/plugins/inputs/sysstat/sysstat_test.go +++ b/plugins/inputs/sysstat/sysstat_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var s = Sysstat{ @@ -260,7 +261,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- sadf -p -- -p -C tmpFile // it returns mockData["C"] output. -func TestHelperProcess(_ *testing.T) { +func TestHelperProcess(t *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } @@ -303,7 +304,8 @@ dell-xps 5 2016-03-25 16:18:10 UTC sdb %util 0.30 switch path.Base(cmd) { case "sadf": - fmt.Fprint(os.Stdout, mockData[args[3]]) + _, err := fmt.Fprint(os.Stdout, mockData[args[3]]) + require.NoError(t, err) default: } // some code here to check arguments perhaps? diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go index 32747cca20314..ded0e8ba18a22 100644 --- a/plugins/inputs/system/system.go +++ b/plugins/inputs/system/system.go @@ -86,6 +86,8 @@ func formatUptime(uptime uint64) string { if days > 1 { s = "s" } + // This will always succeed, so skip checking the error + //nolint:errcheck,revive fmt.Fprintf(w, "%d day%s, ", days, s) } @@ -94,8 +96,12 @@ func formatUptime(uptime uint64) string { hours %= 24 minutes %= 60 + // This will always succeed, so skip checking the error + //nolint:errcheck,revive fmt.Fprintf(w, "%2d:%02d", hours, minutes) + // This will always succeed, so skip checking the error + //nolint:errcheck,revive w.Flush() return buf.String() } diff --git a/plugins/inputs/tail/multiline.go b/plugins/inputs/tail/multiline.go index 7a254c1bf9676..58a9b9e1e588c 100644 --- a/plugins/inputs/tail/multiline.go +++ b/plugins/inputs/tail/multiline.go @@ -60,6 +60,8 @@ func (m *Multiline) IsEnabled() bool { func (m *Multiline) ProcessLine(text string, buffer *bytes.Buffer) string { if m.matchString(text) { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive buffer.WriteString(text) return "" } @@ -67,12 +69,16 @@ func (m *Multiline) ProcessLine(text string, buffer *bytes.Buffer) string { if m.config.MatchWhichLine == Previous { previousText := buffer.String() buffer.Reset() - buffer.WriteString(text) + if _, err := buffer.WriteString(text); err != nil { + return "" + } text = previousText } else { // Next if buffer.Len() > 0 { - buffer.WriteString(text) + if _, err := buffer.WriteString(text); err != nil { + return "" + } text = buffer.String() buffer.Reset() } diff --git a/plugins/inputs/tail/multiline_test.go b/plugins/inputs/tail/multiline_test.go index 6db50dc048b99..44bfafb2ba25f 100644 --- a/plugins/inputs/tail/multiline_test.go +++ b/plugins/inputs/tail/multiline_test.go @@ -103,7 +103,8 @@ func TestMultilineFlush(t *testing.T) { m, err := c.NewMultiline() assert.NoError(t, err, "Configuration was OK.") var buffer bytes.Buffer - buffer.WriteString("foo") + _, err = buffer.WriteString("foo") + assert.NoError(t, err) text := m.Flush(&buffer) @@ -205,31 +206,30 @@ func TestMultiLineMatchStringWithInvertTrue(t *testing.T) { func TestMultilineWhat(t *testing.T) { var w1 MultilineMatchWhichLine - w1.UnmarshalTOML([]byte(`"previous"`)) + assert.NoError(t, w1.UnmarshalTOML([]byte(`"previous"`))) assert.Equal(t, Previous, w1) var w2 MultilineMatchWhichLine - w2.UnmarshalTOML([]byte(`previous`)) + assert.NoError(t, w2.UnmarshalTOML([]byte(`previous`))) assert.Equal(t, Previous, w2) var w3 MultilineMatchWhichLine - w3.UnmarshalTOML([]byte(`'previous'`)) + assert.NoError(t, w3.UnmarshalTOML([]byte(`'previous'`))) assert.Equal(t, Previous, w3) var w4 MultilineMatchWhichLine - w4.UnmarshalTOML([]byte(`"next"`)) + assert.NoError(t, w4.UnmarshalTOML([]byte(`"next"`))) assert.Equal(t, Next, w4) var w5 MultilineMatchWhichLine - w5.UnmarshalTOML([]byte(`next`)) + assert.NoError(t, w5.UnmarshalTOML([]byte(`next`))) assert.Equal(t, Next, w5) var w6 MultilineMatchWhichLine - w6.UnmarshalTOML([]byte(`'next'`)) + assert.NoError(t, w6.UnmarshalTOML([]byte(`'next'`))) assert.Equal(t, Next, w6) var w7 MultilineMatchWhichLine - err := w7.UnmarshalTOML([]byte(`nope`)) + assert.Error(t, w7.UnmarshalTOML([]byte(`nope`))) assert.Equal(t, MultilineMatchWhichLine(-1), w7) - assert.Error(t, err) } diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 99090f70d67a8..0d8460a251a72 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -60,7 +60,7 @@ func TestTailBadLine(t *testing.T) { _, err = tmpfile.WriteString("cpu usage_idle=100\n") require.NoError(t, err) - tmpfile.Close() + require.NoError(t, tmpfile.Close()) buf := &bytes.Buffer{} log.SetOutput(buf) @@ -91,7 +91,7 @@ func TestTailDosLineEndings(t *testing.T) { defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n") require.NoError(t, err) - tmpfile.Close() + require.NoError(t, tmpfile.Close()) tt := NewTestTail() tt.Log = testutil.Logger{} @@ -295,7 +295,7 @@ cpu,42 cpu,42 `) require.NoError(t, err) - tmpfile.Close() + require.NoError(t, tmpfile.Close()) plugin := NewTestTail() plugin.Log = testutil.Logger{} @@ -352,7 +352,7 @@ func TestMultipleMetricsOnFirstLine(t *testing.T) { [{"time_idle": 42}, {"time_idle": 42}] `) require.NoError(t, err) - tmpfile.Close() + require.NoError(t, tmpfile.Close()) plugin := NewTestTail() plugin.Log = testutil.Logger{} diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go index 53297c4a68fb8..aedaa7276b41e 100644 --- a/plugins/inputs/tcp_listener/tcp_listener.go +++ b/plugins/inputs/tcp_listener/tcp_listener.go @@ -133,6 +133,8 @@ func (t *TCPListener) Stop() { t.Lock() defer t.Unlock() close(t.done) + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive t.listener.Close() // Close all open TCP connections @@ -146,6 +148,8 @@ func (t *TCPListener) Stop() { } t.cleanup.Unlock() for _, conn := range conns { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() } @@ -155,18 +159,19 @@ func (t *TCPListener) Stop() { } // tcpListen listens for incoming TCP connections. -func (t *TCPListener) tcpListen() error { +func (t *TCPListener) tcpListen() { defer t.wg.Done() for { select { case <-t.done: - return nil + return default: // Accept connection: conn, err := t.listener.AcceptTCP() if err != nil { - return err + t.Log.Errorf("accepting TCP connection failed: %v", err) + return } select { @@ -188,9 +193,11 @@ func (t *TCPListener) tcpListen() error { // refuser refuses a TCP connection func (t *TCPListener) refuser(conn *net.TCPConn) { // Tell the connection why we are closing. + //nolint:errcheck,revive fmt.Fprintf(conn, "Telegraf maximum concurrent TCP connections (%d)"+ " reached, closing.\nYou may want to increase max_tcp_connections in"+ " the Telegraf tcp listener configuration.\n", t.MaxTCPConnections) + //nolint:errcheck,revive conn.Close() t.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr()) t.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections") @@ -203,7 +210,9 @@ func (t *TCPListener) handler(conn *net.TCPConn, id string) { // connection cleanup function defer func() { t.wg.Done() - conn.Close() + if err := conn.Close(); err != nil { + t.acc.AddError(err) + } // Add one connection potential back to channel when this one closes t.accept <- true t.forget(id) diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index d6781b55020c3..9203318aff73e 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -56,22 +56,20 @@ func BenchmarkTCP(b *testing.B) { // send multiple messages to socket for n := 0; n < b.N; n++ { - err := listener.Start(acc) - if err != nil { - panic(err) - } + require.NoError(b, listener.Start(acc)) conn, err := net.Dial("tcp", "127.0.0.1:8198") - if err != nil { - panic(err) - } + require.NoError(b, err) for i := 0; i < 100000; i++ { - fmt.Fprintf(conn, testMsg) + _, err := fmt.Fprint(conn, testMsg) + require.NoError(b, err) } - conn.(*net.TCPConn).CloseWrite() + require.NoError(b, conn.(*net.TCPConn).CloseWrite()) // wait for all 100,000 metrics to be processed buf := []byte{0} - conn.Read(buf) // will EOF when completed + // will EOF when completed + _, err = conn.Read(buf) + require.NoError(b, err) listener.Stop() } } @@ -87,15 +85,15 @@ func TestHighTrafficTCP(t *testing.T) { acc := &testutil.Accumulator{} // send multiple messages to socket - err := listener.Start(acc) - require.NoError(t, err) + require.NoError(t, listener.Start(acc)) conn, err := net.Dial("tcp", "127.0.0.1:8199") require.NoError(t, err) for i := 0; i < 100000; i++ { - fmt.Fprintf(conn, testMsg) + _, err := fmt.Fprint(conn, testMsg) + require.NoError(t, err) } - conn.(*net.TCPConn).CloseWrite() + require.NoError(t, conn.(*net.TCPConn).CloseWrite()) buf := []byte{0} _, err = conn.Read(buf) assert.Equal(t, err, io.EOF) @@ -121,7 +119,8 @@ func TestConnectTCP(t *testing.T) { require.NoError(t, err) // send single message to socket - fmt.Fprintf(conn, testMsg) + _, err = fmt.Fprint(conn, testMsg) + require.NoError(t, err) acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, @@ -129,7 +128,8 @@ func TestConnectTCP(t *testing.T) { ) // send multiple messages to socket - fmt.Fprintf(conn, testMsgs) + _, err = fmt.Fprint(conn, testMsgs) + require.NoError(t, err) acc.Wait(6) hostTags := []string{"server02", "server03", "server04", "server05", "server06"} @@ -156,17 +156,18 @@ func TestConcurrentConns(t *testing.T) { defer listener.Stop() _, err := net.Dial("tcp", "127.0.0.1:8195") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8195") - assert.NoError(t, err) + require.NoError(t, err) // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8195") - assert.NoError(t, err) - net.Dial("tcp", "127.0.0.1:8195") + require.NoError(t, err) + _, err = net.Dial("tcp", "127.0.0.1:8195") + require.NoError(t, err) buf := make([]byte, 1500) n, err := conn.Read(buf) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "Telegraf maximum concurrent TCP connections (2) reached, closing.\n"+ "You may want to increase max_tcp_connections in"+ @@ -192,15 +193,16 @@ func TestConcurrentConns1(t *testing.T) { defer listener.Stop() _, err := net.Dial("tcp", "127.0.0.1:8196") - assert.NoError(t, err) + require.NoError(t, err) // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8196") - assert.NoError(t, err) - net.Dial("tcp", "127.0.0.1:8196") + require.NoError(t, err) + _, err = net.Dial("tcp", "127.0.0.1:8196") + require.NoError(t, err) buf := make([]byte, 1500) n, err := conn.Read(buf) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "Telegraf maximum concurrent TCP connections (1) reached, closing.\n"+ "You may want to increase max_tcp_connections in"+ @@ -225,9 +227,9 @@ func TestCloseConcurrentConns(t *testing.T) { require.NoError(t, listener.Start(acc)) _, err := net.Dial("tcp", "127.0.0.1:8195") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8195") - assert.NoError(t, err) + require.NoError(t, err) listener.Stop() } @@ -245,7 +247,7 @@ func TestRunParser(t *testing.T) { go listener.tcpParser() in <- testmsg - listener.Gather(&acc) + require.NoError(t, listener.Gather(&acc)) acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", @@ -293,7 +295,7 @@ func TestRunParserGraphiteMsg(t *testing.T) { go listener.tcpParser() in <- testmsg - listener.Gather(&acc) + require.NoError(t, listener.Gather(&acc)) acc.Wait(1) acc.AssertContainsFields(t, "cpu_load_graphite", @@ -316,7 +318,7 @@ func TestRunParserJSONMsg(t *testing.T) { go listener.tcpParser() in <- testmsg - listener.Gather(&acc) + require.NoError(t, listener.Gather(&acc)) acc.Wait(1) acc.AssertContainsFields(t, "udp_json_test", diff --git a/plugins/inputs/teamspeak/teamspeak.go b/plugins/inputs/teamspeak/teamspeak.go index ed565f086fa78..e6861f03e25af 100644 --- a/plugins/inputs/teamspeak/teamspeak.go +++ b/plugins/inputs/teamspeak/teamspeak.go @@ -55,7 +55,10 @@ func (ts *Teamspeak) Gather(acc telegraf.Accumulator) error { } for _, vserver := range ts.VirtualServers { - ts.client.Use(vserver) + if err := ts.client.Use(vserver); err != nil { + ts.connected = false + return err + } sm, err := ts.client.Server.Info() if err != nil { diff --git a/plugins/inputs/teamspeak/teamspeak_test.go b/plugins/inputs/teamspeak/teamspeak_test.go index 5faa5d795fe97..98fc5194849c7 100644 --- a/plugins/inputs/teamspeak/teamspeak_test.go +++ b/plugins/inputs/teamspeak/teamspeak_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) const welcome = `Welcome to the TeamSpeak 3 ServerQuery interface, type "help" for a list of commands and "help " for information on a specific command.` @@ -22,9 +23,7 @@ var cmd = map[string]string{ func TestGather(t *testing.T) { l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal("Initializing test server failed") - } + require.NoError(t, err, "Initializing test server failed") defer l.Close() go handleRequest(l, t) @@ -36,11 +35,7 @@ func TestGather(t *testing.T) { Password: "test", VirtualServers: []int{1}, } - err = testConfig.Gather(&acc) - - if err != nil { - t.Fatalf("Gather returned error. Error: %s\n", err) - } + require.NoError(t, testConfig.Gather(&acc), "Gather returned error. Error: %s\n", err) fields := map[string]interface{}{ "uptime": int(148), @@ -59,10 +54,9 @@ func TestGather(t *testing.T) { func handleRequest(l net.Listener, t *testing.T) { c, err := l.Accept() - if err != nil { - t.Fatal("Error accepting test connection") - } - c.Write([]byte("TS3\n\r" + welcome + "\n\r")) + require.NoError(t, err, "Error accepting test connection") + _, err = c.Write([]byte("TS3\n\r" + welcome + "\n\r")) + require.NoError(t, err) for { msg, _, err := bufio.NewReader(c).ReadLine() if err != nil { @@ -73,16 +67,21 @@ func handleRequest(l net.Listener, t *testing.T) { if exists { switch r { case "": - c.Write([]byte(ok + "\n\r")) + _, err = c.Write([]byte(ok + "\n\r")) + require.NoError(t, err) case "quit": - c.Write([]byte(ok + "\n\r")) - c.Close() + _, err = c.Write([]byte(ok + "\n\r")) + require.NoError(t, err) + err = c.Close() + require.NoError(t, err) return default: - c.Write([]byte(r + "\n\r" + ok + "\n\r")) + _, err = c.Write([]byte(r + "\n\r" + ok + "\n\r")) + require.NoError(t, err) } } else { - c.Write([]byte(errorMsg + "\n\r")) + _, err = c.Write([]byte(errorMsg + "\n\r")) + require.NoError(t, err) } } } diff --git a/plugins/inputs/tengine/tengine.go b/plugins/inputs/tengine/tengine.go index 774abff991edf..846a5411dba33 100644 --- a/plugins/inputs/tengine/tengine.go +++ b/plugins/inputs/tengine/tengine.go @@ -311,7 +311,8 @@ func (n *Tengine) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { acc.AddFields("tengine", fields, tags) } - return nil + // Return the potential error of the loop-read + return err } // Get tag(s) for the tengine plugin diff --git a/plugins/inputs/tengine/tengine_test.go b/plugins/inputs/tengine/tengine_test.go index 960998e6e16ee..d91c97465aff1 100644 --- a/plugins/inputs/tengine/tengine_test.go +++ b/plugins/inputs/tengine/tengine_test.go @@ -28,8 +28,8 @@ func TestTengineTags(t *testing.T) { func TestTengineGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - rsp := tengineSampleResponse - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, tengineSampleResponse) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/tomcat/tomcat.go b/plugins/inputs/tomcat/tomcat.go index 560594ce5a7b9..60081e1295b6c 100644 --- a/plugins/inputs/tomcat/tomcat.go +++ b/plugins/inputs/tomcat/tomcat.go @@ -131,7 +131,9 @@ func (s *Tomcat) Gather(acc telegraf.Accumulator) error { } var status TomcatStatus - xml.NewDecoder(resp.Body).Decode(&status) + if err := xml.NewDecoder(resp.Body).Decode(&status); err != nil { + return err + } // add tomcat_jvm_memory measurements tcm := map[string]interface{}{ diff --git a/plugins/inputs/tomcat/tomcat_test.go b/plugins/inputs/tomcat/tomcat_test.go index 5e206ab835583..e22cb9c88c874 100644 --- a/plugins/inputs/tomcat/tomcat_test.go +++ b/plugins/inputs/tomcat/tomcat_test.go @@ -40,7 +40,8 @@ var tomcatStatus8 = ` func TestHTTPTomcat8(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, tomcatStatus8) + _, err := fmt.Fprintln(w, tomcatStatus8) + require.NoError(t, err) })) defer ts.Close() @@ -51,8 +52,7 @@ func TestHTTPTomcat8(t *testing.T) { } var acc testutil.Accumulator - err := tc.Gather(&acc) - require.NoError(t, err) + require.NoError(t, tc.Gather(&acc)) // tomcat_jvm_memory jvmMemoryFields := map[string]interface{}{ @@ -112,7 +112,8 @@ var tomcatStatus6 = ` func TestHTTPTomcat6(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, tomcatStatus6) + _, err := fmt.Fprintln(w, tomcatStatus6) + require.NoError(t, err) })) defer ts.Close() @@ -123,8 +124,7 @@ func TestHTTPTomcat6(t *testing.T) { } var acc testutil.Accumulator - err := tc.Gather(&acc) - require.NoError(t, err) + require.NoError(t, tc.Gather(&acc)) // tomcat_jvm_memory jvmMemoryFields := map[string]interface{}{ diff --git a/plugins/inputs/trig/trig_test.go b/plugins/inputs/trig/trig_test.go index 27bee81dde2e1..de4fa07886f05 100644 --- a/plugins/inputs/trig/trig_test.go +++ b/plugins/inputs/trig/trig_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestTrig(t *testing.T) { @@ -18,7 +19,7 @@ func TestTrig(t *testing.T) { sine := math.Sin((i*math.Pi)/5.0) * s.Amplitude cosine := math.Cos((i*math.Pi)/5.0) * s.Amplitude - s.Gather(&acc) + require.NoError(t, s.Gather(&acc)) fields := make(map[string]interface{}) fields["sine"] = sine diff --git a/plugins/inputs/twemproxy/twemproxy_test.go b/plugins/inputs/twemproxy/twemproxy_test.go index dd79048e0a5f5..0da1694d557d8 100644 --- a/plugins/inputs/twemproxy/twemproxy_test.go +++ b/plugins/inputs/twemproxy/twemproxy_test.go @@ -67,8 +67,12 @@ func mockTwemproxyServer() (net.Listener, error) { go func(l net.Listener) { for { conn, _ := l.Accept() - conn.Write([]byte(sampleStats)) - conn.Close() + if _, err := conn.Write([]byte(sampleStats)); err != nil { + return + } + if err := conn.Close(); err != nil { + return + } break } }(listener) diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index 7222f3b1fb6af..07cd79cb2a610 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -110,7 +110,9 @@ func (u *UDPListener) Start(acc telegraf.Accumulator) error { u.in = make(chan []byte, u.AllowedPendingMessages) u.done = make(chan struct{}) - u.udpListen() + if err := u.udpListen(); err != nil { + return err + } u.wg.Add(1) go u.udpParser() @@ -124,6 +126,8 @@ func (u *UDPListener) Stop() { defer u.Unlock() close(u.done) u.wg.Wait() + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive u.listener.Close() close(u.in) u.Log.Infof("Stopped service on %q", u.ServiceAddress) @@ -162,7 +166,9 @@ func (u *UDPListener) udpListenLoop() { case <-u.done: return default: - u.listener.SetReadDeadline(time.Now().Add(time.Second)) + if err := u.listener.SetReadDeadline(time.Now().Add(time.Second)); err != nil { + u.Log.Error("setting read-deadline failed: " + err.Error()) + } n, _, err := u.listener.ReadFromUDP(buf) if err != nil { diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index b6c0b5f09b082..6bd5f23309e76 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -93,7 +93,8 @@ func TestConnectUDP(t *testing.T) { require.NoError(t, err) // send single message to socket - fmt.Fprintf(conn, testMsg) + _, err = fmt.Fprint(conn, testMsg) + require.NoError(t, err) acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, @@ -101,7 +102,8 @@ func TestConnectUDP(t *testing.T) { ) // send multiple messages to socket - fmt.Fprintf(conn, testMsgs) + _, err = fmt.Fprint(conn, testMsgs) + require.NoError(t, err) acc.Wait(6) hostTags := []string{"server02", "server03", "server04", "server05", "server06"} @@ -127,7 +129,7 @@ func TestRunParser(t *testing.T) { go listener.udpParser() in <- testmsg - listener.Gather(&acc) + require.NoError(t, listener.Gather(&acc)) acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", @@ -176,7 +178,7 @@ func TestRunParserGraphiteMsg(t *testing.T) { go listener.udpParser() in <- testmsg - listener.Gather(&acc) + require.NoError(t, listener.Gather(&acc)) acc.Wait(1) acc.AssertContainsFields(t, "cpu_load_graphite", @@ -200,7 +202,7 @@ func TestRunParserJSONMsg(t *testing.T) { go listener.udpParser() in <- testmsg - listener.Gather(&acc) + require.NoError(t, listener.Gather(&acc)) acc.Wait(1) acc.AssertContainsFields(t, "udp_json_test", diff --git a/plugins/inputs/uwsgi/uwsgi_test.go b/plugins/inputs/uwsgi/uwsgi_test.go index 34581791e022f..80856c5cffa73 100644 --- a/plugins/inputs/uwsgi/uwsgi_test.go +++ b/plugins/inputs/uwsgi/uwsgi_test.go @@ -122,7 +122,7 @@ func TestBasic(t *testing.T) { Servers: []string{fakeServer.URL + "/"}, } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.Equal(t, 0, len(acc.Errors)) } @@ -153,7 +153,7 @@ func TestInvalidJSON(t *testing.T) { Servers: []string{fakeServer.URL + "/"}, } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.Equal(t, 1, len(acc.Errors)) } @@ -162,7 +162,7 @@ func TestHttpError(t *testing.T) { Servers: []string{"http://novalidurladress/"}, } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.Equal(t, 1, len(acc.Errors)) } @@ -171,7 +171,7 @@ func TestTcpError(t *testing.T) { Servers: []string{"tcp://novalidtcpadress/"}, } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.Equal(t, 1, len(acc.Errors)) } @@ -180,6 +180,6 @@ func TestUnixSocketError(t *testing.T) { Servers: []string{"unix:///novalidunixsocket"}, } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.Equal(t, 1, len(acc.Errors)) } diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index ee89105363235..2642782fe806d 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -26,7 +26,7 @@ func TestGather(t *testing.T) { run: fakeVarnishStat(smOutput), Stats: []string{"*"}, } - v.Gather(acc) + assert.NoError(t, v.Gather(acc)) acc.HasMeasurement("varnish") for tag, fields := range parsedSmOutput { @@ -42,9 +42,8 @@ func TestParseFullOutput(t *testing.T) { run: fakeVarnishStat(fullOutput), Stats: []string{"*"}, } - err := v.Gather(acc) + assert.NoError(t, v.Gather(acc)) - assert.NoError(t, err) acc.HasMeasurement("varnish") flat := flatten(acc.Metrics) assert.Len(t, acc.Metrics, 6) @@ -57,9 +56,8 @@ func TestFilterSomeStats(t *testing.T) { run: fakeVarnishStat(fullOutput), Stats: []string{"MGT.*", "VBE.*"}, } - err := v.Gather(acc) + assert.NoError(t, v.Gather(acc)) - assert.NoError(t, err) acc.HasMeasurement("varnish") flat := flatten(acc.Metrics) assert.Len(t, acc.Metrics, 2) @@ -80,9 +78,8 @@ func TestFieldConfig(t *testing.T) { run: fakeVarnishStat(fullOutput), Stats: strings.Split(fieldCfg, ","), } - err := v.Gather(acc) + assert.NoError(t, v.Gather(acc)) - assert.NoError(t, err) acc.HasMeasurement("varnish") flat := flatten(acc.Metrics) assert.Equal(t, expected, len(flat)) @@ -94,7 +91,10 @@ func flatten(metrics []*testutil.Metric) map[string]interface{} { for _, m := range metrics { buf := &bytes.Buffer{} for k, v := range m.Tags { - buf.WriteString(fmt.Sprintf("%s=%s", k, v)) + _, err := buf.WriteString(fmt.Sprintf("%s=%s", k, v)) + if err != nil { + return nil + } } for k, v := range m.Fields { flat[fmt.Sprintf("%s %s", buf.String(), k)] = v diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go index e49bf80f33fe5..8414ad8d81285 100644 --- a/plugins/inputs/vsphere/finder.go +++ b/plugins/inputs/vsphere/finder.go @@ -99,6 +99,8 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, if err != nil { return err } + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive defer v.Destroy(ctx) var content []types.ObjectContent @@ -117,6 +119,8 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, if err != nil { return err } + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive defer v2.Destroy(ctx) err = v2.Retrieve(ctx, []string{resType}, fields, &content) if err != nil { diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index e0bcaac1c8eca..3dcde06f5e583 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -224,9 +224,7 @@ func TestParseConfig(t *testing.T) { v := VSphere{} c := v.SampleConfig() p := regexp.MustCompile("\n#") - fmt.Printf("Source=%s", p.ReplaceAllLiteralString(c, "\n")) c = configHeader + "\n[[inputs.vsphere]]\n" + p.ReplaceAllLiteralString(c, "\n") - fmt.Printf("Source=%s", c) tab, err := toml.Parse([]byte(c)) require.NoError(t, err) require.NotNil(t, tab) @@ -512,7 +510,8 @@ func testCollection(t *testing.T, excludeClusters bool) { // We have to follow the host parent path to locate a cluster. Look up the host! finder := Finder{client} var hosts []mo.HostSystem - finder.Find(context.Background(), "HostSystem", "/**/"+hostName, &hosts) + err := finder.Find(context.Background(), "HostSystem", "/**/"+hostName, &hosts) + require.NoError(t, err) require.NotEmpty(t, hosts) hostMoid = hosts[0].Reference().Value hostCache[hostName] = hostMoid diff --git a/plugins/inputs/webhooks/github/github_webhooks.go b/plugins/inputs/webhooks/github/github_webhooks.go index 0bb792bf5df08..5febb80afb6bb 100644 --- a/plugins/inputs/webhooks/github/github_webhooks.go +++ b/plugins/inputs/webhooks/github/github_webhooks.go @@ -126,7 +126,9 @@ func checkSignature(secret string, data []byte, signature string) bool { func generateSignature(secret string, data []byte) string { mac := hmac.New(sha1.New, []byte(secret)) - mac.Write(data) + if _, err := mac.Write(data); err != nil { + return err.Error() + } result := mac.Sum(nil) return "sha1=" + hex.EncodeToString(result) } diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go index 1fedca96ca4a9..a6f02beffd5d8 100644 --- a/plugins/inputs/webhooks/webhooks.go +++ b/plugins/inputs/webhooks/webhooks.go @@ -128,6 +128,8 @@ func (wb *Webhooks) Start(acc telegraf.Accumulator) error { } func (wb *Webhooks) Stop() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive wb.srv.Close() wb.Log.Infof("Stopping the Webhooks service") } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 35e41018d82a4..4aafd3cb4090b 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -31,15 +31,12 @@ func TestGatherRemoteIntegration(t *testing.T) { t.Skip("Skipping network-dependent test due to race condition when test-all") tmpfile, err := ioutil.TempFile("", "example") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer os.Remove(tmpfile.Name()) - if _, err := tmpfile.Write([]byte(pki.ReadServerCert())); err != nil { - t.Fatal(err) - } + _, err = tmpfile.Write([]byte(pki.ReadServerCert())) + require.NoError(t, err) tests := []struct { name string @@ -61,9 +58,7 @@ func TestGatherRemoteIntegration(t *testing.T) { } pair, err := tls.X509KeyPair([]byte(pki.ReadServerCert()), []byte(pki.ReadServerKey())) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) config := &tls.Config{ InsecureSkipVerify: true, @@ -80,16 +75,12 @@ func TestGatherRemoteIntegration(t *testing.T) { } ln, err := tls.Listen("tcp", ":0", config) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer ln.Close() go func() { sconn, err := ln.Accept() - if err != nil { - return - } + require.NoError(t, err) if test.close { sconn.Close() } @@ -100,9 +91,7 @@ func TestGatherRemoteIntegration(t *testing.T) { if test.noshake { srv.Close() } - if err := srv.Handshake(); err != nil { - return - } + require.NoError(t, srv.Handshake()) }() if test.server == "" { @@ -113,7 +102,7 @@ func TestGatherRemoteIntegration(t *testing.T) { Sources: []string{test.server}, Timeout: internal.Duration{Duration: test.timeout}, } - sc.Init() + require.NoError(t, sc.Init()) sc.InsecureSkipVerify = true testErr := false @@ -159,43 +148,28 @@ func TestGatherLocal(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { f, err := ioutil.TempFile("", "x509_cert") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) _, err = f.Write([]byte(test.content)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if runtime.GOOS != "windows" { - err = f.Chmod(test.mode) - if err != nil { - t.Fatal(err) - } + require.NoError(t, f.Chmod(test.mode)) } - err = f.Close() - if err != nil { - t.Fatal(err) - } + require.NoError(t, f.Close()) defer os.Remove(f.Name()) sc := X509Cert{ Sources: []string{f.Name()}, } - sc.Init() - - error := false + require.NoError(t, sc.Init()) acc := testutil.Accumulator{} err = sc.Gather(&acc) - if len(acc.Errors) > 0 { - error = true - } - if error != test.error { + if (len(acc.Errors) > 0) != test.error { t.Errorf("%s", err) } }) @@ -206,30 +180,22 @@ func TestTags(t *testing.T) { cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert()) f, err := ioutil.TempFile("", "x509_cert") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) _, err = f.Write([]byte(cert)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - err = f.Close() - if err != nil { - t.Fatal(err) - } + require.NoError(t, f.Close()) defer os.Remove(f.Name()) sc := X509Cert{ Sources: []string{f.Name()}, } - sc.Init() + require.NoError(t, sc.Init()) acc := testutil.Accumulator{} - err = sc.Gather(&acc) - require.NoError(t, err) + require.NoError(t, sc.Gather(&acc)) assert.True(t, acc.HasMeasurement("x509_cert")) @@ -271,36 +237,23 @@ func TestGatherChain(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { f, err := ioutil.TempFile("", "x509_cert") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) _, err = f.Write([]byte(test.content)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - err = f.Close() - if err != nil { - t.Fatal(err) - } + require.NoError(t, f.Close()) defer os.Remove(f.Name()) sc := X509Cert{ Sources: []string{f.Name()}, } - sc.Init() - - error := false + require.NoError(t, sc.Init()) acc := testutil.Accumulator{} err = sc.Gather(&acc) - if err != nil { - error = true - } - - if error != test.error { + if (err != nil) != test.error { t.Errorf("%s", err) } }) @@ -309,7 +262,7 @@ func TestGatherChain(t *testing.T) { func TestStrings(t *testing.T) { sc := X509Cert{} - sc.Init() + require.NoError(t, sc.Init()) tests := []struct { name string @@ -338,11 +291,10 @@ func TestGatherCertIntegration(t *testing.T) { m := &X509Cert{ Sources: []string{"https://www.influxdata.com:443"}, } - m.Init() + require.NoError(t, m.Init()) var acc testutil.Accumulator - err := m.Gather(&acc) - require.NoError(t, err) + require.NoError(t, m.Gather(&acc)) assert.True(t, acc.HasMeasurement("x509_cert")) } @@ -356,11 +308,10 @@ func TestGatherCertMustNotTimeout(t *testing.T) { Sources: []string{"https://www.influxdata.com:443"}, Timeout: internal.Duration{Duration: duration}, } - m.Init() + require.NoError(t, m.Init()) var acc testutil.Accumulator - err := m.Gather(&acc) - require.NoError(t, err) + require.NoError(t, m.Gather(&acc)) require.Empty(t, acc.Errors) assert.True(t, acc.HasMeasurement("x509_cert")) } @@ -387,7 +338,7 @@ func TestServerName(t *testing.T) { ServerName: test.fromCfg, ClientConfig: _tls.ClientConfig{ServerName: test.fromTLS}, } - sc.Init() + require.NoError(t, sc.Init()) u, err := url.Parse(test.url) require.NoError(t, err) actual, err := sc.serverName(u) diff --git a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go index 61c2eda12bd96..3889e2f2cd9ea 100644 --- a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go +++ b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go @@ -51,10 +51,10 @@ func main() { zipkin.HTTPBatchSize(BatchSize), zipkin.HTTPMaxBacklog(MaxBackLog), zipkin.HTTPBatchInterval(time.Duration(BatchTimeInterval)*time.Second)) - defer collector.Close() if err != nil { log.Fatalf("Error initializing zipkin http collector: %v\n", err) } + defer collector.Close() tracer, err := zipkin.NewTracer( zipkin.NewRecorder(collector, false, "127.0.0.1:0", "Trivial")) diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go index dde89570b8969..b26e3d73fa3fd 100644 --- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go +++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go @@ -99,8 +99,6 @@ func jsonToZipkinThrift(jsonRaw []byte) ([]byte, error) { } zspans = append(zspans, spans...) - fmt.Println(spans) - buf := thrift.NewTMemoryBuffer() transport := thrift.NewTBinaryProtocolTransport(buf) diff --git a/plugins/inputs/zipkin/zipkin.go b/plugins/inputs/zipkin/zipkin.go index d0cf9b38dda64..e679de5c47223 100644 --- a/plugins/inputs/zipkin/zipkin.go +++ b/plugins/inputs/zipkin/zipkin.go @@ -125,6 +125,8 @@ func (z *Zipkin) Stop() { defer z.waitGroup.Wait() defer cancel() + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive z.server.Shutdown(ctx) } diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index 29d88dbfdce05..48c00a1d3ef7f 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -122,10 +122,14 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr // Apply deadline to connection deadline, ok := ctx.Deadline() if ok { - c.SetDeadline(deadline) + if err := c.SetDeadline(deadline); err != nil { + return err + } } - fmt.Fprintf(c, "%s\n", "mntr") + if _, err := fmt.Fprintf(c, "%s\n", "mntr"); err != nil { + return err + } rdr := bufio.NewReader(c) scanner := bufio.NewScanner(rdr) From c66ccee46f47717c399ccc0348d17c95d11f477d Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Thu, 8 Apr 2021 20:02:29 +0200 Subject: [PATCH 362/761] Allow to specify nanoseconds to timestamp in Starlark Processor (#9105) --- go.mod | 2 +- go.sum | 4 ++-- plugins/processors/starlark/README.md | 3 ++- .../starlark/testdata/time_timestamp.star | 3 +-- .../testdata/time_timestamp_nanos.star | 22 +++++++++++++++++++ 5 files changed, 28 insertions(+), 6 deletions(-) create mode 100644 plugins/processors/starlark/testdata/time_timestamp_nanos.star diff --git a/go.mod b/go.mod index 6bf359ff70ae2..91ec88fa382a6 100644 --- a/go.mod +++ b/go.mod @@ -124,7 +124,7 @@ require ( github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect - go.starlark.net v0.0.0-20210312235212-74c10e2c17dc + go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/multierr v1.6.0 // indirect golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d diff --git a/go.sum b/go.sum index 2fcbe42fd5fde..d67550751eed1 100644 --- a/go.sum +++ b/go.sum @@ -1134,8 +1134,8 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.starlark.net v0.0.0-20210312235212-74c10e2c17dc h1:pVkptfeOTFfx+zXZo7HEHN3d5LmhatBFvHdm/f2QnpY= -go.starlark.net v0.0.0-20210312235212-74c10e2c17dc/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= +go.starlark.net v0.0.0-20210406145628-7a1108eaa012 h1:4RGobP/iq7S22H0Bb92OEt+M8/cfBQnW+T+a2MC0sQo= +go.starlark.net v0.0.0-20210406145628-7a1108eaa012/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index e30ea506c13f7..6372aedcea3b9 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -230,7 +230,8 @@ def apply(metric): - [scale](/plugins/processors/starlark/testdata/scale.star) - Multiply any field by a number - [time date](/plugins/processors/starlark/testdata/time_date.star) - Parse a date and extract the year, month and day from it. - [time duration](/plugins/processors/starlark/testdata/time_duration.star) - Parse a duration and convert it into a total amount of seconds. -- [time timestamp](/plugins/processors/starlark/testdata/time_timestamp.star) - Filter metrics based on the timestamp. +- [time timestamp](/plugins/processors/starlark/testdata/time_timestamp.star) - Filter metrics based on the timestamp in seconds. +- [time timestamp nanoseconds](/plugins/processors/starlark/testdata/time_timestamp_nanos.star) - Filter metrics based on the timestamp with nanoseconds. - [value filter](/plugins/processors/starlark/testdata/value_filter.star) - Remove a metric based on a field value. - [logging](/plugins/processors/starlark/testdata/logging.star) - Log messages with the logger of Telegraf - [multiple metrics](/plugins/processors/starlark/testdata/multiple_metrics.star) - Return multiple metrics by using [a list](https://docs.bazel.build/versions/master/skylark/lib/list.html) of metrics. diff --git a/plugins/processors/starlark/testdata/time_timestamp.star b/plugins/processors/starlark/testdata/time_timestamp.star index dc1cbaea0296d..73e885b26c3dc 100644 --- a/plugins/processors/starlark/testdata/time_timestamp.star +++ b/plugins/processors/starlark/testdata/time_timestamp.star @@ -1,5 +1,4 @@ -# Example of filtering metrics based on the timestamp. Beware the built-in function from_timestamp -# only supports timestamps in seconds. +# Example of filtering metrics based on the timestamp in seconds. # # Example Input: # time result="KO" 1616020365100400201 diff --git a/plugins/processors/starlark/testdata/time_timestamp_nanos.star b/plugins/processors/starlark/testdata/time_timestamp_nanos.star new file mode 100644 index 0000000000000..d305cb1f22e9f --- /dev/null +++ b/plugins/processors/starlark/testdata/time_timestamp_nanos.star @@ -0,0 +1,22 @@ +# Example of filtering metrics based on the timestamp with nanoseconds. +# +# Example Input: +# time result="KO" 1617900602123455999 +# time result="OK" 1617900602123456789 +# +# Example Output: +# time result="OK" 1617900602123456789 + +load('time.star', 'time') +# loads time.parse_duration(), time.is_valid_timezone(), time.now(), time.time(), +# time.parse_time() and time.from_timestamp() + +def apply(metric): + # 1617900602123457000 nanosec = Thursday, April 8, 2021 16:50:02.123457000 GMT + refDate = time.from_timestamp(1617900602, 123457000) + # 1617900602123455999 nanosec = Thursday, April 8, 2021 16:50:02.123455999 GMT + # 1617900602123456789 nanosec = Thursday, April 8, 2021 16:50:02.123456789 GMT + metric_date = time.from_timestamp(int(metric.time / 1e9), int(metric.time % 1e9)) + # Only keep metrics with a timestamp that is not more than 1 microsecond before the reference date + if refDate - time.parse_duration("1us") < metric_date: + return metric From 9853bf6c54ff18ab0e14a141b7b4580a1e54976a Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Fri, 9 Apr 2021 19:15:04 +0200 Subject: [PATCH 363/761] Get rid of deprecated internal.{Duration,Size,Number} (#8969) --- agent/agent.go | 22 +- cmd/telegraf/telegraf.go | 10 +- config/config.go | 18 +- config/config_test.go | 303 +++++++++++------- config/testdata/azure_monitor.toml | 4 + config/types_test.go | 46 +++ go.sum | 14 + internal/internal.go | 83 ----- internal/internal_test.go | 46 --- internal/snmp/config.go | 6 +- internal/snmp/wrapper.go | 3 +- logger/logger.go | 8 +- logger/logger_test.go | 10 +- plugins/aggregators/final/final.go | 8 +- plugins/aggregators/final/final_test.go | 4 +- plugins/inputs/activemq/activemq.go | 22 +- plugins/inputs/aliyuncms/aliyuncms.go | 61 ++-- plugins/inputs/aliyuncms/aliyuncms_test.go | 38 ++- plugins/inputs/apache/apache.go | 10 +- plugins/inputs/apcupsd/apcupsd.go | 8 +- plugins/inputs/aurora/aurora.go | 18 +- plugins/inputs/beat/beat.go | 8 +- plugins/inputs/burrow/burrow.go | 12 +- plugins/inputs/clickhouse/clickhouse.go | 21 +- plugins/inputs/cloud_pubsub/pubsub.go | 17 +- .../inputs/cloud_pubsub_push/pubsub_push.go | 28 +- .../cloud_pubsub_push/pubsub_push_test.go | 14 +- plugins/inputs/dcos/dcos.go | 12 +- plugins/inputs/docker/docker.go | 16 +- plugins/inputs/docker_log/docker_log.go | 28 +- plugins/inputs/docker_log/docker_log_test.go | 4 +- plugins/inputs/ecs/ecs.go | 8 +- plugins/inputs/elasticsearch/elasticsearch.go | 34 +- plugins/inputs/exec/exec.go | 11 +- plugins/inputs/fibaro/fibaro.go | 8 +- plugins/inputs/filecount/filecount.go | 24 +- plugins/inputs/filecount/filecount_test.go | 14 +- plugins/inputs/fireboard/fireboard.go | 14 +- plugins/inputs/github/github.go | 16 +- plugins/inputs/gnmi/gnmi.go | 22 +- plugins/inputs/gnmi/gnmi_test.go | 12 +- plugins/inputs/http/http.go | 7 +- .../http_listener_v2/http_listener_v2.go | 28 +- .../http_listener_v2/http_listener_v2_test.go | 10 +- plugins/inputs/http_response/http_response.go | 24 +- .../http_response/http_response_test.go | 66 ++-- plugins/inputs/httpjson/httpjson.go | 14 +- plugins/inputs/icinga2/icinga2.go | 12 +- plugins/inputs/influxdb/influxdb.go | 15 +- .../influxdb_listener/influxdb_listener.go | 39 +-- .../influxdb_listener_benchmark_test.go | 6 +- .../influxdb_listener_test.go | 4 +- .../influxdb_v2_listener.go | 17 +- .../influxdb_v2_listener_benchmark_test.go | 6 +- .../influxdb_v2_listener_test.go | 4 +- plugins/inputs/ipmi_sensor/ipmi.go | 9 +- plugins/inputs/ipmi_sensor/ipmi_test.go | 10 +- plugins/inputs/ipset/ipset.go | 13 +- plugins/inputs/ipset/ipset_test.go | 6 +- plugins/inputs/jenkins/jenkins.go | 22 +- plugins/inputs/jenkins/jenkins_test.go | 14 +- plugins/inputs/jolokia/jolokia.go | 16 +- plugins/inputs/jolokia2/jolokia_agent.go | 7 +- plugins/inputs/jolokia2/jolokia_proxy.go | 8 +- .../openconfig_telemetry.go | 30 +- .../openconfig_telemetry_test.go | 4 +- plugins/inputs/kapacitor/kapacitor.go | 8 +- plugins/inputs/kibana/kibana.go | 8 +- plugins/inputs/kube_inventory/kube_state.go | 22 +- plugins/inputs/kubernetes/kubernetes.go | 10 +- plugins/inputs/logstash/logstash.go | 8 +- plugins/inputs/mcrouter/mcrouter.go | 10 +- plugins/inputs/modbus/modbus.go | 42 +-- plugins/inputs/monit/monit.go | 7 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 25 +- plugins/inputs/nats/nats.go | 6 +- plugins/inputs/neptune_apex/neptune_apex.go | 4 +- plugins/inputs/net_response/net_response.go | 20 +- .../inputs/net_response/net_response_test.go | 16 +- plugins/inputs/nginx/nginx.go | 10 +- plugins/inputs/nginx_plus/nginx_plus.go | 12 +- .../inputs/nginx_plus_api/nginx_plus_api.go | 14 +- plugins/inputs/nginx_sts/nginx_sts.go | 12 +- .../nginx_upstream_check.go | 8 +- plugins/inputs/nginx_vts/nginx_vts.go | 12 +- plugins/inputs/nsd/nsd.go | 13 +- plugins/inputs/nsd/nsd_test.go | 9 +- plugins/inputs/nvidia_smi/nvidia_smi.go | 7 +- plugins/inputs/openntpd/openntpd.go | 13 +- plugins/inputs/openntpd/openntpd_test.go | 9 +- plugins/inputs/opensmtpd/opensmtpd.go | 13 +- plugins/inputs/opensmtpd/opensmtpd_test.go | 9 +- .../inputs/openweathermap/openweathermap.go | 26 +- plugins/inputs/pgbouncer/pgbouncer.go | 10 +- plugins/inputs/phpfpm/phpfpm.go | 7 +- plugins/inputs/postgresql/postgresql.go | 10 +- plugins/inputs/postgresql/service.go | 7 +- .../postgresql_extensible.go | 10 +- plugins/inputs/prometheus/prometheus.go | 10 +- plugins/inputs/proxmox/proxmox.go | 3 +- plugins/inputs/proxmox/structs.go | 10 +- plugins/inputs/rabbitmq/rabbitmq.go | 14 +- plugins/inputs/ravendb/ravendb.go | 10 +- .../riemann_listener/riemann_listener.go | 26 +- .../riemann_listener/riemann_listener_test.go | 4 +- plugins/inputs/sensors/sensors.go | 9 +- plugins/inputs/sflow/sflow.go | 10 +- plugins/inputs/smart/smart.go | 37 +-- plugins/inputs/smart/smart_test.go | 46 +-- plugins/inputs/snmp/snmp.go | 6 +- plugins/inputs/snmp/snmp_test.go | 21 +- plugins/inputs/snmp_trap/snmp_trap.go | 15 +- plugins/inputs/snmp_trap/snmp_trap_test.go | 4 +- .../inputs/socket_listener/socket_listener.go | 31 +- .../socket_listener/socket_listener_test.go | 13 +- plugins/inputs/solr/solr.go | 9 +- plugins/inputs/stackdriver/stackdriver.go | 24 +- plugins/inputs/statsd/README.md | 2 +- plugins/inputs/statsd/statsd.go | 12 +- plugins/inputs/statsd/statsd_test.go | 7 +- plugins/inputs/syslog/commons_test.go | 10 +- plugins/inputs/syslog/nontransparent_test.go | 12 +- plugins/inputs/syslog/octetcounting_test.go | 12 +- plugins/inputs/syslog/syslog.go | 33 +- plugins/inputs/sysstat/sysstat.go | 7 +- .../systemd_units/systemd_units_linux.go | 15 +- .../systemd_units/systemd_units_linux_test.go | 4 +- plugins/inputs/tail/multiline.go | 9 +- plugins/inputs/tail/multiline_test.go | 12 +- plugins/inputs/tail/tail.go | 4 +- plugins/inputs/tail/tail_test.go | 21 +- plugins/inputs/tengine/tengine.go | 10 +- plugins/inputs/tomcat/tomcat.go | 8 +- plugins/inputs/unbound/unbound.go | 19 +- plugins/inputs/unbound/unbound_test.go | 3 +- plugins/inputs/uwsgi/uwsgi.go | 14 +- plugins/inputs/varnish/varnish.go | 17 +- plugins/inputs/varnish/varnish_test.go | 6 +- plugins/inputs/vsphere/client.go | 14 +- plugins/inputs/vsphere/endpoint.go | 24 +- plugins/inputs/vsphere/vsphere.go | 10 +- plugins/inputs/vsphere/vsphere_test.go | 6 +- .../win_perf_counters/win_perf_counters.go | 8 +- .../win_perf_counters_test.go | 10 +- plugins/inputs/x509_cert/x509_cert.go | 12 +- plugins/inputs/x509_cert/x509_cert_test.go | 16 +- plugins/inputs/zookeeper/zookeeper.go | 10 +- plugins/outputs/amon/amon.go | 13 +- plugins/outputs/amqp/amqp.go | 7 +- plugins/outputs/amqp/amqp_test.go | 48 +-- .../application_insights.go | 10 +- .../application_insights_test.go | 8 +- .../outputs/azure_monitor/azure_monitor.go | 10 +- plugins/outputs/bigquery/bigquery.go | 14 +- plugins/outputs/bigquery/bigquery_test.go | 4 +- plugins/outputs/cloud_pubsub/pubsub.go | 16 +- plugins/outputs/cloud_pubsub/topic_stubbed.go | 4 +- plugins/outputs/cratedb/cratedb.go | 10 +- plugins/outputs/cratedb/cratedb_test.go | 4 +- plugins/outputs/datadog/datadog.go | 13 +- plugins/outputs/dynatrace/dynatrace.go | 18 +- plugins/outputs/dynatrace/dynatrace_test.go | 4 +- .../outputs/elasticsearch/elasticsearch.go | 20 +- .../elasticsearch/elasticsearch_test.go | 14 +- plugins/outputs/exec/exec.go | 9 +- plugins/outputs/exec/exec_test.go | 4 +- plugins/outputs/file/file.go | 17 +- plugins/outputs/health/health.go | 19 +- plugins/outputs/http/http.go | 15 +- plugins/outputs/influxdb/influxdb.go | 12 +- plugins/outputs/influxdb/influxdb_test.go | 8 +- plugins/outputs/influxdb_v2/influxdb.go | 8 +- plugins/outputs/instrumental/instrumental.go | 21 +- plugins/outputs/librato/librato.go | 19 +- plugins/outputs/logzio/logzio.go | 14 +- plugins/outputs/loki/loki.go | 9 +- plugins/outputs/mqtt/mqtt.go | 11 +- plugins/outputs/newrelic/newrelic.go | 14 +- plugins/outputs/newrelic/newrelic_test.go | 6 +- .../prometheus_client/prometheus_client.go | 27 +- plugins/outputs/riemann/riemann.go | 26 +- .../outputs/socket_writer/socket_writer.go | 8 +- plugins/outputs/sumologic/sumologic.go | 20 +- plugins/outputs/syslog/syslog.go | 9 +- plugins/outputs/warp10/warp10.go | 20 +- .../yandex_cloud_monitoring.go | 14 +- plugins/processors/date/date.go | 14 +- plugins/processors/date/date_test.go | 4 +- plugins/processors/dedup/dedup.go | 12 +- plugins/processors/dedup/dedup_test.go | 4 +- plugins/processors/ifname/ifname.go | 3 +- plugins/processors/ifname/ifname_test.go | 5 +- plugins/processors/topk/topk.go | 26 +- plugins/processors/topk/topk_test.go | 6 +- 194 files changed, 1563 insertions(+), 1540 deletions(-) create mode 100644 config/testdata/azure_monitor.toml diff --git a/agent/agent.go b/agent/agent.go index 96e8596b851b2..78097bcd47731 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -98,8 +98,8 @@ type outputUnit struct { func (a *Agent) Run(ctx context.Context) error { log.Printf("I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ "Flush Interval:%s", - a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet, - a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) + time.Duration(a.Config.Agent.Interval), a.Config.Agent.Quiet, + a.Config.Agent.Hostname, time.Duration(a.Config.Agent.FlushInterval)) log.Printf("D! [agent] Initializing plugins") err := a.initPlugins() @@ -274,19 +274,19 @@ func (a *Agent) runInputs( var wg sync.WaitGroup for _, input := range unit.inputs { // Overwrite agent interval if this plugin has its own. - interval := a.Config.Agent.Interval.Duration + interval := time.Duration(a.Config.Agent.Interval) if input.Config.Interval != 0 { interval = input.Config.Interval } // Overwrite agent precision if this plugin has its own. - precision := a.Config.Agent.Precision.Duration + precision := time.Duration(a.Config.Agent.Precision) if input.Config.Precision != 0 { precision = input.Config.Precision } // Overwrite agent collection_jitter if this plugin has its own. - jitter := a.Config.Agent.CollectionJitter.Duration + jitter := time.Duration(a.Config.Agent.CollectionJitter) if input.Config.CollectionJitter != 0 { jitter = input.Config.CollectionJitter } @@ -373,13 +373,13 @@ func (a *Agent) testRunInputs( defer wg.Done() // Overwrite agent interval if this plugin has its own. - interval := a.Config.Agent.Interval.Duration + interval := time.Duration(a.Config.Agent.Interval) if input.Config.Interval != 0 { interval = input.Config.Interval } // Overwrite agent precision if this plugin has its own. - precision := a.Config.Agent.Precision.Duration + precision := time.Duration(a.Config.Agent.Precision) if input.Config.Precision != 0 { precision = input.Config.Precision } @@ -611,8 +611,8 @@ func (a *Agent) runAggregators( go func(agg *models.RunningAggregator) { defer wg.Done() - interval := a.Config.Agent.Interval.Duration - precision := a.Config.Agent.Precision.Duration + interval := time.Duration(a.Config.Agent.Interval) + precision := time.Duration(a.Config.Agent.Precision) acc := NewAccumulator(agg, unit.aggC) acc.SetPrecision(getPrecision(precision, interval)) @@ -723,8 +723,8 @@ func (a *Agent) runOutputs( var wg sync.WaitGroup // Start flush loop - interval := a.Config.Agent.FlushInterval.Duration - jitter := a.Config.Agent.FlushJitter.Duration + interval := time.Duration(a.Config.Agent.FlushInterval) + jitter := time.Duration(a.Config.Agent.FlushJitter) ctx, cancel := context.WithCancel(context.Background()) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 459f81a90dc7c..f7df792728b23 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -151,14 +151,12 @@ func runAgent(ctx context.Context, return errors.New("Error: no inputs found, did you provide a valid config file?") } - if int64(c.Agent.Interval.Duration) <= 0 { - return fmt.Errorf("Agent interval must be positive, found %s", - c.Agent.Interval.Duration) + if int64(c.Agent.Interval) <= 0 { + return fmt.Errorf("Agent interval must be positive, found %v", c.Agent.Interval) } - if int64(c.Agent.FlushInterval.Duration) <= 0 { - return fmt.Errorf("Agent flush_interval must be positive; found %s", - c.Agent.Interval.Duration) + if int64(c.Agent.FlushInterval) <= 0 { + return fmt.Errorf("Agent flush_interval must be positive; found %v", c.Agent.Interval) } ag, err := agent.NewAgent(c) diff --git a/config/config.go b/config/config.go index 097fff385f531..610f79cc3fd7e 100644 --- a/config/config.go +++ b/config/config.go @@ -82,9 +82,9 @@ func NewConfig() *Config { // Agent defaults: Agent: &AgentConfig{ - Interval: internal.Duration{Duration: 10 * time.Second}, + Interval: Duration(10 * time.Second), RoundInterval: true, - FlushInterval: internal.Duration{Duration: 10 * time.Second}, + FlushInterval: Duration(10 * time.Second), LogTarget: "file", LogfileRotationMaxArchives: 5, }, @@ -111,7 +111,7 @@ func NewConfig() *Config { // AgentConfig defines configuration that will be used by the Telegraf agent type AgentConfig struct { // Interval at which to gather information - Interval internal.Duration + Interval Duration // RoundInterval rounds collection interval to 'interval'. // ie, if Interval=10s then always collect on :00, :10, :20, etc. @@ -123,22 +123,22 @@ type AgentConfig struct { // when interval = "250ms", precision will be "1ms" // Precision will NOT be used for service inputs. It is up to each individual // service input to set the timestamp at the appropriate precision. - Precision internal.Duration + Precision Duration // CollectionJitter is used to jitter the collection by a random amount. // Each plugin will sleep for a random time within jitter before collecting. // This can be used to avoid many plugins querying things like sysfs at the // same time, which can have a measurable effect on the system. - CollectionJitter internal.Duration + CollectionJitter Duration // FlushInterval is the Interval at which to flush data - FlushInterval internal.Duration + FlushInterval Duration // FlushJitter Jitters the flush interval by a random amount. // This is primarily to avoid large write spikes for users running a large // number of telegraf instances. // ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - FlushJitter internal.Duration + FlushJitter Duration // MetricBatchSize is the maximum number of metrics that is wrote to an // output plugin in one call. @@ -178,11 +178,11 @@ type AgentConfig struct { // The file will be rotated after the time interval specified. When set // to 0 no time based rotation is performed. - LogfileRotationInterval internal.Duration `toml:"logfile_rotation_interval"` + LogfileRotationInterval Duration `toml:"logfile_rotation_interval"` // The logfile will be rotated when it becomes larger than the specified // size. When set to 0 no size based rotation is performed. - LogfileRotationMaxSize internal.Size `toml:"logfile_rotation_max_size"` + LogfileRotationMaxSize Size `toml:"logfile_rotation_max_size"` // Maximum number of rotated archives to keep, any older logs are deleted. // If set to -1, no archives are removed. diff --git a/config/config_test.go b/config/config_test.go index e238dbade1e82..3095ffdf12b08 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,6 +1,7 @@ package config import ( + "fmt" "net/http" "net/http/httptest" "os" @@ -8,30 +9,23 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/models" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/exec" - "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" - "github.com/influxdata/telegraf/plugins/inputs/memcached" - "github.com/influxdata/telegraf/plugins/inputs/procstat" - "github.com/influxdata/telegraf/plugins/outputs/azure_monitor" - httpOut "github.com/influxdata/telegraf/plugins/outputs/http" + "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { c := NewConfig() - err := os.Setenv("MY_TEST_SERVER", "192.168.1.1") - assert.NoError(t, err) - err = os.Setenv("TEST_INTERVAL", "10s") - assert.NoError(t, err) + require.NoError(t, os.Setenv("MY_TEST_SERVER", "192.168.1.1")) + require.NoError(t, os.Setenv("TEST_INTERVAL", "10s")) c.LoadConfig("./testdata/single_plugin_env_vars.toml") - memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) - memcached.Servers = []string{"192.168.1.1"} + input := inputs.Inputs["memcached"]().(*MockupInputPlugin) + input.Servers = []string{"192.168.1.1"} filter := models.Filter{ NameDrop: []string{"metricname2"}, @@ -51,26 +45,27 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { }, }, } - assert.NoError(t, filter.Compile()) - mConfig := &models.InputConfig{ + require.NoError(t, filter.Compile()) + inputConfig := &models.InputConfig{ Name: "memcached", Filter: filter, Interval: 10 * time.Second, } - mConfig.Tags = make(map[string]string) + inputConfig.Tags = make(map[string]string) - assert.Equal(t, memcached, c.Inputs[0].Input, - "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Inputs[0].Config, - "Testdata did not produce correct memcached metadata.") + // Ignore Log and Parser + c.Inputs[0].Input.(*MockupInputPlugin).Log = nil + c.Inputs[0].Input.(*MockupInputPlugin).parser = nil + require.Equal(t, input, c.Inputs[0].Input, "Testdata did not produce a correct mockup struct.") + require.Equal(t, inputConfig, c.Inputs[0].Config, "Testdata did not produce correct input metadata.") } func TestConfig_LoadSingleInput(t *testing.T) { c := NewConfig() c.LoadConfig("./testdata/single_plugin.toml") - memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) - memcached.Servers = []string{"localhost"} + input := inputs.Inputs["memcached"]().(*MockupInputPlugin) + input.Servers = []string{"localhost"} filter := models.Filter{ NameDrop: []string{"metricname2"}, @@ -90,35 +85,34 @@ func TestConfig_LoadSingleInput(t *testing.T) { }, }, } - assert.NoError(t, filter.Compile()) - mConfig := &models.InputConfig{ + require.NoError(t, filter.Compile()) + inputConfig := &models.InputConfig{ Name: "memcached", Filter: filter, Interval: 5 * time.Second, } - mConfig.Tags = make(map[string]string) + inputConfig.Tags = make(map[string]string) - assert.Equal(t, memcached, c.Inputs[0].Input, - "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Inputs[0].Config, - "Testdata did not produce correct memcached metadata.") + // Ignore Log and Parser + c.Inputs[0].Input.(*MockupInputPlugin).Log = nil + c.Inputs[0].Input.(*MockupInputPlugin).parser = nil + require.Equal(t, input, c.Inputs[0].Input, "Testdata did not produce a correct memcached struct.") + require.Equal(t, inputConfig, c.Inputs[0].Config, "Testdata did not produce correct memcached metadata.") } func TestConfig_LoadDirectory(t *testing.T) { c := NewConfig() - err := c.LoadConfig("./testdata/single_plugin.toml") - if err != nil { - t.Error(err) - } - err = c.LoadDirectory("./testdata/subconfig") - if err != nil { - t.Error(err) - } + require.NoError(t, c.LoadConfig("./testdata/single_plugin.toml")) + require.NoError(t, c.LoadDirectory("./testdata/subconfig")) - memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) - memcached.Servers = []string{"localhost"} + // Create the expected data + expectedPlugins := make([]*MockupInputPlugin, 4) + expectedConfigs := make([]*models.InputConfig, 4) - filter := models.Filter{ + expectedPlugins[0] = inputs.Inputs["memcached"]().(*MockupInputPlugin) + expectedPlugins[0].Servers = []string{"localhost"} + + filterMockup := models.Filter{ NameDrop: []string{"metricname2"}, NamePass: []string{"metricname1"}, FieldDrop: []string{"other", "stuff"}, @@ -136,120 +130,138 @@ func TestConfig_LoadDirectory(t *testing.T) { }, }, } - assert.NoError(t, filter.Compile()) - mConfig := &models.InputConfig{ + require.NoError(t, filterMockup.Compile()) + expectedConfigs[0] = &models.InputConfig{ Name: "memcached", - Filter: filter, + Filter: filterMockup, Interval: 5 * time.Second, } - mConfig.Tags = make(map[string]string) - - assert.Equal(t, memcached, c.Inputs[0].Input, - "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Inputs[0].Config, - "Testdata did not produce correct memcached metadata.") + expectedConfigs[0].Tags = make(map[string]string) - ex := inputs.Inputs["exec"]().(*exec.Exec) + expectedPlugins[1] = inputs.Inputs["exec"]().(*MockupInputPlugin) p, err := parsers.NewParser(&parsers.Config{ MetricName: "exec", DataFormat: "json", JSONStrict: true, }) - assert.NoError(t, err) - ex.SetParser(p) - ex.Command = "/usr/bin/myothercollector --foo=bar" - eConfig := &models.InputConfig{ + require.NoError(t, err) + expectedPlugins[1].SetParser(p) + expectedPlugins[1].Command = "/usr/bin/myothercollector --foo=bar" + expectedConfigs[1] = &models.InputConfig{ Name: "exec", MeasurementSuffix: "_myothercollector", } - eConfig.Tags = make(map[string]string) - - exec := c.Inputs[1].Input.(*exec.Exec) - require.NotNil(t, exec.Log) - exec.Log = nil - - assert.Equal(t, ex, c.Inputs[1].Input, - "Merged Testdata did not produce a correct exec struct.") - assert.Equal(t, eConfig, c.Inputs[1].Config, - "Merged Testdata did not produce correct exec metadata.") + expectedConfigs[1].Tags = make(map[string]string) - memcached.Servers = []string{"192.168.1.1"} - assert.Equal(t, memcached, c.Inputs[2].Input, - "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Inputs[2].Config, - "Testdata did not produce correct memcached metadata.") + expectedPlugins[2] = inputs.Inputs["memcached"]().(*MockupInputPlugin) + expectedPlugins[2].Servers = []string{"192.168.1.1"} - pstat := inputs.Inputs["procstat"]().(*procstat.Procstat) - pstat.PidFile = "/var/run/grafana-server.pid" - - pConfig := &models.InputConfig{Name: "procstat"} - pConfig.Tags = make(map[string]string) + filterMemcached := models.Filter{ + NameDrop: []string{"metricname2"}, + NamePass: []string{"metricname1"}, + FieldDrop: []string{"other", "stuff"}, + FieldPass: []string{"some", "strings"}, + TagDrop: []models.TagFilter{ + { + Name: "badtag", + Filter: []string{"othertag"}, + }, + }, + TagPass: []models.TagFilter{ + { + Name: "goodtag", + Filter: []string{"mytag"}, + }, + }, + } + require.NoError(t, filterMemcached.Compile()) + expectedConfigs[2] = &models.InputConfig{ + Name: "memcached", + Filter: filterMemcached, + Interval: 5 * time.Second, + } + expectedConfigs[2].Tags = make(map[string]string) + + expectedPlugins[3] = inputs.Inputs["procstat"]().(*MockupInputPlugin) + expectedPlugins[3].PidFile = "/var/run/grafana-server.pid" + expectedConfigs[3] = &models.InputConfig{Name: "procstat"} + expectedConfigs[3].Tags = make(map[string]string) + + // Check the generated plugins + require.Len(t, c.Inputs, len(expectedPlugins)) + require.Len(t, c.Inputs, len(expectedConfigs)) + for i, plugin := range c.Inputs { + input := plugin.Input.(*MockupInputPlugin) + // Check the logger and ignore it for comparison + require.NotNil(t, input.Log) + input.Log = nil + + // Ignore the parser if not expected + if expectedPlugins[i].parser == nil { + input.parser = nil + } - assert.Equal(t, pstat, c.Inputs[3].Input, - "Merged Testdata did not produce a correct procstat struct.") - assert.Equal(t, pConfig, c.Inputs[3].Config, - "Merged Testdata did not produce correct procstat metadata.") + require.Equalf(t, expectedPlugins[i], plugin.Input, "Plugin %d: incorrect struct produced", i) + require.Equalf(t, expectedConfigs[i], plugin.Config, "Plugin %d: incorrect config produced", i) + } } func TestConfig_LoadSpecialTypes(t *testing.T) { c := NewConfig() - err := c.LoadConfig("./testdata/special_types.toml") - assert.NoError(t, err) - require.Equal(t, 1, len(c.Inputs)) + require.NoError(t, c.LoadConfig("./testdata/special_types.toml")) + require.Len(t, c.Inputs, 1) - inputHTTPListener, ok := c.Inputs[0].Input.(*http_listener_v2.HTTPListenerV2) - assert.Equal(t, true, ok) + input, ok := c.Inputs[0].Input.(*MockupInputPlugin) + require.True(t, ok) // Tests telegraf duration parsing. - assert.Equal(t, internal.Duration{Duration: time.Second}, inputHTTPListener.WriteTimeout) + require.Equal(t, Duration(time.Second), input.WriteTimeout) // Tests telegraf size parsing. - assert.Equal(t, internal.Size{Size: 1024 * 1024}, inputHTTPListener.MaxBodySize) + require.Equal(t, Size(1024*1024), input.MaxBodySize) // Tests toml multiline basic strings. - assert.Equal(t, "/path/to/my/cert", strings.TrimRight(inputHTTPListener.TLSCert, "\r\n")) + require.Equal(t, "/path/to/my/cert", strings.TrimRight(input.TLSCert, "\r\n")) } func TestConfig_FieldNotDefined(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/invalid_field.toml") require.Error(t, err, "invalid field name") - assert.Equal(t, "Error loading config file ./testdata/invalid_field.toml: plugin inputs.http_listener_v2: line 1: configuration specified the fields [\"not_a_field\"], but they weren't used", err.Error()) + require.Equal(t, "Error loading config file ./testdata/invalid_field.toml: plugin inputs.http_listener_v2: line 1: configuration specified the fields [\"not_a_field\"], but they weren't used", err.Error()) } func TestConfig_WrongFieldType(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/wrong_field_type.toml") require.Error(t, err, "invalid field type") - assert.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Port) cannot unmarshal TOML string into int", err.Error()) + require.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: error parsing http_listener_v2, line 2: (config.MockupInputPlugin.Port) cannot unmarshal TOML string into int", err.Error()) c = NewConfig() err = c.LoadConfig("./testdata/wrong_field_type2.toml") require.Error(t, err, "invalid field type2") - assert.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Methods) cannot unmarshal TOML string into []string", err.Error()) + require.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: error parsing http_listener_v2, line 2: (config.MockupInputPlugin.Methods) cannot unmarshal TOML string into []string", err.Error()) } func TestConfig_InlineTables(t *testing.T) { // #4098 c := NewConfig() - err := c.LoadConfig("./testdata/inline_table.toml") - assert.NoError(t, err) - require.Equal(t, 2, len(c.Outputs)) - - outputHTTP, ok := c.Outputs[1].Output.(*httpOut.HTTP) - assert.Equal(t, true, ok) - assert.Equal(t, map[string]string{"Authorization": "Token $TOKEN", "Content-Type": "application/json"}, outputHTTP.Headers) - assert.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude) + require.NoError(t, c.LoadConfig("./testdata/inline_table.toml")) + require.Len(t, c.Outputs, 2) + + output, ok := c.Outputs[1].Output.(*MockupOuputPlugin) + require.True(t, ok) + require.Equal(t, map[string]string{"Authorization": "Token $TOKEN", "Content-Type": "application/json"}, output.Headers) + require.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude) } func TestConfig_SliceComment(t *testing.T) { t.Skipf("Skipping until #3642 is resolved") c := NewConfig() - err := c.LoadConfig("./testdata/slice_comment.toml") - assert.NoError(t, err) - require.Equal(t, 1, len(c.Outputs)) + require.NoError(t, c.LoadConfig("./testdata/slice_comment.toml")) + require.Len(t, c.Outputs, 1) - outputHTTP, ok := c.Outputs[0].Output.(*httpOut.HTTP) - assert.Equal(t, []string{"test"}, outputHTTP.Scopes) - assert.Equal(t, true, ok) + output, ok := c.Outputs[0].Output.(*MockupOuputPlugin) + require.True(t, ok) + require.Equal(t, []string{"test"}, output.Scopes) } func TestConfig_BadOrdering(t *testing.T) { @@ -258,27 +270,21 @@ func TestConfig_BadOrdering(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/non_slice_slice.toml") require.Error(t, err, "bad ordering") - assert.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) + require.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) } func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { // #8256 Cannot use empty string as the namespace prefix c := NewConfig() - defaultPrefixConfig := `[[outputs.azure_monitor]]` - err := c.LoadConfigData([]byte(defaultPrefixConfig)) - assert.NoError(t, err) - azureMonitor, ok := c.Outputs[0].Output.(*azure_monitor.AzureMonitor) - assert.Equal(t, "Telegraf/", azureMonitor.NamespacePrefix) - assert.Equal(t, true, ok) - - c = NewConfig() - customPrefixConfig := `[[outputs.azure_monitor]] - namespace_prefix = ""` - err = c.LoadConfigData([]byte(customPrefixConfig)) - assert.NoError(t, err) - azureMonitor, ok = c.Outputs[0].Output.(*azure_monitor.AzureMonitor) - assert.Equal(t, "", azureMonitor.NamespacePrefix) - assert.Equal(t, true, ok) + require.NoError(t, c.LoadConfig("./testdata/azure_monitor.toml")) + require.Len(t, c.Outputs, 2) + + expectedPrefix := []string{"Telegraf/", ""} + for i, plugin := range c.Outputs { + output, ok := plugin.Output.(*MockupOuputPlugin) + require.True(t, ok) + require.Equal(t, expectedPrefix[i], output.NamespacePrefix) + } } func TestConfig_URLRetries3Fails(t *testing.T) { @@ -290,9 +296,12 @@ func TestConfig_URLRetries3Fails(t *testing.T) { })) defer ts.Close() + expected := fmt.Sprintf("Error loading config file %s: Retry 3 of 3 failed to retrieve remote config: 404 Not Found", ts.URL) + c := NewConfig() err := c.LoadConfig(ts.URL) require.Error(t, err) + require.Equal(t, expected, err.Error()) require.Equal(t, 4, responseCounter) } @@ -310,7 +319,57 @@ func TestConfig_URLRetries3FailsThenPasses(t *testing.T) { defer ts.Close() c := NewConfig() - err := c.LoadConfig(ts.URL) - require.NoError(t, err) + require.NoError(t, c.LoadConfig(ts.URL)) require.Equal(t, 4, responseCounter) } + +/*** Mockup INPUT plugin for testing to avoid cyclic dependencies ***/ +type MockupInputPlugin struct { + Servers []string `toml:"servers"` + Methods []string `toml:"methods"` + Timeout Duration `toml:"timeout"` + ReadTimeout Duration `toml:"read_timeout"` + WriteTimeout Duration `toml:"write_timeout"` + MaxBodySize Size `toml:"max_body_size"` + Port int `toml:"port"` + Command string + PidFile string + Log telegraf.Logger `toml:"-"` + tls.ServerConfig + + parser parsers.Parser +} + +func (m *MockupInputPlugin) SampleConfig() string { return "Mockup test intput plugin" } +func (m *MockupInputPlugin) Description() string { return "Mockup test intput plugin" } +func (m *MockupInputPlugin) Gather(acc telegraf.Accumulator) error { return nil } +func (m *MockupInputPlugin) SetParser(parser parsers.Parser) { m.parser = parser } + +/*** Mockup OUTPUT plugin for testing to avoid cyclic dependencies ***/ +type MockupOuputPlugin struct { + URL string `toml:"url"` + Headers map[string]string `toml:"headers"` + Scopes []string `toml:"scopes"` + NamespacePrefix string `toml:"namespace_prefix"` + Log telegraf.Logger `toml:"-"` + tls.ClientConfig +} + +func (m *MockupOuputPlugin) Connect() error { return nil } +func (m *MockupOuputPlugin) Close() error { return nil } +func (m *MockupOuputPlugin) Description() string { return "Mockup test output plugin" } +func (m *MockupOuputPlugin) SampleConfig() string { return "Mockup test output plugin" } +func (m *MockupOuputPlugin) Write(metrics []telegraf.Metric) error { return nil } + +// Register the mockup plugin on loading +func init() { + // Register the mockup input plugin for the required names + inputs.Add("exec", func() telegraf.Input { return &MockupInputPlugin{Timeout: Duration(time.Second * 5)} }) + inputs.Add("http_listener_v2", func() telegraf.Input { return &MockupInputPlugin{} }) + inputs.Add("memcached", func() telegraf.Input { return &MockupInputPlugin{} }) + inputs.Add("procstat", func() telegraf.Input { return &MockupInputPlugin{} }) + + // Register the mockup output plugin for the required names + outputs.Add("azure_monitor", func() telegraf.Output { return &MockupOuputPlugin{NamespacePrefix: "Telegraf/"} }) + outputs.Add("http", func() telegraf.Output { return &MockupOuputPlugin{} }) +} diff --git a/config/testdata/azure_monitor.toml b/config/testdata/azure_monitor.toml new file mode 100644 index 0000000000000..6151bea9020c5 --- /dev/null +++ b/config/testdata/azure_monitor.toml @@ -0,0 +1,4 @@ +[[outputs.azure_monitor]] + +[[outputs.azure_monitor]] + namespace_prefix = "" diff --git a/config/types_test.go b/config/types_test.go index 8e35de6111c82..afff599e3d6e4 100644 --- a/config/types_test.go +++ b/config/types_test.go @@ -29,3 +29,49 @@ func TestConfigDuration(t *testing.T) { require.Equal(t, p.MaxParallelLookups, 13) require.Equal(t, p.Ordered, true) } + +func TestDuration(t *testing.T) { + var d config.Duration + + require.NoError(t, d.UnmarshalTOML([]byte(`"1s"`))) + require.Equal(t, time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`1s`))) + require.Equal(t, time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`'1s'`))) + require.Equal(t, time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`10`))) + require.Equal(t, 10*time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`1.5`))) + require.Equal(t, time.Second, time.Duration(d)) +} + +func TestSize(t *testing.T) { + var s config.Size + + require.NoError(t, s.UnmarshalTOML([]byte(`"1B"`))) + require.Equal(t, int64(1), int64(s)) + + s = config.Size(0) + require.NoError(t, s.UnmarshalTOML([]byte(`1`))) + require.Equal(t, int64(1), int64(s)) + + s = config.Size(0) + require.NoError(t, s.UnmarshalTOML([]byte(`'1'`))) + require.Equal(t, int64(1), int64(s)) + + s = config.Size(0) + require.NoError(t, s.UnmarshalTOML([]byte(`"1GB"`))) + require.Equal(t, int64(1000*1000*1000), int64(s)) + + s = config.Size(0) + require.NoError(t, s.UnmarshalTOML([]byte(`"12GiB"`))) + require.Equal(t, int64(12*1024*1024*1024), int64(s)) +} diff --git a/go.sum b/go.sum index d67550751eed1..f2f65d843f712 100644 --- a/go.sum +++ b/go.sum @@ -109,8 +109,12 @@ github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcV github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -372,6 +376,8 @@ github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= @@ -379,6 +385,8 @@ github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDB github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= @@ -397,6 +405,8 @@ github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= @@ -972,6 +982,8 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1079,6 +1091,8 @@ github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= +github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= diff --git a/internal/internal.go b/internal/internal.go index 8b0dfff1fd418..636d7f06a7014 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -2,7 +2,6 @@ package internal import ( "bufio" - "bytes" "compress/gzip" "context" "errors" @@ -19,8 +18,6 @@ import ( "syscall" "time" "unicode" - - "github.com/alecthomas/units" ) const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" @@ -34,20 +31,6 @@ var ( // Set via the main module var version string -// Duration just wraps time.Duration -type Duration struct { - Duration time.Duration -} - -// Size just wraps an int64 -type Size struct { - Size int64 -} - -type Number struct { - Value float64 -} - type ReadWaitCloser struct { pipeReader *io.PipeReader wg sync.WaitGroup @@ -73,72 +56,6 @@ func ProductToken() string { Version(), strings.TrimPrefix(runtime.Version(), "go")) } -// UnmarshalTOML parses the duration from the TOML config file -func (d *Duration) UnmarshalTOML(b []byte) error { - var err error - b = bytes.Trim(b, `'`) - - // see if we can directly convert it - d.Duration, err = time.ParseDuration(string(b)) - if err == nil { - return nil - } - - // Parse string duration, ie, "1s" - if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 { - d.Duration, err = time.ParseDuration(uq) - if err == nil { - return nil - } - } - - // First try parsing as integer seconds - sI, err := strconv.ParseInt(string(b), 10, 64) - if err == nil { - d.Duration = time.Second * time.Duration(sI) - return nil - } - // Second try parsing as float seconds - sF, err := strconv.ParseFloat(string(b), 64) - if err == nil { - d.Duration = time.Second * time.Duration(sF) - return nil - } - - return nil -} - -func (s *Size) UnmarshalTOML(b []byte) error { - var err error - b = bytes.Trim(b, `'`) - - val, err := strconv.ParseInt(string(b), 10, 64) - if err == nil { - s.Size = val - return nil - } - uq, err := strconv.Unquote(string(b)) - if err != nil { - return err - } - val, err = units.ParseStrictBytes(uq) - if err != nil { - return err - } - s.Size = val - return nil -} - -func (n *Number) UnmarshalTOML(b []byte) error { - value, err := strconv.ParseFloat(string(b), 64) - if err != nil { - return err - } - - n.Value = value - return nil -} - // ReadLines reads contents from a file and splits them by new lines. // A convenience wrapper to ReadLinesOffsetN(filename, 0, -1). func ReadLines(filename string) ([]string, error) { diff --git a/internal/internal_test.go b/internal/internal_test.go index 16980ecef93af..2bed302ee0a11 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -171,52 +171,6 @@ func TestRandomSleep(t *testing.T) { assert.True(t, elapsed < time.Millisecond*150) } -func TestDuration(t *testing.T) { - var d Duration - - d.UnmarshalTOML([]byte(`"1s"`)) - assert.Equal(t, time.Second, d.Duration) - - d = Duration{} - d.UnmarshalTOML([]byte(`1s`)) - assert.Equal(t, time.Second, d.Duration) - - d = Duration{} - d.UnmarshalTOML([]byte(`'1s'`)) - assert.Equal(t, time.Second, d.Duration) - - d = Duration{} - d.UnmarshalTOML([]byte(`10`)) - assert.Equal(t, 10*time.Second, d.Duration) - - d = Duration{} - d.UnmarshalTOML([]byte(`1.5`)) - assert.Equal(t, time.Second, d.Duration) -} - -func TestSize(t *testing.T) { - var s Size - - s.UnmarshalTOML([]byte(`"1B"`)) - assert.Equal(t, int64(1), s.Size) - - s = Size{} - s.UnmarshalTOML([]byte(`1`)) - assert.Equal(t, int64(1), s.Size) - - s = Size{} - s.UnmarshalTOML([]byte(`'1'`)) - assert.Equal(t, int64(1), s.Size) - - s = Size{} - s.UnmarshalTOML([]byte(`"1GB"`)) - assert.Equal(t, int64(1000*1000*1000), s.Size) - - s = Size{} - s.UnmarshalTOML([]byte(`"12GiB"`)) - assert.Equal(t, int64(12*1024*1024*1024), s.Size) -} - func TestCompressWithGzip(t *testing.T) { testData := "the quick brown fox jumps over the lazy dog" inputBuffer := bytes.NewBuffer([]byte(testData)) diff --git a/internal/snmp/config.go b/internal/snmp/config.go index 9941f0682fe3d..0a200b7067787 100644 --- a/internal/snmp/config.go +++ b/internal/snmp/config.go @@ -1,13 +1,13 @@ package snmp import ( - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" ) type ClientConfig struct { // Timeout to wait for a response. - Timeout internal.Duration `toml:"timeout"` - Retries int `toml:"retries"` + Timeout config.Duration `toml:"timeout"` + Retries int `toml:"retries"` // Values: 1, 2, 3 Version uint8 `toml:"version"` diff --git a/internal/snmp/wrapper.go b/internal/snmp/wrapper.go index 9825d5677ff73..9220098e37f73 100644 --- a/internal/snmp/wrapper.go +++ b/internal/snmp/wrapper.go @@ -5,6 +5,7 @@ import ( "net/url" "strconv" "strings" + "time" "github.com/gosnmp/gosnmp" ) @@ -62,7 +63,7 @@ func (gs GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) { func NewWrapper(s ClientConfig) (GosnmpWrapper, error) { gs := GosnmpWrapper{&gosnmp.GoSNMP{}} - gs.Timeout = s.Timeout.Duration + gs.Timeout = time.Duration(s.Timeout) gs.Retries = s.Retries diff --git a/logger/logger.go b/logger/logger.go index 58a8b906f6596..c365c057304aa 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -8,7 +8,7 @@ import ( "regexp" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/rotate" "github.com/influxdata/wlog" ) @@ -33,9 +33,9 @@ type LogConfig struct { // logger will fallback to stderr Logfile string // will rotate when current file at the specified time interval - RotationInterval internal.Duration + RotationInterval config.Duration // will rotate when current file size exceeds this parameter. - RotationMaxSize internal.Size + RotationMaxSize config.Size // maximum rotated files to keep (older ones will be deleted) RotationMaxArchives int } @@ -105,7 +105,7 @@ func (t *telegrafLogCreator) CreateLogger(config LogConfig) (io.Writer, error) { case LogTargetFile: if config.Logfile != "" { var err error - if writer, err = rotate.NewFileWriter(config.Logfile, config.RotationInterval.Duration, config.RotationMaxSize.Size, config.RotationMaxArchives); err != nil { + if writer, err = rotate.NewFileWriter(config.Logfile, time.Duration(config.RotationInterval), int64(config.RotationMaxSize), config.RotationMaxArchives); err != nil { log.Printf("E! Unable to open %s (%s), using stderr", config.Logfile, err) writer = defaultWriter } diff --git a/logger/logger_test.go b/logger/logger_test.go index a5f53ca17e89b..8bb01e8e5328b 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -9,7 +9,7 @@ import ( "path/filepath" "testing" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -99,10 +99,10 @@ func TestWriteToTruncatedFile(t *testing.T) { func TestWriteToFileInRotation(t *testing.T) { tempDir, err := ioutil.TempDir("", "LogRotation") require.NoError(t, err) - config := createBasicLogConfig(filepath.Join(tempDir, "test.log")) - config.LogTarget = LogTargetFile - config.RotationMaxSize = internal.Size{Size: int64(30)} - writer := newLogWriter(config) + cfg := createBasicLogConfig(filepath.Join(tempDir, "test.log")) + cfg.LogTarget = LogTargetFile + cfg.RotationMaxSize = config.Size(30) + writer := newLogWriter(cfg) // Close the writer here, otherwise the temp folder cannot be deleted because the current log file is in use. closer, isCloser := writer.(io.Closer) assert.True(t, isCloser) diff --git a/plugins/aggregators/final/final.go b/plugins/aggregators/final/final.go index 53ad0a47c9d95..3ef32a10ab39f 100644 --- a/plugins/aggregators/final/final.go +++ b/plugins/aggregators/final/final.go @@ -4,7 +4,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/aggregators" ) @@ -20,7 +20,7 @@ var sampleConfig = ` ` type Final struct { - SeriesTimeout internal.Duration `toml:"series_timeout"` + SeriesTimeout config.Duration `toml:"series_timeout"` // The last metric for all series which are active metricCache map[uint64]telegraf.Metric @@ -28,7 +28,7 @@ type Final struct { func NewFinal() *Final { return &Final{ - SeriesTimeout: internal.Duration{Duration: 5 * time.Minute}, + SeriesTimeout: config.Duration(5 * time.Minute), metricCache: make(map[uint64]telegraf.Metric), } } @@ -51,7 +51,7 @@ func (m *Final) Push(acc telegraf.Accumulator) { acc.SetPrecision(time.Nanosecond) for id, metric := range m.metricCache { - if time.Since(metric.Time()) > m.SeriesTimeout.Duration { + if time.Since(metric.Time()) > time.Duration(m.SeriesTimeout) { fields := map[string]interface{}{} for _, field := range metric.FieldList() { fields[field.Key+"_final"] = field.Value diff --git a/plugins/aggregators/final/final_test.go b/plugins/aggregators/final/final_test.go index 1b3367fa5b3ad..a4add9a5ce20c 100644 --- a/plugins/aggregators/final/final_test.go +++ b/plugins/aggregators/final/final_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" ) @@ -93,7 +93,7 @@ func TestTwoTags(t *testing.T) { func TestLongDifference(t *testing.T) { acc := testutil.Accumulator{} final := NewFinal() - final.SeriesTimeout = internal.Duration{Duration: 30 * time.Second} + final.SeriesTimeout = config.Duration(30 * time.Second) tags := map[string]string{"foo": "bar"} now := time.Now() diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go index cb8793619ff8f..0674b7ae0fe52 100644 --- a/plugins/inputs/activemq/activemq.go +++ b/plugins/inputs/activemq/activemq.go @@ -12,19 +12,19 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type ActiveMQ struct { - Server string `toml:"server"` - Port int `toml:"port"` - URL string `toml:"url"` - Username string `toml:"username"` - Password string `toml:"password"` - Webadmin string `toml:"webadmin"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Server string `toml:"server"` + Port int `toml:"port"` + URL string `toml:"url"` + Username string `toml:"username"` + Password string `toml:"password"` + Webadmin string `toml:"webadmin"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client @@ -127,15 +127,15 @@ func (a *ActiveMQ) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: a.ResponseTimeout.Duration, + Timeout: time.Duration(a.ResponseTimeout), } return client, nil } func (a *ActiveMQ) Init() error { - if a.ResponseTimeout.Duration < time.Second { - a.ResponseTimeout.Duration = time.Second * 5 + if a.ResponseTimeout < config.Duration(time.Second) { + a.ResponseTimeout = config.Duration(time.Second * 5) } var err error diff --git a/plugins/inputs/aliyuncms/aliyuncms.go b/plugins/inputs/aliyuncms/aliyuncms.go index 6aebf99b836fa..3b521579b12de 100644 --- a/plugins/inputs/aliyuncms/aliyuncms.go +++ b/plugins/inputs/aliyuncms/aliyuncms.go @@ -12,6 +12,7 @@ import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers" "github.com/aliyun/alibaba-cloud-sdk-go/services/cms" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/limiter" "github.com/influxdata/telegraf/plugins/inputs" @@ -31,7 +32,7 @@ const ( ## 5) RSA keypair credential ## 6) Environment variables credential ## 7) Instance metadata credential - + # access_key_id = "" # access_key_secret = "" # access_key_sts_token = "" @@ -40,7 +41,7 @@ const ( # private_key = "" # public_key_id = "" # role_name = "" - + # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. Some are collected at # 3 minute, 5 minute, or larger intervals. @@ -51,22 +52,22 @@ const ( # ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) period = "5m" - + ## Collection Delay (required - must account for metrics availability via AliyunCMS API) delay = "1m" - + ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid ## gaps or overlap in pulled data interval = "5m" - + ## Metric Statistic Project (required) project = "acs_slb_dashboard" - + ## Maximum requests per second, default value is 200 ratelimit = 200 - + ## Discovery regions set the scope for object discovery, the discovered info can be used to enrich - ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then + ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then ## it will be reported on the start - foo example for 'acs_cdn' project: ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) ## Currently, discovery supported for the following projects: @@ -78,28 +79,28 @@ const ( ## If not set, all regions would be covered, it can provide a significant load on API, so the recommendation here ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm discovery_regions = ["cn-hongkong"] - + ## how often the discovery API call executed (default 1m) #discovery_interval = "1m" - + ## Metrics to Pull (Required) [[inputs.aliyuncms.metrics]] - ## Metrics names to be requested, + ## Metrics names to be requested, ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq names = ["InstanceActiveConnection", "InstanceNewConnection"] - + ## Dimension filters for Metric (these are optional). ## This allows to get additional metric dimension. If dimension is not specified it can be returned or ## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq ## ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled) ## Values specified here would be added into the list of discovered objects. - ## You can specify either single dimension: + ## You can specify either single dimension: #dimensions = '{"instanceId": "p-example"}' - + ## Or you can specify several dimensions at once: #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' - + ## Enrichment tags, can be added from discovery (if supported) ## Notation is : ## To figure out which fields are available, consult the Describe API per project. @@ -110,10 +111,10 @@ const ( # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" # ] ## The following tags added by default: regionId (if discovery enabled), userId, instanceId. - + ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery - ## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage - ## of discovery scope vs monitoring scope + ## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage + ## of discovery scope vs monitoring scope #allow_dps_without_discovery = false ` ) @@ -130,13 +131,13 @@ type ( PublicKeyID string `toml:"public_key_id"` RoleName string `toml:"role_name"` - DiscoveryRegions []string `toml:"discovery_regions"` - DiscoveryInterval internal.Duration `toml:"discovery_interval"` - Period internal.Duration `toml:"period"` - Delay internal.Duration `toml:"delay"` - Project string `toml:"project"` - Metrics []*Metric `toml:"metrics"` - RateLimit int `toml:"ratelimit"` + DiscoveryRegions []string `toml:"discovery_regions"` + DiscoveryInterval config.Duration `toml:"discovery_interval"` + Period config.Duration `toml:"period"` + Delay config.Duration `toml:"delay"` + Project string `toml:"project"` + Metrics []*Metric `toml:"metrics"` + RateLimit int `toml:"ratelimit"` Log telegraf.Logger `toml:"-"` @@ -238,7 +239,7 @@ func (s *AliyunCMS) Init() error { //Init discovery... if s.dt == nil { //Support for tests - s.dt, err = NewDiscoveryTool(s.DiscoveryRegions, s.Project, s.Log, credential, int(float32(s.RateLimit)*0.2), s.DiscoveryInterval.Duration) + s.dt, err = NewDiscoveryTool(s.DiscoveryRegions, s.Project, s.Log, credential, int(float32(s.RateLimit)*0.2), time.Duration(s.DiscoveryInterval)) if err != nil { s.Log.Errorf("Discovery tool is not activated: %v", err) s.dt = nil @@ -310,11 +311,11 @@ func (s *AliyunCMS) updateWindow(relativeTo time.Time) { //opening left and closing right, and startTime cannot be equal //to or greater than endTime. - windowEnd := relativeTo.Add(-s.Delay.Duration) + windowEnd := relativeTo.Add(-time.Duration(s.Delay)) if s.windowEnd.IsZero() { // this is the first run, no window info, so just get a single period - s.windowStart = windowEnd.Add(-s.Period.Duration) + s.windowStart = windowEnd.Add(-time.Duration(s.Period)) } else { // subsequent window, start where last window left off s.windowStart = s.windowEnd @@ -326,7 +327,7 @@ func (s *AliyunCMS) updateWindow(relativeTo time.Time) { // Gather given metric and emit error func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, metric *Metric) error { req := cms.CreateDescribeMetricListRequest() - req.Period = strconv.FormatInt(int64(s.Period.Duration.Seconds()), 10) + req.Period = strconv.FormatInt(int64(time.Duration(s.Period).Seconds()), 10) req.MetricName = metricName req.Length = "10000" req.Namespace = s.Project @@ -547,7 +548,7 @@ func init() { inputs.Add("aliyuncms", func() telegraf.Input { return &AliyunCMS{ RateLimit: 200, - DiscoveryInterval: internal.Duration{Duration: time.Minute}, + DiscoveryInterval: config.Duration(time.Minute), dimensionKey: "instanceId", } }) diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go index a844ab4ee7a3a..b9028c8ba22a0 100644 --- a/plugins/inputs/aliyuncms/aliyuncms_test.go +++ b/plugins/inputs/aliyuncms/aliyuncms_test.go @@ -13,7 +13,7 @@ import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" "github.com/aliyun/alibaba-cloud-sdk-go/services/cms" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" "github.com/pkg/errors" @@ -34,11 +34,11 @@ func (m *mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetr resp.Period = "60" resp.Datapoints = ` [{ - "timestamp": 1490152860000, - "Maximum": 200, - "userId": "1234567898765432", - "Minimum": 100, - "instanceId": "i-abcdefgh123456", + "timestamp": 1490152860000, + "Maximum": 200, + "userId": "1234567898765432", + "Minimum": 100, + "instanceId": "i-abcdefgh123456", "Average": 150, "Value": 300 }]` @@ -50,11 +50,11 @@ func (m *mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetr resp.Period = "60" resp.Datapoints = ` [{ - "timestamp": 1490152860000, - "Maximum": 200, - "userId": "1234567898765432", - "Minimum": 100, - "instanceId": "i-abcdefgh123456", + "timestamp": 1490152860000, + "Maximum": 200, + "userId": "1234567898765432", + "Minimum": 100, + "instanceId": "i-abcdefgh123456", "Average": 150, }]` case "EmptyDatapoint": @@ -113,7 +113,7 @@ func getMockSdkCli(httpResp *http.Response) (mockAliyunSDKCli, error) { func TestPluginDefaults(t *testing.T) { require.Equal(t, &AliyunCMS{RateLimit: 200, - DiscoveryInterval: internal.Duration{Duration: time.Minute}, + DiscoveryInterval: config.Duration(time.Minute), dimensionKey: "instanceId", }, inputs.Inputs["aliyuncms"]()) } @@ -136,7 +136,7 @@ func TestPluginInitialize(t *testing.T) { `{ "LoadBalancers": { - "LoadBalancer": [ + "LoadBalancer": [ {"LoadBalancerId":"bla"} ] }, @@ -187,9 +187,7 @@ func TestPluginInitialize(t *testing.T) { func TestUpdateWindow(t *testing.T) { duration, _ := time.ParseDuration("1m") - internalDuration := internal.Duration{ - Duration: duration, - } + internalDuration := config.Duration(duration) plugin := &AliyunCMS{ Project: "acs_slb_dashboard", @@ -208,14 +206,14 @@ func TestUpdateWindow(t *testing.T) { newStartTime := plugin.windowEnd // initial window just has a single period - require.EqualValues(t, plugin.windowEnd, now.Add(-plugin.Delay.Duration)) - require.EqualValues(t, plugin.windowStart, now.Add(-plugin.Delay.Duration).Add(-plugin.Period.Duration)) + require.EqualValues(t, plugin.windowEnd, now.Add(-time.Duration(plugin.Delay))) + require.EqualValues(t, plugin.windowStart, now.Add(-time.Duration(plugin.Delay)).Add(-time.Duration(plugin.Period))) now = time.Now() plugin.updateWindow(now) // subsequent window uses previous end time as start time - require.EqualValues(t, plugin.windowEnd, now.Add(-plugin.Delay.Duration)) + require.EqualValues(t, plugin.windowEnd, now.Add(-time.Duration(plugin.Delay))) require.EqualValues(t, plugin.windowStart, newStartTime) } @@ -363,7 +361,7 @@ func TestGetDiscoveryDataAllRegions(t *testing.T) { `{ "LoadBalancers": { - "LoadBalancer": [ + "LoadBalancer": [ {"LoadBalancerId":"bla"} ] }, diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index 12edc62cc845c..429d1cb9e69cc 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -12,7 +12,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -21,7 +21,7 @@ type Apache struct { Urls []string Username string Password string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig client *http.Client @@ -62,8 +62,8 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error { if len(n.Urls) == 0 { n.Urls = []string{"http://localhost/server-status?auto"} } - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } if n.client == nil { @@ -102,7 +102,7 @@ func (n *Apache) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/apcupsd/apcupsd.go b/plugins/inputs/apcupsd/apcupsd.go index 4acadffe38dd2..b41a91b829af2 100644 --- a/plugins/inputs/apcupsd/apcupsd.go +++ b/plugins/inputs/apcupsd/apcupsd.go @@ -8,18 +8,18 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/mdlayher/apcupsd" ) const defaultAddress = "tcp://127.0.0.1:3551" -var defaultTimeout = internal.Duration{Duration: time.Second * 5} +var defaultTimeout = config.Duration(5 * time.Second) type ApcUpsd struct { Servers []string - Timeout internal.Duration + Timeout config.Duration } func (*ApcUpsd) Description() string { @@ -51,7 +51,7 @@ func (h *ApcUpsd) Gather(acc telegraf.Accumulator) error { addrBits.Scheme = "tcp" } - ctx, cancel := context.WithTimeout(ctx, h.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(h.Timeout)) defer cancel() status, err := fetchStatus(ctx, addrBits) diff --git a/plugins/inputs/aurora/aurora.go b/plugins/inputs/aurora/aurora.go index fc6f82aadda17..04737adbabd86 100644 --- a/plugins/inputs/aurora/aurora.go +++ b/plugins/inputs/aurora/aurora.go @@ -11,7 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -43,11 +43,11 @@ var ( type Vars map[string]interface{} type Aurora struct { - Schedulers []string `toml:"schedulers"` - Roles []string `toml:"roles"` - Timeout internal.Duration `toml:"timeout"` - Username string `toml:"username"` - Password string `toml:"password"` + Schedulers []string `toml:"schedulers"` + Roles []string `toml:"roles"` + Timeout config.Duration `toml:"timeout"` + Username string `toml:"username"` + Password string `toml:"password"` tls.ClientConfig client *http.Client @@ -95,7 +95,7 @@ func (a *Aurora) Gather(acc telegraf.Accumulator) error { } } - ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(a.Timeout)) defer cancel() var wg sync.WaitGroup @@ -147,8 +147,8 @@ func (a *Aurora) initialize() error { urls = append(urls, loc) } - if a.Timeout.Duration < time.Second { - a.Timeout.Duration = defaultTimeout + if a.Timeout < config.Duration(time.Second) { + a.Timeout = config.Duration(defaultTimeout) } if len(a.Roles) == 0 { diff --git a/plugins/inputs/beat/beat.go b/plugins/inputs/beat/beat.go index 79769df9ddbbc..2d57a6deadfca 100644 --- a/plugins/inputs/beat/beat.go +++ b/plugins/inputs/beat/beat.go @@ -8,7 +8,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -80,7 +80,7 @@ type Beat struct { Method string `toml:"method"` Headers map[string]string `toml:"headers"` HostHeader string `toml:"host_header"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` tls.ClientConfig client *http.Client @@ -92,7 +92,7 @@ func NewBeat() *Beat { Includes: []string{"beat", "libbeat", "filebeat"}, Method: "GET", Headers: make(map[string]string), - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } @@ -133,7 +133,7 @@ func (beat *Beat) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: beat.Timeout.Duration, + Timeout: time.Duration(beat.Timeout), } return client, nil diff --git a/plugins/inputs/burrow/burrow.go b/plugins/inputs/burrow/burrow.go index 501fddf16ad77..d575ea7f5eb0e 100644 --- a/plugins/inputs/burrow/burrow.go +++ b/plugins/inputs/burrow/burrow.go @@ -11,8 +11,8 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -73,7 +73,7 @@ type ( Servers []string Username string Password string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration ConcurrentConnections int APIPrefix string `toml:"api_prefix"` @@ -188,10 +188,8 @@ func (b *burrow) setDefaults() { if b.ConcurrentConnections < 1 { b.ConcurrentConnections = defaultConcurrentConnections } - if b.ResponseTimeout.Duration < time.Second { - b.ResponseTimeout = internal.Duration{ - Duration: defaultResponseTimeout, - } + if time.Duration(b.ResponseTimeout) < time.Second { + b.ResponseTimeout = config.Duration(defaultResponseTimeout) } } @@ -224,7 +222,7 @@ func (b *burrow) createClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: b.ResponseTimeout.Duration, + Timeout: time.Duration(b.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index 866ddeb8c164f..4e87431c0b032 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -14,6 +14,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -101,20 +102,20 @@ func init() { ClientConfig: tls.ClientConfig{ InsecureSkipVerify: false, }, - Timeout: internal.Duration{Duration: defaultTimeout}, + Timeout: config.Duration(defaultTimeout), } }) } // ClickHouse Telegraf Input Plugin type ClickHouse struct { - Username string `toml:"username"` - Password string `toml:"password"` - Servers []string `toml:"servers"` - AutoDiscovery bool `toml:"auto_discovery"` - ClusterInclude []string `toml:"cluster_include"` - ClusterExclude []string `toml:"cluster_exclude"` - Timeout internal.Duration `toml:"timeout"` + Username string `toml:"username"` + Password string `toml:"password"` + Servers []string `toml:"servers"` + AutoDiscovery bool `toml:"auto_discovery"` + ClusterInclude []string `toml:"cluster_include"` + ClusterExclude []string `toml:"cluster_exclude"` + Timeout config.Duration `toml:"timeout"` HTTPClient http.Client tls.ClientConfig } @@ -132,8 +133,8 @@ func (*ClickHouse) Description() string { // Start ClickHouse input service func (ch *ClickHouse) Start(telegraf.Accumulator) error { timeout := defaultTimeout - if ch.Timeout.Duration != 0 { - timeout = ch.Timeout.Duration + if time.Duration(ch.Timeout) != 0 { + timeout = time.Duration(ch.Timeout) } tlsCfg, err := ch.ClientConfig.TLSConfig() if err != nil { diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/pubsub.go index 230c459045727..806f84f61340d 100644 --- a/plugins/inputs/cloud_pubsub/pubsub.go +++ b/plugins/inputs/cloud_pubsub/pubsub.go @@ -10,6 +10,7 @@ import ( "cloud.google.com/go/pubsub" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" @@ -31,10 +32,10 @@ type PubSub struct { Subscription string `toml:"subscription"` // Subscription ReceiveSettings - MaxExtension internal.Duration `toml:"max_extension"` - MaxOutstandingMessages int `toml:"max_outstanding_messages"` - MaxOutstandingBytes int `toml:"max_outstanding_bytes"` - MaxReceiverGoRoutines int `toml:"max_receiver_go_routines"` + MaxExtension config.Duration `toml:"max_extension"` + MaxOutstandingMessages int `toml:"max_outstanding_messages"` + MaxOutstandingBytes int `toml:"max_outstanding_bytes"` + MaxReceiverGoRoutines int `toml:"max_receiver_go_routines"` // Agent settings MaxMessageLen int `toml:"max_message_len"` @@ -277,7 +278,7 @@ func (ps *PubSub) getGCPSubscription(subID string) (subscription, error) { s := client.Subscription(subID) s.ReceiveSettings = pubsub.ReceiveSettings{ NumGoroutines: ps.MaxReceiverGoRoutines, - MaxExtension: ps.MaxExtension.Duration, + MaxExtension: time.Duration(ps.MaxExtension), MaxOutstandingMessages: ps.MaxOutstandingMessages, MaxOutstandingBytes: ps.MaxOutstandingBytes, } @@ -312,8 +313,8 @@ const sampleConfig = ` ## Application Default Credentials, which is preferred. # credentials_file = "path/to/my/creds.json" - ## Optional. Number of seconds to wait before attempting to restart the - ## PubSub subscription receiver after an unexpected error. + ## Optional. Number of seconds to wait before attempting to restart the + ## PubSub subscription receiver after an unexpected error. ## If the streaming pull for a PubSub Subscription fails (receiver), ## the agent attempts to restart receiving messages after this many seconds. # retry_delay_seconds = 5 @@ -362,7 +363,7 @@ const sampleConfig = ` ## processed concurrently (use "max_outstanding_messages" instead). # max_receiver_go_routines = 0 - ## Optional. If true, Telegraf will attempt to base64 decode the + ## Optional. If true, Telegraf will attempt to base64 decode the ## PubSub message data before parsing # base64_data = false ` diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push.go b/plugins/inputs/cloud_pubsub_push/pubsub_push.go index 575bdae61f8c9..5b434599a986f 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push.go @@ -11,7 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" @@ -27,9 +27,9 @@ type PubSubPush struct { ServiceAddress string Token string Path string - ReadTimeout internal.Duration - WriteTimeout internal.Duration - MaxBodySize internal.Size + ReadTimeout config.Duration + WriteTimeout config.Duration + MaxBodySize config.Size AddMeta bool Log telegraf.Logger @@ -129,15 +129,15 @@ func (p *PubSubPush) SetParser(parser parsers.Parser) { // Start starts the http listener service. func (p *PubSubPush) Start(acc telegraf.Accumulator) error { - if p.MaxBodySize.Size == 0 { - p.MaxBodySize.Size = defaultMaxBodySize + if p.MaxBodySize == 0 { + p.MaxBodySize = config.Size(defaultMaxBodySize) } - if p.ReadTimeout.Duration < time.Second { - p.ReadTimeout.Duration = time.Second * 10 + if p.ReadTimeout < config.Duration(time.Second) { + p.ReadTimeout = config.Duration(time.Second * 10) } - if p.WriteTimeout.Duration < time.Second { - p.WriteTimeout.Duration = time.Second * 10 + if p.WriteTimeout < config.Duration(time.Second) { + p.WriteTimeout = config.Duration(time.Second * 10) } tlsConf, err := p.ServerConfig.TLSConfig() @@ -147,8 +147,8 @@ func (p *PubSubPush) Start(acc telegraf.Accumulator) error { p.server = &http.Server{ Addr: p.ServiceAddress, - Handler: http.TimeoutHandler(p, p.WriteTimeout.Duration, "timed out processing metric"), - ReadTimeout: p.ReadTimeout.Duration, + Handler: http.TimeoutHandler(p, time.Duration(p.WriteTimeout), "timed out processing metric"), + ReadTimeout: time.Duration(p.ReadTimeout), TLSConfig: tlsConf, } @@ -206,7 +206,7 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) { } // Check that the content length is not too large for us to handle. - if req.ContentLength > p.MaxBodySize.Size { + if req.ContentLength > int64(p.MaxBodySize) { res.WriteHeader(http.StatusRequestEntityTooLarge) return } @@ -216,7 +216,7 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) { return } - body := http.MaxBytesReader(res, req.Body, p.MaxBodySize.Size) + body := http.MaxBytesReader(res, req.Body, int64(p.MaxBodySize)) bytes, err := ioutil.ReadAll(body) if err != nil { res.WriteHeader(http.StatusRequestEntityTooLarge) diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go index ccce488a81c6e..bd958e961dd0a 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -119,15 +119,13 @@ func TestServeHTTP(t *testing.T) { rr := httptest.NewRecorder() pubPush := &PubSubPush{ - Log: testutil.Logger{}, - Path: "/", - MaxBodySize: internal.Size{ - Size: test.maxsize, - }, + Log: testutil.Logger{}, + Path: "/", + MaxBodySize: config.Size(test.maxsize), sem: make(chan struct{}, 1), undelivered: make(map[telegraf.TrackingID]chan bool), mu: &sync.Mutex{}, - WriteTimeout: internal.Duration{Duration: time.Second * 1}, + WriteTimeout: config.Duration(time.Second * 1), } pubPush.ctx, pubPush.cancel = context.WithCancel(context.Background()) @@ -162,7 +160,7 @@ func TestServeHTTP(t *testing.T) { } }(dst) - ctx, cancel := context.WithTimeout(req.Context(), pubPush.WriteTimeout.Duration) + ctx, cancel := context.WithTimeout(req.Context(), time.Duration(pubPush.WriteTimeout)) req = req.WithContext(ctx) pubPush.ServeHTTP(rr, req) diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index 20cb47fd34288..25e4e4755cc30 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -11,8 +11,8 @@ import ( jwt "github.com/dgrijalva/jwt-go/v4" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -56,7 +56,7 @@ type DCOS struct { AppExclude []string MaxConnections int - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig client Client @@ -359,7 +359,7 @@ func (d *DCOS) createClient() (Client, error) { client := NewClusterClient( url, - d.ResponseTimeout.Duration, + time.Duration(d.ResponseTimeout), d.MaxConnections, tlsCfg, ) @@ -421,10 +421,8 @@ func (d *DCOS) createFilters() error { func init() { inputs.Add("dcos", func() telegraf.Input { return &DCOS{ - MaxConnections: defaultMaxConnections, - ResponseTimeout: internal.Duration{ - Duration: defaultResponseTimeout, - }, + MaxConnections: defaultMaxConnections, + ResponseTimeout: config.Duration(defaultResponseTimeout), } }) } diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 087e106ad4112..1b44351178d41 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -16,8 +16,8 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/internal/docker" tlsint "github.com/influxdata/telegraf/plugins/common/tls" @@ -31,7 +31,7 @@ type Docker struct { GatherServices bool `toml:"gather_services"` - Timeout internal.Duration + Timeout config.Duration PerDevice bool `toml:"perdevice"` PerDeviceInclude []string `toml:"perdevice_include"` Total bool `toml:"total"` @@ -259,7 +259,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { opts := types.ContainerListOptions{ Filters: filterArgs, } - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() containers, err := d.client.ContainerList(ctx, opts) @@ -287,7 +287,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { } func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() services, err := d.client.ServiceList(ctx, types.ServiceListOptions{}) @@ -364,7 +364,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { now := time.Now() // Get info from docker daemon - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() info, err := d.client.Info(ctx) @@ -524,7 +524,7 @@ func (d *Docker) gatherContainer( tags["source"] = hostnameFromID(container.ID) } - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() r, err := d.client.ContainerStats(ctx, container.ID, false) @@ -562,7 +562,7 @@ func (d *Docker) gatherContainerInspect( daemonOSType string, v *types.StatsJSON, ) error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() info, err := d.client.ContainerInspect(ctx, container.ID) @@ -1010,7 +1010,7 @@ func init() { PerDevice: true, PerDeviceInclude: []string{"cpu"}, TotalInclude: []string{"cpu", "blkio", "network"}, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), Endpoint: defaultEndpoint, newEnvClient: NewEnvClient, newClient: NewClient, diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index 878bf4a63fbe3..73bcefb3d887c 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -16,8 +16,8 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/pkg/stdcopy" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/docker" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -73,16 +73,16 @@ var ( ) type DockerLogs struct { - Endpoint string `toml:"endpoint"` - FromBeginning bool `toml:"from_beginning"` - Timeout internal.Duration `toml:"timeout"` - LabelInclude []string `toml:"docker_label_include"` - LabelExclude []string `toml:"docker_label_exclude"` - ContainerInclude []string `toml:"container_name_include"` - ContainerExclude []string `toml:"container_name_exclude"` - ContainerStateInclude []string `toml:"container_state_include"` - ContainerStateExclude []string `toml:"container_state_exclude"` - IncludeSourceTag bool `toml:"source_tag"` + Endpoint string `toml:"endpoint"` + FromBeginning bool `toml:"from_beginning"` + Timeout config.Duration `toml:"timeout"` + LabelInclude []string `toml:"docker_label_include"` + LabelExclude []string `toml:"docker_label_exclude"` + ContainerInclude []string `toml:"container_name_include"` + ContainerExclude []string `toml:"container_name_exclude"` + ContainerStateInclude []string `toml:"container_state_include"` + ContainerStateExclude []string `toml:"container_state_exclude"` + IncludeSourceTag bool `toml:"source_tag"` tlsint.ClientConfig @@ -199,7 +199,7 @@ func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { ctx := context.Background() acc.SetPrecision(time.Nanosecond) - ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(d.Timeout)) defer cancel() containers, err := d.client.ContainerList(ctx, d.opts) if err != nil { @@ -235,7 +235,7 @@ func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { } func (d *DockerLogs) hasTTY(ctx context.Context, container types.Container) (bool, error) { - ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(d.Timeout)) defer cancel() c, err := d.client.ContainerInspect(ctx, container.ID) if err != nil { @@ -450,7 +450,7 @@ func (d *DockerLogs) createContainerStateFilters() error { func init() { inputs.Add("docker_log", func() telegraf.Input { return &DockerLogs{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), Endpoint: defaultEndpoint, newEnvClient: NewEnvClient, newClient: NewClient, diff --git a/plugins/inputs/docker_log/docker_log_test.go b/plugins/inputs/docker_log/docker_log_test.go index c8903c9d8ec28..6d92b73ee6d41 100644 --- a/plugins/inputs/docker_log/docker_log_test.go +++ b/plugins/inputs/docker_log/docker_log_test.go @@ -12,7 +12,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/stdcopy" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -165,7 +165,7 @@ func Test(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator plugin := &DockerLogs{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), newClient: func(string, *tls.Config) (Client, error) { return tt.client, nil }, containerList: make(map[string]context.CancelFunc), IncludeSourceTag: true, diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go index 2194257f1913d..d563fef5038d5 100644 --- a/plugins/inputs/ecs/ecs.go +++ b/plugins/inputs/ecs/ecs.go @@ -6,15 +6,15 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) // Ecs config object type Ecs struct { EndpointURL string `toml:"endpoint_url"` - Timeout internal.Duration + Timeout config.Duration ContainerNameInclude []string `toml:"container_name_include"` ContainerNameExclude []string `toml:"container_name_exclude"` @@ -114,7 +114,7 @@ func initSetup(ecs *Ecs) error { if ecs.client == nil { resolveEndpoint(ecs) - c, err := ecs.newClient(ecs.Timeout.Duration, ecs.EndpointURL, ecs.metadataVersion) + c, err := ecs.newClient(time.Duration(ecs.Timeout), ecs.EndpointURL, ecs.metadataVersion) if err != nil { return err } @@ -262,7 +262,7 @@ func init() { inputs.Add("ecs", func() telegraf.Input { return &Ecs{ EndpointURL: "", - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), newClient: NewClient, filtersCreated: false, } diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 505bb69835041..aac23d707edba 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -12,8 +12,8 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" @@ -147,19 +147,19 @@ const sampleConfig = ` // Elasticsearch is a plugin to read stats from one or many Elasticsearch // servers. type Elasticsearch struct { - Local bool `toml:"local"` - Servers []string `toml:"servers"` - HTTPTimeout internal.Duration `toml:"http_timeout"` - ClusterHealth bool `toml:"cluster_health"` - ClusterHealthLevel string `toml:"cluster_health_level"` - ClusterStats bool `toml:"cluster_stats"` - ClusterStatsOnlyFromMaster bool `toml:"cluster_stats_only_from_master"` - IndicesInclude []string `toml:"indices_include"` - IndicesLevel string `toml:"indices_level"` - NodeStats []string `toml:"node_stats"` - Username string `toml:"username"` - Password string `toml:"password"` - NumMostRecentIndices int `toml:"num_most_recent_indices"` + Local bool `toml:"local"` + Servers []string `toml:"servers"` + HTTPTimeout config.Duration `toml:"http_timeout"` + ClusterHealth bool `toml:"cluster_health"` + ClusterHealthLevel string `toml:"cluster_health_level"` + ClusterStats bool `toml:"cluster_stats"` + ClusterStatsOnlyFromMaster bool `toml:"cluster_stats_only_from_master"` + IndicesInclude []string `toml:"indices_include"` + IndicesLevel string `toml:"indices_level"` + NodeStats []string `toml:"node_stats"` + Username string `toml:"username"` + Password string `toml:"password"` + NumMostRecentIndices int `toml:"num_most_recent_indices"` tls.ClientConfig @@ -180,7 +180,7 @@ func (i serverInfo) isMaster() bool { // NewElasticsearch return a new instance of Elasticsearch func NewElasticsearch() *Elasticsearch { return &Elasticsearch{ - HTTPTimeout: internal.Duration{Duration: time.Second * 5}, + HTTPTimeout: config.Duration(time.Second * 5), ClusterStatsOnlyFromMaster: true, ClusterHealthLevel: "indices", } @@ -340,12 +340,12 @@ func (e *Elasticsearch) createHTTPClient() (*http.Client, error) { return nil, err } tr := &http.Transport{ - ResponseHeaderTimeout: e.HTTPTimeout.Duration, + ResponseHeaderTimeout: time.Duration(e.HTTPTimeout), TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: e.HTTPTimeout.Duration, + Timeout: time.Duration(e.HTTPTimeout), } return client, nil diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index fc498c799c966..3cd8beb029a7f 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -12,6 +12,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" @@ -43,9 +44,9 @@ const sampleConfig = ` const MaxStderrBytes int = 512 type Exec struct { - Commands []string `toml:"commands"` - Command string `toml:"command"` - Timeout internal.Duration `toml:"timeout"` + Commands []string `toml:"commands"` + Command string `toml:"command"` + Timeout config.Duration `toml:"timeout"` parser parsers.Parser @@ -56,7 +57,7 @@ type Exec struct { func NewExec() *Exec { return &Exec{ runner: CommandRunner{}, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } @@ -138,7 +139,7 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync defer wg.Done() _, isNagios := e.parser.(*nagios.NagiosParser) - out, errbuf, runErr := e.runner.Run(command, e.Timeout.Duration) + out, errbuf, runErr := e.runner.Run(command, time.Duration(e.Timeout)) if !isNagios && runErr != nil { err := fmt.Errorf("exec: %s for command '%s': %s", runErr, command, string(errbuf)) acc.AddError(err) diff --git a/plugins/inputs/fibaro/fibaro.go b/plugins/inputs/fibaro/fibaro.go index 6cfe9e64834cf..b60eecfa8e2a6 100644 --- a/plugins/inputs/fibaro/fibaro.go +++ b/plugins/inputs/fibaro/fibaro.go @@ -8,7 +8,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -37,7 +37,7 @@ type Fibaro struct { Username string `toml:"username"` Password string `toml:"password"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` client *http.Client } @@ -126,7 +126,7 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: f.Timeout.Duration, + Timeout: time.Duration(f.Timeout), } } @@ -221,7 +221,7 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("fibaro", func() telegraf.Input { return &Fibaro{ - Timeout: internal.Duration{Duration: defaultTimeout}, + Timeout: config.Duration(defaultTimeout), } }) } diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index 87c98075487e1..cc72fb348386b 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -6,7 +6,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" "github.com/karrick/godirwalk" @@ -57,8 +57,8 @@ type FileCount struct { Recursive bool RegularOnly bool FollowSymlinks bool - Size internal.Size - MTime internal.Duration `toml:"mtime"` + Size config.Size + MTime config.Duration `toml:"mtime"` fileFilters []fileFilterFunc globPaths []globpath.GlobPath Fs fileSystem @@ -108,7 +108,7 @@ func (fc *FileCount) regularOnlyFilter() fileFilterFunc { } func (fc *FileCount) sizeFilter() fileFilterFunc { - if fc.Size.Size == 0 { + if fc.Size == 0 { return nil } @@ -116,22 +116,22 @@ func (fc *FileCount) sizeFilter() fileFilterFunc { if !f.Mode().IsRegular() { return false, nil } - if fc.Size.Size < 0 { - return f.Size() < -fc.Size.Size, nil + if fc.Size < 0 { + return f.Size() < -int64(fc.Size), nil } - return f.Size() >= fc.Size.Size, nil + return f.Size() >= int64(fc.Size), nil } } func (fc *FileCount) mtimeFilter() fileFilterFunc { - if fc.MTime.Duration == 0 { + if time.Duration(fc.MTime) == 0 { return nil } return func(f os.FileInfo) (bool, error) { - age := absDuration(fc.MTime.Duration) + age := absDuration(time.Duration(fc.MTime)) mtime := time.Now().Add(-age) - if fc.MTime.Duration < 0 { + if time.Duration(fc.MTime) < 0 { return f.ModTime().After(mtime), nil } return f.ModTime().Before(mtime), nil @@ -302,8 +302,8 @@ func NewFileCount() *FileCount { Recursive: true, RegularOnly: true, FollowSymlinks: false, - Size: internal.Size{Size: 0}, - MTime: internal.Duration{Duration: 0}, + Size: config.Size(0), + MTime: config.Duration(0), fileFilters: nil, Fs: osFS{}, } diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index a4c073bf15d80..35bb0f080c73a 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -14,7 +14,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -95,12 +95,12 @@ func TestRegularOnlyFilter(t *testing.T) { func TestSizeFilter(t *testing.T) { fc := getNoFilterFileCount() - fc.Size = internal.Size{Size: -100} + fc.Size = config.Size(-100) matches := []string{"foo", "bar", "baz", "subdir/quux", "subdir/quuz"} fileCountEquals(t, fc, len(matches), 0) - fc.Size = internal.Size{Size: 100} + fc.Size = config.Size(100) matches = []string{"qux", "subdir/nested2//qux"} fileCountEquals(t, fc, len(matches), 800) @@ -111,14 +111,14 @@ func TestMTimeFilter(t *testing.T) { fileAge := time.Since(mtime) - (60 * time.Second) fc := getNoFilterFileCount() - fc.MTime = internal.Duration{Duration: -fileAge} + fc.MTime = config.Duration(-fileAge) matches := []string{"foo", "bar", "qux", "subdir/", "subdir/quux", "subdir/quuz", "subdir/nested2", "subdir/nested2/qux"} fileCountEquals(t, fc, len(matches), 5096) - fc.MTime = internal.Duration{Duration: fileAge} + fc.MTime = config.Duration(fileAge) matches = []string{"baz"} fileCountEquals(t, fc, len(matches), 0) } @@ -175,8 +175,8 @@ func getNoFilterFileCount() FileCount { Name: "*", Recursive: true, RegularOnly: false, - Size: internal.Size{Size: 0}, - MTime: internal.Duration{Duration: 0}, + Size: config.Size(0), + MTime: config.Duration(0), fileFilters: nil, Fs: getFakeFileSystem(getTestdataDir()), } diff --git a/plugins/inputs/fireboard/fireboard.go b/plugins/inputs/fireboard/fireboard.go index 92846a0760cfd..eba11d6196409 100644 --- a/plugins/inputs/fireboard/fireboard.go +++ b/plugins/inputs/fireboard/fireboard.go @@ -8,15 +8,15 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) // Fireboard gathers statistics from the fireboard.io servers type Fireboard struct { - AuthToken string `toml:"auth_token"` - URL string `toml:"url"` - HTTPTimeout internal.Duration `toml:"http_timeout"` + AuthToken string `toml:"auth_token"` + URL string `toml:"url"` + HTTPTimeout config.Duration `toml:"http_timeout"` client *http.Client } @@ -76,11 +76,11 @@ func (r *Fireboard) Init() error { r.URL = "https://fireboard.io/api/v1/devices.json" } // Have a default timeout of 4s - if r.HTTPTimeout.Duration == 0 { - r.HTTPTimeout.Duration = time.Second * 4 + if r.HTTPTimeout == 0 { + r.HTTPTimeout = config.Duration(time.Second * 4) } - r.client.Timeout = r.HTTPTimeout.Duration + r.client.Timeout = time.Duration(r.HTTPTimeout) return nil } diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go index c7e3888f9c4e6..020775cb43e8c 100644 --- a/plugins/inputs/github/github.go +++ b/plugins/inputs/github/github.go @@ -10,7 +10,7 @@ import ( "github.com/google/go-github/v32/github" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/selfstat" "golang.org/x/oauth2" @@ -18,11 +18,11 @@ import ( // GitHub - plugin main structure type GitHub struct { - Repositories []string `toml:"repositories"` - AccessToken string `toml:"access_token"` - AdditionalFields []string `toml:"additional_fields"` - EnterpriseBaseURL string `toml:"enterprise_base_url"` - HTTPTimeout internal.Duration `toml:"http_timeout"` + Repositories []string `toml:"repositories"` + AccessToken string `toml:"access_token"` + AdditionalFields []string `toml:"additional_fields"` + EnterpriseBaseURL string `toml:"enterprise_base_url"` + HTTPTimeout config.Duration `toml:"http_timeout"` githubClient *github.Client obfuscatedToken string @@ -73,7 +73,7 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: g.HTTPTimeout.Duration, + Timeout: time.Duration(g.HTTPTimeout), } g.obfuscatedToken = "Unauthenticated" @@ -249,7 +249,7 @@ func (g *GitHub) getPullRequestFields(ctx context.Context, owner, repo string) ( func init() { inputs.Add("github", func() telegraf.Input { return &GitHub{ - HTTPTimeout: internal.Duration{Duration: time.Second * 5}, + HTTPTimeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 5d4c80edd5538..19207717a5582 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -15,7 +15,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" internaltls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -44,7 +44,7 @@ type GNMI struct { Password string // Redial - Redial internal.Duration + Redial config.Duration // GRPC TLS settings EnableTLS bool `toml:"enable_tls"` @@ -66,12 +66,12 @@ type Subscription struct { Path string // Subscription mode and interval - SubscriptionMode string `toml:"subscription_mode"` - SampleInterval internal.Duration `toml:"sample_interval"` + SubscriptionMode string `toml:"subscription_mode"` + SampleInterval config.Duration `toml:"sample_interval"` // Duplicate suppression - SuppressRedundant bool `toml:"suppress_redundant"` - HeartbeatInterval internal.Duration `toml:"heartbeat_interval"` + SuppressRedundant bool `toml:"suppress_redundant"` + HeartbeatInterval config.Duration `toml:"heartbeat_interval"` } // Start the http listener service @@ -86,7 +86,7 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { // Validate configuration if request, err = c.newSubscribeRequest(); err != nil { return err - } else if c.Redial.Duration.Nanoseconds() <= 0 { + } else if time.Duration(c.Redial).Nanoseconds() <= 0 { return fmt.Errorf("redial duration must be positive") } @@ -143,7 +143,7 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { select { case <-ctx.Done(): - case <-time.After(c.Redial.Duration): + case <-time.After(time.Duration(c.Redial)): } } }(addr) @@ -167,9 +167,9 @@ func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { subscriptions[i] = &gnmi.Subscription{ Path: gnmiPath, Mode: gnmi.SubscriptionMode(mode), - SampleInterval: uint64(subscription.SampleInterval.Duration.Nanoseconds()), + SampleInterval: uint64(time.Duration(subscription.SampleInterval).Nanoseconds()), SuppressRedundant: subscription.SuppressRedundant, - HeartbeatInterval: uint64(subscription.HeartbeatInterval.Duration.Nanoseconds()), + HeartbeatInterval: uint64(time.Duration(subscription.HeartbeatInterval).Nanoseconds()), } } @@ -555,7 +555,7 @@ func (c *GNMI) Gather(_ telegraf.Accumulator) error { func New() telegraf.Input { return &GNMI{ Encoding: "proto", - Redial: internal.Duration{Duration: 10 * time.Second}, + Redial: config.Duration(10 * time.Second), } } diff --git a/plugins/inputs/gnmi/gnmi_test.go b/plugins/inputs/gnmi/gnmi_test.go index 1846fd67a9951..25840db46ef2f 100644 --- a/plugins/inputs/gnmi/gnmi_test.go +++ b/plugins/inputs/gnmi/gnmi_test.go @@ -10,7 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/openconfig/gnmi/proto/gnmi" "github.com/stretchr/testify/assert" @@ -77,7 +77,7 @@ func TestWaitError(t *testing.T) { Log: testutil.Logger{}, Addresses: []string{listener.Addr().String()}, Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, + Redial: config.Duration(1 * time.Second), } var acc testutil.Accumulator @@ -135,7 +135,7 @@ func TestUsernamePassword(t *testing.T) { Username: "theusername", Password: "thepassword", Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, + Redial: config.Duration(1 * time.Second), } var acc testutil.Accumulator @@ -218,7 +218,7 @@ func TestNotification(t *testing.T) { plugin: &GNMI{ Log: testutil.Logger{}, Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, + Redial: config.Duration(1 * time.Second), Subscriptions: []Subscription{ { Name: "alias", @@ -302,7 +302,7 @@ func TestNotification(t *testing.T) { plugin: &GNMI{ Log: testutil.Logger{}, Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, + Redial: config.Duration(1 * time.Second), Subscriptions: []Subscription{ { Name: "PHY_COUNTERS", @@ -435,7 +435,7 @@ func TestRedial(t *testing.T) { Log: testutil.Logger{}, Addresses: []string{listener.Addr().String()}, Encoding: "proto", - Redial: internal.Duration{Duration: 10 * time.Millisecond}, + Redial: config.Duration(10 * time.Millisecond), } grpcServer := grpc.NewServer() diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index 58a5bd51040ed..3c5b80a8e3f95 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/common/tls" @@ -37,7 +38,7 @@ type HTTP struct { SuccessStatusCodes []int `toml:"success_status_codes"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` client *http.Client @@ -124,7 +125,7 @@ func (h *HTTP) Init() error { h.client = &http.Client{ Transport: transport, - Timeout: h.Timeout.Duration, + Timeout: time.Duration(h.Timeout), } // Set default as [200] @@ -261,7 +262,7 @@ func makeRequestBodyReader(contentEncoding, body string) (io.ReadCloser, error) func init() { inputs.Add("http", func() telegraf.Input { return &HTTP{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), Method: "GET", } }) diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 6a6d45592033d..89714bb0818b1 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -14,7 +14,7 @@ import ( "github.com/golang/snappy" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" @@ -39,9 +39,9 @@ type HTTPListenerV2 struct { Path string `toml:"path"` Methods []string `toml:"methods"` DataSource string `toml:"data_source"` - ReadTimeout internal.Duration `toml:"read_timeout"` - WriteTimeout internal.Duration `toml:"write_timeout"` - MaxBodySize internal.Size `toml:"max_body_size"` + ReadTimeout config.Duration `toml:"read_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + MaxBodySize config.Size `toml:"max_body_size"` Port int `toml:"port"` BasicUsername string `toml:"basic_username"` BasicPassword string `toml:"basic_password"` @@ -125,15 +125,15 @@ func (h *HTTPListenerV2) SetParser(parser parsers.Parser) { // Start starts the http listener service. func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { - if h.MaxBodySize.Size == 0 { - h.MaxBodySize.Size = defaultMaxBodySize + if h.MaxBodySize == 0 { + h.MaxBodySize = config.Size(defaultMaxBodySize) } - if h.ReadTimeout.Duration < time.Second { - h.ReadTimeout.Duration = time.Second * 10 + if h.ReadTimeout < config.Duration(time.Second) { + h.ReadTimeout = config.Duration(time.Second * 10) } - if h.WriteTimeout.Duration < time.Second { - h.WriteTimeout.Duration = time.Second * 10 + if h.WriteTimeout < config.Duration(time.Second) { + h.WriteTimeout = config.Duration(time.Second * 10) } h.acc = acc @@ -146,8 +146,8 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { server := &http.Server{ Addr: h.ServiceAddress, Handler: h, - ReadTimeout: h.ReadTimeout.Duration, - WriteTimeout: h.WriteTimeout.Duration, + ReadTimeout: time.Duration(h.ReadTimeout), + WriteTimeout: time.Duration(h.WriteTimeout), TLSConfig: tlsConf, } @@ -198,7 +198,7 @@ func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) { // Check that the content length is not too large for us to handle. - if req.ContentLength > h.MaxBodySize.Size { + if req.ContentLength > int64(h.MaxBodySize) { if err := tooLarge(res); err != nil { h.Log.Debugf("error in too-large: %v", err) } @@ -271,7 +271,7 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) return nil, false } defer r.Close() - maxReader := http.MaxBytesReader(res, r, h.MaxBodySize.Size) + maxReader := http.MaxBytesReader(res, r, int64(h.MaxBodySize)) bytes, err := ioutil.ReadAll(maxReader) if err != nil { if err := tooLarge(res); err != nil { diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index e4507984c3394..6b906f9cec3e3 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -14,7 +14,7 @@ import ( "time" "github.com/golang/snappy" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -53,7 +53,7 @@ func newTestHTTPListenerV2() *HTTPListenerV2 { Methods: []string{"POST"}, Parser: parser, TimeFunc: time.Now, - MaxBodySize: internal.Size{Size: 70000}, + MaxBodySize: config.Size(70000), DataSource: "body", } return listener @@ -114,7 +114,7 @@ func TestInvalidListenerConfig(t *testing.T) { Methods: []string{"POST"}, Parser: parser, TimeFunc: time.Now, - MaxBodySize: internal.Size{Size: 70000}, + MaxBodySize: config.Size(70000), DataSource: "body", } @@ -260,7 +260,7 @@ func TestWriteHTTPExactMaxBodySize(t *testing.T) { Path: "/write", Methods: []string{"POST"}, Parser: parser, - MaxBodySize: internal.Size{Size: int64(len(hugeMetric))}, + MaxBodySize: config.Size(len(hugeMetric)), TimeFunc: time.Now, } @@ -283,7 +283,7 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { Path: "/write", Methods: []string{"POST"}, Parser: parser, - MaxBodySize: internal.Size{Size: 4096}, + MaxBodySize: config.Size(4096), TimeFunc: time.Now, } diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 7ec46ea486ab0..d8a4e0e1438cd 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -15,7 +15,7 @@ import ( "unicode/utf8" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -33,14 +33,14 @@ type HTTPResponse struct { HTTPProxy string `toml:"http_proxy"` Body string Method string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration HTTPHeaderTags map[string]string `toml:"http_header_tags"` Headers map[string]string FollowRedirects bool // Absolute path to file with Bearer token - BearerToken string `toml:"bearer_token"` - ResponseBodyField string `toml:"response_body_field"` - ResponseBodyMaxSize internal.Size `toml:"response_body_max_size"` + BearerToken string `toml:"bearer_token"` + ResponseBodyField string `toml:"response_body_field"` + ResponseBodyMaxSize config.Size `toml:"response_body_max_size"` ResponseStringMatch string ResponseStatusCode int Interface string @@ -185,7 +185,7 @@ func (h *HTTPResponse) createHTTPClient() (*http.Client, error) { DisableKeepAlives: true, TLSClientConfig: tlsCfg, }, - Timeout: h.ResponseTimeout.Duration, + Timeout: time.Duration(h.ResponseTimeout), } if !h.FollowRedirects { @@ -336,12 +336,12 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] tags["status_code"] = strconv.Itoa(resp.StatusCode) fields["http_response_code"] = resp.StatusCode - if h.ResponseBodyMaxSize.Size == 0 { - h.ResponseBodyMaxSize.Size = defaultResponseBodyMaxSize + if h.ResponseBodyMaxSize == 0 { + h.ResponseBodyMaxSize = config.Size(defaultResponseBodyMaxSize) } - bodyBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, h.ResponseBodyMaxSize.Size+1)) + bodyBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, int64(h.ResponseBodyMaxSize)+1)) // Check first if the response body size exceeds the limit. - if err == nil && int64(len(bodyBytes)) > h.ResponseBodyMaxSize.Size { + if err == nil && int64(len(bodyBytes)) > int64(h.ResponseBodyMaxSize) { h.setBodyReadError("The body of the HTTP Response is too large", bodyBytes, fields, tags) return fields, tags, nil } else if err != nil { @@ -413,8 +413,8 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { } // Set default values - if h.ResponseTimeout.Duration < time.Second { - h.ResponseTimeout.Duration = time.Second * 5 + if h.ResponseTimeout < config.Duration(time.Second) { + h.ResponseTimeout = config.Duration(time.Second * 5) } // Check send and expected string if h.Method == "" { diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 4772024c569d1..557639027ff03 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -16,7 +16,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -177,7 +177,7 @@ func TestHeaders(t *testing.T) { Log: testutil.Logger{}, URLs: []string{ts.URL}, Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 2}, + ResponseTimeout: config.Duration(time.Second * 2), Headers: map[string]string{ "Content-Type": "application/json", "Host": "Hello", @@ -214,7 +214,7 @@ func TestFields(t *testing.T) { URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -252,7 +252,7 @@ func TestResponseBodyField(t *testing.T) { URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -287,7 +287,7 @@ func TestResponseBodyField(t *testing.T) { URLs: []string{ts.URL + "/invalidUTF8"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -321,11 +321,11 @@ func TestResponseBodyMaxSize(t *testing.T) { URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, - ResponseBodyMaxSize: internal.Size{Size: 5}, + ResponseBodyMaxSize: config.Size(5), FollowRedirects: true, } @@ -355,7 +355,7 @@ func TestHTTPHeaderTags(t *testing.T) { URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, Headers: map[string]string{ "Content-Type": "application/json", @@ -390,7 +390,7 @@ func TestHTTPHeaderTags(t *testing.T) { URLs: []string{ts.URL + "/noheader"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, Headers: map[string]string{ "Content-Type": "application/json", @@ -416,7 +416,7 @@ func TestHTTPHeaderTags(t *testing.T) { URLs: []string{"https:/nonexistent.nonexistent"}, // Any non-routable IP works here Body: "", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + ResponseTimeout: config.Duration(time.Second * 5), HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, FollowRedirects: false, } @@ -472,7 +472,7 @@ func TestInterface(t *testing.T) { URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -511,7 +511,7 @@ func TestRedirects(t *testing.T) { URLs: []string{ts.URL + "/redirect"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -542,7 +542,7 @@ func TestRedirects(t *testing.T) { URLs: []string{ts.URL + "/badredirect"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -579,7 +579,7 @@ func TestMethod(t *testing.T) { URLs: []string{ts.URL + "/mustbepostmethod"}, Body: "{ 'test': 'data'}", Method: "POST", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -610,7 +610,7 @@ func TestMethod(t *testing.T) { URLs: []string{ts.URL + "/mustbepostmethod"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -642,7 +642,7 @@ func TestMethod(t *testing.T) { URLs: []string{ts.URL + "/mustbepostmethod"}, Body: "{ 'test': 'data'}", Method: "head", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -679,7 +679,7 @@ func TestBody(t *testing.T) { URLs: []string{ts.URL + "/musthaveabody"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -709,7 +709,7 @@ func TestBody(t *testing.T) { Log: testutil.Logger{}, URLs: []string{ts.URL + "/musthaveabody"}, Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -745,7 +745,7 @@ func TestStringMatch(t *testing.T) { Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "hit the good page", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -783,7 +783,7 @@ func TestStringMatchJson(t *testing.T) { Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "\"service_status\": \"up\"", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -821,7 +821,7 @@ func TestStringMatchFail(t *testing.T) { Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "hit the bad page", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -863,7 +863,7 @@ func TestTimeout(t *testing.T) { URLs: []string{ts.URL + "/twosecondnap"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second}, + ResponseTimeout: config.Duration(time.Second), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -898,7 +898,7 @@ func TestBadRegex(t *testing.T) { Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "bad regex:[[", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -930,7 +930,7 @@ func TestNetworkErrors(t *testing.T) { URLs: []string{"https://nonexistent.nonexistent"}, // Any non-resolvable URL works here Body: "", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), FollowRedirects: false, client: &fakeClient{err: &url.Error{Err: &net.OpError{Err: &net.DNSError{Err: "DNS error"}}}}, } @@ -958,7 +958,7 @@ func TestNetworkErrors(t *testing.T) { URLs: []string{"https:/nonexistent.nonexistent"}, // Any non-routable IP works here Body: "", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + ResponseTimeout: config.Duration(time.Second * 5), FollowRedirects: false, } @@ -990,7 +990,7 @@ func TestContentLength(t *testing.T) { URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -1021,7 +1021,7 @@ func TestContentLength(t *testing.T) { URLs: []string{ts.URL + "/musthaveabody"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -1109,7 +1109,7 @@ func TestBasicAuth(t *testing.T) { URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Username: "me", Password: "mypassword", Headers: map[string]string{ @@ -1147,7 +1147,7 @@ func TestStatusCodeMatchFail(t *testing.T) { Log: testutil.Logger{}, URLs: []string{ts.URL + "/nocontent"}, ResponseStatusCode: http.StatusOK, - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), } var acc testutil.Accumulator @@ -1180,7 +1180,7 @@ func TestStatusCodeMatch(t *testing.T) { Log: testutil.Logger{}, URLs: []string{ts.URL + "/nocontent"}, ResponseStatusCode: http.StatusNoContent, - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), } var acc testutil.Accumulator @@ -1214,7 +1214,7 @@ func TestStatusCodeAndStringMatch(t *testing.T) { URLs: []string{ts.URL + "/good"}, ResponseStatusCode: http.StatusOK, ResponseStringMatch: "hit the good page", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), } var acc testutil.Accumulator @@ -1249,7 +1249,7 @@ func TestStatusCodeAndStringMatchFail(t *testing.T) { URLs: []string{ts.URL + "/nocontent"}, ResponseStatusCode: http.StatusOK, ResponseStringMatch: "hit the good page", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), } var acc testutil.Accumulator @@ -1285,7 +1285,7 @@ func TestSNI(t *testing.T) { Log: testutil.Logger{}, URLs: []string{ts.URL + "/good"}, Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), ClientConfig: tls.ClientConfig{ InsecureSkipVerify: true, ServerName: "super-special-hostname.example.com", diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index 502a1473b9231..f5d97b90989c0 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -11,7 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" @@ -27,7 +27,7 @@ type HTTPJSON struct { Servers []string Method string TagKeys []string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration Parameters map[string]string Headers map[string]string tls.ClientConfig @@ -131,12 +131,12 @@ func (h *HTTPJSON) Gather(acc telegraf.Accumulator) error { return err } tr := &http.Transport{ - ResponseHeaderTimeout: h.ResponseTimeout.Duration, + ResponseHeaderTimeout: time.Duration(h.ResponseTimeout), TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: h.ResponseTimeout.Duration, + Timeout: time.Duration(h.ResponseTimeout), } h.client.SetHTTPClient(client) } @@ -286,10 +286,8 @@ func (h *HTTPJSON) sendRequest(serverURL string) (string, float64, error) { func init() { inputs.Add("httpjson", func() telegraf.Input { return &HTTPJSON{ - client: &RealHTTPClient{}, - ResponseTimeout: internal.Duration{ - Duration: 5 * time.Second, - }, + client: &RealHTTPClient{}, + ResponseTimeout: config.Duration(5 * time.Second), } }) } diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go index 9dbf52f243e3f..17069e169c81b 100644 --- a/plugins/inputs/icinga2/icinga2.go +++ b/plugins/inputs/icinga2/icinga2.go @@ -8,7 +8,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -18,7 +18,7 @@ type Icinga2 struct { ObjectType string Username string Password string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig Log telegraf.Logger @@ -125,15 +125,15 @@ func (i *Icinga2) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: i.ResponseTimeout.Duration, + Timeout: time.Duration(i.ResponseTimeout), } return client, nil } func (i *Icinga2) Gather(acc telegraf.Accumulator) error { - if i.ResponseTimeout.Duration < time.Second { - i.ResponseTimeout.Duration = time.Second * 5 + if i.ResponseTimeout < config.Duration(time.Second) { + i.ResponseTimeout = config.Duration(time.Second * 5) } if i.client == nil { @@ -186,7 +186,7 @@ func init() { return &Icinga2{ Server: "https://localhost:5665", ObjectType: "services", - ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + ResponseTimeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index d7eb66153034a..b8c028f05aff8 100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -33,10 +34,10 @@ func (e *APIError) Error() string { } type InfluxDB struct { - URLs []string `toml:"urls"` - Username string `toml:"username"` - Password string `toml:"password"` - Timeout internal.Duration `toml:"timeout"` + URLs []string `toml:"urls"` + Username string `toml:"username"` + Password string `toml:"password"` + Timeout config.Duration `toml:"timeout"` tls.ClientConfig client *http.Client @@ -86,10 +87,10 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error { } i.client = &http.Client{ Transport: &http.Transport{ - ResponseHeaderTimeout: i.Timeout.Duration, + ResponseHeaderTimeout: time.Duration(i.Timeout), TLSClientConfig: tlsCfg, }, - Timeout: i.Timeout.Duration, + Timeout: time.Duration(i.Timeout), } } @@ -318,7 +319,7 @@ func readResponseError(resp *http.Response) error { func init() { inputs.Add("influxdb", func() telegraf.Input { return &InfluxDB{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go index 269ba17d6fa67..6b5c67ea07999 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -29,14 +30,14 @@ type InfluxDBListener struct { port int tlsint.ServerConfig - ReadTimeout internal.Duration `toml:"read_timeout"` - WriteTimeout internal.Duration `toml:"write_timeout"` - MaxBodySize internal.Size `toml:"max_body_size"` - MaxLineSize internal.Size `toml:"max_line_size"` // deprecated in 1.14; ignored - BasicUsername string `toml:"basic_username"` - BasicPassword string `toml:"basic_password"` - DatabaseTag string `toml:"database_tag"` - RetentionPolicyTag string `toml:"retention_policy_tag"` + ReadTimeout config.Duration `toml:"read_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + MaxBodySize config.Size `toml:"max_body_size"` + MaxLineSize config.Size `toml:"max_line_size"` // deprecated in 1.14; ignored + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + DatabaseTag string `toml:"database_tag"` + RetentionPolicyTag string `toml:"retention_policy_tag"` timeFunc influx.TimeFunc @@ -137,19 +138,19 @@ func (h *InfluxDBListener) Init() error { h.authFailures = selfstat.Register("influxdb_listener", "auth_failures", tags) h.routes() - if h.MaxBodySize.Size == 0 { - h.MaxBodySize.Size = defaultMaxBodySize + if h.MaxBodySize == 0 { + h.MaxBodySize = config.Size(defaultMaxBodySize) } - if h.MaxLineSize.Size != 0 { + if h.MaxLineSize != 0 { h.Log.Warnf("Use of deprecated configuration: 'max_line_size'; parser now handles lines of unlimited length and option is ignored") } - if h.ReadTimeout.Duration < time.Second { - h.ReadTimeout.Duration = time.Second * 10 + if h.ReadTimeout < config.Duration(time.Second) { + h.ReadTimeout = config.Duration(time.Second * 10) } - if h.WriteTimeout.Duration < time.Second { - h.WriteTimeout.Duration = time.Second * 10 + if h.WriteTimeout < config.Duration(time.Second) { + h.WriteTimeout = config.Duration(time.Second * 10) } return nil @@ -167,8 +168,8 @@ func (h *InfluxDBListener) Start(acc telegraf.Accumulator) error { h.server = http.Server{ Addr: h.ServiceAddress, Handler: h, - ReadTimeout: h.ReadTimeout.Duration, - WriteTimeout: h.WriteTimeout.Duration, + ReadTimeout: time.Duration(h.ReadTimeout), + WriteTimeout: time.Duration(h.WriteTimeout), TLSConfig: tlsConf, } @@ -259,7 +260,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { return func(res http.ResponseWriter, req *http.Request) { defer h.writesServed.Incr(1) // Check that the content length is not too large for us to handle. - if req.ContentLength > h.MaxBodySize.Size { + if req.ContentLength > int64(h.MaxBodySize) { if err := tooLarge(res); err != nil { h.Log.Debugf("error in too-large: %v", err) } @@ -270,7 +271,7 @@ func (h *InfluxDBListener) handleWrite() http.HandlerFunc { rp := req.URL.Query().Get("rp") body := req.Body - body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) + body = http.MaxBytesReader(res, body, int64(h.MaxBodySize)) // Handle gzip request bodies if req.Header.Get("Content-Encoding") == "gzip" { var err error diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go index d3dc552192007..f0bfc695c98a3 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" ) @@ -20,9 +20,7 @@ func newListener() *InfluxDBListener { acc: &testutil.NopAccumulator{}, bytesRecv: selfstat.Register("influxdb_listener", "bytes_received", map[string]string{}), writesServed: selfstat.Register("influxdb_listener", "writes_served", map[string]string{}), - MaxBodySize: internal.Size{ - Size: defaultMaxBodySize, - }, + MaxBodySize: config.Size(defaultMaxBodySize), } return listener } diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go index 09c02fb7b0662..8a082a855a7f8 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -14,7 +14,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -308,7 +308,7 @@ func TestWriteVerySmallMaxBody(t *testing.T) { listener := &InfluxDBListener{ Log: testutil.Logger{}, ServiceAddress: "localhost:0", - MaxBodySize: internal.Size{Size: 4096}, + MaxBodySize: config.Size(4096), timeFunc: time.Now, } diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go index ab1d83732c96a..64907d12a52dc 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go @@ -12,6 +12,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -39,9 +40,9 @@ type InfluxDBV2Listener struct { port int tlsint.ServerConfig - MaxBodySize internal.Size `toml:"max_body_size"` - Token string `toml:"token"` - BucketTag string `toml:"bucket_tag"` + MaxBodySize config.Size `toml:"max_body_size"` + Token string `toml:"token"` + BucketTag string `toml:"bucket_tag"` timeFunc influx.TimeFunc @@ -134,8 +135,8 @@ func (h *InfluxDBV2Listener) Init() error { h.authFailures = selfstat.Register("influxdb_v2_listener", "auth_failures", tags) h.routes() - if h.MaxBodySize.Size == 0 { - h.MaxBodySize.Size = defaultMaxBodySize + if h.MaxBodySize == 0 { + h.MaxBodySize = config.Size(defaultMaxBodySize) } return nil @@ -227,8 +228,8 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { return func(res http.ResponseWriter, req *http.Request) { defer h.writesServed.Incr(1) // Check that the content length is not too large for us to handle. - if req.ContentLength > h.MaxBodySize.Size { - if err := tooLarge(res, h.MaxBodySize.Size); err != nil { + if req.ContentLength > int64(h.MaxBodySize) { + if err := tooLarge(res, int64(h.MaxBodySize)); err != nil { h.Log.Debugf("error in too-large: %v", err) } return @@ -237,7 +238,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { bucket := req.URL.Query().Get("bucket") body := req.Body - body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) + body = http.MaxBytesReader(res, body, int64(h.MaxBodySize)) // Handle gzip request bodies if req.Header.Get("Content-Encoding") == "gzip" { var err error diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go index e1e2c7090b359..219d59a93863e 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" ) @@ -20,9 +20,7 @@ func newListener() *InfluxDBV2Listener { acc: &testutil.NopAccumulator{}, bytesRecv: selfstat.Register("influxdb_v2_listener", "bytes_received", map[string]string{}), writesServed: selfstat.Register("influxdb_v2_listener", "writes_served", map[string]string{}), - MaxBodySize: internal.Size{ - Size: defaultMaxBodySize, - }, + MaxBodySize: config.Size(defaultMaxBodySize), } return listener } diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go index 9d327b41bc377..be99c93f51a8a 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -265,7 +265,7 @@ func TestWriteVerySmallMaxBody(t *testing.T) { listener := &InfluxDBV2Listener{ Log: testutil.Logger{}, ServiceAddress: "localhost:0", - MaxBodySize: internal.Size{Size: 4096}, + MaxBodySize: config.Size(4096), timeFunc: time.Now, } diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index 89ade652fbf55..c7f23dbc30e36 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -15,6 +15,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -33,7 +34,7 @@ type Ipmi struct { Privilege string HexKey string `toml:"hex_key"` Servers []string - Timeout internal.Duration + Timeout config.Duration MetricVersion int UseSudo bool UseCache bool @@ -147,7 +148,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { name = "sudo" } cmd := execCommand(name, dumpOpts...) - out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration) + out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout)) if err != nil { return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } @@ -165,7 +166,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { name = "sudo" } cmd := execCommand(name, opts...) - out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration) + out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout)) timestamp := time.Now() if err != nil { return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) @@ -329,7 +330,7 @@ func init() { if len(path) > 0 { m.Path = path } - m.Timeout = internal.Duration{Duration: time.Second * 20} + m.Timeout = config.Duration(time.Second * 20) m.UseCache = false m.CachePath = os.TempDir() inputs.Add("ipmi_sensor", func() telegraf.Input { diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index 80332abc0d924..cf53214dbbd66 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -18,7 +18,7 @@ func TestGather(t *testing.T) { Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"}, Path: "ipmitool", Privilege: "USER", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), HexKey: "1234567F", } @@ -126,7 +126,7 @@ func TestGather(t *testing.T) { i = &Ipmi{ Path: "ipmitool", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } err = acc.GatherError(i.Gather) @@ -390,7 +390,7 @@ func TestGatherV2(t *testing.T) { Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"}, Path: "ipmitool", Privilege: "USER", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), MetricVersion: 2, HexKey: "0000000F", } @@ -432,7 +432,7 @@ func TestGatherV2(t *testing.T) { i = &Ipmi{ Path: "ipmitool", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), MetricVersion: 2, } diff --git a/plugins/inputs/ipset/ipset.go b/plugins/inputs/ipset/ipset.go index e9f3ccabe1241..82854a35f44f3 100644 --- a/plugins/inputs/ipset/ipset.go +++ b/plugins/inputs/ipset/ipset.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -18,15 +19,15 @@ import ( type Ipset struct { IncludeUnmatchedSets bool UseSudo bool - Timeout internal.Duration + Timeout config.Duration lister setLister } -type setLister func(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) +type setLister func(Timeout config.Duration, UseSudo bool) (*bytes.Buffer, error) const measurement = "ipset" -var defaultTimeout = internal.Duration{Duration: time.Second} +var defaultTimeout = config.Duration(time.Second) // Description returns a short description of the plugin func (i *Ipset) Description() string { @@ -90,7 +91,7 @@ func (i *Ipset) Gather(acc telegraf.Accumulator) error { return nil } -func setList(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { +func setList(timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { // Is ipset installed ? ipsetPath, err := exec.LookPath("ipset") if err != nil { @@ -98,7 +99,7 @@ func setList(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { } var args []string cmdName := ipsetPath - if UseSudo { + if useSudo { cmdName = "sudo" args = append(args, ipsetPath) } @@ -108,7 +109,7 @@ func setList(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { var out bytes.Buffer cmd.Stdout = &out - err = internal.RunTimeout(cmd, Timeout.Duration) + err = internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running ipset save: %s", err) } diff --git a/plugins/inputs/ipset/ipset_test.go b/plugins/inputs/ipset/ipset_test.go index 0480debe1bb4f..f205728c0dbad 100644 --- a/plugins/inputs/ipset/ipset_test.go +++ b/plugins/inputs/ipset/ipset_test.go @@ -7,7 +7,7 @@ import ( "reflect" "testing" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) @@ -80,7 +80,7 @@ func TestIpset(t *testing.T) { t.Run(tt.name, func(t *testing.T) { i++ ips := &Ipset{ - lister: func(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { + lister: func(timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { return bytes.NewBufferString(tt.value), nil }, } @@ -123,7 +123,7 @@ func TestIpset(t *testing.T) { func TestIpset_Gather_listerError(t *testing.T) { errFoo := errors.New("error foobar") ips := &Ipset{ - lister: func(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { + lister: func(timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { return new(bytes.Buffer), errFoo }, } diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index fa5727ced32bc..6d764d175ce58 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -11,8 +11,8 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -25,19 +25,19 @@ type Jenkins struct { Source string Port string // HTTP Timeout specified as a string - 3s, 1m, 1h - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig client *client Log telegraf.Logger - MaxConnections int `toml:"max_connections"` - MaxBuildAge internal.Duration `toml:"max_build_age"` - MaxSubJobDepth int `toml:"max_subjob_depth"` - MaxSubJobPerLayer int `toml:"max_subjob_per_layer"` - JobExclude []string `toml:"job_exclude"` - JobInclude []string `toml:"job_include"` + MaxConnections int `toml:"max_connections"` + MaxBuildAge config.Duration `toml:"max_build_age"` + MaxSubJobDepth int `toml:"max_subjob_depth"` + MaxSubJobPerLayer int `toml:"max_subjob_per_layer"` + JobExclude []string `toml:"job_exclude"` + JobInclude []string `toml:"job_include"` jobFilterExclude filter.Filter jobFilterInclude filter.Filter @@ -138,7 +138,7 @@ func (j *Jenkins) newHTTPClient() (*http.Client, error) { TLSClientConfig: tlsCfg, MaxIdleConns: j.MaxConnections, }, - Timeout: j.ResponseTimeout.Duration, + Timeout: time.Duration(j.ResponseTimeout), }, nil } @@ -353,7 +353,7 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { // stop if build is too old // Higher up in gatherJobs - cutoff := time.Now().Add(-1 * j.MaxBuildAge.Duration) + cutoff := time.Now().Add(-1 * time.Duration(j.MaxBuildAge)) // Here we just test if build.GetTimestamp().Before(cutoff) { @@ -501,7 +501,7 @@ func mapResultCode(s string) int { func init() { inputs.Add("jenkins", func() telegraf.Input { return &Jenkins{ - MaxBuildAge: internal.Duration{Duration: time.Hour}, + MaxBuildAge: config.Duration(time.Hour), MaxConnections: 5, MaxSubJobPerLayer: 10, } diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index 833b36fcbd60d..b97da5a0f00a4 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) @@ -304,7 +304,7 @@ func TestGatherNodeData(t *testing.T) { j := &Jenkins{ Log: testutil.Logger{}, URL: ts.URL, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + ResponseTimeout: config.Duration(time.Microsecond), NodeExclude: []string{"ignore-1", "ignore-2"}, } te := j.initialize(&http.Client{Transport: &http.Transport{}}) @@ -360,7 +360,7 @@ func TestInitialize(t *testing.T) { input: &Jenkins{ Log: testutil.Logger{}, URL: "http://a bad url", - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + ResponseTimeout: config.Duration(time.Microsecond), }, wantErr: true, }, @@ -369,7 +369,7 @@ func TestInitialize(t *testing.T) { input: &Jenkins{ Log: testutil.Logger{}, URL: ts.URL, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + ResponseTimeout: config.Duration(time.Microsecond), JobInclude: []string{"jobA", "jobB"}, JobExclude: []string{"job1", "job2"}, NodeExclude: []string{"node1", "node2"}, @@ -380,7 +380,7 @@ func TestInitialize(t *testing.T) { input: &Jenkins{ Log: testutil.Logger{}, URL: ts.URL, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + ResponseTimeout: config.Duration(time.Microsecond), }, output: &Jenkins{ Log: testutil.Logger{}, @@ -807,8 +807,8 @@ func TestGatherJobs(t *testing.T) { j := &Jenkins{ Log: testutil.Logger{}, URL: ts.URL, - MaxBuildAge: internal.Duration{Duration: time.Hour}, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + MaxBuildAge: config.Duration(time.Hour), + ResponseTimeout: config.Duration(time.Microsecond), JobInclude: []string{ "*", }, diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 6e7a3d5a524fc..9e4cac511683b 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -10,13 +10,13 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) // Default http timeouts -var DefaultResponseHeaderTimeout = internal.Duration{Duration: 3 * time.Second} -var DefaultClientTimeout = internal.Duration{Duration: 4 * time.Second} +var DefaultResponseHeaderTimeout = config.Duration(3 * time.Second) +var DefaultClientTimeout = config.Duration(4 * time.Second) type Server struct { Name string @@ -54,9 +54,9 @@ type Jolokia struct { Proxy Server Delimiter string - ResponseHeaderTimeout internal.Duration `toml:"response_header_timeout"` - ClientTimeout internal.Duration `toml:"client_timeout"` - Log telegraf.Logger `toml:"-"` + ResponseHeaderTimeout config.Duration `toml:"response_header_timeout"` + ClientTimeout config.Duration `toml:"client_timeout"` + Log telegraf.Logger `toml:"-"` } const sampleConfig = ` @@ -263,10 +263,10 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error { "in favor of the jolokia2 plugin " + "(https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2)") - tr := &http.Transport{ResponseHeaderTimeout: j.ResponseHeaderTimeout.Duration} + tr := &http.Transport{ResponseHeaderTimeout: time.Duration(j.ResponseHeaderTimeout)} j.jClient = &JolokiaClientImpl{&http.Client{ Transport: tr, - Timeout: j.ClientTimeout.Duration, + Timeout: time.Duration(j.ClientTimeout), }} } diff --git a/plugins/inputs/jolokia2/jolokia_agent.go b/plugins/inputs/jolokia2/jolokia_agent.go index 2489ac19ddc66..5b2e3da37c16e 100644 --- a/plugins/inputs/jolokia2/jolokia_agent.go +++ b/plugins/inputs/jolokia2/jolokia_agent.go @@ -3,9 +3,10 @@ package jolokia2 import ( "fmt" "sync" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" ) @@ -17,7 +18,7 @@ type JolokiaAgent struct { URLs []string `toml:"urls"` Username string Password string - ResponseTimeout internal.Duration `toml:"response_timeout"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig @@ -108,7 +109,7 @@ func (ja *JolokiaAgent) createClient(url string) (*Client, error) { return NewClient(url, &ClientConfig{ Username: ja.Username, Password: ja.Password, - ResponseTimeout: ja.ResponseTimeout.Duration, + ResponseTimeout: time.Duration(ja.ResponseTimeout), ClientConfig: ja.ClientConfig, }) } diff --git a/plugins/inputs/jolokia2/jolokia_proxy.go b/plugins/inputs/jolokia2/jolokia_proxy.go index 6428a88515aee..1f91e1cb911fe 100644 --- a/plugins/inputs/jolokia2/jolokia_proxy.go +++ b/plugins/inputs/jolokia2/jolokia_proxy.go @@ -1,8 +1,10 @@ package jolokia2 import ( + "time" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" ) @@ -18,7 +20,7 @@ type JolokiaProxy struct { Username string Password string - ResponseTimeout internal.Duration `toml:"response_timeout"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig Metrics []MetricConfig `toml:"metric"` @@ -116,7 +118,7 @@ func (jp *JolokiaProxy) createClient() (*Client, error) { return NewClient(jp.URL, &ClientConfig{ Username: jp.Username, Password: jp.Password, - ResponseTimeout: jp.ResponseTimeout.Duration, + ResponseTimeout: time.Duration(jp.ResponseTimeout), ClientConfig: jp.ClientConfig, ProxyConfig: proxyConfig, }) diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index ca087e12f6904..96dce5a88c7e7 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" internaltls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth" @@ -22,15 +22,15 @@ import ( ) type OpenConfigTelemetry struct { - Servers []string `toml:"servers"` - Sensors []string `toml:"sensors"` - Username string `toml:"username"` - Password string `toml:"password"` - ClientID string `toml:"client_id"` - SampleFrequency internal.Duration `toml:"sample_frequency"` - StrAsTags bool `toml:"str_as_tags"` - RetryDelay internal.Duration `toml:"retry_delay"` - EnableTLS bool `toml:"enable_tls"` + Servers []string `toml:"servers"` + Sensors []string `toml:"sensors"` + Username string `toml:"username"` + Password string `toml:"password"` + ClientID string `toml:"client_id"` + SampleFrequency config.Duration `toml:"sample_frequency"` + StrAsTags bool `toml:"str_as_tags"` + RetryDelay config.Duration `toml:"retry_delay"` + EnableTLS bool `toml:"enable_tls"` internaltls.ClientConfig Log telegraf.Logger @@ -219,7 +219,7 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int { m.sensorsConfig = make([]sensorConfig, 0) for _, sensor := range m.Sensors { spathSplit := strings.Fields(sensor) - reportingRate = uint32(m.SampleFrequency.Duration / time.Millisecond) + reportingRate = uint32(time.Duration(m.SampleFrequency) / time.Millisecond) // Extract measurement name and custom reporting rate if specified. Custom // reporting rate will be specified at the beginning of sensor list, @@ -296,9 +296,9 @@ func (m *OpenConfigTelemetry) collectData( } // Retry with delay. If delay is not provided, use default - if m.RetryDelay.Duration > 0 { - m.Log.Debugf("Retrying %s with timeout %v", grpcServer, m.RetryDelay.Duration) - time.Sleep(m.RetryDelay.Duration) + if time.Duration(m.RetryDelay) > 0 { + m.Log.Debugf("Retrying %s with timeout %v", grpcServer, time.Duration(m.RetryDelay)) + time.Sleep(time.Duration(m.RetryDelay)) continue } return @@ -408,7 +408,7 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { func init() { inputs.Add("jti_openconfig_telemetry", func() telegraf.Input { return &OpenConfigTelemetry{ - RetryDelay: internal.Duration{Duration: time.Second}, + RetryDelay: config.Duration(time.Second), StrAsTags: false, } }) diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go index 99185e53d015d..8db4ce0d543bc 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go @@ -10,7 +10,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -19,7 +19,7 @@ import ( var cfg = &OpenConfigTelemetry{ Log: testutil.Logger{}, Servers: []string{"127.0.0.1:50051"}, - SampleFrequency: internal.Duration{Duration: time.Second * 2}, + SampleFrequency: config.Duration(time.Second * 2), } var data = &telemetry.OpenConfigData{ diff --git a/plugins/inputs/kapacitor/kapacitor.go b/plugins/inputs/kapacitor/kapacitor.go index 073344ed41a93..b2e8da4cc0ef0 100644 --- a/plugins/inputs/kapacitor/kapacitor.go +++ b/plugins/inputs/kapacitor/kapacitor.go @@ -8,7 +8,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -19,7 +19,7 @@ const ( type Kapacitor struct { URLs []string `toml:"urls"` - Timeout internal.Duration + Timeout config.Duration tls.ClientConfig client *http.Client @@ -83,7 +83,7 @@ func (k *Kapacitor) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: k.Timeout.Duration, + Timeout: time.Duration(k.Timeout), } return client, nil @@ -247,7 +247,7 @@ func init() { inputs.Add("kapacitor", func() telegraf.Input { return &Kapacitor{ URLs: []string{defaultURL}, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 1626f78a25e1d..c94438eb38d4d 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -12,7 +12,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -104,7 +104,7 @@ type Kibana struct { Servers []string Username string Password string - Timeout internal.Duration + Timeout config.Duration tls.ClientConfig client *http.Client @@ -112,7 +112,7 @@ type Kibana struct { func NewKibana() *Kibana { return &Kibana{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } @@ -176,7 +176,7 @@ func (k *Kibana) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: k.Timeout.Duration, + Timeout: time.Duration(k.Timeout), } return client, nil diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index 3aec920886f54..bcfae4ce8f52f 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -13,8 +13,8 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -25,14 +25,14 @@ const ( // KubernetesInventory represents the config object for the plugin. type KubernetesInventory struct { - URL string `toml:"url"` - BearerToken string `toml:"bearer_token"` - BearerTokenString string `toml:"bearer_token_string"` - Namespace string `toml:"namespace"` - ResponseTimeout internal.Duration `toml:"response_timeout"` // Timeout specified as a string - 3s, 1m, 1h - ResourceExclude []string `toml:"resource_exclude"` - ResourceInclude []string `toml:"resource_include"` - MaxConfigMapAge internal.Duration `toml:"max_config_map_age"` + URL string `toml:"url"` + BearerToken string `toml:"bearer_token"` + BearerTokenString string `toml:"bearer_token_string"` + Namespace string `toml:"namespace"` + ResponseTimeout config.Duration `toml:"response_timeout"` // Timeout specified as a string - 3s, 1m, 1h + ResourceExclude []string `toml:"resource_exclude"` + ResourceInclude []string `toml:"resource_include"` + MaxConfigMapAge config.Duration `toml:"max_config_map_age"` SelectorInclude []string `toml:"selector_include"` SelectorExclude []string `toml:"selector_exclude"` @@ -109,7 +109,7 @@ func (ki *KubernetesInventory) Init() error { } var err error - ki.client, err = newClient(ki.URL, ki.Namespace, ki.BearerTokenString, ki.ResponseTimeout.Duration, ki.ClientConfig) + ki.client, err = newClient(ki.URL, ki.Namespace, ki.BearerTokenString, time.Duration(ki.ResponseTimeout), ki.ClientConfig) if err != nil { return err @@ -211,7 +211,7 @@ var ( func init() { inputs.Add("kube_inventory", func() telegraf.Input { return &KubernetesInventory{ - ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + ResponseTimeout: config.Duration(time.Second * 5), Namespace: "default", SelectorInclude: []string{}, SelectorExclude: []string{"*"}, diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index e4ebe268755ec..32bfc04a061e6 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -9,8 +9,8 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -29,7 +29,7 @@ type Kubernetes struct { labelFilter filter.Filter // HTTP Timeout specified as a string - 3s, 1m, 1h - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig @@ -204,13 +204,13 @@ func (k *Kubernetes) LoadJSON(url string, v interface{}) error { return err } if k.RoundTripper == nil { - if k.ResponseTimeout.Duration < time.Second { - k.ResponseTimeout.Duration = time.Second * 5 + if k.ResponseTimeout < config.Duration(time.Second) { + k.ResponseTimeout = config.Duration(time.Second * 5) } k.RoundTripper = &http.Transport{ TLSHandshakeTimeout: 5 * time.Second, TLSClientConfig: tlsCfg, - ResponseHeaderTimeout: k.ResponseTimeout.Duration, + ResponseHeaderTimeout: time.Duration(k.ResponseTimeout), } } req.Header.Set("Authorization", "Bearer "+k.BearerTokenString) diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index c9833f028654d..76f75bc63a6a0 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -11,7 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -59,7 +59,7 @@ type Logstash struct { Username string `toml:"username"` Password string `toml:"password"` Headers map[string]string `toml:"headers"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` tls.ClientConfig client *http.Client @@ -72,7 +72,7 @@ func NewLogstash() *Logstash { SinglePipeline: false, Collect: []string{"pipelines", "process", "jvm"}, Headers: make(map[string]string), - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } @@ -171,7 +171,7 @@ func (logstash *Logstash) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: logstash.Timeout.Duration, + Timeout: time.Duration(logstash.Timeout), } return client, nil diff --git a/plugins/inputs/mcrouter/mcrouter.go b/plugins/inputs/mcrouter/mcrouter.go index b93044f1c1e6c..af197c3072089 100644 --- a/plugins/inputs/mcrouter/mcrouter.go +++ b/plugins/inputs/mcrouter/mcrouter.go @@ -11,14 +11,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) // Mcrouter is a mcrouter plugin type Mcrouter struct { Servers []string - Timeout internal.Duration + Timeout config.Duration } // enum for statType @@ -127,11 +127,11 @@ func (m *Mcrouter) Description() string { func (m *Mcrouter) Gather(acc telegraf.Accumulator) error { ctx := context.Background() - if m.Timeout.Duration < 1*time.Second { - m.Timeout.Duration = defaultTimeout + if m.Timeout < config.Duration(1*time.Second) { + m.Timeout = config.Duration(defaultTimeout) } - ctx, cancel := context.WithTimeout(ctx, m.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(m.Timeout)) defer cancel() if len(m.Servers) == 0 { diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index d7c5b1d92f0c5..f4236c722f4cf 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -11,29 +11,29 @@ import ( mb "github.com/goburrow/modbus" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" ) // Modbus holds all data relevant to the plugin type Modbus struct { - Name string `toml:"name"` - Controller string `toml:"controller"` - TransmissionMode string `toml:"transmission_mode"` - BaudRate int `toml:"baud_rate"` - DataBits int `toml:"data_bits"` - Parity string `toml:"parity"` - StopBits int `toml:"stop_bits"` - SlaveID int `toml:"slave_id"` - Timeout internal.Duration `toml:"timeout"` - Retries int `toml:"busy_retries"` - RetriesWaitTime internal.Duration `toml:"busy_retries_wait"` - DiscreteInputs []fieldContainer `toml:"discrete_inputs"` - Coils []fieldContainer `toml:"coils"` - HoldingRegisters []fieldContainer `toml:"holding_registers"` - InputRegisters []fieldContainer `toml:"input_registers"` - Log telegraf.Logger `toml:"-"` + Name string `toml:"name"` + Controller string `toml:"controller"` + TransmissionMode string `toml:"transmission_mode"` + BaudRate int `toml:"baud_rate"` + DataBits int `toml:"data_bits"` + Parity string `toml:"parity"` + StopBits int `toml:"stop_bits"` + SlaveID int `toml:"slave_id"` + Timeout config.Duration `toml:"timeout"` + Retries int `toml:"busy_retries"` + RetriesWaitTime config.Duration `toml:"busy_retries_wait"` + DiscreteInputs []fieldContainer `toml:"discrete_inputs"` + Coils []fieldContainer `toml:"coils"` + HoldingRegisters []fieldContainer `toml:"holding_registers"` + InputRegisters []fieldContainer `toml:"input_registers"` + Log telegraf.Logger `toml:"-"` registers []register isConnected bool tcpHandler *mb.TCPClientHandler @@ -264,7 +264,7 @@ func connect(m *Modbus) error { return err } m.tcpHandler = mb.NewTCPClientHandler(host + ":" + port) - m.tcpHandler.Timeout = m.Timeout.Duration + m.tcpHandler.Timeout = time.Duration(m.Timeout) m.tcpHandler.SlaveId = byte(m.SlaveID) m.client = mb.NewClient(m.tcpHandler) err := m.tcpHandler.Connect() @@ -276,7 +276,7 @@ func connect(m *Modbus) error { case "file": if m.TransmissionMode == "RTU" { m.rtuHandler = mb.NewRTUClientHandler(u.Path) - m.rtuHandler.Timeout = m.Timeout.Duration + m.rtuHandler.Timeout = time.Duration(m.Timeout) m.rtuHandler.SlaveId = byte(m.SlaveID) m.rtuHandler.BaudRate = m.BaudRate m.rtuHandler.DataBits = m.DataBits @@ -291,7 +291,7 @@ func connect(m *Modbus) error { return nil } else if m.TransmissionMode == "ASCII" { m.asciiHandler = mb.NewASCIIClientHandler(u.Path) - m.asciiHandler.Timeout = m.Timeout.Duration + m.asciiHandler.Timeout = time.Duration(m.Timeout) m.asciiHandler.SlaveId = byte(m.SlaveID) m.asciiHandler.BaudRate = m.BaudRate m.asciiHandler.DataBits = m.DataBits @@ -679,7 +679,7 @@ func (m *Modbus) Gather(acc telegraf.Accumulator) error { mberr, ok := err.(*mb.ModbusError) if ok && mberr.ExceptionCode == mb.ExceptionCodeServerDeviceBusy && retry < m.Retries { m.Log.Infof("Device busy! Retrying %d more time(s)...", m.Retries-retry) - time.Sleep(m.RetriesWaitTime.Duration) + time.Sleep(time.Duration(m.RetriesWaitTime)) continue } // Ignore return error to not shadow the initial error diff --git a/plugins/inputs/monit/monit.go b/plugins/inputs/monit/monit.go index a25efad58e723..1cb1a4ba57da9 100644 --- a/plugins/inputs/monit/monit.go +++ b/plugins/inputs/monit/monit.go @@ -4,9 +4,10 @@ import ( "encoding/xml" "fmt" "net/http" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "golang.org/x/net/html/charset" @@ -178,7 +179,7 @@ type Monit struct { Password string `toml:"password"` client http.Client tls.ClientConfig - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` } type Messagebody struct { @@ -223,7 +224,7 @@ func (m *Monit) Init() error { TLSClientConfig: tlsCfg, Proxy: http.ProxyFromEnvironment, }, - Timeout: m.Timeout.Duration, + Timeout: time.Duration(m.Timeout), } return nil } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 603b4228db5d1..815f27a727abf 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -10,6 +10,7 @@ import ( mqtt "github.com/eclipse/paho.mqtt.golang" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -18,7 +19,7 @@ import ( var ( // 30 Seconds is the default used by paho.mqtt.golang - defaultConnectionTimeout = internal.Duration{Duration: 30 * time.Second} + defaultConnectionTimeout = config.Duration(30 * time.Second) defaultMaxUndeliveredMessages = 1000 ) @@ -43,14 +44,14 @@ type Client interface { type ClientFactory func(o *mqtt.ClientOptions) Client type MQTTConsumer struct { - Servers []string `toml:"servers"` - Topics []string `toml:"topics"` - TopicTag *string `toml:"topic_tag"` - Username string `toml:"username"` - Password string `toml:"password"` - QoS int `toml:"qos"` - ConnectionTimeout internal.Duration `toml:"connection_timeout"` - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + Servers []string `toml:"servers"` + Topics []string `toml:"topics"` + TopicTag *string `toml:"topic_tag"` + Username string `toml:"username"` + Password string `toml:"password"` + QoS int `toml:"qos"` + ConnectionTimeout config.Duration `toml:"connection_timeout"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` parser parsers.Parser @@ -169,8 +170,8 @@ func (m *MQTTConsumer) Init() error { return fmt.Errorf("qos value must be 0, 1, or 2: %d", m.QoS) } - if m.ConnectionTimeout.Duration < 1*time.Second { - return fmt.Errorf("connection_timeout must be greater than 1s: %s", m.ConnectionTimeout.Duration) + if time.Duration(m.ConnectionTimeout) < 1*time.Second { + return fmt.Errorf("connection_timeout must be greater than 1s: %s", time.Duration(m.ConnectionTimeout)) } m.topicTag = "topic" @@ -320,7 +321,7 @@ func (m *MQTTConsumer) Gather(_ telegraf.Accumulator) error { func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { opts := mqtt.NewClientOptions() - opts.ConnectTimeout = m.ConnectionTimeout.Duration + opts.ConnectTimeout = time.Duration(m.ConnectionTimeout) if m.ClientID == "" { opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5)) diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index 94e1ad74e1d69..53f688bb3bcd9 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -11,14 +11,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" gnatsd "github.com/nats-io/nats-server/v2/server" ) type Nats struct { Server string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration client *http.Client } @@ -93,7 +93,7 @@ func (n *Nats) createHTTPClient() *http.Client { transport := &http.Transport{ Proxy: http.ProxyFromEnvironment, } - timeout := n.ResponseTimeout.Duration + timeout := time.Duration(n.ResponseTimeout) if timeout == time.Duration(0) { timeout = 5 * time.Second } diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index 8161ac7b4880a..dad4c8e5857f6 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -14,7 +14,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -51,7 +51,7 @@ type outlet struct { // NeptuneApex implements telegraf.Input. type NeptuneApex struct { Servers []string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration httpClient *http.Client } diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index cb0e008f3d7c0..a7fcec4353c81 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -26,8 +26,8 @@ const ( // NetResponse struct type NetResponse struct { Address string - Timeout internal.Duration - ReadTimeout internal.Duration + Timeout config.Duration + ReadTimeout config.Duration Send string Expect string Protocol string @@ -80,7 +80,7 @@ func (n *NetResponse) TCPGather() (map[string]string, map[string]interface{}, er // Start Timer start := time.Now() // Connecting - conn, err := net.DialTimeout("tcp", n.Address, n.Timeout.Duration) + conn, err := net.DialTimeout("tcp", n.Address, time.Duration(n.Timeout)) // Stop timer responseTime := time.Since(start).Seconds() // Handle error @@ -105,7 +105,7 @@ func (n *NetResponse) TCPGather() (map[string]string, map[string]interface{}, er // Read string if needed if n.Expect != "" { // Set read timeout - if gerr := conn.SetReadDeadline(time.Now().Add(n.ReadTimeout.Duration)); gerr != nil { + if gerr := conn.SetReadDeadline(time.Now().Add(time.Duration(n.ReadTimeout))); gerr != nil { return nil, nil, gerr } // Prepare reader @@ -169,7 +169,7 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er } // Read string // Set read timeout - if gerr := conn.SetReadDeadline(time.Now().Add(n.ReadTimeout.Duration)); gerr != nil { + if gerr := conn.SetReadDeadline(time.Now().Add(time.Duration(n.ReadTimeout))); gerr != nil { return nil, nil, gerr } // Read @@ -204,11 +204,11 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er // also fill an Accumulator that is supplied. func (n *NetResponse) Gather(acc telegraf.Accumulator) error { // Set default values - if n.Timeout.Duration == 0 { - n.Timeout.Duration = time.Second + if n.Timeout == 0 { + n.Timeout = config.Duration(time.Second) } - if n.ReadTimeout.Duration == 0 { - n.ReadTimeout.Duration = time.Second + if n.ReadTimeout == 0 { + n.ReadTimeout = config.Duration(time.Second) } // Check send and expected string if n.Protocol == "udp" && n.Send == "" { diff --git a/plugins/inputs/net_response/net_response_test.go b/plugins/inputs/net_response/net_response_test.go index 48e3d80dc23ef..34a7992e3ddf1 100644 --- a/plugins/inputs/net_response/net_response_test.go +++ b/plugins/inputs/net_response/net_response_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -86,7 +86,7 @@ func TestTCPError(t *testing.T) { c := NetResponse{ Protocol: "tcp", Address: ":9999", - Timeout: internal.Duration{Duration: time.Second * 30}, + Timeout: config.Duration(time.Second * 30), } // Gather require.NoError(t, c.Gather(&acc)) @@ -113,8 +113,8 @@ func TestTCPOK1(t *testing.T) { Address: "127.0.0.1:2004", Send: "test", Expect: "test", - ReadTimeout: internal.Duration{Duration: time.Second * 3}, - Timeout: internal.Duration{Duration: time.Second}, + ReadTimeout: config.Duration(time.Second * 3), + Timeout: config.Duration(time.Second), Protocol: "tcp", } // Start TCP server @@ -157,8 +157,8 @@ func TestTCPOK2(t *testing.T) { Address: "127.0.0.1:2004", Send: "test", Expect: "test2", - ReadTimeout: internal.Duration{Duration: time.Second * 3}, - Timeout: internal.Duration{Duration: time.Second}, + ReadTimeout: config.Duration(time.Second * 3), + Timeout: config.Duration(time.Second), Protocol: "tcp", } // Start TCP server @@ -237,8 +237,8 @@ func TestUDPOK1(t *testing.T) { Address: "127.0.0.1:2004", Send: "test", Expect: "test", - ReadTimeout: internal.Duration{Duration: time.Second * 3}, - Timeout: internal.Duration{Duration: time.Second}, + ReadTimeout: config.Duration(time.Second * 3), + Timeout: config.Duration(time.Second), Protocol: "udp", } // Start UDP server diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go index a3aa3b7e5356a..5e15022708682 100644 --- a/plugins/inputs/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -12,14 +12,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type Nginx struct { Urls []string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig // HTTP client @@ -86,15 +86,15 @@ func (n *Nginx) createHTTPClient() (*http.Client, error) { return nil, err } - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } client := &http.Client{ Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go index d41c03c801a96..32a8516986f64 100644 --- a/plugins/inputs/nginx_plus/nginx_plus.go +++ b/plugins/inputs/nginx_plus/nginx_plus.go @@ -13,14 +13,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type NginxPlus struct { - Urls []string `toml:"urls"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Urls []string `toml:"urls"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client @@ -82,8 +82,8 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { } func (n *NginxPlus) createHTTPClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } tlsConfig, err := n.ClientConfig.TLSConfig() @@ -95,7 +95,7 @@ func (n *NginxPlus) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api.go b/plugins/inputs/nginx_plus_api/nginx_plus_api.go index b2ab91762ae58..09fe3fca3cb01 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api.go @@ -8,15 +8,15 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type NginxPlusAPI struct { - Urls []string `toml:"urls"` - APIVersion int64 `toml:"api_version"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Urls []string `toml:"urls"` + APIVersion int64 `toml:"api_version"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client @@ -106,8 +106,8 @@ func (n *NginxPlusAPI) Gather(acc telegraf.Accumulator) error { } func (n *NginxPlusAPI) createHTTPClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } tlsConfig, err := n.ClientConfig.TLSConfig() @@ -119,7 +119,7 @@ func (n *NginxPlusAPI) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/nginx_sts/nginx_sts.go b/plugins/inputs/nginx_sts/nginx_sts.go index 046460069c65d..d3e9118577f6f 100644 --- a/plugins/inputs/nginx_sts/nginx_sts.go +++ b/plugins/inputs/nginx_sts/nginx_sts.go @@ -12,14 +12,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type NginxSTS struct { - Urls []string `toml:"urls"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Urls []string `toml:"urls"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client @@ -81,8 +81,8 @@ func (n *NginxSTS) Gather(acc telegraf.Accumulator) error { } func (n *NginxSTS) createHTTPClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } tlsConfig, err := n.ClientConfig.TLSConfig() @@ -94,7 +94,7 @@ func (n *NginxSTS) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index da2f7b08f989c..fb40643409056 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -11,7 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -55,7 +55,7 @@ type NginxUpstreamCheck struct { Method string `toml:"method"` Headers map[string]string `toml:"headers"` HostHeader string `toml:"host_header"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` tls.ClientConfig client *http.Client @@ -67,7 +67,7 @@ func NewNginxUpstreamCheck() *NginxUpstreamCheck { Method: "GET", Headers: make(map[string]string), HostHeader: "", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } @@ -115,7 +115,7 @@ func (check *NginxUpstreamCheck) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: check.Timeout.Duration, + Timeout: time.Duration(check.Timeout), } return client, nil diff --git a/plugins/inputs/nginx_vts/nginx_vts.go b/plugins/inputs/nginx_vts/nginx_vts.go index 57453c0b4e3b0..bca7c62db9b83 100644 --- a/plugins/inputs/nginx_vts/nginx_vts.go +++ b/plugins/inputs/nginx_vts/nginx_vts.go @@ -12,14 +12,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type NginxVTS struct { - Urls []string `toml:"urls"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Urls []string `toml:"urls"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client @@ -81,8 +81,8 @@ func (n *NginxVTS) Gather(acc telegraf.Accumulator) error { } func (n *NginxVTS) createHTTPClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } tlsConfig, err := n.ClientConfig.TLSConfig() @@ -94,7 +94,7 @@ func (n *NginxVTS) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/nsd/nsd.go b/plugins/inputs/nsd/nsd.go index ef6c20a9aad4c..f75f700eaa2f9 100644 --- a/plugins/inputs/nsd/nsd.go +++ b/plugins/inputs/nsd/nsd.go @@ -11,16 +11,17 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) +type runner func(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) // NSD is used to store configuration values type NSD struct { Binary string - Timeout internal.Duration + Timeout config.Duration UseSudo bool Server string ConfigFile string @@ -29,7 +30,7 @@ type NSD struct { } var defaultBinary = "/usr/sbin/nsd-control" -var defaultTimeout = internal.Duration{Duration: time.Second} +var defaultTimeout = config.Duration(time.Second) var sampleConfig = ` ## Address of server to connect to, optionally ':port'. Defaults to the @@ -60,7 +61,7 @@ func (s *NSD) SampleConfig() string { } // Shell out to nsd_stat and return the output -func nsdRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) { +func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) { cmdArgs := []string{"stats_noreset"} if Server != "" { @@ -78,14 +79,14 @@ func nsdRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server s cmd := exec.Command(cmdName, cmdArgs...) - if UseSudo { + if useSudo { cmdArgs = append([]string{cmdName}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) } var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running nsd-control: %s (%s %v)", err, cmdName, cmdArgs) } diff --git a/plugins/inputs/nsd/nsd_test.go b/plugins/inputs/nsd/nsd_test.go index 67ea6863c5208..d64cad7dcea63 100644 --- a/plugins/inputs/nsd/nsd_test.go +++ b/plugins/inputs/nsd/nsd_test.go @@ -3,15 +3,18 @@ package nsd import ( "bytes" "testing" + "time" "github.com/stretchr/testify/assert" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) -func NSDControl(output string) func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) { +var TestTimeout = config.Duration(time.Second) + +func NSDControl(output string) func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { + return func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index f1ebfa38babb9..3e4fb03f04221 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -19,7 +20,7 @@ const measurement = "nvidia_smi" // NvidiaSMI holds the methods for this plugin type NvidiaSMI struct { BinPath string - Timeout internal.Duration + Timeout config.Duration } // Description returns the description of the NvidiaSMI plugin @@ -61,14 +62,14 @@ func init() { inputs.Add("nvidia_smi", func() telegraf.Input { return &NvidiaSMI{ BinPath: "/usr/bin/nvidia-smi", - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), } }) } func (smi *NvidiaSMI) pollSMI() ([]byte, error) { // Construct and execute metrics query - ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, "-q", "-x"), smi.Timeout.Duration) + ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, "-q", "-x"), time.Duration(smi.Timeout)) if err != nil { return nil, err } diff --git a/plugins/inputs/openntpd/openntpd.go b/plugins/inputs/openntpd/openntpd.go index b4a35fb55b8d7..2689c9cc7c845 100644 --- a/plugins/inputs/openntpd/openntpd.go +++ b/plugins/inputs/openntpd/openntpd.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -34,19 +35,19 @@ var intI = map[string]int{ "poll": 4, } -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) +type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) // Openntpd is used to store configuration values type Openntpd struct { Binary string - Timeout internal.Duration + Timeout config.Duration UseSudo bool run runner } var defaultBinary = "/usr/sbin/ntpctl" -var defaultTimeout = internal.Duration{Duration: 5 * time.Second} +var defaultTimeout = config.Duration(5 * time.Second) func (n *Openntpd) Description() string { return "Get standard NTP query metrics from OpenNTPD." @@ -66,19 +67,19 @@ func (n *Openntpd) SampleConfig() string { } // Shell out to ntpctl and return the output -func openntpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { +func openntpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { cmdArgs := []string{"-s", "peers"} cmd := exec.Command(cmdName, cmdArgs...) - if UseSudo { + if useSudo { cmdArgs = append([]string{cmdName}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) } var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running ntpctl: %s", err) } diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go index f9823e355f69a..f26419a71101e 100644 --- a/plugins/inputs/openntpd/openntpd_test.go +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -3,15 +3,18 @@ package openntpd import ( "bytes" "testing" + "time" "github.com/stretchr/testify/assert" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) -func OpenntpdCTL(output string) func(string, internal.Duration, bool) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool) (*bytes.Buffer, error) { +var TestTimeout = config.Duration(time.Second) + +func OpenntpdCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { + return func(string, config.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index 47850db09f012..9ce6ec5421ff1 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -10,24 +10,25 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) +type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) // Opensmtpd is used to store configuration values type Opensmtpd struct { Binary string - Timeout internal.Duration + Timeout config.Duration UseSudo bool run runner } var defaultBinary = "/usr/sbin/smtpctl" -var defaultTimeout = internal.Duration{Duration: time.Second} +var defaultTimeout = config.Duration(time.Second) var sampleConfig = ` ## If running as a restricted user you can prepend sudo for additional access: @@ -50,19 +51,19 @@ func (s *Opensmtpd) SampleConfig() string { } // Shell out to opensmtpd_stat and return the output -func opensmtpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { +func opensmtpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { cmdArgs := []string{"show", "stats"} cmd := exec.Command(cmdName, cmdArgs...) - if UseSudo { + if useSudo { cmdArgs = append([]string{cmdName}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) } var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running smtpctl: %s", err) } diff --git a/plugins/inputs/opensmtpd/opensmtpd_test.go b/plugins/inputs/opensmtpd/opensmtpd_test.go index 4ae3eb9868d40..fb3afa82e0171 100644 --- a/plugins/inputs/opensmtpd/opensmtpd_test.go +++ b/plugins/inputs/opensmtpd/opensmtpd_test.go @@ -2,14 +2,15 @@ package opensmtpd import ( "bytes" - "github.com/influxdata/telegraf/internal" + "testing" + + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" - "testing" ) -func SMTPCTL(output string) func(string, internal.Duration, bool) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool) (*bytes.Buffer, error) { +func SMTPCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { + return func(string, config.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index 426d412d09114..fcc22343b435e 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -13,7 +13,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -30,13 +30,13 @@ const ( ) type OpenWeatherMap struct { - AppID string `toml:"app_id"` - CityID []string `toml:"city_id"` - Lang string `toml:"lang"` - Fetch []string `toml:"fetch"` - BaseURL string `toml:"base_url"` - ResponseTimeout internal.Duration `toml:"response_timeout"` - Units string `toml:"units"` + AppID string `toml:"app_id"` + CityID []string `toml:"city_id"` + Lang string `toml:"lang"` + Fetch []string `toml:"fetch"` + BaseURL string `toml:"base_url"` + ResponseTimeout config.Duration `toml:"response_timeout"` + Units string `toml:"units"` client *http.Client baseURL *url.URL @@ -132,13 +132,13 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { } func (n *OpenWeatherMap) createHTTPClient() *http.Client { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = defaultResponseTimeout + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(defaultResponseTimeout) } client := &http.Client{ Transport: &http.Transport{}, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client @@ -299,9 +299,7 @@ func gatherForecast(acc telegraf.Accumulator, status *Status) { func init() { inputs.Add("openweathermap", func() telegraf.Input { - tmout := internal.Duration{ - Duration: defaultResponseTimeout, - } + tmout := config.Duration(defaultResponseTimeout) return &OpenWeatherMap{ ResponseTimeout: tmout, BaseURL: defaultBaseURL, diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index 24a7f1e8fb7d2..2547a617e14d7 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -5,7 +5,7 @@ import ( "strconv" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/postgresql" _ "github.com/jackc/pgx/stdlib" // register driver @@ -193,11 +193,9 @@ func init() { inputs.Add("pgbouncer", func() telegraf.Input { return &PgBouncer{ Service: postgresql.Service{ - MaxIdle: 1, - MaxOpen: 1, - MaxLifetime: internal.Duration{ - Duration: 0, - }, + MaxIdle: 1, + MaxOpen: 1, + MaxLifetime: config.Duration(0), IsPgBouncer: true, }, } diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index cc326c3b55577..77c4bf0aeee56 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -10,9 +10,10 @@ import ( "strconv" "strings" "sync" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -39,7 +40,7 @@ type poolStat map[string]metric type phpfpm struct { Urls []string - Timeout internal.Duration + Timeout config.Duration tls.ClientConfig client *http.Client @@ -96,7 +97,7 @@ func (p *phpfpm) Init() error { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: p.Timeout.Duration, + Timeout: time.Duration(p.Timeout), } return nil } diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 3ce8963e90c3e..1f1c22dc00e21 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -9,7 +9,7 @@ import ( _ "github.com/jackc/pgx/stdlib" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -195,11 +195,9 @@ func init() { inputs.Add("postgresql", func() telegraf.Input { return &Postgresql{ Service: Service{ - MaxIdle: 1, - MaxOpen: 1, - MaxLifetime: internal.Duration{ - Duration: 0, - }, + MaxIdle: 1, + MaxOpen: 1, + MaxLifetime: config.Duration(0), IsPgBouncer: false, }, } diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index d4be13ee7bca2..db4438e416939 100644 --- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -8,13 +8,14 @@ import ( "regexp" "sort" "strings" + "time" "github.com/jackc/pgx" "github.com/jackc/pgx/pgtype" "github.com/jackc/pgx/stdlib" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" ) // pulled from lib/pq @@ -92,7 +93,7 @@ type Service struct { Outputaddress string MaxIdle int MaxOpen int - MaxLifetime internal.Duration + MaxLifetime config.Duration DB *sql.DB IsPgBouncer bool } @@ -145,7 +146,7 @@ func (p *Service) Start(telegraf.Accumulator) (err error) { p.DB.SetMaxOpenConns(p.MaxOpen) p.DB.SetMaxIdleConns(p.MaxIdle) - p.DB.SetConnMaxLifetime(p.MaxLifetime.Duration) + p.DB.SetConnMaxLifetime(time.Duration(p.MaxLifetime)) return nil } diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index ceb6c0be5fe9c..ef66c26cf9e7e 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -11,7 +11,7 @@ import ( _ "github.com/jackc/pgx/stdlib" //to register stdlib from PostgreSQL Driver and Toolkit "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/postgresql" ) @@ -342,11 +342,9 @@ func init() { inputs.Add("postgresql_extensible", func() telegraf.Input { return &Postgresql{ Service: postgresql.Service{ - MaxIdle: 1, - MaxOpen: 1, - MaxLifetime: internal.Duration{ - Duration: 0, - }, + MaxIdle: 1, + MaxOpen: 1, + MaxLifetime: config.Duration(0), IsPgBouncer: false, }, } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 319f96a69b70c..cc894fc7a7f8d 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -14,7 +14,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" parser_v2 "github.com/influxdata/telegraf/plugins/parsers/prometheus" @@ -48,7 +48,7 @@ type Prometheus struct { Username string `toml:"username"` Password string `toml:"password"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + ResponseTimeout config.Duration `toml:"response_timeout"` MetricVersion int `toml:"metric_version"` @@ -308,7 +308,7 @@ func (p *Prometheus) createHTTPClient() (*http.Client, error) { TLSClientConfig: tlsCfg, DisableKeepAlives: true, }, - Timeout: p.ResponseTimeout.Duration, + Timeout: time.Duration(p.ResponseTimeout), } return client, nil @@ -341,7 +341,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return c, err }, }, - Timeout: p.ResponseTimeout.Duration, + Timeout: time.Duration(p.ResponseTimeout), } } else { if u.URL.Path == "" { @@ -474,7 +474,7 @@ func (p *Prometheus) Stop() { func init() { inputs.Add("prometheus", func() telegraf.Input { return &Prometheus{ - ResponseTimeout: internal.Duration{Duration: time.Second * 3}, + ResponseTimeout: config.Duration(time.Second * 3), kubernetesPods: map[string]URLAndAddress{}, URLTag: "url", } diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index a66aa5286fac8..ec34a7b2f5a36 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -8,6 +8,7 @@ import ( "net/url" "os" "strings" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -66,7 +67,7 @@ func (px *Proxmox) Init() error { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: px.ResponseTimeout.Duration, + Timeout: time.Duration(px.ResponseTimeout), } return nil diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go index ef207693e4b5f..c064150c061f6 100644 --- a/plugins/inputs/proxmox/structs.go +++ b/plugins/inputs/proxmox/structs.go @@ -6,15 +6,15 @@ import ( "net/url" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" ) type Proxmox struct { - BaseURL string `toml:"base_url"` - APIToken string `toml:"api_token"` - ResponseTimeout internal.Duration `toml:"response_timeout"` - NodeName string `toml:"node_name"` + BaseURL string `toml:"base_url"` + APIToken string `toml:"api_token"` + ResponseTimeout config.Duration `toml:"response_timeout"` + NodeName string `toml:"node_name"` tls.ClientConfig diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index fa92fc744f97f..fd39bd090dbc5 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -9,8 +9,8 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -40,8 +40,8 @@ type RabbitMQ struct { Password string `toml:"password"` tls.ClientConfig - ResponseHeaderTimeout internal.Duration `toml:"header_timeout"` - ClientTimeout internal.Duration `toml:"client_timeout"` + ResponseHeaderTimeout config.Duration `toml:"header_timeout"` + ClientTimeout config.Duration `toml:"client_timeout"` Nodes []string `toml:"nodes"` Queues []string `toml:"queues"` @@ -331,12 +331,12 @@ func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { return err } tr := &http.Transport{ - ResponseHeaderTimeout: r.ResponseHeaderTimeout.Duration, + ResponseHeaderTimeout: time.Duration(r.ResponseHeaderTimeout), TLSClientConfig: tlsCfg, } r.Client = &http.Client{ Transport: tr, - Timeout: r.ClientTimeout.Duration, + Timeout: time.Duration(r.ClientTimeout), } } @@ -762,8 +762,8 @@ func (r *RabbitMQ) shouldGatherFederationLink(link FederationLink) bool { func init() { inputs.Add("rabbitmq", func() telegraf.Input { return &RabbitMQ{ - ResponseHeaderTimeout: internal.Duration{Duration: DefaultResponseHeaderTimeout * time.Second}, - ClientTimeout: internal.Duration{Duration: DefaultClientTimeout * time.Second}, + ResponseHeaderTimeout: config.Duration(DefaultResponseHeaderTimeout * time.Second), + ClientTimeout: config.Duration(DefaultClientTimeout * time.Second), } }) } diff --git a/plugins/inputs/ravendb/ravendb.go b/plugins/inputs/ravendb/ravendb.go index f246bd8e97689..efc1b9517cc24 100644 --- a/plugins/inputs/ravendb/ravendb.go +++ b/plugins/inputs/ravendb/ravendb.go @@ -10,7 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -28,7 +28,7 @@ type RavenDB struct { URL string `toml:"url"` Name string `toml:"name"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` StatsInclude []string `toml:"stats_include"` DbStatsDbs []string `toml:"db_stats_dbs"` @@ -133,12 +133,12 @@ func (r *RavenDB) ensureClient() error { return err } tr := &http.Transport{ - ResponseHeaderTimeout: r.Timeout.Duration, + ResponseHeaderTimeout: time.Duration(r.Timeout), TLSClientConfig: tlsCfg, } r.client = &http.Client{ Transport: tr, - Timeout: r.Timeout.Duration, + Timeout: time.Duration(r.Timeout), } return nil @@ -418,7 +418,7 @@ func (r *RavenDB) Init() error { func init() { inputs.Add("ravendb", func() telegraf.Input { return &RavenDB{ - Timeout: internal.Duration{Duration: defaultTimeout * time.Second}, + Timeout: config.Duration(defaultTimeout * time.Second), StatsInclude: []string{"server", "databases", "indexes", "collections"}, } }) diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index 5c075017a8430..b20094176ceb8 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -19,7 +19,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" riemanngo "github.com/riemann/riemann-go-client" @@ -27,12 +27,12 @@ import ( ) type RiemannSocketListener struct { - ServiceAddress string `toml:"service_address"` - MaxConnections int `toml:"max_connections"` - ReadBufferSize internal.Size `toml:"read_buffer_size"` - ReadTimeout *internal.Duration `toml:"read_timeout"` - KeepAlivePeriod *internal.Duration `toml:"keep_alive_period"` - SocketMode string `toml:"socket_mode"` + ServiceAddress string `toml:"service_address"` + MaxConnections int `toml:"max_connections"` + ReadBufferSize config.Size `toml:"read_buffer_size"` + ReadTimeout *config.Duration `toml:"read_timeout"` + KeepAlivePeriod *config.Duration `toml:"keep_alive_period"` + SocketMode string `toml:"socket_mode"` tlsint.ServerConfig wg sync.WaitGroup @@ -75,9 +75,9 @@ func (rsl *riemannListener) listen(ctx context.Context) { break } - if rsl.ReadBufferSize.Size > 0 { + if rsl.ReadBufferSize > 0 { if srb, ok := c.(setReadBufferer); ok { - if err := srb.SetReadBuffer(int(rsl.ReadBufferSize.Size)); err != nil { + if err := srb.SetReadBuffer(int(rsl.ReadBufferSize)); err != nil { rsl.Log.Warnf("Setting read buffer failed: %v", err) } } else { @@ -129,13 +129,13 @@ func (rsl *riemannListener) setKeepAlive(c net.Conn) error { if !ok { return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(rsl.ServiceAddress, "://", 2)[0]) } - if rsl.KeepAlivePeriod.Duration == 0 { + if *rsl.KeepAlivePeriod == 0 { return tcpc.SetKeepAlive(false) } if err := tcpc.SetKeepAlive(true); err != nil { return err } - return tcpc.SetKeepAlivePeriod(rsl.KeepAlivePeriod.Duration) + return tcpc.SetKeepAlivePeriod(time.Duration(*rsl.KeepAlivePeriod)) } func (rsl *riemannListener) removeConnection(c net.Conn) { @@ -175,8 +175,8 @@ func (rsl *riemannListener) read(conn net.Conn) { var err error for { - if rsl.ReadTimeout != nil && rsl.ReadTimeout.Duration > 0 { - if err := conn.SetDeadline(time.Now().Add(rsl.ReadTimeout.Duration)); err != nil { + if rsl.ReadTimeout != nil && *rsl.ReadTimeout > 0 { + if err := conn.SetDeadline(time.Now().Add(time.Duration(*rsl.ReadTimeout))); err != nil { rsl.Log.Warnf("Setting deadline failed: %v", err) } } diff --git a/plugins/inputs/riemann_listener/riemann_listener_test.go b/plugins/inputs/riemann_listener/riemann_listener_test.go index 92af46e8afd03..92dc829ac1312 100644 --- a/plugins/inputs/riemann_listener/riemann_listener_test.go +++ b/plugins/inputs/riemann_listener/riemann_listener_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" riemanngo "github.com/riemann/riemann-go-client" "github.com/stretchr/testify/require" @@ -18,7 +18,7 @@ func TestSocketListener_tcp(t *testing.T) { sl := newRiemannSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "tcp://127.0.0.1:5555" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc := &testutil.Accumulator{} err := sl.Start(acc) diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index 01fd2a45af6ee..d3a8ba762f379 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -12,6 +12,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -19,12 +20,12 @@ import ( var ( execCommand = exec.Command // execCommand is used to mock commands in tests. numberRegp = regexp.MustCompile("[0-9]+") - defaultTimeout = internal.Duration{Duration: 5 * time.Second} + defaultTimeout = config.Duration(5 * time.Second) ) type Sensors struct { - RemoveNumbers bool `toml:"remove_numbers"` - Timeout internal.Duration `toml:"timeout"` + RemoveNumbers bool `toml:"remove_numbers"` + Timeout config.Duration `toml:"timeout"` path string } @@ -59,7 +60,7 @@ func (s *Sensors) parse(acc telegraf.Accumulator) error { fields := map[string]interface{}{} chip := "" cmd := execCommand(s.path, "-A", "-u") - out, err := internal.StdOutputTimeout(cmd, s.Timeout.Duration) + out, err := internal.StdOutputTimeout(cmd, time.Duration(s.Timeout)) if err != nil { return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } diff --git a/plugins/inputs/sflow/sflow.go b/plugins/inputs/sflow/sflow.go index 45578d5396cc3..3b18409c13a77 100644 --- a/plugins/inputs/sflow/sflow.go +++ b/plugins/inputs/sflow/sflow.go @@ -10,7 +10,7 @@ import ( "sync" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -31,8 +31,8 @@ const ( ) type SFlow struct { - ServiceAddress string `toml:"service_address"` - ReadBufferSize internal.Size `toml:"read_buffer_size"` + ServiceAddress string `toml:"service_address"` + ReadBufferSize config.Size `toml:"read_buffer_size"` Log telegraf.Logger `toml:"-"` @@ -83,8 +83,8 @@ func (s *SFlow) Start(acc telegraf.Accumulator) error { s.closer = conn s.addr = conn.LocalAddr() - if s.ReadBufferSize.Size > 0 { - if err := conn.SetReadBuffer(int(s.ReadBufferSize.Size)); err != nil { + if s.ReadBufferSize > 0 { + if err := conn.SetReadBuffer(int(s.ReadBufferSize)); err != nil { return err } } diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index 121edb0acf71b..4533ea768432d 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -14,6 +14,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -268,17 +269,17 @@ var ( // Smart plugin reads metrics from storage devices supporting S.M.A.R.T. type Smart struct { - Path string `toml:"path"` //deprecated - to keep backward compatibility - PathSmartctl string `toml:"path_smartctl"` - PathNVMe string `toml:"path_nvme"` - Nocheck string `toml:"nocheck"` - EnableExtensions []string `toml:"enable_extensions"` - Attributes bool `toml:"attributes"` - Excludes []string `toml:"excludes"` - Devices []string `toml:"devices"` - UseSudo bool `toml:"use_sudo"` - Timeout internal.Duration `toml:"timeout"` - Log telegraf.Logger `toml:"-"` + Path string `toml:"path"` //deprecated - to keep backward compatibility + PathSmartctl string `toml:"path_smartctl"` + PathNVMe string `toml:"path_nvme"` + Nocheck string `toml:"nocheck"` + EnableExtensions []string `toml:"enable_extensions"` + Attributes bool `toml:"attributes"` + Excludes []string `toml:"excludes"` + Devices []string `toml:"devices"` + UseSudo bool `toml:"use_sudo"` + Timeout config.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` } type nvmeDevice struct { @@ -332,7 +333,7 @@ var sampleConfig = ` func newSmart() *Smart { return &Smart{ - Timeout: internal.Duration{Duration: time.Second * 30}, + Timeout: config.Duration(time.Second * 30), } } @@ -477,12 +478,12 @@ func (m *Smart) scanDevices(ignoreExcludes bool, scanArgs ...string) ([]string, } // Wrap with sudo -var runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { +var runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { cmd := exec.Command(command, args...) if sudo { cmd = exec.Command("sudo", append([]string{"-n", command}, args...)...) } - return internal.CombinedOutputTimeout(cmd, timeout.Duration) + return internal.CombinedOutputTimeout(cmd, time.Duration(timeout)) } func excludedDev(excludes []string, deviceLine string) bool { @@ -529,7 +530,7 @@ func (m *Smart) getVendorNVMeAttributes(acc telegraf.Accumulator, devices []stri wg.Wait() } -func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme string, timeout internal.Duration, useSudo bool) []nvmeDevice { +func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme string, timeout config.Duration, useSudo bool) []nvmeDevice { var NVMeDevices []nvmeDevice for _, device := range devices { @@ -549,7 +550,7 @@ func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme return NVMeDevices } -func gatherNVMeDeviceInfo(nvme, device string, timeout internal.Duration, useSudo bool) (string, string, string, error) { +func gatherNVMeDeviceInfo(nvme, device string, timeout config.Duration, useSudo bool) (string, string, string, error) { args := []string{"id-ctrl"} args = append(args, strings.Split(device, " ")...) out, err := runCmd(timeout, useSudo, nvme, args...) @@ -589,7 +590,7 @@ func findNVMeDeviceInfo(output string) (string, string, string, error) { return vid, sn, mn, nil } -func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo bool, nvme string, device nvmeDevice, wg *sync.WaitGroup) { +func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout config.Duration, usesudo bool, nvme string, device nvmeDevice, wg *sync.WaitGroup) { defer wg.Done() args := []string{"intel", "smart-log-add"} @@ -636,7 +637,7 @@ func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout internal.Duration, us } } -func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, collectAttributes bool, smartctl, nocheck, device string, wg *sync.WaitGroup) { +func gatherDisk(acc telegraf.Accumulator, timeout config.Duration, usesudo, collectAttributes bool, smartctl, nocheck, device string, wg *sync.WaitGroup) { defer wg.Done() // smartctl 5.41 & 5.42 have are broken regarding handling of --nocheck/-n args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nocheck, "--format=brief"} diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index e82307d391565..5a1799381cebe 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,9 +17,9 @@ func TestGatherAttributes(t *testing.T) { s := newSmart() s.Attributes = true - assert.Equal(t, time.Second*30, s.Timeout.Duration) + assert.Equal(t, time.Second*30, time.Duration(s.Timeout)) - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { if len(args) > 0 { if args[0] == "--info" && args[7] == "/dev/ada0" { return []byte(mockInfoAttributeData), nil @@ -81,9 +81,9 @@ func TestGatherNoAttributes(t *testing.T) { s := newSmart() s.Attributes = false - assert.Equal(t, time.Second*30, s.Timeout.Duration) + assert.Equal(t, time.Second*30, time.Duration(s.Timeout)) - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { if len(args) > 0 { if args[0] == "--scan" && len(args) == 1 { return []byte(mockScanData), nil @@ -124,7 +124,7 @@ func TestExcludedDev(t *testing.T) { } func TestGatherSATAInfo(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSATAInfoData), nil } @@ -134,13 +134,13 @@ func TestGatherSATAInfo(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) assert.Equal(t, 101, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(20), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherSATAInfo65(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSATAInfoData65), nil } @@ -150,13 +150,13 @@ func TestGatherSATAInfo65(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) assert.Equal(t, 91, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(18), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherHgstSAS(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSASInfoData), nil } @@ -166,13 +166,13 @@ func TestGatherHgstSAS(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) assert.Equal(t, 6, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(4), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherHtSAS(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(htSASInfoData), nil } @@ -182,13 +182,13 @@ func TestGatherHtSAS(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) testutil.RequireMetricsEqual(t, testHtsasAtributtes, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) } func TestGatherSSD(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(ssdInfoData), nil } @@ -198,13 +198,13 @@ func TestGatherSSD(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) assert.Equal(t, 105, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(26), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherSSDRaid(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(ssdRaidInfoData), nil } @@ -214,13 +214,13 @@ func TestGatherSSDRaid(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) assert.Equal(t, 74, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(15), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherNvme(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(smartctlNvmeInfoData), nil } @@ -230,14 +230,14 @@ func TestGatherNvme(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "nvme0", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "nvme0", wg) testutil.RequireMetricsEqual(t, testSmartctlNvmeAttributes, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) } func TestGatherIntelNvme(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(nvmeIntelInfoData), nil } @@ -252,7 +252,7 @@ func TestGatherIntelNvme(t *testing.T) { ) wg.Add(1) - gatherIntelNVMeDisk(acc, internal.Duration{Duration: time.Second * 30}, true, "", device, wg) + gatherIntelNVMeDisk(acc, config.Duration(time.Second*30), true, "", device, wg) result := acc.GetTelegrafMetrics() testutil.RequireMetricsEqual(t, testIntelInvmeAttributes, result, @@ -292,7 +292,7 @@ func Test_difference(t *testing.T) { } func Test_integerOverflow(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(smartctlNvmeInfoDataWithOverflow), nil } @@ -303,7 +303,7 @@ func Test_integerOverflow(t *testing.T) { t.Run("If data raw_value is out of int64 range, there should be no metrics for that attribute", func(t *testing.T) { wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "nvme0", wg) + gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "nvme0", wg) result := acc.GetTelegrafMetrics() testutil.RequireMetricsEqual(t, testOverflowAttributes, result, diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 8f88211d56924..325121be4d1a8 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -16,7 +16,7 @@ import ( "github.com/gosnmp/gosnmp" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/wlog" @@ -26,7 +26,7 @@ const description = `Retrieves SNMP values from remote agents` const sampleConfig = ` ## Agent addresses to retrieve values from. ## format: agents = [":"] - ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. + ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. ## default is udp ## port: optional ## example: agents = ["udp://127.0.0.1:161"] @@ -314,7 +314,7 @@ func init() { ClientConfig: snmp.ClientConfig{ Retries: 3, MaxRepetitions: 10, - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), Version: 2, Community: "public", }, diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index b589a60f72969..ef849f07b138c 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -10,9 +10,8 @@ import ( "time" "github.com/gosnmp/gosnmp" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" - config "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" @@ -93,8 +92,8 @@ func TestSampleConfig(t *testing.T) { expected := &Snmp{ Agents: []string{"udp://127.0.0.1:161"}, AgentHostTag: "", - ClientConfig: config.ClientConfig{ - Timeout: internal.Duration{Duration: 5 * time.Second}, + ClientConfig: snmp.ClientConfig{ + Timeout: config.Duration(5 * time.Second), Version: 2, Community: "public", MaxRepetitions: 10, @@ -239,8 +238,8 @@ func TestSnmpInit_noTranslate(t *testing.T) { func TestGetSNMPConnection_v2(t *testing.T) { s := &Snmp{ Agents: []string{"1.2.3.4:567", "1.2.3.4", "udp://127.0.0.1"}, - ClientConfig: config.ClientConfig{ - Timeout: internal.Duration{Duration: 3 * time.Second}, + ClientConfig: snmp.ClientConfig{ + Timeout: config.Duration(3 * time.Second), Retries: 4, Version: 2, Community: "foo", @@ -308,7 +307,7 @@ func stubTCPServer(wg *sync.WaitGroup) { func TestGetSNMPConnection_v3(t *testing.T) { s := &Snmp{ Agents: []string{"1.2.3.4"}, - ClientConfig: config.ClientConfig{ + ClientConfig: snmp.ClientConfig{ Version: 3, MaxRepetitions: 20, ContextName: "mycontext", @@ -356,7 +355,7 @@ func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { Algorithm: gosnmp.AES192, Config: &Snmp{ Agents: []string{"1.2.3.4"}, - ClientConfig: config.ClientConfig{ + ClientConfig: snmp.ClientConfig{ Version: 3, MaxRepetitions: 20, ContextName: "mycontext", @@ -377,7 +376,7 @@ func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { Algorithm: gosnmp.AES192C, Config: &Snmp{ Agents: []string{"1.2.3.4"}, - ClientConfig: config.ClientConfig{ + ClientConfig: snmp.ClientConfig{ Version: 3, MaxRepetitions: 20, ContextName: "mycontext", @@ -398,7 +397,7 @@ func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { Algorithm: gosnmp.AES256, Config: &Snmp{ Agents: []string{"1.2.3.4"}, - ClientConfig: config.ClientConfig{ + ClientConfig: snmp.ClientConfig{ Version: 3, MaxRepetitions: 20, ContextName: "mycontext", @@ -419,7 +418,7 @@ func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { Algorithm: gosnmp.AES256C, Config: &Snmp{ Agents: []string{"1.2.3.4"}, - ClientConfig: config.ClientConfig{ + ClientConfig: snmp.ClientConfig{ Version: 3, MaxRepetitions: 20, ContextName: "mycontext", diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index b02483af768c1..acc97790855e4 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -12,16 +12,17 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/soniah/gosnmp" ) -var defaultTimeout = internal.Duration{Duration: time.Second * 5} +var defaultTimeout = config.Duration(time.Second * 5) type handler func(*gosnmp.SnmpPacket, *net.UDPAddr) -type execer func(internal.Duration, string, ...string) ([]byte, error) +type execer func(config.Duration, string, ...string) ([]byte, error) type mibEntry struct { mibName string @@ -29,9 +30,9 @@ type mibEntry struct { } type SnmpTrap struct { - ServiceAddress string `toml:"service_address"` - Timeout internal.Duration `toml:"timeout"` - Version string `toml:"version"` + ServiceAddress string `toml:"service_address"` + Timeout config.Duration `toml:"timeout"` + Version string `toml:"version"` // Settings for version 3 // Values: "noAuthNoPriv", "authNoPriv", "authPriv" @@ -111,11 +112,11 @@ func init() { }) } -func realExecCmd(timeout internal.Duration, arg0 string, args ...string) ([]byte, error) { +func realExecCmd(timeout config.Duration, arg0 string, args ...string) ([]byte, error) { cmd := exec.Command(arg0, args...) var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return nil, err } diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index 87938e9837790..062c2cf1fe153 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -11,7 +11,7 @@ import ( "github.com/soniah/gosnmp" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -36,7 +36,7 @@ func TestLoad(t *testing.T) { require.Equal(t, "coldStart", e.oidText) } -func fakeExecCmd(_ internal.Duration, x string, y ...string) ([]byte, error) { +func fakeExecCmd(_ config.Duration, x string, y ...string) ([]byte, error) { return nil, fmt.Errorf("mock " + x + " " + strings.Join(y, " ")) } diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index 4ffa01a3440a8..362316ee5c468 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -13,6 +13,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -47,9 +48,9 @@ func (ssl *streamSocketListener) listen() { break } - if ssl.ReadBufferSize.Size > 0 { + if ssl.ReadBufferSize > 0 { if srb, ok := c.(setReadBufferer); ok { - if err := srb.SetReadBuffer(int(ssl.ReadBufferSize.Size)); err != nil { + if err := srb.SetReadBuffer(int(ssl.ReadBufferSize)); err != nil { ssl.Log.Error(err.Error()) break } @@ -99,13 +100,13 @@ func (ssl *streamSocketListener) setKeepAlive(c net.Conn) error { if !ok { return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(ssl.ServiceAddress, "://", 2)[0]) } - if ssl.KeepAlivePeriod.Duration == 0 { + if *ssl.KeepAlivePeriod == 0 { return tcpc.SetKeepAlive(false) } if err := tcpc.SetKeepAlive(true); err != nil { return err } - return tcpc.SetKeepAlivePeriod(ssl.KeepAlivePeriod.Duration) + return tcpc.SetKeepAlivePeriod(time.Duration(*ssl.KeepAlivePeriod)) } func (ssl *streamSocketListener) removeConnection(c net.Conn) { @@ -126,8 +127,8 @@ func (ssl *streamSocketListener) read(c net.Conn) { scnr := bufio.NewScanner(decoder) for { - if ssl.ReadTimeout != nil && ssl.ReadTimeout.Duration > 0 { - if err := c.SetReadDeadline(time.Now().Add(ssl.ReadTimeout.Duration)); err != nil { + if ssl.ReadTimeout != nil && *ssl.ReadTimeout > 0 { + if err := c.SetReadDeadline(time.Now().Add(time.Duration(*ssl.ReadTimeout))); err != nil { ssl.Log.Error("setting read deadline failed: %v", err) return } @@ -193,13 +194,13 @@ func (psl *packetSocketListener) listen() { } type SocketListener struct { - ServiceAddress string `toml:"service_address"` - MaxConnections int `toml:"max_connections"` - ReadBufferSize internal.Size `toml:"read_buffer_size"` - ReadTimeout *internal.Duration `toml:"read_timeout"` - KeepAlivePeriod *internal.Duration `toml:"keep_alive_period"` - SocketMode string `toml:"socket_mode"` - ContentEncoding string `toml:"content_encoding"` + ServiceAddress string `toml:"service_address"` + MaxConnections int `toml:"max_connections"` + ReadBufferSize config.Size `toml:"read_buffer_size"` + ReadTimeout *config.Duration `toml:"read_timeout"` + KeepAlivePeriod *config.Duration `toml:"keep_alive_period"` + SocketMode string `toml:"socket_mode"` + ContentEncoding string `toml:"content_encoding"` tlsint.ServerConfig wg sync.WaitGroup @@ -372,9 +373,9 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { } } - if sl.ReadBufferSize.Size > 0 { + if sl.ReadBufferSize > 0 { if srb, ok := pc.(setReadBufferer); ok { - if err := srb.SetReadBuffer(int(sl.ReadBufferSize.Size)); err != nil { + if err := srb.SetReadBuffer(int(sl.ReadBufferSize)); err != nil { sl.Log.Warnf("Setting read buffer on a %s socket failed: %v", protocol, err) } } else { diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index 2a24850eaf889..c33e59f7129b6 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/wlog" @@ -99,7 +100,7 @@ func TestSocketListener_tcp(t *testing.T) { sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "tcp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc := &testutil.Accumulator{} err := sl.Start(acc) @@ -118,7 +119,7 @@ func TestSocketListener_udp(t *testing.T) { sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "udp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc := &testutil.Accumulator{} err := sl.Start(acc) @@ -144,7 +145,7 @@ func TestSocketListener_unix(t *testing.T) { sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "unix://" + sock - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc := &testutil.Accumulator{} err = sl.Start(acc) @@ -174,7 +175,7 @@ func TestSocketListener_unixgram(t *testing.T) { sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "unixgram://" + sock - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc := &testutil.Accumulator{} err = sl.Start(acc) @@ -193,7 +194,7 @@ func TestSocketListenerDecode_tcp(t *testing.T) { sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "tcp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) sl.ContentEncoding = "gzip" acc := &testutil.Accumulator{} @@ -213,7 +214,7 @@ func TestSocketListenerDecode_udp(t *testing.T) { sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "udp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) sl.ContentEncoding = "gzip" acc := &testutil.Accumulator{} diff --git a/plugins/inputs/solr/solr.go b/plugins/inputs/solr/solr.go index 9a850bbdb0362..08531e7433b34 100644 --- a/plugins/inputs/solr/solr.go +++ b/plugins/inputs/solr/solr.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -36,7 +37,7 @@ type Solr struct { Servers []string Username string Password string - HTTPTimeout internal.Duration + HTTPTimeout config.Duration Cores []string client *http.Client } @@ -121,7 +122,7 @@ type Cache struct { // NewSolr return a new instance of Solr func NewSolr() *Solr { return &Solr{ - HTTPTimeout: internal.Duration{Duration: time.Second * 5}, + HTTPTimeout: config.Duration(time.Second * 5), } } @@ -461,11 +462,11 @@ func (s *Solr) mbeansURL(server string, core string) string { func (s *Solr) createHTTPClient() *http.Client { tr := &http.Transport{ - ResponseHeaderTimeout: s.HTTPTimeout.Duration, + ResponseHeaderTimeout: time.Duration(s.HTTPTimeout), } client := &http.Client{ Transport: tr, - Timeout: s.HTTPTimeout.Duration, + Timeout: time.Duration(s.HTTPTimeout), } return client diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index bcb3052756a43..885913f91dd1c 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -13,7 +13,7 @@ import ( googlepbduration "github.com/golang/protobuf/ptypes/duration" googlepbts "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/limiter" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" // Imports the Stackdriver Monitoring client package. @@ -108,9 +108,9 @@ const ( ) var ( - defaultCacheTTL = internal.Duration{Duration: 1 * time.Hour} - defaultWindow = internal.Duration{Duration: 1 * time.Minute} - defaultDelay = internal.Duration{Duration: 5 * time.Minute} + defaultCacheTTL = config.Duration(1 * time.Hour) + defaultWindow = config.Duration(1 * time.Minute) + defaultDelay = config.Duration(5 * time.Minute) ) type ( @@ -118,9 +118,9 @@ type ( Stackdriver struct { Project string `toml:"project"` RateLimit int `toml:"rate_limit"` - Window internal.Duration `toml:"window"` - Delay internal.Duration `toml:"delay"` - CacheTTL internal.Duration `toml:"cache_ttl"` + Window config.Duration `toml:"window"` + Delay config.Duration `toml:"delay"` + CacheTTL config.Duration `toml:"cache_ttl"` MetricTypePrefixInclude []string `toml:"metric_type_prefix_include"` MetricTypePrefixExclude []string `toml:"metric_type_prefix_exclude"` GatherRawDistributionBuckets bool `toml:"gather_raw_distribution_buckets"` @@ -322,14 +322,14 @@ func (s *Stackdriver) Gather(acc telegraf.Accumulator) error { // Returns the start and end time for the next collection. func (s *Stackdriver) updateWindow(prevEnd time.Time) (time.Time, time.Time) { var start time.Time - if s.Window.Duration != 0 { - start = time.Now().Add(-s.Delay.Duration).Add(-s.Window.Duration) + if time.Duration(s.Window) != 0 { + start = time.Now().Add(-time.Duration(s.Delay)).Add(-time.Duration(s.Window)) } else if prevEnd.IsZero() { - start = time.Now().Add(-s.Delay.Duration).Add(-defaultWindow.Duration) + start = time.Now().Add(-time.Duration(s.Delay)).Add(-time.Duration(defaultWindow)) } else { start = prevEnd } - end := time.Now().Add(-s.Delay.Duration) + end := time.Now().Add(-time.Duration(s.Delay)) return start, end } @@ -579,7 +579,7 @@ func (s *Stackdriver) generatetimeSeriesConfs( s.timeSeriesConfCache = &timeSeriesConfCache{ TimeSeriesConfs: ret, Generated: time.Now(), - TTL: s.CacheTTL.Duration, + TTL: time.Duration(s.CacheTTL), } return ret, nil diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index a302f4095e63f..ca60dbe3a2a79 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -190,7 +190,7 @@ metric type: - **max_tcp_connections** []int: Maximum number of concurrent TCP connections to allow. Used when protocol is set to tcp. - **tcp_keep_alive** boolean: Enable TCP keep alive probes -- **tcp_keep_alive_period** internal.Duration: Specifies the keep-alive period for an active network connection +- **tcp_keep_alive_period** duration: Specifies the keep-alive period for an active network connection - **service_address** string: Address to listen for statsd UDP packets on - **delete_gauges** boolean: Delete gauges on every collection interval - **delete_counters** boolean: Delete counters on every collection interval diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index f47e3e16ec687..1aded7f9f1894 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -49,7 +49,7 @@ type Statsd struct { // Percentiles specifies the percentiles that will be calculated for timing // and histogram stats. - Percentiles []internal.Number + Percentiles []float64 PercentileLimit int DeleteGauges bool @@ -119,8 +119,8 @@ type Statsd struct { MaxTCPConnections int `toml:"max_tcp_connections"` - TCPKeepAlive bool `toml:"tcp_keep_alive"` - TCPKeepAlivePeriod *internal.Duration `toml:"tcp_keep_alive_period"` + TCPKeepAlive bool `toml:"tcp_keep_alive"` + TCPKeepAlivePeriod *config.Duration `toml:"tcp_keep_alive_period"` // Max duration for each metric to stay cached without being updated. MaxTTL config.Duration `toml:"max_ttl"` @@ -304,8 +304,8 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { fields[prefix+"lower"] = stats.Lower() fields[prefix+"count"] = stats.Count() for _, percentile := range s.Percentiles { - name := fmt.Sprintf("%s%v_percentile", prefix, percentile.Value) - fields[name] = stats.Percentile(percentile.Value) + name := fmt.Sprintf("%s%v_percentile", prefix, percentile) + fields[name] = stats.Percentile(percentile) } } @@ -474,7 +474,7 @@ func (s *Statsd) tcpListen(listener *net.TCPListener) error { } if s.TCPKeepAlivePeriod != nil { - if err = conn.SetKeepAlivePeriod(s.TCPKeepAlivePeriod.Duration); err != nil { + if err = conn.SetKeepAlivePeriod(time.Duration(*s.TCPKeepAlivePeriod)); err != nil { return err } } diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 3e91d4f960402..bef21b8de9eff 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -9,7 +9,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/internal" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -398,7 +397,7 @@ func TestParse_Counters(t *testing.T) { // Tests low-level functionality of timings func TestParse_Timings(t *testing.T) { s := NewTestStatsd() - s.Percentiles = []internal.Number{{Value: 90.0}} + s.Percentiles = []float64{90.0} acc := &testutil.Accumulator{} // Test that timings work @@ -1187,7 +1186,7 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{"measurement.field"} - s.Percentiles = []internal.Number{{Value: 90.0}} + s.Percentiles = []float64{90.0} acc := &testutil.Accumulator{} validLines := []string{ @@ -1235,7 +1234,7 @@ func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{} - s.Percentiles = []internal.Number{{Value: 90.0}} + s.Percentiles = []float64{90.0} acc := &testutil.Accumulator{} validLines := []string{ diff --git a/plugins/inputs/syslog/commons_test.go b/plugins/inputs/syslog/commons_test.go index 10f2ddf511d22..5b30b3778ec8e 100644 --- a/plugins/inputs/syslog/commons_test.go +++ b/plugins/inputs/syslog/commons_test.go @@ -4,7 +4,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" ) @@ -40,17 +40,15 @@ func newUDPSyslogReceiver(address string, bestEffort bool) *Syslog { } } -func newTCPSyslogReceiver(address string, keepAlive *internal.Duration, maxConn int, bestEffort bool, f framing.Framing) *Syslog { - d := &internal.Duration{ - Duration: defaultReadTimeout, - } +func newTCPSyslogReceiver(address string, keepAlive *config.Duration, maxConn int, bestEffort bool, f framing.Framing) *Syslog { + d := config.Duration(defaultReadTimeout) s := &Syslog{ Address: address, now: func() time.Time { return defaultTime }, Framing: f, - ReadTimeout: d, + ReadTimeout: &d, BestEffort: bestEffort, Separator: "_", } diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go index 9ec62238a17b0..4d29daaf53915 100644 --- a/plugins/inputs/syslog/nontransparent_test.go +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" ) @@ -136,7 +136,7 @@ func getTestCasesForNonTransparent() []testCaseStream { return testCases } -func testStrictNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { +func testStrictNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *config.Duration) { for _, tc := range getTestCasesForNonTransparent() { t.Run(tc.name, func(t *testing.T) { // Creation of a strict mode receiver @@ -195,7 +195,7 @@ func testStrictNonTransparent(t *testing.T, protocol string, address string, wan } func testBestEffortNonTransparent(t *testing.T, protocol string, address string, wantTLS bool) { - keepAlive := (*internal.Duration)(nil) + keepAlive := (*config.Duration)(nil) for _, tc := range getTestCasesForNonTransparent() { t.Run(tc.name, func(t *testing.T) { // Creation of a best effort mode receiver @@ -260,11 +260,13 @@ func TestNonTransparentBestEffort_tcp_tls(t *testing.T) { } func TestNonTransparentStrictWithKeepAlive_tcp_tls(t *testing.T) { - testStrictNonTransparent(t, "tcp", address, true, &internal.Duration{Duration: time.Minute}) + d := config.Duration(time.Minute) + testStrictNonTransparent(t, "tcp", address, true, &d) } func TestNonTransparentStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { - testStrictNonTransparent(t, "tcp", address, true, &internal.Duration{Duration: 0}) + d := config.Duration(0) + testStrictNonTransparent(t, "tcp", address, true, &d) } func TestNonTransparentStrict_unix(t *testing.T) { diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index 2f09822156a08..53fee69d112a5 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" ) @@ -336,7 +336,7 @@ func getTestCasesForOctetCounting() []testCaseStream { return testCases } -func testStrictOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { +func testStrictOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *config.Duration) { for _, tc := range getTestCasesForOctetCounting() { t.Run(tc.name, func(t *testing.T) { // Creation of a strict mode receiver @@ -395,7 +395,7 @@ func testStrictOctetCounting(t *testing.T, protocol string, address string, want } func testBestEffortOctetCounting(t *testing.T, protocol string, address string, wantTLS bool) { - keepAlive := (*internal.Duration)(nil) + keepAlive := (*config.Duration)(nil) for _, tc := range getTestCasesForOctetCounting() { t.Run(tc.name, func(t *testing.T) { // Creation of a best effort mode receiver @@ -460,11 +460,13 @@ func TestOctetCountingBestEffort_tcp_tls(t *testing.T) { } func TestOctetCountingStrictWithKeepAlive_tcp_tls(t *testing.T) { - testStrictOctetCounting(t, "tcp", address, true, &internal.Duration{Duration: time.Minute}) + d := config.Duration(time.Minute) + testStrictOctetCounting(t, "tcp", address, true, &d) } func TestOctetCountingStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { - testStrictOctetCounting(t, "tcp", address, true, &internal.Duration{Duration: 0}) + d := config.Duration(0) + testStrictOctetCounting(t, "tcp", address, true, &d) } func TestOctetCountingStrict_unix(t *testing.T) { diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 2bae730fb6e08..19e07913b72c4 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -19,7 +19,7 @@ import ( "github.com/influxdata/go-syslog/v2/rfc5424" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" tlsConfig "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -32,9 +32,9 @@ const ipMaxPacketSize = 64 * 1024 type Syslog struct { tlsConfig.ServerConfig Address string `toml:"server"` - KeepAlivePeriod *internal.Duration + KeepAlivePeriod *config.Duration MaxConnections int - ReadTimeout *internal.Duration + ReadTimeout *config.Duration Framing framing.Framing Trailer nontransparent.TrailerType BestEffort bool @@ -315,8 +315,8 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { emit := func(r *syslog.Result) { s.store(*r, acc) - if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { - if err := conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)); err != nil { + if s.ReadTimeout != nil && time.Duration(*s.ReadTimeout) > 0 { + if err := conn.SetReadDeadline(time.Now().Add(time.Duration(*s.ReadTimeout))); err != nil { acc.AddError(fmt.Errorf("setting read deadline failed: %v", err)) } } @@ -342,8 +342,8 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { p.Parse(conn) - if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { - if err := conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)); err != nil { + if s.ReadTimeout != nil && time.Duration(*s.ReadTimeout) > 0 { + if err := conn.SetReadDeadline(time.Now().Add(time.Duration(*s.ReadTimeout))); err != nil { acc.AddError(fmt.Errorf("setting read deadline failed: %v", err)) } } @@ -354,13 +354,13 @@ func (s *Syslog) setKeepAlive(c *net.TCPConn) error { return nil } - if s.KeepAlivePeriod.Duration == 0 { + if *s.KeepAlivePeriod == 0 { return c.SetKeepAlive(false) } if err := c.SetKeepAlive(true); err != nil { return err } - return c.SetKeepAlivePeriod(s.KeepAlivePeriod.Duration) + return c.SetKeepAlivePeriod(time.Duration(*s.KeepAlivePeriod)) } func (s *Syslog) store(res syslog.Result, acc telegraf.Accumulator) { @@ -460,16 +460,15 @@ func getNanoNow() time.Time { } func init() { + defaultTimeout := config.Duration(defaultReadTimeout) inputs.Add("syslog", func() telegraf.Input { return &Syslog{ - Address: ":6514", - now: getNanoNow, - ReadTimeout: &internal.Duration{ - Duration: defaultReadTimeout, - }, - Framing: framing.OctetCounting, - Trailer: nontransparent.LF, - Separator: "_", + Address: ":6514", + now: getNanoNow, + ReadTimeout: &defaultTimeout, + Framing: framing.OctetCounting, + Trailer: nontransparent.LF, + Separator: "_", } }) } diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 6eb649ebaacdc..01b4db9fa4af9 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -16,6 +16,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -33,7 +34,7 @@ type Sysstat struct { Sadc string `toml:"sadc_path"` // Force the execution time of sadc - SadcInterval internal.Duration `toml:"sadc_interval"` + SadcInterval config.Duration `toml:"sadc_interval"` // Sadf represents the path to the sadf cmd. Sadf string `toml:"sadf_path"` @@ -135,9 +136,9 @@ func (*Sysstat) SampleConfig() string { } func (s *Sysstat) Gather(acc telegraf.Accumulator) error { - if s.SadcInterval.Duration != 0 { + if time.Duration(s.SadcInterval) != 0 { // Collect interval is calculated as interval - parseInterval - s.interval = int(s.SadcInterval.Duration.Seconds()) + parseInterval + s.interval = int(time.Duration(s.SadcInterval).Seconds()) + parseInterval } if s.interval == 0 { diff --git a/plugins/inputs/systemd_units/systemd_units_linux.go b/plugins/inputs/systemd_units/systemd_units_linux.go index e7a4be077d7f4..e94b9432136e4 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux.go +++ b/plugins/inputs/systemd_units/systemd_units_linux.go @@ -9,18 +9,19 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) // SystemdUnits is a telegraf plugin to gather systemd unit status type SystemdUnits struct { - Timeout internal.Duration + Timeout config.Duration UnitType string `toml:"unittype"` systemctl systemctl } -type systemctl func(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) +type systemctl func(timeout config.Duration, unitType string) (*bytes.Buffer, error) const measurement = "systemd_units" @@ -112,7 +113,7 @@ var subMap = map[string]int{ } var ( - defaultTimeout = internal.Duration{Duration: time.Second} + defaultTimeout = config.Duration(time.Second) defaultUnitType = "service" ) @@ -191,20 +192,20 @@ func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { return nil } -func setSystemctl(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) { +func setSystemctl(timeout config.Duration, unitType string) (*bytes.Buffer, error) { // is systemctl available ? systemctlPath, err := exec.LookPath("systemctl") if err != nil { return nil, err } - cmd := exec.Command(systemctlPath, "list-units", "--all", "--plain", fmt.Sprintf("--type=%s", UnitType), "--no-legend") + cmd := exec.Command(systemctlPath, "list-units", "--all", "--plain", fmt.Sprintf("--type=%s", unitType), "--no-legend") var out bytes.Buffer cmd.Stdout = &out - err = internal.RunTimeout(cmd, Timeout.Duration) + err = internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { - return &out, fmt.Errorf("error running systemctl list-units --all --plain --type=%s --no-legend: %s", UnitType, err) + return &out, fmt.Errorf("error running systemctl list-units --all --plain --type=%s --no-legend: %s", unitType, err) } return &out, nil diff --git a/plugins/inputs/systemd_units/systemd_units_linux_test.go b/plugins/inputs/systemd_units/systemd_units_linux_test.go index 01af08105998d..a6cfbd6552771 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux_test.go +++ b/plugins/inputs/systemd_units/systemd_units_linux_test.go @@ -6,7 +6,7 @@ import ( "reflect" "testing" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) @@ -74,7 +74,7 @@ func TestSystemdUnits(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { systemdUnits := &SystemdUnits{ - systemctl: func(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) { + systemctl: func(timeout config.Duration, unitType string) (*bytes.Buffer, error) { return bytes.NewBufferString(tt.line), nil }, } diff --git a/plugins/inputs/tail/multiline.go b/plugins/inputs/tail/multiline.go index 58a9b9e1e588c..7ea2e460b88d6 100644 --- a/plugins/inputs/tail/multiline.go +++ b/plugins/inputs/tail/multiline.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" ) // Indicates relation to the multiline event: previous or next @@ -23,7 +23,7 @@ type MultilineConfig struct { Pattern string MatchWhichLine MultilineMatchWhichLine `toml:"match_which_line"` InvertMatch bool - Timeout *internal.Duration + Timeout *config.Duration } const ( @@ -43,8 +43,9 @@ func (m *MultilineConfig) NewMultiline() (*Multiline, error) { if r, err = regexp.Compile(m.Pattern); err != nil { return nil, err } - if m.Timeout == nil || m.Timeout.Duration.Nanoseconds() == int64(0) { - m.Timeout = &internal.Duration{Duration: 5 * time.Second} + if m.Timeout == nil || time.Duration(*m.Timeout).Nanoseconds() == int64(0) { + d := config.Duration(5 * time.Second) + m.Timeout = &d } } diff --git a/plugins/inputs/tail/multiline_test.go b/plugins/inputs/tail/multiline_test.go index 44bfafb2ba25f..26a7e80292772 100644 --- a/plugins/inputs/tail/multiline_test.go +++ b/plugins/inputs/tail/multiline_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/stretchr/testify/assert" ) @@ -32,20 +32,20 @@ func TestMultilineConfigError(t *testing.T) { } func TestMultilineConfigTimeoutSpecified(t *testing.T) { - duration, _ := time.ParseDuration("10s") + duration := config.Duration(10 * time.Second) c := &MultilineConfig{ Pattern: ".*", MatchWhichLine: Previous, - Timeout: &internal.Duration{Duration: duration}, + Timeout: &duration, } m, err := c.NewMultiline() assert.NoError(t, err, "Configuration was OK.") - assert.Equal(t, duration, m.config.Timeout.Duration) + assert.Equal(t, duration, *m.config.Timeout) } func TestMultilineConfigDefaultTimeout(t *testing.T) { - duration, _ := time.ParseDuration("5s") + duration := config.Duration(5 * time.Second) c := &MultilineConfig{ Pattern: ".*", MatchWhichLine: Previous, @@ -53,7 +53,7 @@ func TestMultilineConfigDefaultTimeout(t *testing.T) { m, err := c.NewMultiline() assert.NoError(t, err, "Configuration was OK.") - assert.Equal(t, duration, m.config.Timeout.Duration) + assert.Equal(t, duration, *m.config.Timeout) } func TestMultilineIsEnabled(t *testing.T) { diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 84a91635540bf..d84c09ff8d3c2 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -325,7 +325,7 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { // The multiline mode requires a timer in order to flush the multiline buffer // if no new lines are incoming. if t.multiline.IsEnabled() { - timer = time.NewTimer(t.MultilineConfig.Timeout.Duration) + timer = time.NewTimer(time.Duration(*t.MultilineConfig.Timeout)) timeout = timer.C } @@ -337,7 +337,7 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { line = nil if timer != nil { - timer.Reset(t.MultilineConfig.Timeout.Duration) + timer.Reset(time.Duration(*t.MultilineConfig.Timeout)) } select { diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 0d8460a251a72..b855691e6f1ab 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/plugins/parsers/influx" @@ -120,9 +120,9 @@ func TestTailDosLineEndings(t *testing.T) { func TestGrokParseLogFilesWithMultiline(t *testing.T) { //we make sure the timeout won't kick in - duration, _ := time.ParseDuration("100s") - - tt := NewTestTail() + d, _ := time.ParseDuration("100s") + duration := config.Duration(d) + tt := NewTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{filepath.Join(testdataDir, "test_multiline.log")} @@ -130,7 +130,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { Pattern: `^[^\[]`, MatchWhichLine: Previous, InvertMatch: false, - Timeout: &internal.Duration{Duration: duration}, + Timeout: &duration, } tt.SetParserFunc(createGrokParser) @@ -183,9 +183,10 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { require.NoError(t, tmpfile.Sync()) // set tight timeout for tests - duration := 10 * time.Millisecond + d := 10 * time.Millisecond + duration := config.Duration(d) + tt := NewTail() - tt := NewTestTail() tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} @@ -193,7 +194,7 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { Pattern: `^[^\[]`, MatchWhichLine: Previous, InvertMatch: false, - Timeout: &internal.Duration{Duration: duration}, + Timeout: &duration, } tt.SetParserFunc(createGrokParser) @@ -236,7 +237,7 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *testing.T) { //we make sure the timeout won't kick in - duration := 100 * time.Second + duration := config.Duration(100 * time.Second) tt := NewTestTail() tt.Log = testutil.Logger{} @@ -246,7 +247,7 @@ func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *test Pattern: `^[^\[]`, MatchWhichLine: Previous, InvertMatch: false, - Timeout: &internal.Duration{Duration: duration}, + Timeout: &duration, } tt.SetParserFunc(createGrokParser) diff --git a/plugins/inputs/tengine/tengine.go b/plugins/inputs/tengine/tengine.go index 846a5411dba33..1787354c22cff 100644 --- a/plugins/inputs/tengine/tengine.go +++ b/plugins/inputs/tengine/tengine.go @@ -14,14 +14,14 @@ import ( "io" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type Tengine struct { Urls []string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig client *http.Client @@ -87,15 +87,15 @@ func (n *Tengine) createHTTPClient() (*http.Client, error) { return nil, err } - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } client := &http.Client{ Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/tomcat/tomcat.go b/plugins/inputs/tomcat/tomcat.go index 60081e1295b6c..5b869fb4d8c76 100644 --- a/plugins/inputs/tomcat/tomcat.go +++ b/plugins/inputs/tomcat/tomcat.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -63,7 +63,7 @@ type Tomcat struct { URL string Username string Password string - Timeout internal.Duration + Timeout config.Duration tls.ClientConfig client *http.Client @@ -199,7 +199,7 @@ func (s *Tomcat) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: s.Timeout.Duration, + Timeout: time.Duration(s.Timeout), } return client, nil @@ -211,7 +211,7 @@ func init() { URL: "http://127.0.0.1:8080/manager/status/all?XML=true", Username: "tomcat", Password: "s3cret", - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), } }) } diff --git a/plugins/inputs/unbound/unbound.go b/plugins/inputs/unbound/unbound.go index 441d44c852f92..72a9e4db5965d 100644 --- a/plugins/inputs/unbound/unbound.go +++ b/plugins/inputs/unbound/unbound.go @@ -12,6 +12,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" @@ -21,18 +22,18 @@ type runner func(unbound Unbound) (*bytes.Buffer, error) // Unbound is used to store configuration values type Unbound struct { - Binary string `toml:"binary"` - Timeout internal.Duration `toml:"timeout"` - UseSudo bool `toml:"use_sudo"` - Server string `toml:"server"` - ThreadAsTag bool `toml:"thread_as_tag"` - ConfigFile string `toml:"config_file"` + Binary string `toml:"binary"` + Timeout config.Duration `toml:"timeout"` + UseSudo bool `toml:"use_sudo"` + Server string `toml:"server"` + ThreadAsTag bool `toml:"thread_as_tag"` + ConfigFile string `toml:"config_file"` run runner } var defaultBinary = "/usr/sbin/unbound-control" -var defaultTimeout = internal.Duration{Duration: time.Second} +var defaultTimeout = config.Duration(time.Second) var sampleConfig = ` ## Address of server to connect to, read from unbound conf default, optionally ':port' @@ -82,7 +83,7 @@ func unboundRunner(unbound Unbound) (*bytes.Buffer, error) { // Unbound control requires an IP address, and we want to be nice to the user resolver := net.Resolver{} - ctx, lookUpCancel := context.WithTimeout(context.Background(), unbound.Timeout.Duration) + ctx, lookUpCancel := context.WithTimeout(context.Background(), time.Duration(unbound.Timeout)) defer lookUpCancel() serverIps, err := resolver.LookupIPAddr(ctx, host) if err != nil { @@ -112,7 +113,7 @@ func unboundRunner(unbound Unbound) (*bytes.Buffer, error) { var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, unbound.Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(unbound.Timeout)) if err != nil { return &out, fmt.Errorf("error running unbound-control: %s (%s %v)", err, unbound.Binary, cmdArgs) } diff --git a/plugins/inputs/unbound/unbound_test.go b/plugins/inputs/unbound/unbound_test.go index cac0316d7db01..d3900602441f1 100644 --- a/plugins/inputs/unbound/unbound_test.go +++ b/plugins/inputs/unbound/unbound_test.go @@ -4,9 +4,8 @@ import ( "bytes" "testing" - "github.com/stretchr/testify/assert" - "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" ) func UnboundControl(output string) func(unbound Unbound) (*bytes.Buffer, error) { diff --git a/plugins/inputs/uwsgi/uwsgi.go b/plugins/inputs/uwsgi/uwsgi.go index 81dd3c350cf3d..f536e4b27c44f 100644 --- a/plugins/inputs/uwsgi/uwsgi.go +++ b/plugins/inputs/uwsgi/uwsgi.go @@ -15,14 +15,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) // Uwsgi server struct type Uwsgi struct { - Servers []string `toml:"servers"` - Timeout internal.Duration `toml:"timeout"` + Servers []string `toml:"servers"` + Timeout config.Duration `toml:"timeout"` client *http.Client } @@ -51,7 +51,7 @@ func (u *Uwsgi) SampleConfig() string { func (u *Uwsgi) Gather(acc telegraf.Accumulator) error { if u.client == nil { u.client = &http.Client{ - Timeout: u.Timeout.Duration, + Timeout: time.Duration(u.Timeout), } } wg := &sync.WaitGroup{} @@ -85,13 +85,13 @@ func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, url *url.URL) error { switch url.Scheme { case "tcp": - r, err = net.DialTimeout(url.Scheme, url.Host, u.Timeout.Duration) + r, err = net.DialTimeout(url.Scheme, url.Host, time.Duration(u.Timeout)) if err != nil { return err } s.source = url.Host case "unix": - r, err = net.DialTimeout(url.Scheme, url.Path, u.Timeout.Duration) + r, err = net.DialTimeout(url.Scheme, url.Path, time.Duration(u.Timeout)) if err != nil { return err } @@ -216,7 +216,7 @@ func (u *Uwsgi) gatherCores(acc telegraf.Accumulator, s *StatsServer) { func init() { inputs.Add("uwsgi", func() telegraf.Input { return &Uwsgi{ - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), } }) } diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index c2dcce699d55b..e4f18bee42ed3 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -12,12 +12,13 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, UseSudo bool, InstanceName string, Timeout internal.Duration) (*bytes.Buffer, error) +type runner func(cmdName string, useSudo bool, instanceName string, timeout config.Duration) (*bytes.Buffer, error) // Varnish is used to store configuration values type Varnish struct { @@ -25,7 +26,7 @@ type Varnish struct { Binary string UseSudo bool InstanceName string - Timeout internal.Duration + Timeout config.Duration filter filter.Filter run runner @@ -33,7 +34,7 @@ type Varnish struct { var defaultStats = []string{"MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"} var defaultBinary = "/usr/bin/varnishstat" -var defaultTimeout = internal.Duration{Duration: time.Second} +var defaultTimeout = config.Duration(time.Second) var sampleConfig = ` ## If running as a restricted user you can prepend sudo for additional access: @@ -66,16 +67,16 @@ func (s *Varnish) SampleConfig() string { } // Shell out to varnish_stat and return the output -func varnishRunner(cmdName string, UseSudo bool, InstanceName string, Timeout internal.Duration) (*bytes.Buffer, error) { +func varnishRunner(cmdName string, useSudo bool, instanceName string, timeout config.Duration) (*bytes.Buffer, error) { cmdArgs := []string{"-1"} - if InstanceName != "" { - cmdArgs = append(cmdArgs, []string{"-n", InstanceName}...) + if instanceName != "" { + cmdArgs = append(cmdArgs, []string{"-n", instanceName}...) } cmd := exec.Command(cmdName, cmdArgs...) - if UseSudo { + if useSudo { cmdArgs = append([]string{cmdName}, cmdArgs...) cmdArgs = append([]string{"-n"}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) @@ -84,7 +85,7 @@ func varnishRunner(cmdName string, UseSudo bool, InstanceName string, Timeout in var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running varnishstat: %s", err) } diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index 2642782fe806d..4ba9e941a52ee 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -10,12 +10,12 @@ import ( "github.com/stretchr/testify/assert" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) -func fakeVarnishStat(output string) func(string, bool, string, internal.Duration) (*bytes.Buffer, error) { - return func(string, bool, string, internal.Duration) (*bytes.Buffer, error) { +func fakeVarnishStat(output string) func(string, bool, string, config.Duration) (*bytes.Buffer, error) { + return func(string, bool, string, config.Duration) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go index 0eae3be6ffb97..2795c94ae6698 100644 --- a/plugins/inputs/vsphere/client.go +++ b/plugins/inputs/vsphere/client.go @@ -75,11 +75,11 @@ func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { // Execute a dummy call against the server to make sure the client is // still functional. If not, try to log back in. If that doesn't work, // we give up. - ctx1, cancel1 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout)) defer cancel1() if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil { cf.parent.Log.Info("Client session seems to have time out. Reauthenticating!") - ctx2, cancel2 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) + ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(cf.parent.Timeout)) defer cancel2() if err := cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)); err != nil { if !retrying { @@ -131,7 +131,7 @@ func NewClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*Client, } } - ctx1, cancel1 := context.WithTimeout(ctx, vs.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(vs.Timeout)) defer cancel1() vimClient, err := vim25.NewClient(ctx1, soapClient) if err != nil { @@ -141,7 +141,7 @@ func NewClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*Client, // If TSLKey is specified, try to log in as an extension using a cert. if vs.TLSKey != "" { - ctx2, cancel2 := context.WithTimeout(ctx, vs.Timeout.Duration) + ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(vs.Timeout)) defer cancel2() if err := sm.LoginExtensionByCertificate(ctx2, vs.TLSKey); err != nil { return nil, err @@ -161,7 +161,7 @@ func NewClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*Client, } } - c.Timeout = vs.Timeout.Duration + c.Timeout = time.Duration(vs.Timeout) m := view.NewManager(c.Client) v, err := m.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{}, true) @@ -178,10 +178,10 @@ func NewClient(ctx context.Context, vSphereURL *url.URL, vs *VSphere) (*Client, Root: v, Perf: p, Valid: true, - Timeout: vs.Timeout.Duration, + Timeout: time.Duration(vs.Timeout), } // Adjust max query size if needed - ctx3, cancel3 := context.WithTimeout(ctx, vs.Timeout.Duration) + ctx3, cancel3 := context.WithTimeout(ctx, time.Duration(vs.Timeout)) defer cancel3() n, err := client.GetMaxQueryMetrics(ctx3) if err != nil { diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 85fda786b17ba..bff3701653c8d 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -258,7 +258,7 @@ func isSimple(include []string, exclude []string) bool { } func (e *Endpoint) startDiscovery(ctx context.Context) { - e.discoveryTicker = time.NewTicker(e.Parent.ObjectDiscoveryInterval.Duration) + e.discoveryTicker = time.NewTicker(time.Duration(e.Parent.ObjectDiscoveryInterval)) go func() { for { select { @@ -300,7 +300,7 @@ func (e *Endpoint) init(ctx context.Context) error { } } - if e.Parent.ObjectDiscoveryInterval.Duration > 0 { + if time.Duration(e.Parent.ObjectDiscoveryInterval) > 0 { e.Parent.Log.Debug("Running initial discovery") e.initalDiscovery(ctx) } @@ -339,7 +339,7 @@ func (e *Endpoint) getMetadata(ctx context.Context, obj *objectRef, sampling int return nil, err } - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() metrics, err := client.Perf.AvailableMetric(ctx1, obj.ref.Reference(), sampling) if err != nil { @@ -367,7 +367,7 @@ func (e *Endpoint) getAncestorName(ctx context.Context, client *Client, resource path = append(path, here.Reference().String()) o := object.NewCommon(client.Client.Client, r) var result mo.ManagedEntity - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() err := o.Properties(ctx1, here, []string{"parent", "name"}, &result) if err != nil { @@ -429,7 +429,7 @@ func (e *Endpoint) discover(ctx context.Context) error { paths: res.paths, excludePaths: res.excludePaths} - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) objects, err := res.getObjects(ctx1, e, &rf) cancel1() if err != nil { @@ -588,7 +588,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, func getDatacenters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.Datacenter - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() err := filter.FindAll(ctx1, &resources) if err != nil { @@ -609,7 +609,7 @@ func getDatacenters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (o func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.ClusterComputeResource - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() err := filter.FindAll(ctx1, &resources) if err != nil { @@ -623,7 +623,7 @@ func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (obje // We're not interested in the immediate parent (a folder), but the data center. p, ok := cache[r.Parent.Value] if !ok { - ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx2, cancel2 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel2() client, err := e.clientFactory.GetClient(ctx2) if err != nil { @@ -631,7 +631,7 @@ func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (obje } o := object.NewFolder(client.Client.Client, *r.Parent) var folder mo.Folder - ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx3, cancel3 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel3() err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder) if err != nil { @@ -679,7 +679,7 @@ func getHosts(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectM func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.VirtualMachine - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() err := filter.FindAll(ctx1, &resources) if err != nil { @@ -769,7 +769,7 @@ func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap func getDatastores(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { var resources []mo.Datastore - ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() err := filter.FindAll(ctx1, &resources) if err != nil { @@ -840,7 +840,7 @@ func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error } // If discovery interval is disabled (0), discover on each collection cycle - if e.Parent.ObjectDiscoveryInterval.Duration == 0 { + if time.Duration(e.Parent.ObjectDiscoveryInterval) == 0 { err := e.discover(ctx) if err != nil { return err diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index a18a5ca70f982..7e688b73c55fc 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -6,7 +6,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/vmware/govmomi/vim25/soap" @@ -54,8 +54,8 @@ type VSphere struct { CollectConcurrency int DiscoverConcurrency int ForceDiscoverOnInit bool - ObjectDiscoveryInterval internal.Duration - Timeout internal.Duration + ObjectDiscoveryInterval config.Duration + Timeout config.Duration endpoints []*Endpoint cancel context.CancelFunc @@ -364,8 +364,8 @@ func init() { CollectConcurrency: 1, DiscoverConcurrency: 1, ForceDiscoverOnInit: true, - ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300}, - Timeout: internal.Duration{Duration: time.Second * 60}, + ObjectDiscoveryInterval: config.Duration(time.Second * 300), + Timeout: config.Duration(time.Second * 60), } }) } diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index 3dcde06f5e583..ae2ce57b9a88e 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -11,7 +11,7 @@ import ( "time" "unsafe" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" itls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" @@ -147,8 +147,8 @@ func defaultVSphere() *VSphere { MaxQueryObjects: 256, MaxQueryMetrics: 256, - ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300}, - Timeout: internal.Duration{Duration: time.Second * 20}, + ObjectDiscoveryInterval: config.Duration(time.Second * 300), + Timeout: config.Duration(time.Second * 20), ForceDiscoverOnInit: true, DiscoverConcurrency: 1, CollectConcurrency: 1, diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 73cc7dc7311d4..4bcbbfb1b2318 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -143,7 +143,7 @@ type Win_PerfCounters struct { PreVistaSupport bool UsePerfCounterTime bool Object []perfobject - CountersRefreshInterval internal.Duration + CountersRefreshInterval config.Duration UseWildcardsExpansion bool Log telegraf.Logger @@ -345,7 +345,7 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error { // Parse the config once var err error - if m.lastRefreshed.IsZero() || (m.CountersRefreshInterval.Duration.Nanoseconds() > 0 && m.lastRefreshed.Add(m.CountersRefreshInterval.Duration).Before(time.Now())) { + if m.lastRefreshed.IsZero() || (m.CountersRefreshInterval > 0 && m.lastRefreshed.Add(time.Duration(m.CountersRefreshInterval)).Before(time.Now())) { if m.counters != nil { m.counters = m.counters[:0] } @@ -477,6 +477,6 @@ func isKnownCounterDataError(err error) bool { func init() { inputs.Add("win_perf_counters", func() telegraf.Input { - return &Win_PerfCounters{query: &PerformanceQueryImpl{}, CountersRefreshInterval: internal.Duration{Duration: time.Second * 60}} + return &Win_PerfCounters{query: &PerformanceQueryImpl{}, CountersRefreshInterval: config.Duration(time.Second * 60)} }) } diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index a11f0ace8da3a..930e923754ac8 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -734,7 +734,7 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { Object: perfObjects, UseWildcardsExpansion: true, query: fpm, - CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}, + CountersRefreshInterval: config.Duration(time.Second * 10), } var acc1 testutil.Accumulator err = m.Gather(&acc1) @@ -791,7 +791,7 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1) acc2.AssertContainsTaggedFields(t, measurement, fields2, tags2) acc2.AssertDoesNotContainsTaggedFields(t, measurement, fields3, tags3) - time.Sleep(m.CountersRefreshInterval.Duration) + time.Sleep(time.Duration(m.CountersRefreshInterval)) var acc3 testutil.Accumulator err = m.Gather(&acc3) @@ -827,7 +827,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { Object: perfObjects, UseWildcardsExpansion: false, query: fpm, - CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}} + CountersRefreshInterval: config.Duration(time.Second * 10)} var acc1 testutil.Accumulator err = m.Gather(&acc1) assert.Len(t, m.counters, 2) @@ -902,7 +902,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { fpm.Open() - time.Sleep(m.CountersRefreshInterval.Duration) + time.Sleep(time.Duration(m.CountersRefreshInterval)) var acc3 testutil.Accumulator err = m.Gather(&acc3) diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 3128b90686d0c..7c1b0657c7e80 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -15,7 +15,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/globpath" _tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -43,9 +43,9 @@ const description = "Reads metrics from a SSL certificate" // X509Cert holds the configuration of the plugin. type X509Cert struct { - Sources []string `toml:"sources"` - Timeout internal.Duration `toml:"timeout"` - ServerName string `toml:"server_name"` + Sources []string `toml:"sources"` + Timeout config.Duration `toml:"timeout"` + ServerName string `toml:"server_name"` tlsCfg *tls.Config _tls.ClientConfig locations []*url.URL @@ -252,7 +252,7 @@ func (c *X509Cert) Gather(acc telegraf.Accumulator) error { } for _, location := range append(c.locations, collectedUrls...) { - certs, err := c.getCert(location, c.Timeout.Duration*time.Second) + certs, err := c.getCert(location, time.Duration(c.Timeout)) if err != nil { acc.AddError(fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error())) } @@ -322,7 +322,7 @@ func init() { inputs.Add("x509_cert", func() telegraf.Input { return &X509Cert{ Sources: []string{}, - Timeout: internal.Duration{Duration: 5 * time.Second}, // set default timeout to 5s + Timeout: config.Duration(5 * time.Second), // set default timeout to 5s } }) } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 4aafd3cb4090b..3253c9ac9c7ae 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" _tls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" ) @@ -60,7 +60,7 @@ func TestGatherRemoteIntegration(t *testing.T) { pair, err := tls.X509KeyPair([]byte(pki.ReadServerCert()), []byte(pki.ReadServerKey())) require.NoError(t, err) - config := &tls.Config{ + cfg := &tls.Config{ InsecureSkipVerify: true, Certificates: []tls.Certificate{pair}, } @@ -68,13 +68,13 @@ func TestGatherRemoteIntegration(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { if test.unset { - config.Certificates = nil - config.GetCertificate = func(i *tls.ClientHelloInfo) (*tls.Certificate, error) { + cfg.Certificates = nil + cfg.GetCertificate = func(i *tls.ClientHelloInfo) (*tls.Certificate, error) { return nil, nil } } - ln, err := tls.Listen("tcp", ":0", config) + ln, err := tls.Listen("tcp", ":0", cfg) require.NoError(t, err) defer ln.Close() @@ -85,7 +85,7 @@ func TestGatherRemoteIntegration(t *testing.T) { sconn.Close() } - serverConfig := config.Clone() + serverConfig := cfg.Clone() srv := tls.Server(sconn, serverConfig) if test.noshake { @@ -100,7 +100,7 @@ func TestGatherRemoteIntegration(t *testing.T) { sc := X509Cert{ Sources: []string{test.server}, - Timeout: internal.Duration{Duration: test.timeout}, + Timeout: config.Duration(test.timeout), } require.NoError(t, sc.Init()) @@ -306,7 +306,7 @@ func TestGatherCertMustNotTimeout(t *testing.T) { duration := time.Duration(15) * time.Second m := &X509Cert{ Sources: []string{"https://www.influxdata.com:443"}, - Timeout: internal.Duration{Duration: duration}, + Timeout: config.Duration(duration), } require.NoError(t, m.Init()) diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index 48c00a1d3ef7f..82a05bc1778b2 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -12,7 +12,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -22,7 +22,7 @@ var zookeeperFormatRE = regexp.MustCompile(`^zk_(\w[\w\.\-]*)\s+([\w\.\-]+)`) // Zookeeper is a zookeeper plugin type Zookeeper struct { Servers []string - Timeout internal.Duration + Timeout config.Duration EnableTLS bool `toml:"enable_tls"` EnableSSL bool `toml:"enable_ssl"` // deprecated in 1.7; use enable_tls @@ -89,11 +89,11 @@ func (z *Zookeeper) Gather(acc telegraf.Accumulator) error { z.initialized = true } - if z.Timeout.Duration < 1*time.Second { - z.Timeout.Duration = defaultTimeout + if z.Timeout < config.Duration(1*time.Second) { + z.Timeout = config.Duration(defaultTimeout) } - ctx, cancel := context.WithTimeout(ctx, z.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(z.Timeout)) defer cancel() if len(z.Servers) == 0 { diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go index 0ee62f1e94fc2..5bbbba9814e38 100644 --- a/plugins/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -6,17 +6,18 @@ import ( "fmt" "net/http" "strings" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" ) type Amon struct { - ServerKey string `toml:"server_key"` - AmonInstance string `toml:"amon_instance"` - Timeout internal.Duration `toml:"timeout"` - Log telegraf.Logger `toml:"-"` + ServerKey string `toml:"server_key"` + AmonInstance string `toml:"amon_instance"` + Timeout config.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` client *http.Client } @@ -51,7 +52,7 @@ func (a *Amon) Connect() error { Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: a.Timeout.Duration, + Timeout: time.Duration(a.Timeout), } return nil } diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index 8bf469a590248..95da1f99b0f9f 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -6,6 +6,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" @@ -50,7 +51,7 @@ type AMQP struct { RetentionPolicy string `toml:"retention_policy"` // deprecated in 1.7; use headers Precision string `toml:"precision"` // deprecated; has no effect Headers map[string]string `toml:"headers"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` UseBatchFormat bool `toml:"use_batch_format"` ContentEncoding string `toml:"content_encoding"` Log telegraf.Logger `toml:"-"` @@ -319,7 +320,7 @@ func (q *AMQP) makeClientConfig() (*ClientConfig, error) { exchangeType: q.ExchangeType, exchangePassive: q.ExchangePassive, encoding: q.ContentEncoding, - timeout: q.Timeout.Duration, + timeout: time.Duration(q.Timeout), } switch q.ExchangeDurability { @@ -397,7 +398,7 @@ func init() { AuthMethod: DefaultAuthMethod, Database: DefaultDatabase, RetentionPolicy: DefaultRetentionPolicy, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), connect: connect, } }) diff --git a/plugins/outputs/amqp/amqp_test.go b/plugins/outputs/amqp/amqp_test.go index 537dd7d049b8c..05c25ea515194 100644 --- a/plugins/outputs/amqp/amqp_test.go +++ b/plugins/outputs/amqp/amqp_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/streadway/amqp" "github.com/stretchr/testify/require" ) @@ -53,24 +53,24 @@ func TestConnect(t *testing.T) { AuthMethod: DefaultAuthMethod, Database: DefaultDatabase, RetentionPolicy: DefaultRetentionPolicy, - Timeout: internal.Duration{Duration: time.Second * 5}, - connect: func(config *ClientConfig) (Client, error) { + Timeout: config.Duration(time.Second * 5), + connect: func(_ *ClientConfig) (Client, error) { return NewMockClient(), nil }, }, errFunc: func(t *testing.T, output *AMQP, err error) { - config := output.config - require.Equal(t, []string{DefaultURL}, config.brokers) - require.Equal(t, "", config.exchange) - require.Equal(t, "topic", config.exchangeType) - require.Equal(t, false, config.exchangePassive) - require.Equal(t, true, config.exchangeDurable) - require.Equal(t, amqp.Table(nil), config.exchangeArguments) + cfg := output.config + require.Equal(t, []string{DefaultURL}, cfg.brokers) + require.Equal(t, "", cfg.exchange) + require.Equal(t, "topic", cfg.exchangeType) + require.Equal(t, false, cfg.exchangePassive) + require.Equal(t, true, cfg.exchangeDurable) + require.Equal(t, amqp.Table(nil), cfg.exchangeArguments) require.Equal(t, amqp.Table{ "database": DefaultDatabase, "retention_policy": DefaultRetentionPolicy, - }, config.headers) - require.Equal(t, amqp.Transient, config.deliveryMode) + }, cfg.headers) + require.Equal(t, amqp.Transient, cfg.deliveryMode) require.NoError(t, err) }, }, @@ -80,15 +80,15 @@ func TestConnect(t *testing.T) { Headers: map[string]string{ "foo": "bar", }, - connect: func(config *ClientConfig) (Client, error) { + connect: func(_ *ClientConfig) (Client, error) { return NewMockClient(), nil }, }, errFunc: func(t *testing.T, output *AMQP, err error) { - config := output.config + cfg := output.config require.Equal(t, amqp.Table{ "foo": "bar", - }, config.headers) + }, cfg.headers) require.NoError(t, err) }, }, @@ -98,15 +98,15 @@ func TestConnect(t *testing.T) { ExchangeArguments: map[string]string{ "foo": "bar", }, - connect: func(config *ClientConfig) (Client, error) { + connect: func(_ *ClientConfig) (Client, error) { return NewMockClient(), nil }, }, errFunc: func(t *testing.T, output *AMQP, err error) { - config := output.config + cfg := output.config require.Equal(t, amqp.Table{ "foo": "bar", - }, config.exchangeArguments) + }, cfg.exchangeArguments) require.NoError(t, err) }, }, @@ -116,18 +116,18 @@ func TestConnect(t *testing.T) { URL: "amqp://foo:bar@localhost", Username: "telegraf", Password: "pa$$word", - connect: func(config *ClientConfig) (Client, error) { + connect: func(_ *ClientConfig) (Client, error) { return NewMockClient(), nil }, }, errFunc: func(t *testing.T, output *AMQP, err error) { - config := output.config + cfg := output.config require.Equal(t, []amqp.Authentication{ &amqp.PlainAuth{ Username: "telegraf", Password: "pa$$word", }, - }, config.auth) + }, cfg.auth) require.NoError(t, err) }, @@ -136,13 +136,13 @@ func TestConnect(t *testing.T) { name: "url support", output: &AMQP{ URL: DefaultURL, - connect: func(config *ClientConfig) (Client, error) { + connect: func(_ *ClientConfig) (Client, error) { return NewMockClient(), nil }, }, errFunc: func(t *testing.T, output *AMQP, err error) { - config := output.config - require.Equal(t, []string{DefaultURL}, config.brokers) + cfg := output.config + require.Equal(t, []string{DefaultURL}, cfg.brokers) require.NoError(t, err) }, }, diff --git a/plugins/outputs/application_insights/application_insights.go b/plugins/outputs/application_insights/application_insights.go index 6bcb924865780..950a9fcf7e3ff 100644 --- a/plugins/outputs/application_insights/application_insights.go +++ b/plugins/outputs/application_insights/application_insights.go @@ -8,7 +8,7 @@ import ( "github.com/Microsoft/ApplicationInsights-Go/appinsights" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -24,7 +24,7 @@ type DiagnosticsMessageSubscriber interface { type ApplicationInsights struct { InstrumentationKey string `toml:"instrumentation_key"` EndpointURL string `toml:"endpoint_url"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` EnableDiagnosticLogging bool `toml:"enable_diagnostic_logging"` ContextTagSources map[string]string `toml:"context_tag_sources"` Log telegraf.Logger `toml:"-"` @@ -112,8 +112,8 @@ func (a *ApplicationInsights) Close() error { select { case <-a.transmitter.Close(): a.Log.Info("Closed") - case <-time.After(a.Timeout.Duration): - a.Log.Warnf("Close operation timed out after %v", a.Timeout.Duration) + case <-time.After(time.Duration(a.Timeout)): + a.Log.Warnf("Close operation timed out after %v", time.Duration(a.Timeout)) } return nil @@ -337,7 +337,7 @@ func toInt(value interface{}) (int, error) { func init() { outputs.Add("application_insights", func() telegraf.Output { return &ApplicationInsights{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), diagMsgSubscriber: diagnosticsMessageSubscriber{}, // It is very common to set Cloud.RoleName and Cloud.RoleInstance context properties, hence initial capacity of two ContextTagSources: make(map[string]string, 2), diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index 8d4fb755646f5..ce36c141b2b52 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -9,7 +9,7 @@ import ( "github.com/Microsoft/ApplicationInsights-Go/appinsights" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/application_insights/mocks" "github.com/stretchr/testify/assert" @@ -25,7 +25,7 @@ func TestConnectFailsIfNoIkey(t *testing.T) { ai := ApplicationInsights{ transmitter: transmitter, // Very long timeout to ensure we do not rely on timeouts for closing the transmitter - Timeout: internal.Duration{Duration: time.Hour}, + Timeout: config.Duration(time.Hour), Log: testutil.Logger{}, } @@ -41,7 +41,7 @@ func TestOutputCloseTimesOut(t *testing.T) { ai := ApplicationInsights{ transmitter: transmitter, - Timeout: internal.Duration{Duration: time.Millisecond * 50}, + Timeout: config.Duration(time.Millisecond * 50), Log: testutil.Logger{}, } @@ -66,7 +66,7 @@ func TestCloseRemovesDiagMsgListener(t *testing.T) { ai := ApplicationInsights{ transmitter: transmitter, - Timeout: internal.Duration{Duration: time.Hour}, + Timeout: config.Duration(time.Hour), EnableDiagnosticLogging: true, diagMsgSubscriber: diagMsgSubscriber, InstrumentationKey: "1234", // Fake, but necessary to enable tracking diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index c295b553f963a..193c9b2c40c4f 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -16,7 +16,7 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure/auth" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/selfstat" @@ -25,7 +25,7 @@ import ( // AzureMonitor allows publishing of metrics to the Azure Monitor custom metrics // service type AzureMonitor struct { - Timeout internal.Duration + Timeout config.Duration NamespacePrefix string `toml:"namespace_prefix"` StringsAsDimensions bool `toml:"strings_as_dimensions"` Region string `toml:"region"` @@ -144,15 +144,15 @@ func (a *AzureMonitor) SampleConfig() string { func (a *AzureMonitor) Connect() error { a.cache = make(map[time.Time]map[uint64]*aggregate, 36) - if a.Timeout.Duration == 0 { - a.Timeout.Duration = defaultRequestTimeout + if a.Timeout == 0 { + a.Timeout = config.Duration(defaultRequestTimeout) } a.client = &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: a.Timeout.Duration, + Timeout: time.Duration(a.Timeout), } var err error diff --git a/plugins/outputs/bigquery/bigquery.go b/plugins/outputs/bigquery/bigquery.go index fd1f3c7bc6f4b..41af19d38d88e 100644 --- a/plugins/outputs/bigquery/bigquery.go +++ b/plugins/outputs/bigquery/bigquery.go @@ -13,15 +13,15 @@ import ( "google.golang.org/api/option" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" ) const timeStampFieldName = "timestamp" -var defaultTimeout = internal.Duration{Duration: 5 * time.Second} +var defaultTimeout = config.Duration(5 * time.Second) -const sampleConfig = ` +const sampleConfig = ` ## Credentials File credentials_file = "/path/to/service/account/key.json" @@ -43,8 +43,8 @@ type BigQuery struct { Project string `toml:"project"` Dataset string `toml:"dataset"` - Timeout internal.Duration `toml:"timeout"` - ReplaceHyphenTo string `toml:"replace_hyphen_to"` + Timeout config.Duration `toml:"timeout"` + ReplaceHyphenTo string `toml:"replace_hyphen_to"` Log telegraf.Logger `toml:"-"` @@ -85,7 +85,7 @@ func (s *BigQuery) setUpDefaultClient() error { var credentialsOption option.ClientOption ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, s.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(s.Timeout)) defer cancel() if s.CredentialsFile != "" { @@ -205,7 +205,7 @@ func valueToBqType(v interface{}) bigquery.FieldType { func (s *BigQuery) insertToTable(metricName string, metrics []bigquery.ValueSaver) { ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, s.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(s.Timeout)) defer cancel() tableName := s.metricToTable(metricName) diff --git a/plugins/outputs/bigquery/bigquery_test.go b/plugins/outputs/bigquery/bigquery_test.go index 34d889fcb4c14..dd029f23f7ff6 100644 --- a/plugins/outputs/bigquery/bigquery_test.go +++ b/plugins/outputs/bigquery/bigquery_test.go @@ -10,7 +10,7 @@ import ( "time" "cloud.google.com/go/bigquery" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" "google.golang.org/api/option" @@ -21,7 +21,7 @@ const ( ) var testingHost string -var testDuration = internal.Duration{Duration: 5 * time.Second} +var testDuration = config.Duration(5 * time.Second) var receivedBody map[string]json.RawMessage type Row struct { diff --git a/plugins/outputs/cloud_pubsub/pubsub.go b/plugins/outputs/cloud_pubsub/pubsub.go index 9fd89eadf0897..38f037dd13296 100644 --- a/plugins/outputs/cloud_pubsub/pubsub.go +++ b/plugins/outputs/cloud_pubsub/pubsub.go @@ -5,9 +5,11 @@ import ( "encoding/base64" "fmt" "sync" + "time" "cloud.google.com/go/pubsub" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" @@ -71,12 +73,12 @@ type PubSub struct { Topic string `toml:"topic"` Attributes map[string]string `toml:"attributes"` - SendBatched bool `toml:"send_batched"` - PublishCountThreshold int `toml:"publish_count_threshold"` - PublishByteThreshold int `toml:"publish_byte_threshold"` - PublishNumGoroutines int `toml:"publish_num_go_routines"` - PublishTimeout internal.Duration `toml:"publish_timeout"` - Base64Data bool `toml:"base64_data"` + SendBatched bool `toml:"send_batched"` + PublishCountThreshold int `toml:"publish_count_threshold"` + PublishByteThreshold int `toml:"publish_byte_threshold"` + PublishNumGoroutines int `toml:"publish_num_go_routines"` + PublishTimeout config.Duration `toml:"publish_timeout"` + Base64Data bool `toml:"base64_data"` Log telegraf.Logger `toml:"-"` @@ -190,7 +192,7 @@ func (ps *PubSub) publishSettings() pubsub.PublishSettings { settings.NumGoroutines = ps.PublishNumGoroutines } - if ps.PublishTimeout.Duration > 0 { + if time.Duration(ps.PublishTimeout) > 0 { settings.CountThreshold = 1 } diff --git a/plugins/outputs/cloud_pubsub/topic_stubbed.go b/plugins/outputs/cloud_pubsub/topic_stubbed.go index e23a765366704..c66e573a60115 100644 --- a/plugins/outputs/cloud_pubsub/topic_stubbed.go +++ b/plugins/outputs/cloud_pubsub/topic_stubbed.go @@ -13,7 +13,7 @@ import ( "cloud.google.com/go/pubsub" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/serializers" "google.golang.org/api/support/bundler" @@ -84,7 +84,7 @@ func getTestResources(tT *testing.T, settings pubsub.PublishSettings, testM []te PublishCountThreshold: settings.CountThreshold, PublishByteThreshold: settings.ByteThreshold, PublishNumGoroutines: settings.NumGoroutines, - PublishTimeout: internal.Duration{Duration: settings.Timeout}, + PublishTimeout: config.Duration(settings.Timeout), } ps.SetSerializer(s) diff --git a/plugins/outputs/cratedb/cratedb.go b/plugins/outputs/cratedb/cratedb.go index 1c8aff47aa248..6e43b58f71563 100644 --- a/plugins/outputs/cratedb/cratedb.go +++ b/plugins/outputs/cratedb/cratedb.go @@ -12,7 +12,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" _ "github.com/jackc/pgx/stdlib" //to register stdlib from PostgreSQL Driver and Toolkit ) @@ -21,7 +21,7 @@ const MaxInt64 = int64(^uint64(0) >> 1) type CrateDB struct { URL string - Timeout internal.Duration + Timeout config.Duration Table string TableCreate bool `toml:"table_create"` DB *sql.DB @@ -55,7 +55,7 @@ CREATE TABLE IF NOT EXISTS ` + c.Table + ` ( PRIMARY KEY ("timestamp", "hash_id","day") ) PARTITIONED BY("day"); ` - ctx, cancel := context.WithTimeout(context.Background(), c.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.Timeout)) defer cancel() if _, err := db.ExecContext(ctx, sql); err != nil { return err @@ -66,7 +66,7 @@ CREATE TABLE IF NOT EXISTS ` + c.Table + ` ( } func (c *CrateDB) Write(metrics []telegraf.Metric) error { - ctx, cancel := context.WithTimeout(context.Background(), c.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.Timeout)) defer cancel() if sql, err := insertSQL(c.Table, metrics); err != nil { return err @@ -233,7 +233,7 @@ func (c *CrateDB) Close() error { func init() { outputs.Add("cratedb", func() telegraf.Output { return &CrateDB{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/outputs/cratedb/cratedb_test.go b/plugins/outputs/cratedb/cratedb_test.go index 43297afe2e5e7..8d7fe1c80ae0a 100644 --- a/plugins/outputs/cratedb/cratedb_test.go +++ b/plugins/outputs/cratedb/cratedb_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -37,7 +37,7 @@ func TestConnectAndWriteIntegration(t *testing.T) { c := &CrateDB{ URL: url, Table: table, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), TableCreate: true, } diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 6b5d4437b63b1..0e019de0eb334 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -8,17 +8,18 @@ import ( "net/http" "net/url" "strings" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" ) type Datadog struct { - Apikey string `toml:"apikey"` - Timeout internal.Duration `toml:"timeout"` - URL string `toml:"url"` - Log telegraf.Logger `toml:"-"` + Apikey string `toml:"apikey"` + Timeout config.Duration `toml:"timeout"` + URL string `toml:"url"` + Log telegraf.Logger `toml:"-"` client *http.Client } @@ -58,7 +59,7 @@ func (d *Datadog) Connect() error { Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: d.Timeout.Duration, + Timeout: time.Duration(d.Timeout), } return nil } diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 2c57d6fc584a0..0b13f5886fd83 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -13,7 +13,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -31,12 +31,12 @@ var ( // Dynatrace Configuration for the Dynatrace output plugin type Dynatrace struct { - URL string `toml:"url"` - APIToken string `toml:"api_token"` - Prefix string `toml:"prefix"` - Log telegraf.Logger `toml:"-"` - Timeout internal.Duration `toml:"timeout"` - AddCounterMetrics []string `toml:"additional_counters"` + URL string `toml:"url"` + APIToken string `toml:"api_token"` + Prefix string `toml:"prefix"` + Log telegraf.Logger `toml:"-"` + Timeout config.Duration `toml:"timeout"` + AddCounterMetrics []string `toml:"additional_counters"` State map[string]string SendCounter int @@ -312,7 +312,7 @@ func (d *Dynatrace) Init() error { Proxy: http.ProxyFromEnvironment, TLSClientConfig: tlsCfg, }, - Timeout: d.Timeout.Duration, + Timeout: time.Duration(d.Timeout), } return nil } @@ -320,7 +320,7 @@ func (d *Dynatrace) Init() error { func init() { outputs.Add("dynatrace", func() telegraf.Output { return &Dynatrace{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), SendCounter: 0, } }) diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index 45f4f24d7d73e..6dc558836a59c 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -3,7 +3,7 @@ package dynatrace import ( "encoding/json" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -22,7 +22,7 @@ func TestNilMetrics(t *testing.T) { defer ts.Close() d := &Dynatrace{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } d.URL = ts.URL diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go index c41495ec07df5..6d85b7e97742f 100644 --- a/plugins/outputs/elasticsearch/elasticsearch.go +++ b/plugins/outputs/elasticsearch/elasticsearch.go @@ -14,7 +14,7 @@ import ( "crypto/sha256" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "gopkg.in/olivere/elastic.v5" @@ -28,8 +28,8 @@ type Elasticsearch struct { Username string Password string EnableSniffer bool - Timeout internal.Duration - HealthCheckInterval internal.Duration + Timeout config.Duration + HealthCheckInterval config.Duration ManageTemplate bool TemplateName string OverwriteTemplate bool @@ -174,7 +174,7 @@ func (a *Elasticsearch) Connect() error { return fmt.Errorf("Elasticsearch urls or index_name is not defined") } - ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(a.Timeout)) defer cancel() var clientOptions []elastic.ClientOptionFunc @@ -189,14 +189,14 @@ func (a *Elasticsearch) Connect() error { httpclient := &http.Client{ Transport: tr, - Timeout: a.Timeout.Duration, + Timeout: time.Duration(a.Timeout), } clientOptions = append(clientOptions, elastic.SetHttpClient(httpclient), elastic.SetSniff(a.EnableSniffer), elastic.SetURL(a.URLs...), - elastic.SetHealthcheckInterval(a.HealthCheckInterval.Duration), + elastic.SetHealthcheckInterval(time.Duration(a.HealthCheckInterval)), ) if a.Username != "" && a.Password != "" { @@ -205,7 +205,7 @@ func (a *Elasticsearch) Connect() error { ) } - if a.HealthCheckInterval.Duration == 0 { + if time.Duration(a.HealthCheckInterval) == 0 { clientOptions = append(clientOptions, elastic.SetHealthcheck(false), ) @@ -295,7 +295,7 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { bulkRequest.Add(br) } - ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(a.Timeout)) defer cancel() res, err := bulkRequest.Do(ctx) @@ -438,8 +438,8 @@ func (a *Elasticsearch) Close() error { func init() { outputs.Add("elasticsearch", func() telegraf.Output { return &Elasticsearch{ - Timeout: internal.Duration{Duration: time.Second * 5}, - HealthCheckInterval: internal.Duration{Duration: time.Second * 10}, + Timeout: config.Duration(time.Second * 5), + HealthCheckInterval: config.Duration(time.Second * 10), } }) } diff --git a/plugins/outputs/elasticsearch/elasticsearch_test.go b/plugins/outputs/elasticsearch/elasticsearch_test.go index d21d191cb0e52..baf6e3162555c 100644 --- a/plugins/outputs/elasticsearch/elasticsearch_test.go +++ b/plugins/outputs/elasticsearch/elasticsearch_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -21,11 +21,11 @@ func TestConnectAndWriteIntegration(t *testing.T) { e := &Elasticsearch{ URLs: urls, IndexName: "test-%Y.%m.%d", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), ManageTemplate: true, TemplateName: "telegraf", OverwriteTemplate: false, - HealthCheckInterval: internal.Duration{Duration: time.Second * 10}, + HealthCheckInterval: config.Duration(time.Second * 10), } // Verify that we can connect to Elasticsearch @@ -49,7 +49,7 @@ func TestTemplateManagementEmptyTemplateIntegration(t *testing.T) { e := &Elasticsearch{ URLs: urls, IndexName: "test-%Y.%m.%d", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), ManageTemplate: true, TemplateName: "", OverwriteTemplate: true, @@ -69,13 +69,13 @@ func TestTemplateManagementIntegration(t *testing.T) { e := &Elasticsearch{ URLs: urls, IndexName: "test-%Y.%m.%d", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), ManageTemplate: true, TemplateName: "telegraf", OverwriteTemplate: true, } - ctx, cancel := context.WithTimeout(context.Background(), e.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout)) defer cancel() err := e.Connect() @@ -95,7 +95,7 @@ func TestTemplateInvalidIndexPatternIntegration(t *testing.T) { e := &Elasticsearch{ URLs: urls, IndexName: "{{host}}-%Y.%m.%d", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), ManageTemplate: true, TemplateName: "telegraf", OverwriteTemplate: true, diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index 25637bd1984c0..b0313a382045a 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" @@ -19,8 +20,8 @@ const maxStderrBytes = 512 // Exec defines the exec output plugin. type Exec struct { - Command []string `toml:"command"` - Timeout internal.Duration `toml:"timeout"` + Command []string `toml:"command"` + Timeout config.Duration `toml:"timeout"` runner Runner serializer serializers.Serializer @@ -82,7 +83,7 @@ func (e *Exec) Write(metrics []telegraf.Metric) error { return nil } - return e.runner.Run(e.Timeout.Duration, e.Command, &buffer) + return e.runner.Run(time.Duration(e.Timeout), e.Command, &buffer) } // Runner provides an interface for running exec.Cmd. @@ -155,7 +156,7 @@ func init() { outputs.Add("exec", func() telegraf.Output { return &Exec{ runner: &CommandRunner{}, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/outputs/exec/exec_test.go b/plugins/outputs/exec/exec_test.go index f57bf50a1b571..e75e1829d3894 100644 --- a/plugins/outputs/exec/exec_test.go +++ b/plugins/outputs/exec/exec_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -52,7 +52,7 @@ func TestExec(t *testing.T) { t.Run(tt.name, func(t *testing.T) { e := &Exec{ Command: tt.command, - Timeout: internal.Duration{Duration: time.Second}, + Timeout: config.Duration(time.Second), runner: &CommandRunner{}, } diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 16cb923ee2db5..0c8ff903e97bc 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -4,21 +4,22 @@ import ( "fmt" "io" "os" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/rotate" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) type File struct { - Files []string `toml:"files"` - RotationInterval internal.Duration `toml:"rotation_interval"` - RotationMaxSize internal.Size `toml:"rotation_max_size"` - RotationMaxArchives int `toml:"rotation_max_archives"` - UseBatchFormat bool `toml:"use_batch_format"` - Log telegraf.Logger `toml:"-"` + Files []string `toml:"files"` + RotationInterval config.Duration `toml:"rotation_interval"` + RotationMaxSize config.Size `toml:"rotation_max_size"` + RotationMaxArchives int `toml:"rotation_max_archives"` + UseBatchFormat bool `toml:"use_batch_format"` + Log telegraf.Logger `toml:"-"` writer io.Writer closers []io.Closer @@ -69,7 +70,7 @@ func (f *File) Connect() error { writers = append(writers, os.Stdout) } else { of, err := rotate.NewFileWriter( - file, f.RotationInterval.Duration, f.RotationMaxSize.Size, f.RotationMaxArchives) + file, time.Duration(f.RotationInterval), int64(f.RotationMaxSize), f.RotationMaxArchives) if err != nil { return err } diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go index 7447c404c9b2d..4541659cec030 100644 --- a/plugins/outputs/health/health.go +++ b/plugins/outputs/health/health.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" @@ -67,11 +68,11 @@ type Checker interface { } type Health struct { - ServiceAddress string `toml:"service_address"` - ReadTimeout internal.Duration `toml:"read_timeout"` - WriteTimeout internal.Duration `toml:"write_timeout"` - BasicUsername string `toml:"basic_username"` - BasicPassword string `toml:"basic_password"` + ServiceAddress string `toml:"service_address"` + ReadTimeout config.Duration `toml:"read_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` tlsint.ServerConfig Compares []*Compares `toml:"compares"` @@ -141,8 +142,8 @@ func (h *Health) Connect() error { h.server = &http.Server{ Addr: h.ServiceAddress, Handler: authHandler(h), - ReadTimeout: h.ReadTimeout.Duration, - WriteTimeout: h.WriteTimeout.Duration, + ReadTimeout: time.Duration(h.ReadTimeout), + WriteTimeout: time.Duration(h.WriteTimeout), TLSConfig: h.tlsConf, } @@ -257,8 +258,8 @@ func (h *Health) isHealthy() bool { func NewHealth() *Health { return &Health{ ServiceAddress: defaultServiceAddress, - ReadTimeout: internal.Duration{Duration: defaultReadTimeout}, - WriteTimeout: internal.Duration{Duration: defaultWriteTimeout}, + ReadTimeout: config.Duration(defaultReadTimeout), + WriteTimeout: config.Duration(defaultWriteTimeout), healthy: true, } } diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 53d51f3e7c0e1..82ae230eceb0f 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" @@ -79,7 +80,7 @@ const ( type HTTP struct { URL string `toml:"url"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` Method string `toml:"method"` Username string `toml:"username"` Password string `toml:"password"` @@ -89,7 +90,7 @@ type HTTP struct { TokenURL string `toml:"token_url"` Scopes []string `toml:"scopes"` ContentEncoding string `toml:"content_encoding"` - IdleConnTimeout internal.Duration `toml:"idle_conn_timeout"` + IdleConnTimeout config.Duration `toml:"idle_conn_timeout"` tls.ClientConfig client *http.Client @@ -110,9 +111,9 @@ func (h *HTTP) createClient(ctx context.Context) (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, Proxy: http.ProxyFromEnvironment, - IdleConnTimeout: h.IdleConnTimeout.Duration, + IdleConnTimeout: time.Duration(h.IdleConnTimeout), }, - Timeout: h.Timeout.Duration, + Timeout: time.Duration(h.Timeout), } if h.ClientID != "" && h.ClientSecret != "" && h.TokenURL != "" { @@ -138,8 +139,8 @@ func (h *HTTP) Connect() error { return fmt.Errorf("invalid method [%s] %s", h.URL, h.Method) } - if h.Timeout.Duration == 0 { - h.Timeout.Duration = defaultClientTimeout + if h.Timeout == 0 { + h.Timeout = config.Duration(defaultClientTimeout) } ctx := context.Background() @@ -228,7 +229,7 @@ func (h *HTTP) write(reqBody []byte) error { func init() { outputs.Add("http", func() telegraf.Output { return &HTTP{ - Timeout: internal.Duration{Duration: defaultClientTimeout}, + Timeout: config.Duration(defaultClientTimeout), Method: defaultMethod, URL: defaultURL, } diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 49ca7d6435249..0bb4c01cc6996 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers/influx" @@ -43,8 +43,8 @@ type InfluxDB struct { ExcludeRetentionPolicyTag bool `toml:"exclude_retention_policy_tag"` UserAgent string `toml:"user_agent"` WriteConsistency string `toml:"write_consistency"` - Timeout internal.Duration `toml:"timeout"` - UDPPayload internal.Size `toml:"udp_payload"` + Timeout config.Duration `toml:"timeout"` + UDPPayload config.Size `toml:"udp_payload"` HTTPProxy string `toml:"http_proxy"` HTTPHeaders map[string]string `toml:"http_headers"` ContentEncoding string `toml:"content_encoding"` @@ -239,7 +239,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { func (i *InfluxDB) udpClient(url *url.URL) (Client, error) { config := &UDPConfig{ URL: url, - MaxPayloadSize: int(i.UDPPayload.Size), + MaxPayloadSize: int(i.UDPPayload), Serializer: i.newSerializer(), Log: i.Log, } @@ -260,7 +260,7 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) config := &HTTPConfig{ URL: url, - Timeout: i.Timeout.Duration, + Timeout: time.Duration(i.Timeout), TLSConfig: tlsConfig, UserAgent: i.UserAgent, Username: i.Username, @@ -308,7 +308,7 @@ func (i *InfluxDB) newSerializer() *influx.Serializer { func init() { outputs.Add("influxdb", func() telegraf.Output { return &InfluxDB{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), CreateHTTPClientF: func(config *HTTPConfig) (Client, error) { return NewHTTPClient(*config) }, diff --git a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go index 476211069af7b..9de246ec7f3f7 100644 --- a/plugins/outputs/influxdb/influxdb_test.go +++ b/plugins/outputs/influxdb/influxdb_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs/influxdb" @@ -95,7 +95,7 @@ func TestConnectUDPConfig(t *testing.T) { output := influxdb.InfluxDB{ URLs: []string{"udp://localhost:8089"}, - UDPPayload: internal.Size{Size: 42}, + UDPPayload: config.Size(42), CreateUDPClientF: func(config *influxdb.UDPConfig) (influxdb.Client, error) { actual = config @@ -120,7 +120,7 @@ func TestConnectHTTPConfig(t *testing.T) { Database: "telegraf", RetentionPolicy: "default", WriteConsistency: "any", - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), Username: "guy", Password: "smiley", UserAgent: "telegraf", @@ -153,7 +153,7 @@ func TestConnectHTTPConfig(t *testing.T) { require.Equal(t, output.URLs[0], actual.URL.String()) require.Equal(t, output.UserAgent, actual.UserAgent) - require.Equal(t, output.Timeout.Duration, actual.Timeout) + require.Equal(t, time.Duration(output.Timeout), actual.Timeout) require.Equal(t, output.Username, actual.Username) require.Equal(t, output.Password, actual.Password) require.Equal(t, output.HTTPProxy, actual.Proxy.String()) diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go index c7dd20a5ca0db..e188ddbae94d1 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers/influx" @@ -87,7 +87,7 @@ type InfluxDB struct { Bucket string `toml:"bucket"` BucketTag string `toml:"bucket_tag"` ExcludeBucketTag bool `toml:"exclude_bucket_tag"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` HTTPHeaders map[string]string `toml:"http_headers"` HTTPProxy string `toml:"http_proxy"` UserAgent string `toml:"user_agent"` @@ -183,7 +183,7 @@ func (i *InfluxDB) getHTTPClient(url *url.URL, proxy *url.URL) (Client, error) { Bucket: i.Bucket, BucketTag: i.BucketTag, ExcludeBucketTag: i.ExcludeBucketTag, - Timeout: i.Timeout.Duration, + Timeout: time.Duration(i.Timeout), Headers: i.HTTPHeaders, Proxy: proxy, UserAgent: i.UserAgent, @@ -212,7 +212,7 @@ func (i *InfluxDB) newSerializer() *influx.Serializer { func init() { outputs.Add("influxdb_v2", func() telegraf.Output { return &InfluxDB{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), ContentEncoding: "gzip", } }) diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index ab6f05f866f4e..87148ed08d9d9 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -7,9 +7,10 @@ import ( "net" "regexp" "strings" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/graphite" @@ -21,14 +22,14 @@ var ( ) type Instrumental struct { - Host string `toml:"host"` - APIToken string `toml:"api_token"` - Prefix string `toml:"prefix"` - DataFormat string `toml:"data_format"` - Template string `toml:"template"` - Templates []string `toml:"templates"` - Timeout internal.Duration `toml:"timeout"` - Debug bool `toml:"debug"` + Host string `toml:"host"` + APIToken string `toml:"api_token"` + Prefix string `toml:"prefix"` + DataFormat string `toml:"data_format"` + Template string `toml:"template"` + Templates []string `toml:"templates"` + Timeout config.Duration `toml:"timeout"` + Debug bool `toml:"debug"` Log telegraf.Logger `toml:"-"` @@ -57,7 +58,7 @@ var sampleConfig = ` ` func (i *Instrumental) Connect() error { - connection, err := net.DialTimeout("tcp", i.Host+":8000", i.Timeout.Duration) + connection, err := net.DialTimeout("tcp", i.Host+":8000", time.Duration(i.Timeout)) if err != nil { i.conn = nil diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 1d9f6725206a8..d4aa3e6e92bb7 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -7,22 +7,23 @@ import ( "io/ioutil" "net/http" "regexp" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers/graphite" ) // Librato structure for configuration and client type Librato struct { - APIUser string `toml:"api_user"` - APIToken string `toml:"api_token"` - Debug bool `toml:"debug"` - SourceTag string `toml:"source_tag"` // Deprecated, keeping for backward-compatibility - Timeout internal.Duration `toml:"timeout"` - Template string `toml:"template"` - Log telegraf.Logger `toml:"-"` + APIUser string `toml:"api_user"` + APIToken string `toml:"api_token"` + Debug bool `toml:"debug"` + SourceTag string `toml:"source_tag"` // Deprecated, keeping for backward-compatibility + Timeout config.Duration `toml:"timeout"` + Template string `toml:"template"` + Log telegraf.Logger `toml:"-"` APIUrl string client *http.Client @@ -83,7 +84,7 @@ func (l *Librato) Connect() error { Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: l.Timeout.Duration, + Timeout: time.Duration(l.Timeout), } return nil } diff --git a/plugins/outputs/logzio/logzio.go b/plugins/outputs/logzio/logzio.go index b0ca8c7b3bc80..caec293b1c46f 100644 --- a/plugins/outputs/logzio/logzio.go +++ b/plugins/outputs/logzio/logzio.go @@ -10,7 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -39,10 +39,10 @@ var sampleConfig = ` ` type Logzio struct { - Log telegraf.Logger `toml:"-"` - Timeout internal.Duration `toml:"timeout"` - Token string `toml:"token"` - URL string `toml:"url"` + Log telegraf.Logger `toml:"-"` + Timeout config.Duration `toml:"timeout"` + Token string `toml:"token"` + URL string `toml:"url"` tls.ClientConfig client *http.Client @@ -77,7 +77,7 @@ func (l *Logzio) Connect() error { Proxy: http.ProxyFromEnvironment, TLSClientConfig: tlsCfg, }, - Timeout: l.Timeout.Duration, + Timeout: time.Duration(l.Timeout), } return nil @@ -169,7 +169,7 @@ func init() { outputs.Add("logzio", func() telegraf.Output { return &Logzio{ URL: defaultLogzioURL, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/outputs/loki/loki.go b/plugins/outputs/loki/loki.go index c097d21fd8bf0..21cc66776d682 100644 --- a/plugins/outputs/loki/loki.go +++ b/plugins/outputs/loki/loki.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" @@ -52,7 +53,7 @@ var sampleConfig = ` type Loki struct { Domain string `toml:"domain"` Endpoint string `toml:"endpoint"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` Username string `toml:"username"` Password string `toml:"password"` Headers map[string]string `toml:"headers"` @@ -86,7 +87,7 @@ func (l *Loki) createClient(ctx context.Context) (*http.Client, error) { TLSClientConfig: tlsCfg, Proxy: http.ProxyFromEnvironment, }, - Timeout: l.Timeout.Duration, + Timeout: time.Duration(l.Timeout), } if l.ClientID != "" && l.ClientSecret != "" && l.TokenURL != "" { @@ -114,8 +115,8 @@ func (l *Loki) Connect() (err error) { l.url = fmt.Sprintf("%s%s", l.Domain, l.Endpoint) - if l.Timeout.Duration == 0 { - l.Timeout.Duration = defaultClientTimeout + if l.Timeout == 0 { + l.Timeout = config.Duration(defaultClientTimeout) } ctx := context.Background() diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 0e07b1bca8ab7..584a79ffd2ef1 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -9,6 +9,7 @@ import ( paho "github.com/eclipse/paho.mqtt.golang" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" @@ -66,7 +67,7 @@ type MQTT struct { Username string Password string Database string - Timeout internal.Duration + Timeout config.Duration TopicPrefix string QoS int `toml:"qos"` ClientID string `toml:"client_id"` @@ -180,7 +181,7 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error { func (m *MQTT) publish(topic string, body []byte) error { token := m.client.Publish(topic, byte(m.QoS), m.Retain, body) - token.WaitTimeout(m.Timeout.Duration) + token.WaitTimeout(time.Duration(m.Timeout)) if token.Error() != nil { return token.Error() } @@ -191,10 +192,10 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) { opts := paho.NewClientOptions() opts.KeepAlive = 0 - if m.Timeout.Duration < time.Second { - m.Timeout.Duration = 5 * time.Second + if m.Timeout < config.Duration(time.Second) { + m.Timeout = config.Duration(5 * time.Second) } - opts.WriteTimeout = m.Timeout.Duration + opts.WriteTimeout = time.Duration(m.Timeout) if m.ClientID != "" { opts.SetClientID(m.ClientID) diff --git a/plugins/outputs/newrelic/newrelic.go b/plugins/outputs/newrelic/newrelic.go index ccd8b50d70b11..4f67c5de06914 100644 --- a/plugins/outputs/newrelic/newrelic.go +++ b/plugins/outputs/newrelic/newrelic.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" "github.com/newrelic/newrelic-telemetry-sdk-go/cumulative" "github.com/newrelic/newrelic-telemetry-sdk-go/telemetry" @@ -17,10 +17,10 @@ import ( // NewRelic nr structure type NewRelic struct { - InsightsKey string `toml:"insights_key"` - MetricPrefix string `toml:"metric_prefix"` - Timeout internal.Duration `toml:"timeout"` - HTTPProxy string `toml:"http_proxy"` + InsightsKey string `toml:"insights_key"` + MetricPrefix string `toml:"metric_prefix"` + Timeout config.Duration `toml:"timeout"` + HTTPProxy string `toml:"http_proxy"` harvestor *telemetry.Harvester dc *cumulative.DeltaCalculator @@ -67,7 +67,7 @@ func (nr *NewRelic) Connect() error { func(cfg *telemetry.Config) { cfg.Product = "NewRelic-Telegraf-Plugin" cfg.ProductVersion = "1.0" - cfg.HarvestTimeout = nr.Timeout.Duration + cfg.HarvestTimeout = time.Duration(nr.Timeout) cfg.Client = &nr.client cfg.ErrorLogger = func(e map[string]interface{}) { var errorString string @@ -161,7 +161,7 @@ func (nr *NewRelic) Write(metrics []telegraf.Metric) error { func init() { outputs.Add("newrelic", func() telegraf.Output { return &NewRelic{ - Timeout: internal.Duration{Duration: time.Second * 15}, + Timeout: config.Duration(time.Second * 15), } }) } diff --git a/plugins/outputs/newrelic/newrelic_test.go b/plugins/outputs/newrelic/newrelic_test.go index 2d679bf3cecbc..7071176fcfae8 100644 --- a/plugins/outputs/newrelic/newrelic_test.go +++ b/plugins/outputs/newrelic/newrelic_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/newrelic/newrelic-telemetry-sdk-go/telemetry" "github.com/stretchr/testify/assert" @@ -17,7 +17,7 @@ func TestBasic(t *testing.T) { nr := &NewRelic{ MetricPrefix: "Test", InsightsKey: "12345", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } if testing.Short() { t.Skip("skipping test in short mode.") @@ -161,7 +161,7 @@ func TestNewRelic_Connect(t *testing.T) { name: "Test: Insights key and Timeout", newrelic: &NewRelic{ InsightsKey: "12312133", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), }, wantErr: false, }, diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index b9ef7c3a6eb47..a37404f268056 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" @@ -23,7 +24,7 @@ import ( var ( defaultListen = ":9273" defaultPath = "/metrics" - defaultExpirationInterval = internal.Duration{Duration: 60 * time.Second} + defaultExpirationInterval = config.Duration(60 * time.Second) ) var sampleConfig = ` @@ -79,16 +80,16 @@ type Collector interface { } type PrometheusClient struct { - Listen string `toml:"listen"` - MetricVersion int `toml:"metric_version"` - BasicUsername string `toml:"basic_username"` - BasicPassword string `toml:"basic_password"` - IPRange []string `toml:"ip_range"` - ExpirationInterval internal.Duration `toml:"expiration_interval"` - Path string `toml:"path"` - CollectorsExclude []string `toml:"collectors_exclude"` - StringAsLabel bool `toml:"string_as_label"` - ExportTimestamp bool `toml:"export_timestamp"` + Listen string `toml:"listen"` + MetricVersion int `toml:"metric_version"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + IPRange []string `toml:"ip_range"` + ExpirationInterval config.Duration `toml:"expiration_interval"` + Path string `toml:"path"` + CollectorsExclude []string `toml:"collectors_exclude"` + StringAsLabel bool `toml:"string_as_label"` + ExportTimestamp bool `toml:"export_timestamp"` tlsint.ServerConfig Log telegraf.Logger `toml:"-"` @@ -133,13 +134,13 @@ func (p *PrometheusClient) Init() error { fallthrough case 1: p.Log.Warnf("Use of deprecated configuration: metric_version = 1; please update to metric_version = 2") - p.collector = v1.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel, p.Log) + p.collector = v1.NewCollector(time.Duration(p.ExpirationInterval), p.StringAsLabel, p.Log) err := registry.Register(p.collector) if err != nil { return err } case 2: - p.collector = v2.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel, p.ExportTimestamp) + p.collector = v2.NewCollector(time.Duration(p.ExpirationInterval), p.StringAsLabel, p.ExportTimestamp) err := registry.Register(p.collector) if err != nil { return err diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go index 9c202d8cf9af3..bad1e44a0c1a1 100644 --- a/plugins/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -10,21 +10,21 @@ import ( "github.com/amir/raidman" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" ) type Riemann struct { - URL string `toml:"url"` - TTL float32 `toml:"ttl"` - Separator string `toml:"separator"` - MeasurementAsAttribute bool `toml:"measurement_as_attribute"` - StringAsState bool `toml:"string_as_state"` - TagKeys []string `toml:"tag_keys"` - Tags []string `toml:"tags"` - DescriptionText string `toml:"description_text"` - Timeout internal.Duration `toml:"timeout"` - Log telegraf.Logger `toml:"-"` + URL string `toml:"url"` + TTL float32 `toml:"ttl"` + Separator string `toml:"separator"` + MeasurementAsAttribute bool `toml:"measurement_as_attribute"` + StringAsState bool `toml:"string_as_state"` + TagKeys []string `toml:"tag_keys"` + Tags []string `toml:"tags"` + DescriptionText string `toml:"description_text"` + Timeout config.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` client *raidman.Client } @@ -68,7 +68,7 @@ func (r *Riemann) Connect() error { return err } - client, err := raidman.DialWithTimeout(parsedURL.Scheme, parsedURL.Host, r.Timeout.Duration) + client, err := raidman.DialWithTimeout(parsedURL.Scheme, parsedURL.Host, time.Duration(r.Timeout)) if err != nil { r.client = nil return err @@ -217,7 +217,7 @@ func (r *Riemann) tags(tags map[string]string) []string { func init() { outputs.Add("riemann", func() telegraf.Output { return &Riemann{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/outputs/socket_writer/socket_writer.go b/plugins/outputs/socket_writer/socket_writer.go index dae7edc0e806f..2546faa6779d7 100644 --- a/plugins/outputs/socket_writer/socket_writer.go +++ b/plugins/outputs/socket_writer/socket_writer.go @@ -6,8 +6,10 @@ import ( "log" "net" "strings" + "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" @@ -17,7 +19,7 @@ import ( type SocketWriter struct { ContentEncoding string `toml:"content_encoding"` Address string - KeepAlivePeriod *internal.Duration + KeepAlivePeriod *config.Duration tlsint.ClientConfig serializers.Serializer @@ -117,13 +119,13 @@ func (sw *SocketWriter) setKeepAlive(c net.Conn) error { if !ok { return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(sw.Address, "://", 2)[0]) } - if sw.KeepAlivePeriod.Duration == 0 { + if *sw.KeepAlivePeriod == 0 { return tcpc.SetKeepAlive(false) } if err := tcpc.SetKeepAlive(true); err != nil { return err } - return tcpc.SetKeepAlivePeriod(sw.KeepAlivePeriod.Duration) + return tcpc.SetKeepAlivePeriod(time.Duration(*sw.KeepAlivePeriod)) } // Write writes the given metrics to the destination. diff --git a/plugins/outputs/sumologic/sumologic.go b/plugins/outputs/sumologic/sumologic.go index 22b64a8e6a114..088210b9d1ff9 100644 --- a/plugins/outputs/sumologic/sumologic.go +++ b/plugins/outputs/sumologic/sumologic.go @@ -27,7 +27,7 @@ const ( ## Data format to be used for sending metrics. ## This will set the "Content-Type" header accordingly. - ## Currently supported formats: + ## Currently supported formats: ## * graphite - for Content-Type of application/vnd.sumologic.graphite ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2 ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus @@ -42,7 +42,7 @@ const ( ## Timeout used for HTTP request # timeout = "5s" - + ## Max HTTP request body size in bytes before compression (if applied). ## By default 1MB is recommended. ## NOTE: @@ -92,9 +92,9 @@ const ( ) type SumoLogic struct { - URL string `toml:"url"` - Timeout internal.Duration `toml:"timeout"` - MaxRequstBodySize config.Size `toml:"max_request_body_size"` + URL string `toml:"url"` + Timeout config.Duration `toml:"timeout"` + MaxRequstBodySize config.Size `toml:"max_request_body_size"` SourceName string `toml:"source_name"` SourceHost string `toml:"source_host"` @@ -143,7 +143,7 @@ func (s *SumoLogic) createClient() *http.Client { Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: s.Timeout.Duration, + Timeout: time.Duration(s.Timeout), } } @@ -152,8 +152,8 @@ func (s *SumoLogic) Connect() error { return errors.Wrap(s.err, "sumologic: incorrect configuration") } - if s.Timeout.Duration == 0 { - s.Timeout.Duration = defaultClientTimeout + if s.Timeout == 0 { + s.Timeout = config.Duration(defaultClientTimeout) } s.client = s.createClient() @@ -329,9 +329,7 @@ func setHeaderIfSetInConfig(r *http.Request, h header, value string) { func Default() *SumoLogic { return &SumoLogic{ - Timeout: internal.Duration{ - Duration: defaultClientTimeout, - }, + Timeout: config.Duration(defaultClientTimeout), MaxRequstBodySize: defaultMaxRequestBodySize, headers: make(map[string]string), } diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go index 1b46d02e210b0..39f1f6ec5817d 100644 --- a/plugins/outputs/syslog/syslog.go +++ b/plugins/outputs/syslog/syslog.go @@ -7,11 +7,12 @@ import ( "net" "strconv" "strings" + "time" "github.com/influxdata/go-syslog/v2/nontransparent" "github.com/influxdata/go-syslog/v2/rfc5424" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" @@ -19,7 +20,7 @@ import ( type Syslog struct { Address string - KeepAlivePeriod *internal.Duration + KeepAlivePeriod *config.Duration DefaultSdid string DefaultSeverityCode uint8 DefaultFacilityCode uint8 @@ -149,13 +150,13 @@ func (s *Syslog) setKeepAlive(c net.Conn) error { if !ok { return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(s.Address, "://", 2)[0]) } - if s.KeepAlivePeriod.Duration == 0 { + if *s.KeepAlivePeriod == 0 { return tcpc.SetKeepAlive(false) } if err := tcpc.SetKeepAlive(true); err != nil { return err } - return tcpc.SetKeepAlivePeriod(s.KeepAlivePeriod.Duration) + return tcpc.SetKeepAlivePeriod(time.Duration(*s.KeepAlivePeriod)) } func (s *Syslog) Close() error { diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index 32018329f0984..7826047d7873d 100644 --- a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -14,7 +14,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -25,12 +25,12 @@ const ( // Warp10 output plugin type Warp10 struct { - Prefix string `toml:"prefix"` - WarpURL string `toml:"warp_url"` - Token string `toml:"token"` - Timeout internal.Duration `toml:"timeout"` - PrintErrorBody bool `toml:"print_error_body"` - MaxStringErrorSize int `toml:"max_string_error_size"` + Prefix string `toml:"prefix"` + WarpURL string `toml:"warp_url"` + Token string `toml:"token"` + Timeout config.Duration `toml:"timeout"` + PrintErrorBody bool `toml:"print_error_body"` + MaxStringErrorSize int `toml:"max_string_error_size"` client *http.Client tls.ClientConfig } @@ -76,8 +76,8 @@ func (w *Warp10) createClient() (*http.Client, error) { return nil, err } - if w.Timeout.Duration == 0 { - w.Timeout.Duration = defaultClientTimeout + if w.Timeout == 0 { + w.Timeout = config.Duration(defaultClientTimeout) } client := &http.Client{ @@ -85,7 +85,7 @@ func (w *Warp10) createClient() (*http.Client, error) { TLSClientConfig: tlsCfg, Proxy: http.ProxyFromEnvironment, }, - Timeout: w.Timeout.Duration, + Timeout: time.Duration(w.Timeout), } return client, nil diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go index b3578be904da0..c6eb9db2ae5b5 100644 --- a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/selfstat" ) @@ -17,9 +17,9 @@ import ( // YandexCloudMonitoring allows publishing of metrics to the Yandex Cloud Monitoring custom metrics // service type YandexCloudMonitoring struct { - Timeout internal.Duration `toml:"timeout"` - EndpointURL string `toml:"endpoint_url"` - Service string `toml:"service"` + Timeout config.Duration `toml:"timeout"` + EndpointURL string `toml:"endpoint_url"` + Service string `toml:"service"` Log telegraf.Logger @@ -86,8 +86,8 @@ func (a *YandexCloudMonitoring) SampleConfig() string { // Connect initializes the plugin and validates connectivity func (a *YandexCloudMonitoring) Connect() error { - if a.Timeout.Duration <= 0 { - a.Timeout.Duration = defaultRequestTimeout + if a.Timeout <= 0 { + a.Timeout = config.Duration(defaultRequestTimeout) } if a.EndpointURL == "" { a.EndpointURL = defaultEndpointURL @@ -106,7 +106,7 @@ func (a *YandexCloudMonitoring) Connect() error { Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: a.Timeout.Duration, + Timeout: time.Duration(a.Timeout), } var err error diff --git a/plugins/processors/date/date.go b/plugins/processors/date/date.go index ef8609811c1f7..b1705826dc912 100644 --- a/plugins/processors/date/date.go +++ b/plugins/processors/date/date.go @@ -5,7 +5,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/processors" ) @@ -37,11 +37,11 @@ const sampleConfig = ` const defaultTimezone = "UTC" type Date struct { - TagKey string `toml:"tag_key"` - FieldKey string `toml:"field_key"` - DateFormat string `toml:"date_format"` - DateOffset internal.Duration `toml:"date_offset"` - Timezone string `toml:"timezone"` + TagKey string `toml:"tag_key"` + FieldKey string `toml:"field_key"` + DateFormat string `toml:"date_format"` + DateOffset config.Duration `toml:"date_offset"` + Timezone string `toml:"timezone"` location *time.Location } @@ -70,7 +70,7 @@ func (d *Date) Init() error { func (d *Date) Apply(in ...telegraf.Metric) []telegraf.Metric { for _, point := range in { - tm := point.Time().In(d.location).Add(d.DateOffset.Duration) + tm := point.Time().In(d.location).Add(time.Duration(d.DateOffset)) if len(d.TagKey) > 0 { point.AddTag(d.TagKey, tm.Format(d.DateFormat)) } else if len(d.FieldKey) > 0 { diff --git a/plugins/processors/date/date_test.go b/plugins/processors/date/date_test.go index 9bafc0654adff..c6d98051e7dec 100644 --- a/plugins/processors/date/date_test.go +++ b/plugins/processors/date/date_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -165,7 +165,7 @@ func TestDateOffset(t *testing.T) { plugin := &Date{ TagKey: "hour", DateFormat: "15", - DateOffset: internal.Duration{Duration: 2 * time.Hour}, + DateOffset: config.Duration(2 * time.Hour), } err := plugin.Init() diff --git a/plugins/processors/dedup/dedup.go b/plugins/processors/dedup/dedup.go index 3823b393e27fd..1ffe183256a1c 100644 --- a/plugins/processors/dedup/dedup.go +++ b/plugins/processors/dedup/dedup.go @@ -4,7 +4,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/processors" ) @@ -14,7 +14,7 @@ var sampleConfig = ` ` type Dedup struct { - DedupInterval internal.Duration `toml:"dedup_interval"` + DedupInterval config.Duration `toml:"dedup_interval"` FlushTime time.Time Cache map[uint64]telegraf.Metric } @@ -36,13 +36,13 @@ func remove(slice []telegraf.Metric, i int) []telegraf.Metric { // Remove expired items from cache func (d *Dedup) cleanup() { // No need to cleanup cache too often. Lets save some CPU - if time.Since(d.FlushTime) < d.DedupInterval.Duration { + if time.Since(d.FlushTime) < time.Duration(d.DedupInterval) { return } d.FlushTime = time.Now() keep := make(map[uint64]telegraf.Metric) for id, metric := range d.Cache { - if time.Since(metric.Time()) < d.DedupInterval.Duration { + if time.Since(metric.Time()) < time.Duration(d.DedupInterval) { keep[id] = metric } } @@ -68,7 +68,7 @@ func (d *Dedup) Apply(metrics ...telegraf.Metric) []telegraf.Metric { } // If cache item has expired then refresh it - if time.Since(m.Time()) >= d.DedupInterval.Duration { + if time.Since(m.Time()) >= time.Duration(d.DedupInterval) { d.save(metric, id) continue } @@ -120,7 +120,7 @@ func (d *Dedup) Apply(metrics ...telegraf.Metric) []telegraf.Metric { func init() { processors.Add("dedup", func() telegraf.Processor { return &Dedup{ - DedupInterval: internal.Duration{Duration: 10 * time.Minute}, + DedupInterval: config.Duration(10 * time.Minute), FlushTime: time.Now(), Cache: make(map[uint64]telegraf.Metric), } diff --git a/plugins/processors/dedup/dedup_test.go b/plugins/processors/dedup/dedup_test.go index b2fc9ca1fc546..80dde9057b0da 100644 --- a/plugins/processors/dedup/dedup_test.go +++ b/plugins/processors/dedup/dedup_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" ) @@ -24,7 +24,7 @@ func createMetric(value int64, when time.Time) telegraf.Metric { func createDedup(initTime time.Time) Dedup { return Dedup{ - DedupInterval: internal.Duration{Duration: 10 * time.Minute}, + DedupInterval: config.Duration(10 * time.Minute), FlushTime: initTime, Cache: make(map[uint64]telegraf.Metric), } diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index 52a9161b0f6fe..714578779a7a0 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -8,7 +8,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/common/parallel" si "github.com/influxdata/telegraf/plugins/inputs/snmp" @@ -338,7 +337,7 @@ func init() { ClientConfig: snmp.ClientConfig{ Retries: 3, MaxRepetitions: 10, - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), Version: 2, Community: "public", }, diff --git a/plugins/processors/ifname/ifname_test.go b/plugins/processors/ifname/ifname_test.go index cea03cfd3fe62..4052818f7509b 100644 --- a/plugins/processors/ifname/ifname_test.go +++ b/plugins/processors/ifname/ifname_test.go @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/snmp" si "github.com/influxdata/telegraf/plugins/inputs/snmp" "github.com/influxdata/telegraf/testutil" @@ -25,7 +24,7 @@ func TestTable(t *testing.T) { config := snmp.ClientConfig{ Version: 2, - Timeout: internal.Duration{Duration: 5 * time.Second}, // Doesn't work with 0 timeout + Timeout: config.Duration(5 * time.Second), // Doesn't work with 0 timeout } gs, err := snmp.NewWrapper(config) require.NoError(t, err) @@ -51,7 +50,7 @@ func TestIfNameIntegration(t *testing.T) { CacheSize: 1000, ClientConfig: snmp.ClientConfig{ Version: 2, - Timeout: internal.Duration{Duration: 5 * time.Second}, // Doesn't work with 0 timeout + Timeout: config.Duration(5 * time.Second), // Doesn't work with 0 timeout }, } err := d.Init() diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index 28a2cb7fc2498..d94d452ace262 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -7,23 +7,23 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/processors" ) type TopK struct { - Period internal.Duration `toml:"period"` - K int `toml:"k"` - GroupBy []string `toml:"group_by"` - Fields []string `toml:"fields"` - Aggregation string `toml:"aggregation"` - Bottomk bool `toml:"bottomk"` - AddGroupByTag string `toml:"add_groupby_tag"` - AddRankFields []string `toml:"add_rank_fields"` - AddAggregateFields []string `toml:"add_aggregate_fields"` - Log telegraf.Logger `toml:"-"` + Period config.Duration `toml:"period"` + K int `toml:"k"` + GroupBy []string `toml:"group_by"` + Fields []string `toml:"fields"` + Aggregation string `toml:"aggregation"` + Bottomk bool `toml:"bottomk"` + AddGroupByTag string `toml:"add_groupby_tag"` + AddRankFields []string `toml:"add_rank_fields"` + AddAggregateFields []string `toml:"add_aggregate_fields"` + Log telegraf.Logger `toml:"-"` cache map[string][]telegraf.Metric tagsGlobs filter.Filter @@ -37,7 +37,7 @@ func New() *TopK { topk := TopK{} // Setup defaults - topk.Period = internal.Duration{Duration: time.Second * time.Duration(10)} + topk.Period = config.Duration(time.Second * time.Duration(10)) topk.K = 10 topk.Fields = []string{"value"} topk.Aggregation = "mean" @@ -231,7 +231,7 @@ func (t *TopK) Apply(in ...telegraf.Metric) []telegraf.Metric { // If enough time has passed elapsed := time.Since(t.lastAggregation) - if elapsed >= t.Period.Duration { + if elapsed >= time.Duration(t.Period) { return t.push() } diff --git a/plugins/processors/topk/topk_test.go b/plugins/processors/topk/topk_test.go index 9df10c761eddf..27e18e34a564e 100644 --- a/plugins/processors/topk/topk_test.go +++ b/plugins/processors/topk/topk_test.go @@ -5,11 +5,11 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) -var oneSecondDuration = internal.Duration{Duration: time.Second} +var oneSecondDuration = config.Duration(time.Second) // Key, value pair that represents a telegraf.Metric Field type field struct { @@ -122,7 +122,7 @@ func equalSets(l1 []telegraf.Metric, l2 []telegraf.Metric) bool { func runAndCompare(topk *TopK, metrics []telegraf.Metric, answer []telegraf.Metric, testID string, t *testing.T) { // Sleep for `period`, otherwise the processor will only // cache the metrics, but it will not process them - time.Sleep(topk.Period.Duration) + time.Sleep(time.Duration(topk.Period)) // Run the processor ret := topk.Apply(metrics...) From 114e4c2f68baa1d8998324eace116f01be40446b Mon Sep 17 00:00:00 2001 From: R290 <46033588+R290@users.noreply.github.com> Date: Fri, 9 Apr 2021 20:23:32 +0200 Subject: [PATCH 364/761] Fix opcua integration test (#9107) --- docker-compose.yml | 4 ++++ plugins/inputs/opcua/opcua_client_test.go | 6 ++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 1da9d2a3b0de6..3c929f656b7de 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -78,6 +78,10 @@ services: image: ncarlier/mqtt ports: - "1883:1883" + opcua: + image: open62541/open62541 + ports: + - "4840:4840" riemann: image: stealthly/docker-riemann ports: diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index f13e5ba9a1ee4..ffa8521dd05a8 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -20,7 +20,9 @@ type OPCTags struct { } func TestClient1Integration(t *testing.T) { - t.Skip("Skipping due to dial tcp 195.254.227.245:4840: connect: connection refused") + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } var testopctags = []OPCTags{ {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, @@ -32,7 +34,7 @@ func TestClient1Integration(t *testing.T) { var err error o.MetricName = "testing" - o.Endpoint = "opc.tcp://opcua.rocks:4840" + o.Endpoint = "opc.tcp://localhost:4840" o.AuthMethod = "Anonymous" o.ConnectTimeout = config.Duration(10 * time.Second) o.RequestTimeout = config.Duration(1 * time.Second) From 411df7d76317f5e9a17da099d2bccd6bd6d4b687 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 13 Apr 2021 08:04:10 -0700 Subject: [PATCH 365/761] update promremotewrite readme make influxdb 1.x alignment a header --- plugins/parsers/prometheusremotewrite/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/parsers/prometheusremotewrite/README.md b/plugins/parsers/prometheusremotewrite/README.md index b523174e9184a..213bb601529de 100644 --- a/plugins/parsers/prometheusremotewrite/README.md +++ b/plugins/parsers/prometheusremotewrite/README.md @@ -41,6 +41,6 @@ prompb.WriteRequest{ prometheus_remote_write,instance=localhost:9090,job=prometheus,quantile=0.99 go_gc_duration_seconds=4.63 1614889298859000000 ``` -**For alignment with the [InfluxDB v1.x Prometheus Remote Write Spec](https://docs.influxdata.com/influxdb/v1.8/supported_protocols/prometheus/#how-prometheus-metrics-are-parsed-in-influxdb)** +## For alignment with the [InfluxDB v1.x Prometheus Remote Write Spec](https://docs.influxdata.com/influxdb/v1.8/supported_protocols/prometheus/#how-prometheus-metrics-are-parsed-in-influxdb) -- Use the [Starlark processor rename prometheus remote write script](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star) to rename the measurement name to the fieldname and rename the fieldname to value. \ No newline at end of file +- Use the [Starlark processor rename prometheus remote write script](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star) to rename the measurement name to the fieldname and rename the fieldname to value. From 842a7880229a719a5419cc6fc443fb1bef6697c3 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Tue, 13 Apr 2021 14:40:03 -0400 Subject: [PATCH 366/761] Remove error return type from metric.New method (#9116) * Remove error return type from metric.New method. * Formatting changes for linter + gofmt * Additional linter fixes. * More linter fixes. * Linter fix. * address comments --- agent/accumulator.go | 5 +- metric/metric.go | 4 +- metric/metric_test.go | 27 +- metric/series_grouper.go | 16 +- metric/series_grouper_test.go | 2 +- metric/tracking_test.go | 5 +- models/buffer_test.go | 5 +- models/filter_test.go | 21 +- models/running_input_test.go | 42 +- .../aggregators/basicstats/basicstats_test.go | 12 +- .../aggregators/derivative/derivative_test.go | 22 +- plugins/aggregators/final/final_test.go | 20 +- .../aggregators/histogram/histogram_test.go | 6 +- plugins/aggregators/merge/merge_test.go | 4 +- plugins/aggregators/minmax/minmax_test.go | 4 +- .../valuecounter/valuecounter_test.go | 4 +- plugins/common/parallel/parallel_test.go | 10 +- plugins/common/shim/output_test.go | 2 +- plugins/common/shim/processor_test.go | 2 +- plugins/inputs/execd/execd_test.go | 4 +- plugins/inputs/prometheus/parser.go | 6 +- .../riemann_listener/riemann_listener.go | 8 +- plugins/inputs/sflow/metricencoder.go | 5 +- .../webhooks/github/github_webhooks_models.go | 42 +- .../application_insights_test.go | 28 +- .../outputs/azure_monitor/azure_monitor.go | 6 +- plugins/outputs/cloudwatch/cloudwatch_test.go | 6 +- plugins/outputs/cratedb/cratedb_test.go | 3 +- plugins/outputs/dynatrace/dynatrace_test.go | 23 +- plugins/outputs/execd/execd_test.go | 3 +- plugins/outputs/graphite/graphite_test.go | 47 +- plugins/outputs/http/http_test.go | 6 +- plugins/outputs/influxdb/http_test.go | 8 +- plugins/outputs/influxdb/influxdb_test.go | 3 +- plugins/outputs/influxdb/udp_test.go | 12 +- .../outputs/instrumental/instrumental_test.go | 12 +- plugins/outputs/kafka/kafka_test.go | 4 +- plugins/outputs/librato/librato_test.go | 10 +- plugins/outputs/riemann/riemann_test.go | 9 +- plugins/outputs/signalfx/signalfx_test.go | 12 +- plugins/outputs/sumologic/sumologic_test.go | 6 +- plugins/outputs/syslog/syslog_mapper_test.go | 14 +- plugins/outputs/syslog/syslog_test.go | 8 +- plugins/outputs/wavefront/wavefront_test.go | 13 +- plugins/parsers/collectd/parser.go | 11 +- plugins/parsers/csv/parser.go | 6 +- plugins/parsers/csv/parser_test.go | 9 +- plugins/parsers/dropwizard/parser.go | 6 +- plugins/parsers/dropwizard/parser_test.go | 67 +- plugins/parsers/form_urlencoded/parser.go | 7 +- plugins/parsers/graphite/parser.go | 2 +- plugins/parsers/graphite/parser_test.go | 23 +- plugins/parsers/grok/parser.go | 4 +- plugins/parsers/influx/handler.go | 5 +- plugins/parsers/influx/parser_test.go | 639 ++++++++--------- plugins/parsers/json/parser.go | 8 +- plugins/parsers/logfmt/parser.go | 5 +- plugins/parsers/logfmt/parser_test.go | 6 +- plugins/parsers/nagios/parser.go | 22 +- plugins/parsers/nagios/parser_test.go | 5 +- plugins/parsers/prometheus/parser.go | 33 +- .../parsers/prometheusremotewrite/parser.go | 5 +- plugins/parsers/value/parser.go | 7 +- plugins/parsers/wavefront/parser.go | 5 +- plugins/parsers/wavefront/parser_test.go | 96 +-- plugins/parsers/xml/parser.go | 2 +- plugins/processors/clone/clone_test.go | 4 +- plugins/processors/date/date_test.go | 2 +- plugins/processors/dedup/dedup_test.go | 10 +- plugins/processors/enum/enum_test.go | 4 +- plugins/processors/execd/execd_test.go | 7 +- plugins/processors/override/override_test.go | 4 +- plugins/processors/parser/parser_test.go | 376 +++++----- plugins/processors/regex/regex_test.go | 4 +- plugins/processors/rename/rename_test.go | 2 +- .../processors/reverse_dns/reversedns_test.go | 2 +- plugins/processors/starlark/builtins.go | 5 +- plugins/processors/strings/strings_test.go | 10 +- .../processors/tag_limit/tag_limit_test.go | 2 +- plugins/processors/topk/test_sets.go | 25 +- plugins/processors/topk/topk.go | 5 +- plugins/serializers/carbon2/carbon2_test.go | 60 +- plugins/serializers/graphite/graphite_test.go | 131 ++-- plugins/serializers/influx/influx_test.go | 659 ++++++++---------- plugins/serializers/influx/reader_test.go | 269 ++++--- plugins/serializers/json/json_test.go | 55 +- plugins/serializers/msgpack/msgpack_test.go | 3 +- .../serializers/nowmetric/nowmetric_test.go | 56 +- .../splunkmetric/splunkmetric_test.go | 137 ++-- .../serializers/wavefront/wavefront_test.go | 29 +- selfstat/selfstat.go | 9 +- testutil/metric.go | 10 +- testutil/metric_test.go | 4 +- testutil/testutil.go | 2 +- 94 files changed, 1433 insertions(+), 1937 deletions(-) diff --git a/agent/accumulator.go b/agent/accumulator.go index 65000fd98a541..3683b6767d47f 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -90,10 +90,7 @@ func (ac *accumulator) addFields( tp telegraf.ValueType, t ...time.Time, ) { - m, err := metric.New(measurement, tags, fields, ac.getTime(t), tp) - if err != nil { - return - } + m := metric.New(measurement, tags, fields, ac.getTime(t), tp) if m := ac.maker.MakeMetric(m); m != nil { ac.metrics <- m } diff --git a/metric/metric.go b/metric/metric.go index d28503b743f89..f8483459a93bf 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -24,7 +24,7 @@ func New( fields map[string]interface{}, tm time.Time, tp ...telegraf.ValueType, -) (telegraf.Metric, error) { +) telegraf.Metric { var vtype telegraf.ValueType if len(tp) > 0 { vtype = tp[0] @@ -60,7 +60,7 @@ func New( } } - return m, nil + return m } // FromMetric returns a deep copy of the metric with any tracking information diff --git a/metric/metric_test.go b/metric/metric_test.go index b85f0c89ffdeb..d4d1cb11bb8ed 100644 --- a/metric/metric_test.go +++ b/metric/metric_test.go @@ -20,8 +20,7 @@ func TestNewMetric(t *testing.T) { "usage_idle": float64(99), "usage_busy": float64(1), } - m, err := New("cpu", tags, fields, now) - require.NoError(t, err) + m := New("cpu", tags, fields, now) require.Equal(t, "cpu", m.Name()) require.Equal(t, tags, m.Tags()) @@ -38,10 +37,7 @@ func baseMetric() telegraf.Metric { } now := time.Now() - m, err := New("cpu", tags, fields, now) - if err != nil { - panic(err) - } + m := New("cpu", tags, fields, now) return m } @@ -176,7 +172,7 @@ func TestTagList_Sorted(t *testing.T) { func TestEquals(t *testing.T) { now := time.Now() - m1, err := New("cpu", + m1 := New("cpu", map[string]string{ "host": "localhost", }, @@ -185,9 +181,8 @@ func TestEquals(t *testing.T) { }, now, ) - require.NoError(t, err) - m2, err := New("cpu", + m2 := New("cpu", map[string]string{ "host": "localhost", }, @@ -196,7 +191,6 @@ func TestEquals(t *testing.T) { }, now, ) - require.NoError(t, err) lhs := m1.(*metric) require.Equal(t, lhs, m2) @@ -208,7 +202,7 @@ func TestEquals(t *testing.T) { } func TestHashID(t *testing.T) { - m, _ := New( + m := New( "cpu", map[string]string{ "datacenter": "us-east-1", @@ -241,7 +235,7 @@ func TestHashID(t *testing.T) { } func TestHashID_Consistency(t *testing.T) { - m, _ := New( + m := New( "cpu", map[string]string{ "datacenter": "us-east-1", @@ -255,7 +249,7 @@ func TestHashID_Consistency(t *testing.T) { ) hash := m.HashID() - m2, _ := New( + m2 := New( "cpu", map[string]string{ "datacenter": "us-east-1", @@ -274,7 +268,7 @@ func TestHashID_Consistency(t *testing.T) { } func TestHashID_Delimiting(t *testing.T) { - m1, _ := New( + m1 := New( "cpu", map[string]string{ "a": "x", @@ -286,7 +280,7 @@ func TestHashID_Delimiting(t *testing.T) { }, time.Now(), ) - m2, _ := New( + m2 := New( "cpu", map[string]string{ "a": "xbycz", @@ -328,8 +322,7 @@ func TestValueType(t *testing.T) { fields := map[string]interface{}{ "value": float64(42), } - m, err := New("cpu", tags, fields, now, telegraf.Gauge) - assert.NoError(t, err) + m := New("cpu", tags, fields, now, telegraf.Gauge) assert.Equal(t, telegraf.Gauge, m.Type()) } diff --git a/metric/series_grouper.go b/metric/series_grouper.go index c6ba23793d478..03f110abcb429 100644 --- a/metric/series_grouper.go +++ b/metric/series_grouper.go @@ -50,18 +50,14 @@ func (g *SeriesGrouper) Add( } sort.Slice(taglist, func(i, j int) bool { return taglist[i].Key < taglist[j].Key }) - var err error id := groupID(g.hashSeed, measurement, taglist, tm) - metric := g.metrics[id] - if metric == nil { - metric, err = New(measurement, tags, map[string]interface{}{field: fieldValue}, tm) - if err != nil { - return err - } - g.metrics[id] = metric - g.ordered = append(g.ordered, metric) + m := g.metrics[id] + if m == nil { + m = New(measurement, tags, map[string]interface{}{field: fieldValue}, tm) + g.metrics[id] = m + g.ordered = append(g.ordered, m) } else { - metric.AddField(field, fieldValue) + m.AddField(field, fieldValue) } return nil } diff --git a/metric/series_grouper_test.go b/metric/series_grouper_test.go index 32fbecb6e41b2..eee338a41d130 100644 --- a/metric/series_grouper_test.go +++ b/metric/series_grouper_test.go @@ -6,7 +6,7 @@ import ( "time" ) -var m, _ = New( +var m = New( "mymetric", map[string]string{ "host": "host.example.com", diff --git a/metric/tracking_test.go b/metric/tracking_test.go index 3464ea15f2ecb..4d89a32c18623 100644 --- a/metric/tracking_test.go +++ b/metric/tracking_test.go @@ -16,10 +16,7 @@ func mustMetric( tm time.Time, tp ...telegraf.ValueType, ) telegraf.Metric { - m, err := New(name, tags, fields, tm, tp...) - if err != nil { - panic("mustMetric") - } + m := New(name, tags, fields, tm, tp...) return m } diff --git a/models/buffer_test.go b/models/buffer_test.go index 9aef94fb86585..d830ac91c6dd9 100644 --- a/models/buffer_test.go +++ b/models/buffer_test.go @@ -34,7 +34,7 @@ func Metric() telegraf.Metric { } func MetricTime(sec int64) telegraf.Metric { - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -42,9 +42,6 @@ func MetricTime(sec int64) telegraf.Metric { }, time.Unix(sec, 0), ) - if err != nil { - panic(err) - } return m } diff --git a/models/filter_test.go b/models/filter_test.go index 7e82ba0007240..aa32e095163c4 100644 --- a/models/filter_test.go +++ b/models/filter_test.go @@ -15,11 +15,10 @@ func TestFilter_ApplyEmpty(t *testing.T) { require.NoError(t, f.Compile()) require.False(t, f.IsActive()) - m, err := metric.New("m", + m := metric.New("m", map[string]string{}, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) require.True(t, f.Select(m)) } @@ -37,11 +36,10 @@ func TestFilter_ApplyTagsDontPass(t *testing.T) { require.NoError(t, f.Compile()) require.True(t, f.IsActive()) - m, err := metric.New("m", + m := metric.New("m", map[string]string{"cpu": "cpu-total"}, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) require.False(t, f.Select(m)) } @@ -53,14 +51,13 @@ func TestFilter_ApplyDeleteFields(t *testing.T) { require.NoError(t, f.Compile()) require.True(t, f.IsActive()) - m, err := metric.New("m", + m := metric.New("m", map[string]string{}, map[string]interface{}{ "value": int64(1), "value2": int64(2), }, time.Now()) - require.NoError(t, err) require.True(t, f.Select(m)) f.Modify(m) require.Equal(t, map[string]interface{}{"value2": int64(2)}, m.Fields()) @@ -74,14 +71,13 @@ func TestFilter_ApplyDeleteAllFields(t *testing.T) { require.NoError(t, f.Compile()) require.True(t, f.IsActive()) - m, err := metric.New("m", + m := metric.New("m", map[string]string{}, map[string]interface{}{ "value": int64(1), "value2": int64(2), }, time.Now()) - require.NoError(t, err) require.True(t, f.Select(m)) f.Modify(m) require.Len(t, m.FieldList(), 0) @@ -332,14 +328,13 @@ func TestFilter_TagDrop(t *testing.T) { } func TestFilter_FilterTagsNoMatches(t *testing.T) { - m, err := metric.New("m", + m := metric.New("m", map[string]string{ "host": "localhost", "mytag": "foobar", }, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) f := Filter{ TagExclude: []string{"nomatch"}, } @@ -361,14 +356,13 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) { } func TestFilter_FilterTagsMatches(t *testing.T) { - m, err := metric.New("m", + m := metric.New("m", map[string]string{ "host": "localhost", "mytag": "foobar", }, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) f := Filter{ TagExclude: []string{"ho*"}, } @@ -379,14 +373,13 @@ func TestFilter_FilterTagsMatches(t *testing.T) { "mytag": "foobar", }, m.Tags()) - m, err = metric.New("m", + m = metric.New("m", map[string]string{ "host": "localhost", "mytag": "foobar", }, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) f = Filter{ TagInclude: []string{"my*"}, } diff --git a/models/running_input_test.go b/models/running_input_test.go index 5c639e6929ef3..8f9390f53b730 100644 --- a/models/running_input_test.go +++ b/models/running_input_test.go @@ -23,17 +23,16 @@ func TestMakeMetricFilterAfterApplyingGlobalTags(t *testing.T) { require.NoError(t, ri.Config.Filter.Compile()) ri.SetDefaultTags(map[string]string{"a": "x", "b": "y"}) - m, err := metric.New("cpu", + m := metric.New("cpu", map[string]string{}, map[string]interface{}{ "value": 42, }, now) - require.NoError(t, err) actual := ri.MakeMetric(m) - expected, err := metric.New("cpu", + expected := metric.New("cpu", map[string]string{ "b": "y", }, @@ -41,7 +40,6 @@ func TestMakeMetricFilterAfterApplyingGlobalTags(t *testing.T) { "value": 42, }, now) - require.NoError(t, err) testutil.RequireMetricEqual(t, expected, actual) } @@ -52,13 +50,12 @@ func TestMakeMetricNoFields(t *testing.T) { Name: "TestRunningInput", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{}, now, telegraf.Untyped) m = ri.MakeMetric(m) - require.NoError(t, err) assert.Nil(t, m) } @@ -69,7 +66,7 @@ func TestMakeMetricNilFields(t *testing.T) { Name: "TestRunningInput", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), @@ -77,17 +74,15 @@ func TestMakeMetricNilFields(t *testing.T) { }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) - expected, err := metric.New("RITest", + expected := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int(101), }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -110,7 +105,7 @@ func TestMakeMetricWithPluginTags(t *testing.T) { telegraf.Untyped) m = ri.MakeMetric(m) - expected, err := metric.New("RITest", + expected := metric.New("RITest", map[string]string{ "foo": "bar", }, @@ -119,7 +114,6 @@ func TestMakeMetricWithPluginTags(t *testing.T) { }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -135,7 +129,7 @@ func TestMakeMetricFilteredOut(t *testing.T) { assert.NoError(t, ri.Config.Filter.Compile()) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), @@ -143,7 +137,6 @@ func TestMakeMetricFilteredOut(t *testing.T) { now, telegraf.Untyped) m = ri.MakeMetric(m) - require.NoError(t, err) assert.Nil(t, m) } @@ -164,7 +157,7 @@ func TestMakeMetricWithDaemonTags(t *testing.T) { now, telegraf.Untyped) m = ri.MakeMetric(m) - expected, err := metric.New("RITest", + expected := metric.New("RITest", map[string]string{ "foo": "bar", }, @@ -173,7 +166,6 @@ func TestMakeMetricWithDaemonTags(t *testing.T) { }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -184,23 +176,21 @@ func TestMakeMetricNameOverride(t *testing.T) { NameOverride: "foobar", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) - expected, err := metric.New("foobar", + expected := metric.New("foobar", nil, map[string]interface{}{ "value": 101, }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -211,23 +201,21 @@ func TestMakeMetricNamePrefix(t *testing.T) { MeasurementPrefix: "foobar_", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) - expected, err := metric.New("foobar_RITest", + expected := metric.New("foobar_RITest", nil, map[string]interface{}{ "value": 101, }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -238,23 +226,21 @@ func TestMakeMetricNameSuffix(t *testing.T) { MeasurementSuffix: "_foobar", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) - expected, err := metric.New("RITest_foobar", + expected := metric.New("RITest_foobar", nil, map[string]interface{}{ "value": 101, }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go index 8e8ee6da7915a..51ecd5c992442 100644 --- a/plugins/aggregators/basicstats/basicstats_test.go +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" ) -var m1, _ = metric.New("m1", +var m1 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "a": int64(1), @@ -21,7 +21,7 @@ var m1, _ = metric.New("m1", }, time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), ) -var m2, _ = metric.New("m1", +var m2 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "a": int64(1), @@ -326,28 +326,28 @@ func TestBasicStatsWithOnlySum(t *testing.T) { // implementations of sum were calculated from mean and count, which // e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8. func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) { - var sum1, _ = metric.New("m1", + var sum1 = metric.New("m1", map[string]string{}, map[string]interface{}{ "a": int64(1), }, time.Now(), ) - var sum2, _ = metric.New("m1", + var sum2 = metric.New("m1", map[string]string{}, map[string]interface{}{ "a": int64(1), }, time.Now(), ) - var sum3, _ = metric.New("m1", + var sum3 = metric.New("m1", map[string]string{}, map[string]interface{}{ "a": int64(5), }, time.Now(), ) - var sum4, _ = metric.New("m1", + var sum4 = metric.New("m1", map[string]string{}, map[string]interface{}{ "a": int64(1), diff --git a/plugins/aggregators/derivative/derivative_test.go b/plugins/aggregators/derivative/derivative_test.go index 9a2f34ac7dc20..fb84dae6ff54a 100644 --- a/plugins/aggregators/derivative/derivative_test.go +++ b/plugins/aggregators/derivative/derivative_test.go @@ -8,7 +8,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -var start, _ = metric.New("TestMetric", +var start = metric.New("TestMetric", map[string]string{"state": "full"}, map[string]interface{}{ "increasing": int64(0), @@ -20,7 +20,7 @@ var start, _ = metric.New("TestMetric", time.Now(), ) -var finish, _ = metric.New("TestMetric", +var finish = metric.New("TestMetric", map[string]string{"state": "full"}, map[string]interface{}{ "increasing": int64(1000), @@ -94,14 +94,14 @@ func TestTwoFullEventsWithoutParameter(t *testing.T) { duration, _ := time.ParseDuration("2s") endTime := startTime.Add(duration) - first, _ := metric.New("One Field", + first := metric.New("One Field", map[string]string{}, map[string]interface{}{ "value": int64(10), }, startTime, ) - last, _ := metric.New("One Field", + last := metric.New("One Field", map[string]string{}, map[string]interface{}{ "value": int64(20), @@ -222,7 +222,7 @@ func TestIgnoresMissingVariable(t *testing.T) { derivative.Log = testutil.Logger{} derivative.Init() - noParameter, _ := metric.New("TestMetric", + noParameter := metric.New("TestMetric", map[string]string{"state": "no_parameter"}, map[string]interface{}{ "increasing": int64(100), @@ -265,17 +265,17 @@ func TestMergesDifferenMetricsWithSameHash(t *testing.T) { startTime := time.Now() duration, _ := time.ParseDuration("2s") endTime := startTime.Add(duration) - part1, _ := metric.New("TestMetric", + part1 := metric.New("TestMetric", map[string]string{"state": "full"}, map[string]interface{}{"field1": int64(10)}, startTime, ) - part2, _ := metric.New("TestMetric", + part2 := metric.New("TestMetric", map[string]string{"state": "full"}, map[string]interface{}{"field2": int64(20)}, startTime, ) - final, _ := metric.New("TestMetric", + final := metric.New("TestMetric", map[string]string{"state": "full"}, map[string]interface{}{ "field1": int64(30), @@ -359,7 +359,7 @@ func TestCalculatesCorrectDerivativeOnTwoConsecutivePeriods(t *testing.T) { derivative.Init() startTime := time.Now() - first, _ := metric.New("One Field", + first := metric.New("One Field", map[string]string{}, map[string]interface{}{ "value": int64(10), @@ -370,7 +370,7 @@ func TestCalculatesCorrectDerivativeOnTwoConsecutivePeriods(t *testing.T) { derivative.Push(&acc) derivative.Reset() - second, _ := metric.New("One Field", + second := metric.New("One Field", map[string]string{}, map[string]interface{}{ "value": int64(20), @@ -386,7 +386,7 @@ func TestCalculatesCorrectDerivativeOnTwoConsecutivePeriods(t *testing.T) { }) acc.ClearMetrics() - third, _ := metric.New("One Field", + third := metric.New("One Field", map[string]string{}, map[string]interface{}{ "value": int64(40), diff --git a/plugins/aggregators/final/final_test.go b/plugins/aggregators/final/final_test.go index a4add9a5ce20c..6b0c6e8e38c24 100644 --- a/plugins/aggregators/final/final_test.go +++ b/plugins/aggregators/final/final_test.go @@ -15,15 +15,15 @@ func TestSimple(t *testing.T) { final := NewFinal() tags := map[string]string{"foo": "bar"} - m1, _ := metric.New("m1", + m1 := metric.New("m1", tags, map[string]interface{}{"a": int64(1)}, time.Unix(1530939936, 0)) - m2, _ := metric.New("m1", + m2 := metric.New("m1", tags, map[string]interface{}{"a": int64(2)}, time.Unix(1530939937, 0)) - m3, _ := metric.New("m1", + m3 := metric.New("m1", tags, map[string]interface{}{"a": int64(3)}, time.Unix(1530939938, 0)) @@ -52,15 +52,15 @@ func TestTwoTags(t *testing.T) { tags1 := map[string]string{"foo": "bar"} tags2 := map[string]string{"foo": "baz"} - m1, _ := metric.New("m1", + m1 := metric.New("m1", tags1, map[string]interface{}{"a": int64(1)}, time.Unix(1530939936, 0)) - m2, _ := metric.New("m1", + m2 := metric.New("m1", tags2, map[string]interface{}{"a": int64(2)}, time.Unix(1530939937, 0)) - m3, _ := metric.New("m1", + m3 := metric.New("m1", tags1, map[string]interface{}{"a": int64(3)}, time.Unix(1530939938, 0)) @@ -98,19 +98,19 @@ func TestLongDifference(t *testing.T) { now := time.Now() - m1, _ := metric.New("m", + m1 := metric.New("m", tags, map[string]interface{}{"a": int64(1)}, now.Add(time.Second*-290)) - m2, _ := metric.New("m", + m2 := metric.New("m", tags, map[string]interface{}{"a": int64(2)}, now.Add(time.Second*-275)) - m3, _ := metric.New("m", + m3 := metric.New("m", tags, map[string]interface{}{"a": int64(3)}, now.Add(time.Second*-100)) - m4, _ := metric.New("m", + m4 := metric.New("m", tags, map[string]interface{}{"a": int64(4)}, now.Add(time.Second*-20)) diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go index aa6214b3babab..c2a05cc283c3d 100644 --- a/plugins/aggregators/histogram/histogram_test.go +++ b/plugins/aggregators/histogram/histogram_test.go @@ -25,7 +25,7 @@ func NewTestHistogram(cfg []config, reset bool, cumulative bool) telegraf.Aggreg } // firstMetric1 is the first test metric -var firstMetric1, _ = metric.New( +var firstMetric1 = metric.New( "first_metric_name", tags{}, fields{ @@ -36,7 +36,7 @@ var firstMetric1, _ = metric.New( ) // firstMetric1 is the first test metric with other value -var firstMetric2, _ = metric.New( +var firstMetric2 = metric.New( "first_metric_name", tags{}, fields{ @@ -47,7 +47,7 @@ var firstMetric2, _ = metric.New( ) // secondMetric is the second metric -var secondMetric, _ = metric.New( +var secondMetric = metric.New( "second_metric_name", tags{}, fields{ diff --git a/plugins/aggregators/merge/merge_test.go b/plugins/aggregators/merge/merge_test.go index 552c8618e3482..94e54590b586f 100644 --- a/plugins/aggregators/merge/merge_test.go +++ b/plugins/aggregators/merge/merge_test.go @@ -187,7 +187,7 @@ func TestReset(t *testing.T) { testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) } -var m1, _ = metric.New( +var m1 = metric.New( "mymetric", map[string]string{ "host": "host.example.com", @@ -206,7 +206,7 @@ var m1, _ = metric.New( }, time.Now(), ) -var m2, _ = metric.New( +var m2 = metric.New( "mymetric", map[string]string{ "host": "host.example.com", diff --git a/plugins/aggregators/minmax/minmax_test.go b/plugins/aggregators/minmax/minmax_test.go index e7c3cf4eb2024..7835d95e9c72e 100644 --- a/plugins/aggregators/minmax/minmax_test.go +++ b/plugins/aggregators/minmax/minmax_test.go @@ -8,7 +8,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -var m1, _ = metric.New("m1", +var m1 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "a": int64(1), @@ -24,7 +24,7 @@ var m1, _ = metric.New("m1", }, time.Now(), ) -var m2, _ = metric.New("m1", +var m2 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "a": int64(1), diff --git a/plugins/aggregators/valuecounter/valuecounter_test.go b/plugins/aggregators/valuecounter/valuecounter_test.go index 8cec5f36653c4..75aa6deb01bf4 100644 --- a/plugins/aggregators/valuecounter/valuecounter_test.go +++ b/plugins/aggregators/valuecounter/valuecounter_test.go @@ -19,7 +19,7 @@ func NewTestValueCounter(fields []string) telegraf.Aggregator { return vc } -var m1, _ = metric.New("m1", +var m1 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "status": 200, @@ -28,7 +28,7 @@ var m1, _ = metric.New("m1", time.Now(), ) -var m2, _ = metric.New("m1", +var m2 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "status": "OK", diff --git a/plugins/common/parallel/parallel_test.go b/plugins/common/parallel/parallel_test.go index c24f67e17c79d..1e2eaccb98654 100644 --- a/plugins/common/parallel/parallel_test.go +++ b/plugins/common/parallel/parallel_test.go @@ -18,14 +18,13 @@ func TestOrderedJobsStayOrdered(t *testing.T) { p := parallel.NewOrdered(acc, jobFunc, 10000, 10) now := time.Now() for i := 0; i < 20000; i++ { - m, err := metric.New("test", + m := metric.New("test", map[string]string{}, map[string]interface{}{ "val": i, }, now, ) - require.NoError(t, err) now = now.Add(1) p.Enqueue(m) } @@ -51,14 +50,13 @@ func TestUnorderedJobsDontDropAnyJobs(t *testing.T) { expectedTotal := 0 for i := 0; i < 20000; i++ { expectedTotal += i - m, err := metric.New("test", + m := metric.New("test", map[string]string{}, map[string]interface{}{ "val": i, }, now, ) - require.NoError(t, err) now = now.Add(1) p.Enqueue(m) } @@ -79,7 +77,7 @@ func BenchmarkOrdered(b *testing.B) { p := parallel.NewOrdered(acc, jobFunc, 10000, 10) - m, _ := metric.New("test", + m := metric.New("test", map[string]string{}, map[string]interface{}{ "val": 1, @@ -99,7 +97,7 @@ func BenchmarkUnordered(b *testing.B) { p := parallel.NewUnordered(acc, jobFunc, 10) - m, _ := metric.New("test", + m := metric.New("test", map[string]string{}, map[string]interface{}{ "val": 1, diff --git a/plugins/common/shim/output_test.go b/plugins/common/shim/output_test.go index 5a74d59edb240..468ae28e05eee 100644 --- a/plugins/common/shim/output_test.go +++ b/plugins/common/shim/output_test.go @@ -34,7 +34,7 @@ func TestOutputShim(t *testing.T) { serializer, _ := serializers.NewInfluxSerializer() - m, _ := metric.New("thing", + m := metric.New("thing", map[string]string{ "a": "b", }, diff --git a/plugins/common/shim/processor_test.go b/plugins/common/shim/processor_test.go index b4cf01ae0236f..6126656b8fcc6 100644 --- a/plugins/common/shim/processor_test.go +++ b/plugins/common/shim/processor_test.go @@ -40,7 +40,7 @@ func TestProcessorShim(t *testing.T) { serializer, _ := serializers.NewInfluxSerializer() parser, _ := parsers.NewInfluxParser() - m, _ := metric.New("thing", + m := metric.New("thing", map[string]string{ "a": "b", }, diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index ab67af0ab84cd..e95ed133f9cba 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -170,7 +170,7 @@ func runCounterProgram() { scanner := bufio.NewScanner(os.Stdin) for scanner.Scan() { - metric, _ := metric.New("counter", + m := metric.New("counter", map[string]string{}, map[string]interface{}{ "count": i, @@ -179,7 +179,7 @@ func runCounterProgram() { ) i++ - b, err := serializer.Serialize(metric) + b, err := serializer.Serialize(m) if err != nil { fmt.Fprintf(os.Stderr, "ERR %v\n", err) os.Exit(1) diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index 9a4b5a4837643..7d3140dc7d627 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -81,10 +81,8 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { } else { t = now } - metric, err := metric.New(metricName, tags, fields, t, common.ValueType(mf.GetType())) - if err == nil { - metrics = append(metrics, metric) - } + m := metric.New(metricName, tags, fields, t, common.ValueType(mf.GetType())) + metrics = append(metrics, m) } } } diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index b20094176ceb8..24bdd11540e1b 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -221,13 +221,7 @@ func (rsl *riemannListener) read(conn net.Conn) { tags["State"] = m.State fieldValues["Metric"] = m.Metric fieldValues["TTL"] = m.TTL.Seconds() - singleMetric, err := metric.New(m.Service, tags, fieldValues, m.Time, telegraf.Untyped) - if err != nil { - rsl.Log.Debugf("Could not create metric for service %s at %s", m.Service, m.Time.String()) - riemannReturnErrorResponse(conn, "Could not create metric") - return - } - + singleMetric := metric.New(m.Service, tags, fieldValues, m.Time, telegraf.Untyped) rsl.AddMetric(singleMetric) } riemannReturnResponse(conn) diff --git a/plugins/inputs/sflow/metricencoder.go b/plugins/inputs/sflow/metricencoder.go index ffc9d8e023849..2dc1fb122b096 100644 --- a/plugins/inputs/sflow/metricencoder.go +++ b/plugins/inputs/sflow/metricencoder.go @@ -34,10 +34,7 @@ func makeMetrics(p *V5Format) ([]telegraf.Metric, error) { for k, v := range fields { fields2[k] = v } - m, err := metric.New("sflow", tags2, fields2, now) - if err != nil { - return nil, err - } + m := metric.New("sflow", tags2, fields2, now) metrics = append(metrics, m) } } diff --git a/plugins/inputs/webhooks/github/github_webhooks_models.go b/plugins/inputs/webhooks/github/github_webhooks_models.go index 88c75526b28cf..497d3f13c575e 100644 --- a/plugins/inputs/webhooks/github/github_webhooks_models.go +++ b/plugins/inputs/webhooks/github/github_webhooks_models.go @@ -106,7 +106,7 @@ func (s CommitCommentEvent) NewMetric() telegraf.Metric { "commit": s.Comment.Commit, "comment": s.Comment.Body, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -133,7 +133,7 @@ func (s CreateEvent) NewMetric() telegraf.Metric { "ref": s.Ref, "refType": s.RefType, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -160,7 +160,7 @@ func (s DeleteEvent) NewMetric() telegraf.Metric { "ref": s.Ref, "refType": s.RefType, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -188,7 +188,7 @@ func (s DeploymentEvent) NewMetric() telegraf.Metric { "environment": s.Deployment.Environment, "description": s.Deployment.Description, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -219,7 +219,7 @@ func (s DeploymentStatusEvent) NewMetric() telegraf.Metric { "depState": s.DeploymentStatus.State, "depDescription": s.DeploymentStatus.Description, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -244,7 +244,7 @@ func (s ForkEvent) NewMetric() telegraf.Metric { "issues": s.Repository.Issues, "fork": s.Forkee.Repository, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -269,7 +269,7 @@ func (s GollumEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -298,7 +298,7 @@ func (s IssueCommentEvent) NewMetric() telegraf.Metric { "comments": s.Issue.Comments, "body": s.Comment.Body, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -327,7 +327,7 @@ func (s IssuesEvent) NewMetric() telegraf.Metric { "title": s.Issue.Title, "comments": s.Issue.Comments, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -353,7 +353,7 @@ func (s MemberEvent) NewMetric() telegraf.Metric { "newMember": s.Member.User, "newMemberStatus": s.Member.Admin, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -376,7 +376,7 @@ func (s MembershipEvent) NewMetric() telegraf.Metric { "newMember": s.Member.User, "newMemberStatus": s.Member.Admin, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -399,7 +399,7 @@ func (s PageBuildEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -422,7 +422,7 @@ func (s PublicEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -456,7 +456,7 @@ func (s PullRequestEvent) NewMetric() telegraf.Metric { "deletions": s.PullRequest.Deletions, "changedFiles": s.PullRequest.ChangedFiles, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -491,7 +491,7 @@ func (s PullRequestReviewCommentEvent) NewMetric() telegraf.Metric { "commentFile": s.Comment.File, "comment": s.Comment.Comment, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -520,7 +520,7 @@ func (s PushEvent) NewMetric() telegraf.Metric { "before": s.Before, "after": s.After, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -545,7 +545,7 @@ func (s ReleaseEvent) NewMetric() telegraf.Metric { "issues": s.Repository.Issues, "tagName": s.Release.TagName, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -568,7 +568,7 @@ func (s RepositoryEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -595,7 +595,7 @@ func (s StatusEvent) NewMetric() telegraf.Metric { "commit": s.Commit, "state": s.State, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -620,7 +620,7 @@ func (s TeamAddEvent) NewMetric() telegraf.Metric { "issues": s.Repository.Issues, "teamName": s.Team.Name, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } @@ -643,6 +643,6 @@ func (s WatchEvent) NewMetric() telegraf.Metric { "forks": s.Repository.Forks, "issues": s.Repository.Issues, } - m, _ := metric.New(meas, t, f, time.Now()) + m := metric.New(meas, t, f, time.Now()) return m } diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index ce36c141b2b52..c5a5c0a3eebed 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -1,11 +1,12 @@ package application_insights import ( - "github.com/influxdata/telegraf/testutil" "math" "testing" "time" + "github.com/influxdata/telegraf/testutil" + "github.com/Microsoft/ApplicationInsights-Go/appinsights" "github.com/influxdata/telegraf" @@ -143,13 +144,12 @@ func TestAggregateMetricCreated(t *testing.T) { transmitter.On("Track", mock.Anything) metricName := "ShouldBeAggregateMetric" - m, err := metric.New( + m := metric.New( metricName, nil, // tags tt.fields, now, ) - assert.NoError(err) ai := ApplicationInsights{ transmitter: transmitter, @@ -157,7 +157,7 @@ func TestAggregateMetricCreated(t *testing.T) { Log: testutil.Logger{}, } - err = ai.Connect() + err := ai.Connect() assert.NoError(err) mSet := []telegraf.Metric{m} @@ -202,13 +202,12 @@ func TestSimpleMetricCreated(t *testing.T) { transmitter.On("Track", mock.Anything) metricName := "ShouldBeSimpleMetric" - m, err := metric.New( + m := metric.New( metricName, nil, // tags tt.fields, now, ) - assert.NoError(err) ai := ApplicationInsights{ transmitter: transmitter, @@ -216,7 +215,7 @@ func TestSimpleMetricCreated(t *testing.T) { Log: testutil.Logger{}, } - err = ai.Connect() + err := ai.Connect() assert.NoError(err) mSet := []telegraf.Metric{m} @@ -273,13 +272,12 @@ func TestTagsAppliedToTelemetry(t *testing.T) { transmitter.On("Track", mock.Anything) metricName := "ShouldBeSimpleMetric" - m, err := metric.New( + m := metric.New( metricName, tt.tags, tt.fields, now, ) - assert.NoError(err) ai := ApplicationInsights{ transmitter: transmitter, @@ -287,7 +285,7 @@ func TestTagsAppliedToTelemetry(t *testing.T) { Log: testutil.Logger{}, } - err = ai.Connect() + err := ai.Connect() assert.NoError(err) mSet := []telegraf.Metric{m} @@ -310,13 +308,12 @@ func TestContextTagsSetOnSimpleTelemetry(t *testing.T) { transmitter := new(mocks.Transmitter) transmitter.On("Track", mock.Anything) - m, err := metric.New( + m := metric.New( "SimpleMetric", map[string]string{"kubernetes_container_name": "atcsvc", "kubernetes_pod_name": "bunkie17554"}, map[string]interface{}{"value": 23.0}, now, ) - assert.NoError(err) ai := ApplicationInsights{ transmitter: transmitter, @@ -329,7 +326,7 @@ func TestContextTagsSetOnSimpleTelemetry(t *testing.T) { Log: testutil.Logger{}, } - err = ai.Connect() + err := ai.Connect() assert.NoError(err) mSet := []telegraf.Metric{m} @@ -348,13 +345,12 @@ func TestContextTagsSetOnAggregateTelemetry(t *testing.T) { transmitter := new(mocks.Transmitter) transmitter.On("Track", mock.Anything) - m, err := metric.New( + m := metric.New( "AggregateMetric", map[string]string{"kubernetes_container_name": "atcsvc", "kubernetes_pod_name": "bunkie17554"}, map[string]interface{}{"value": 23.0, "count": 5}, now, ) - assert.NoError(err) ai := ApplicationInsights{ transmitter: transmitter, @@ -367,7 +363,7 @@ func TestContextTagsSetOnAggregateTelemetry(t *testing.T) { Log: testutil.Logger{}, } - err = ai.Connect() + err := ai.Connect() assert.NoError(err) mSet := []telegraf.Metric{m} diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index 193c9b2c40c4f..e513dbdca23e9 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -599,7 +599,7 @@ func (a *AzureMonitor) Push() []telegraf.Metric { tags[tag.name] = tag.value } - m, err := metric.New(agg.name, + m := metric.New(agg.name, tags, map[string]interface{}{ "min": agg.min, @@ -610,10 +610,6 @@ func (a *AzureMonitor) Push() []telegraf.Metric { tbucket, ) - if err != nil { - a.Log.Errorf("Could not create metric for aggregation %q; discarding point", agg.name) - } - metrics = append(metrics, m) } } diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index f0956689a5685..95987f591830d 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -83,7 +83,7 @@ func TestBuildMetricDatums(t *testing.T) { assert.Equal(0, len(datums), fmt.Sprintf("Valid point should not create a Datum {value: %v}", point)) } - statisticMetric, _ := metric.New( + statisticMetric := metric.New( "test1", map[string]string{"tag1": "value1"}, map[string]interface{}{"value_max": float64(10), "value_min": float64(0), "value_sum": float64(100), "value_count": float64(20)}, @@ -92,7 +92,7 @@ func TestBuildMetricDatums(t *testing.T) { datums := BuildMetricDatum(true, false, statisticMetric) assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", statisticMetric)) - multiFieldsMetric, _ := metric.New( + multiFieldsMetric := metric.New( "test1", map[string]string{"tag1": "value1"}, map[string]interface{}{"valueA": float64(10), "valueB": float64(0), "valueC": float64(100), "valueD": float64(20)}, @@ -101,7 +101,7 @@ func TestBuildMetricDatums(t *testing.T) { datums = BuildMetricDatum(true, false, multiFieldsMetric) assert.Equal(4, len(datums), fmt.Sprintf("Each field should create a Datum {value: %v}", multiFieldsMetric)) - multiStatisticMetric, _ := metric.New( + multiStatisticMetric := metric.New( "test1", map[string]string{"tag1": "value1"}, map[string]interface{}{ diff --git a/plugins/outputs/cratedb/cratedb_test.go b/plugins/outputs/cratedb/cratedb_test.go index 8d7fe1c80ae0a..66a2bfa794cc9 100644 --- a/plugins/outputs/cratedb/cratedb_test.go +++ b/plugins/outputs/cratedb/cratedb_test.go @@ -203,13 +203,12 @@ func Test_hashID(t *testing.T) { } for i, test := range tests { - m, err := metric.New( + m := metric.New( test.Name, test.Tags, test.Fields, time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), ) - require.NoError(t, err) if got := hashID(m); got != test.Want { t.Errorf("test #%d: got=%d want=%d", i, got, test.Want) } diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index 6dc558836a59c..a930a542d3692 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -2,16 +2,17 @@ package dynatrace import ( "encoding/json" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" "io/ioutil" "net/http" "net/http/httptest" "testing" "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestNilMetrics(t *testing.T) { @@ -142,14 +143,14 @@ func TestSendMetric(t *testing.T) { // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1", "nix": "nix"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, @@ -191,7 +192,7 @@ func TestSendSingleMetricWithUnorderedTags(t *testing.T) { // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"a": "test", "c": "test", "b": "test"}, map[string]interface{}{"myfield": float64(3.14)}, @@ -233,7 +234,7 @@ func TestSendMetricWithoutTags(t *testing.T) { // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{}, map[string]interface{}{"myfield": float64(3.14)}, @@ -275,7 +276,7 @@ func TestSendMetricWithUpperCaseTagKeys(t *testing.T) { // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"AAA": "test", "CcC": "test", "B B": "test"}, map[string]interface{}{"myfield": float64(3.14)}, @@ -317,7 +318,7 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{}, map[string]interface{}{"myfield": bool(true)}, diff --git a/plugins/outputs/execd/execd_test.go b/plugins/outputs/execd/execd_test.go index 46bde795ec2ed..c14339d31a85a 100644 --- a/plugins/outputs/execd/execd_test.go +++ b/plugins/outputs/execd/execd_test.go @@ -55,13 +55,12 @@ func TestExternalOutputWorks(t *testing.T) { wg.Done() } - m, err := metric.New( + m := metric.New( "cpu", map[string]string{"name": "cpu1"}, map[string]interface{}{"idle": 50, "sys": 30}, now, ) - require.NoError(t, err) require.NoError(t, e.Connect()) require.NoError(t, e.Write([]telegraf.Metric{m})) diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go index 38a9691e0b73a..1cb58b19485fc 100644 --- a/plugins/outputs/graphite/graphite_test.go +++ b/plugins/outputs/graphite/graphite_test.go @@ -2,13 +2,14 @@ package graphite import ( "bufio" - "github.com/influxdata/telegraf/testutil" "net" "net/textproto" "sync" "testing" "time" + "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" @@ -24,7 +25,7 @@ func TestGraphiteError(t *testing.T) { Log: testutil.Logger{}, } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"mymeasurement": float64(3.14)}, @@ -56,19 +57,19 @@ func TestGraphiteOK(t *testing.T) { } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, @@ -118,19 +119,19 @@ func TestGraphiteOkWithSeparatorDot(t *testing.T) { } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, @@ -180,19 +181,19 @@ func TestGraphiteOkWithSeparatorUnderscore(t *testing.T) { } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, @@ -246,19 +247,19 @@ func TestGraphiteOKWithMultipleTemplates(t *testing.T) { } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1", "mytag": "valuetag"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1", "mytag": "valuetag"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1", "mytag": "valuetag"}, map[string]interface{}{"value": float64(3.14)}, @@ -308,19 +309,19 @@ func TestGraphiteOkWithTags(t *testing.T) { } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, @@ -371,19 +372,19 @@ func TestGraphiteOkWithTagsAndSeparatorDot(t *testing.T) { } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, @@ -434,19 +435,19 @@ func TestGraphiteOkWithTagsAndSeparatorUnderscore(t *testing.T) { } // Init metrics - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "my_measurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index abcf2db33dabc..a09f7dd7ea28f 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -18,7 +18,7 @@ import ( ) func getMetric() telegraf.Metric { - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -26,9 +26,7 @@ func getMetric() telegraf.Metric { }, time.Unix(0, 0), ) - if err != nil { - panic(err) - } + return m } diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 2115ad5918a65..2f46e2441e937 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -490,7 +490,7 @@ func TestHTTP_Write(t *testing.T) { ctx := context.Background() - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -498,7 +498,6 @@ func TestHTTP_Write(t *testing.T) { }, time.Unix(0, 0), ) - require.NoError(t, err) metrics := []telegraf.Metric{m} client, err := influxdb.NewHTTPClient(tt.config) @@ -541,7 +540,7 @@ func TestHTTP_WritePathPrefix(t *testing.T) { ctx := context.Background() - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -549,7 +548,6 @@ func TestHTTP_WritePathPrefix(t *testing.T) { }, time.Unix(0, 0), ) - require.NoError(t, err) metrics := []telegraf.Metric{m} config := influxdb.HTTPConfig{ @@ -595,7 +593,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { ctx := context.Background() - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ diff --git a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go index 9de246ec7f3f7..d0f50bbfed94f 100644 --- a/plugins/outputs/influxdb/influxdb_test.go +++ b/plugins/outputs/influxdb/influxdb_test.go @@ -200,7 +200,7 @@ func TestWriteRecreateDatabaseIfDatabaseNotFound(t *testing.T) { err := output.Connect() require.NoError(t, err) - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -208,7 +208,6 @@ func TestWriteRecreateDatabaseIfDatabaseNotFound(t *testing.T) { }, time.Unix(0, 0), ) - require.NoError(t, err) metrics := []telegraf.Metric{m} err = output.Write(metrics) diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go index 1c5696cf10fe5..25e03f72173ee 100644 --- a/plugins/outputs/influxdb/udp_test.go +++ b/plugins/outputs/influxdb/udp_test.go @@ -23,7 +23,7 @@ var ( ) func getMetric() telegraf.Metric { - metric, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -31,10 +31,8 @@ func getMetric() telegraf.Metric { }, time.Unix(0, 0), ) - if err != nil { - panic(err) - } - return metric + + return m } func getURL() *url.URL { @@ -202,7 +200,7 @@ func TestUDP_ErrorLogging(t *testing.T) { }, metrics: []telegraf.Metric{ func() telegraf.Metric { - metric, _ := metric.New( + m := metric.New( "cpu", map[string]string{ "host": "example.org", @@ -210,7 +208,7 @@ func TestUDP_ErrorLogging(t *testing.T) { map[string]interface{}{}, time.Unix(0, 0), ) - return metric + return m }(), }, logContains: `could not serialize metric: "cpu,host=example.org": no serializable fields`, diff --git a/plugins/outputs/instrumental/instrumental_test.go b/plugins/outputs/instrumental/instrumental_test.go index 6752069bb226b..f72b9e90f0806 100644 --- a/plugins/outputs/instrumental/instrumental_test.go +++ b/plugins/outputs/instrumental/instrumental_test.go @@ -25,13 +25,13 @@ func TestWrite(t *testing.T) { } // Default to gauge - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1", "metric_type": "set"}, map[string]interface{}{"value": float64(3.14)}, @@ -42,27 +42,27 @@ func TestWrite(t *testing.T) { i.Write(metrics) // Counter and Histogram are increments - m3, _ := metric.New( + m3 := metric.New( "my_histogram", map[string]string{"host": "192.168.0.1", "metric_type": "histogram"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) // We will modify metric names that won't be accepted by Instrumental - m4, _ := metric.New( + m4 := metric.New( "bad_metric_name", map[string]string{"host": "192.168.0.1:8888::123", "metric_type": "counter"}, map[string]interface{}{"value": 1}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) // We will drop metric values that won't be accepted by Instrumental - m5, _ := metric.New( + m5 := metric.New( "bad_values", map[string]string{"host": "192.168.0.1", "metric_type": "counter"}, map[string]interface{}{"value": "\" 3:30\""}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m6, _ := metric.New( + m6 := metric.New( "my_counter", map[string]string{"host": "192.168.0.1", "metric_type": "counter"}, map[string]interface{}{"value": float64(3.14)}, diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index 2d786013c9a24..0edaed31f41f3 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -117,7 +117,7 @@ func TestRoutingKey(t *testing.T) { RoutingKey: "static", }, metric: func() telegraf.Metric { - m, _ := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -137,7 +137,7 @@ func TestRoutingKey(t *testing.T) { RoutingKey: "random", }, metric: func() telegraf.Metric { - m, _ := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ diff --git a/plugins/outputs/librato/librato_test.go b/plugins/outputs/librato/librato_test.go index 5a425afca5f1f..f88ced5b67f33 100644 --- a/plugins/outputs/librato/librato_test.go +++ b/plugins/outputs/librato/librato_test.go @@ -161,7 +161,7 @@ func TestBuildGauge(t *testing.T) { } func newHostMetric(value interface{}, name, host string) telegraf.Metric { - m, _ := metric.New( + m := metric.New( name, map[string]string{"host": host}, map[string]interface{}{"value": value}, @@ -172,19 +172,19 @@ func newHostMetric(value interface{}, name, host string) telegraf.Metric { func TestBuildGaugeWithSource(t *testing.T) { mtime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) - pt1, _ := metric.New( + pt1 := metric.New( "test1", map[string]string{"hostname": "192.168.0.1", "tag1": "value1"}, map[string]interface{}{"value": 0.0}, mtime, ) - pt2, _ := metric.New( + pt2 := metric.New( "test2", map[string]string{"hostnam": "192.168.0.1", "tag1": "value1"}, map[string]interface{}{"value": 1.0}, mtime, ) - pt3, _ := metric.New( + pt3 := metric.New( "test3", map[string]string{ "hostname": "192.168.0.1", @@ -193,7 +193,7 @@ func TestBuildGaugeWithSource(t *testing.T) { map[string]interface{}{"value": 1.0}, mtime, ) - pt4, _ := metric.New( + pt4 := metric.New( "test4", map[string]string{ "hostname": "192.168.0.1", diff --git a/plugins/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go index e53258c1bff7a..b56fb33e114be 100644 --- a/plugins/outputs/riemann/riemann_test.go +++ b/plugins/outputs/riemann/riemann_test.go @@ -1,10 +1,11 @@ package riemann import ( - "github.com/influxdata/telegraf/testutil" "testing" "time" + "github.com/influxdata/telegraf/testutil" + "github.com/amir/raidman" "github.com/influxdata/telegraf/metric" "github.com/stretchr/testify/require" @@ -76,7 +77,7 @@ func TestMetricEvents(t *testing.T) { } // build a single event - m, _ := metric.New( + m := metric.New( "test1", map[string]string{"tag1": "value1", "host": "abc123"}, map[string]interface{}{"value": 5.6}, @@ -101,7 +102,7 @@ func TestMetricEvents(t *testing.T) { require.Equal(t, expectedEvent, events[0]) // build 2 events - m, _ = metric.New( + m = metric.New( "test2", map[string]string{"host": "xyz987"}, map[string]interface{}{"point": 1}, @@ -136,7 +137,7 @@ func TestStateEvents(t *testing.T) { } // string metrics will be skipped unless explicitly enabled - m, _ := metric.New( + m := metric.New( "test", map[string]string{"host": "host"}, map[string]interface{}{"value": "running"}, diff --git a/plugins/outputs/signalfx/signalfx_test.go b/plugins/outputs/signalfx/signalfx_test.go index 3f081a16cbbd8..d21cff82f62a2 100644 --- a/plugins/outputs/signalfx/signalfx_test.go +++ b/plugins/outputs/signalfx/signalfx_test.go @@ -429,12 +429,10 @@ func TestSignalFx_SignalFx(t *testing.T) { measurements := []telegraf.Metric{} for _, measurement := range tt.measurements { - m, err := metric.New( + m := metric.New( measurement.name, measurement.tags, measurement.fields, measurement.time, measurement.tp, ) - if err != nil { - t.Errorf("Error creating measurement %v", measurement) - } + measurements = append(measurements, m) } @@ -594,12 +592,10 @@ func TestSignalFx_Errors(t *testing.T) { } for _, measurement := range tt.measurements { - m, err := metric.New( + m := metric.New( measurement.name, measurement.tags, measurement.fields, measurement.time, measurement.tp, ) - if err != nil { - t.Errorf("Error creating measurement %v", measurement) - } + s.Write([]telegraf.Metric{m}) } for !(len(s.client.(*errorsink).dps) == len(tt.want.datapoints) && len(s.client.(*errorsink).evs) == len(tt.want.events)) { diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index b7fc917b43368..5ce502bab2c0e 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -27,7 +27,7 @@ import ( ) func getMetric(t *testing.T) telegraf.Metric { - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -35,7 +35,6 @@ func getMetric(t *testing.T) telegraf.Metric { }, time.Unix(0, 0), ) - require.NoError(t, err) return m } @@ -44,7 +43,7 @@ func getMetrics(t *testing.T) []telegraf.Metric { var metrics = make([]telegraf.Metric, count) for i := 0; i < count; i++ { - m, err := metric.New( + m := metric.New( fmt.Sprintf("cpu-%d", i), map[string]string{ "ec2_instance": "aws-129038123", @@ -59,7 +58,6 @@ func getMetrics(t *testing.T) []telegraf.Metric { }, time.Unix(0, 0), ) - require.NoError(t, err) metrics[i] = m } return metrics diff --git a/plugins/outputs/syslog/syslog_mapper_test.go b/plugins/outputs/syslog/syslog_mapper_test.go index 300d5fcabe561..d4bbc1d6f0ed9 100644 --- a/plugins/outputs/syslog/syslog_mapper_test.go +++ b/plugins/outputs/syslog/syslog_mapper_test.go @@ -15,7 +15,7 @@ func TestSyslogMapperWithDefaults(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{}, map[string]interface{}{}, @@ -34,7 +34,7 @@ func TestSyslogMapperWithHostname(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "hostname": "testhost", @@ -54,7 +54,7 @@ func TestSyslogMapperWithHostnameSourceFallback(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "source": "sourcevalue", @@ -74,7 +74,7 @@ func TestSyslogMapperWithHostnameHostFallback(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "host": "hostvalue", @@ -94,7 +94,7 @@ func TestSyslogMapperWithDefaultSdid(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "appname": "testapp", @@ -130,7 +130,7 @@ func TestSyslogMapperWithDefaultSdidAndOtherSdids(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "appname": "testapp", @@ -167,7 +167,7 @@ func TestSyslogMapperWithNoSdids(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "appname": "testapp", diff --git a/plugins/outputs/syslog/syslog_test.go b/plugins/outputs/syslog/syslog_test.go index 7581a7b5380d5..d9e082e5f9042 100644 --- a/plugins/outputs/syslog/syslog_test.go +++ b/plugins/outputs/syslog/syslog_test.go @@ -20,7 +20,7 @@ func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) { s.initializeSyslogMapper() // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "hostname": "testhost", @@ -44,7 +44,7 @@ func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) { s.Framing = framing.NonTransparent // Init metrics - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{ "hostname": "testhost", @@ -92,7 +92,7 @@ func TestSyslogWriteWithUdp(t *testing.T) { func testSyslogWriteWithStream(t *testing.T, s *Syslog, lconn net.Conn) { metrics := []telegraf.Metric{} - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{}, map[string]interface{}{}, @@ -116,7 +116,7 @@ func testSyslogWriteWithStream(t *testing.T, s *Syslog, lconn net.Conn) { func testSyslogWriteWithPacket(t *testing.T, s *Syslog, lconn net.PacketConn) { s.Framing = framing.NonTransparent metrics := []telegraf.Metric{} - m1, _ := metric.New( + m1 := metric.New( "testmetric", map[string]string{}, map[string]interface{}{}, diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go index e96e05919b6ef..d745108dc7e94 100644 --- a/plugins/outputs/wavefront/wavefront_test.go +++ b/plugins/outputs/wavefront/wavefront_test.go @@ -1,14 +1,15 @@ package wavefront import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" "reflect" "strings" "testing" "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) // default config used by Tests @@ -32,7 +33,7 @@ func TestBuildMetrics(t *testing.T) { pathReplacer = strings.NewReplacer("_", w.MetricSeparator) - testMetric1, _ := metric.New( + testMetric1 := metric.New( "test.simple.metric", map[string]string{"tag1": "value1", "host": "testHost"}, map[string]interface{}{"value": 123}, @@ -121,7 +122,7 @@ func TestBuildMetricsWithSimpleFields(t *testing.T) { pathReplacer = strings.NewReplacer("_", w.MetricSeparator) - testMetric1, _ := metric.New( + testMetric1 := metric.New( "test.simple.metric", map[string]string{"tag1": "value1"}, map[string]interface{}{"value": 123}, diff --git a/plugins/parsers/collectd/parser.go b/plugins/parsers/collectd/parser.go index 692307abe868b..f0f9773472c4f 100644 --- a/plugins/parsers/collectd/parser.go +++ b/plugins/parsers/collectd/parser.go @@ -156,11 +156,7 @@ func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric } // Drop invalid points - m, err := metric.New(name, tags, fields, timestamp) - if err != nil { - log.Printf("E! Dropping metric %v: %v", name, err) - continue - } + m := metric.New(name, tags, fields, timestamp) metrics = append(metrics, m) } @@ -192,10 +188,7 @@ func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric } } - m, err := metric.New(name, tags, fields, timestamp) - if err != nil { - log.Printf("E! Dropping metric %v: %v", name, err) - } + m := metric.New(name, tags, fields, timestamp) metrics = append(metrics, m) default: diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 9b3219a0580fb..8f4969efb70bd 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -280,10 +280,8 @@ outer: delete(recordFields, p.TimestampColumn) delete(recordFields, p.MeasurementColumn) - m, err := metric.New(measurementName, tags, recordFields, metricTime) - if err != nil { - return nil, err - } + m := metric.New(measurementName, tags, recordFields, metricTime) + return m, nil } diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index f942eb0716346..8e4a5181c7969 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -226,10 +226,8 @@ func TestValueConversion(t *testing.T) { metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) - expectedMetric, err1 := metric.New("test_value", expectedTags, expectedFields, time.Unix(0, 0)) - returnedMetric, err2 := metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0)) - require.NoError(t, err1) - require.NoError(t, err2) + expectedMetric := metric.New("test_value", expectedTags, expectedFields, time.Unix(0, 0)) + returnedMetric := metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0)) //deep equal fields require.Equal(t, expectedMetric.Fields(), returnedMetric.Fields()) @@ -240,8 +238,7 @@ func TestValueConversion(t *testing.T) { metrics, err = p.Parse([]byte(testCSV)) require.NoError(t, err) - returnedMetric, err2 = metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0)) - require.NoError(t, err2) + returnedMetric = metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0)) //deep equal fields require.Equal(t, expectedMetric.Fields(), returnedMetric.Fields()) diff --git a/plugins/parsers/dropwizard/parser.go b/plugins/parsers/dropwizard/parser.go index 43b8c139f3220..2115bd8a07e78 100644 --- a/plugins/parsers/dropwizard/parser.go +++ b/plugins/parsers/dropwizard/parser.go @@ -227,11 +227,7 @@ func (p *parser) readDWMetrics(metricType string, dwms interface{}, metrics []te parsed, err := p.seriesParser.Parse([]byte(measurementName)) var m telegraf.Metric if err != nil || len(parsed) != 1 { - m, err = metric.New(measurementName, map[string]string{}, map[string]interface{}{}, tm) - if err != nil { - log.Printf("W! failed to create metric of type '%s': %s\n", metricType, err) - continue - } + m = metric.New(measurementName, map[string]string{}, map[string]interface{}{}, tm) } else { m = parsed[0] m.SetTime(tm) diff --git a/plugins/parsers/dropwizard/parser_test.go b/plugins/parsers/dropwizard/parser_test.go index df75c7f252969..b867670c9400e 100644 --- a/plugins/parsers/dropwizard/parser_test.go +++ b/plugins/parsers/dropwizard/parser_test.go @@ -497,13 +497,6 @@ func containsAll(t1 map[string]string, t2 map[string]string) bool { return true } -func Metric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - func NoError(t *testing.T, err error) { require.NoError(t, err) } @@ -519,17 +512,15 @@ func TestDropWizard(t *testing.T) { name: "minimal", input: []byte(`{"version": "3.0.0", "counters": {"cpu": {"value": 42}}}`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "metric_type": "counter", - }, - map[string]interface{}{ - "value": 42.0, - }, - testTimeFunc(), - ), + metric.New( + "cpu", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 42.0, + }, + testTimeFunc(), ), }, errFunc: NoError, @@ -538,17 +529,15 @@ func TestDropWizard(t *testing.T) { name: "name with space unescaped", input: []byte(`{"version": "3.0.0", "counters": {"hello world": {"value": 42}}}`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "hello world", - map[string]string{ - "metric_type": "counter", - }, - map[string]interface{}{ - "value": 42.0, - }, - testTimeFunc(), - ), + metric.New( + "hello world", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 42.0, + }, + testTimeFunc(), ), }, errFunc: NoError, @@ -564,17 +553,15 @@ func TestDropWizard(t *testing.T) { name: "name with space double slash escape", input: []byte(`{"version": "3.0.0", "counters": {"hello\\ world": {"value": 42}}}`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "hello world", - map[string]string{ - "metric_type": "counter", - }, - map[string]interface{}{ - "value": 42.0, - }, - testTimeFunc(), - ), + metric.New( + "hello world", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 42.0, + }, + testTimeFunc(), ), }, errFunc: NoError, diff --git a/plugins/parsers/form_urlencoded/parser.go b/plugins/parsers/form_urlencoded/parser.go index f38d87a80eac0..f26740709251a 100644 --- a/plugins/parsers/form_urlencoded/parser.go +++ b/plugins/parsers/form_urlencoded/parser.go @@ -47,12 +47,9 @@ func (p Parser) Parse(buf []byte) ([]telegraf.Metric, error) { tags[key] = value } - metric, err := metric.New(p.MetricName, tags, fields, time.Now().UTC()) - if err != nil { - return nil, err - } + m := metric.New(p.MetricName, tags, fields, time.Now().UTC()) - return []telegraf.Metric{metric}, nil + return []telegraf.Metric{m}, nil } // ParseLine delegates a single line of text to the Parse function diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go index 5c0f3a8070452..dac4f55f83f25 100644 --- a/plugins/parsers/graphite/parser.go +++ b/plugins/parsers/graphite/parser.go @@ -174,7 +174,7 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) { } } - return metric.New(measurement, tags, fieldValues, timestamp) + return metric.New(measurement, tags, fieldValues, timestamp), nil } // ApplyTemplate extracts the template fields from the given line and diff --git a/plugins/parsers/graphite/parser_test.go b/plugins/parsers/graphite/parser_test.go index 1bc4f6363c3e4..991cce661762c 100644 --- a/plugins/parsers/graphite/parser_test.go +++ b/plugins/parsers/graphite/parser_test.go @@ -472,11 +472,10 @@ func TestFilterMatchDefault(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("miss.servers.localhost.cpu_load", + exp := metric.New("miss.servers.localhost.cpu_load", map[string]string{}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("miss.servers.localhost.cpu_load 11 1435077219") assert.NoError(t, err) @@ -490,11 +489,10 @@ func TestFilterMatchMultipleMeasurement(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("cpu.cpu_load.10", + exp := metric.New("cpu.cpu_load.10", map[string]string{"host": "localhost"}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("servers.localhost.cpu.cpu_load.10 11 1435077219") assert.NoError(t, err) @@ -509,11 +507,10 @@ func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) { ) assert.NoError(t, err) - exp, err := metric.New("cpu_cpu_load_10", + exp := metric.New("cpu_cpu_load_10", map[string]string{"host": "localhost"}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("servers.localhost.cpu.cpu_load.10 11 1435077219") assert.NoError(t, err) @@ -527,7 +524,7 @@ func TestFilterMatchSingle(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("cpu_load", + exp := metric.New("cpu_load", map[string]string{"host": "localhost"}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) @@ -544,11 +541,10 @@ func TestParseNoMatch(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("servers.localhost.memory.VmallocChunk", + exp := metric.New("servers.localhost.memory.VmallocChunk", map[string]string{}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("servers.localhost.memory.VmallocChunk 11 1435077219") assert.NoError(t, err) @@ -562,11 +558,10 @@ func TestFilterMatchWildcard(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("cpu_load", + exp := metric.New("cpu_load", map[string]string{"host": "localhost"}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219") assert.NoError(t, err) @@ -582,11 +577,10 @@ func TestFilterMatchExactBeforeWildcard(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("cpu_load", + exp := metric.New("cpu_load", map[string]string{"host": "localhost"}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219") assert.NoError(t, err) @@ -631,11 +625,10 @@ func TestFilterMatchMultipleWildcards(t *testing.T) { t.Fatalf("unexpected error creating parser, got %v", err) } - exp, err := metric.New("cpu_load", + exp := metric.New("cpu_load", map[string]string{"host": "server01"}, map[string]interface{}{"value": float64(11)}, time.Unix(1435077219, 0)) - assert.NoError(t, err) m, err := p.ParseLine("servers.server01.cpu_load 11 1435077219") assert.NoError(t, err) diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index 7037473e851da..57e6269994ed2 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -370,10 +370,10 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } if p.UniqueTimestamp != "auto" { - return metric.New(p.Measurement, tags, fields, timestamp) + return metric.New(p.Measurement, tags, fields, timestamp), nil } - return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)) + return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)), nil } func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { diff --git a/plugins/parsers/influx/handler.go b/plugins/parsers/influx/handler.go index efd7329ca9c29..7d1a3af3e12a1 100644 --- a/plugins/parsers/influx/handler.go +++ b/plugins/parsers/influx/handler.go @@ -46,10 +46,9 @@ func (h *MetricHandler) Metric() (telegraf.Metric, error) { } func (h *MetricHandler) SetMeasurement(name []byte) error { - var err error - h.metric, err = metric.New(nameUnescape(name), + h.metric = metric.New(nameUnescape(name), nil, nil, time.Time{}) - return err + return nil } func (h *MetricHandler) AddTag(key []byte, value []byte) error { diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index 5c780f070fce5..c5a39801782c1 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -15,13 +15,6 @@ import ( "github.com/stretchr/testify/require" ) -func Metric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - var DefaultTime = func() time.Time { return time.Unix(42, 0) } @@ -37,15 +30,13 @@ var ptests = []struct { name: "minimal", input: []byte("cpu value=42 0"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, err: nil, @@ -54,15 +45,13 @@ var ptests = []struct { name: "minimal with newline", input: []byte("cpu value=42 0\n"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, err: nil, @@ -71,15 +60,13 @@ var ptests = []struct { name: "measurement escape space", input: []byte(`c\ pu value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "c pu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "c pu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -88,15 +75,13 @@ var ptests = []struct { name: "measurement escape comma", input: []byte(`c\,pu value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "c,pu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "c,pu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -105,18 +90,16 @@ var ptests = []struct { name: "tags", input: []byte(`cpu,cpu=cpu0,host=localhost value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "cpu": "cpu0", - "host": "localhost", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + "cpu": "cpu0", + "host": "localhost", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -125,17 +108,15 @@ var ptests = []struct { name: "tags escape unescapable", input: []byte(`cpu,ho\st=localhost value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - `ho\st`: "localhost", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + `ho\st`: "localhost", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -144,17 +125,15 @@ var ptests = []struct { name: "tags escape equals", input: []byte(`cpu,ho\=st=localhost value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "ho=st": "localhost", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + "ho=st": "localhost", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -163,17 +142,15 @@ var ptests = []struct { name: "tags escape comma", input: []byte(`cpu,ho\,st=localhost value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "ho,st": "localhost", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + "ho,st": "localhost", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -182,17 +159,15 @@ var ptests = []struct { name: "tag value escape space", input: []byte(`cpu,host=two\ words value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "host": "two words", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + "host": "two words", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -201,17 +176,15 @@ var ptests = []struct { name: "tag value double escape space", input: []byte(`cpu,host=two\\ words value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "host": `two\ words`, - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + "host": `two\ words`, + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -220,17 +193,15 @@ var ptests = []struct { name: "tag value triple escape space", input: []byte(`cpu,host=two\\\ words value=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "host": `two\\ words`, - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{ + "host": `two\\ words`, + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -239,15 +210,13 @@ var ptests = []struct { name: "field key escape not escapable", input: []byte(`cpu va\lue=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - `va\lue`: 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + `va\lue`: 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -256,15 +225,13 @@ var ptests = []struct { name: "field key escape equals", input: []byte(`cpu va\=lue=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - `va=lue`: 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + `va=lue`: 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -273,15 +240,13 @@ var ptests = []struct { name: "field key escape comma", input: []byte(`cpu va\,lue=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - `va,lue`: 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + `va,lue`: 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -290,15 +255,13 @@ var ptests = []struct { name: "field key escape space", input: []byte(`cpu va\ lue=42`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - `va lue`: 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + `va lue`: 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -307,15 +270,13 @@ var ptests = []struct { name: "field int", input: []byte("cpu value=42i"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(42, 0), ), }, err: nil, @@ -336,15 +297,13 @@ var ptests = []struct { name: "field int max value", input: []byte("cpu value=9223372036854775807i"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": int64(9223372036854775807), - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": int64(9223372036854775807), + }, + time.Unix(42, 0), ), }, err: nil, @@ -353,15 +312,13 @@ var ptests = []struct { name: "field uint", input: []byte("cpu value=42u"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": uint64(42), - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": uint64(42), + }, + time.Unix(42, 0), ), }, err: nil, @@ -382,15 +339,13 @@ var ptests = []struct { name: "field uint max value", input: []byte("cpu value=18446744073709551615u"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": uint64(18446744073709551615), - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": uint64(18446744073709551615), + }, + time.Unix(42, 0), ), }, err: nil, @@ -399,15 +354,13 @@ var ptests = []struct { name: "field boolean", input: []byte("cpu value=true"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": true, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": true, + }, + time.Unix(42, 0), ), }, err: nil, @@ -416,15 +369,13 @@ var ptests = []struct { name: "field string", input: []byte(`cpu value="42"`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": "42", - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": "42", + }, + time.Unix(42, 0), ), }, err: nil, @@ -433,15 +384,13 @@ var ptests = []struct { name: "field string escape quote", input: []byte(`cpu value="how\"dy"`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - `value`: `how"dy`, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + `value`: `how"dy`, + }, + time.Unix(42, 0), ), }, err: nil, @@ -450,15 +399,13 @@ var ptests = []struct { name: "field string escape backslash", input: []byte(`cpu value="how\\dy"`), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - `value`: `how\dy`, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + `value`: `how\dy`, + }, + time.Unix(42, 0), ), }, err: nil, @@ -467,15 +414,13 @@ var ptests = []struct { name: "field string newline", input: []byte("cpu value=\"4\n2\""), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": "4\n2", - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": "4\n2", + }, + time.Unix(42, 0), ), }, err: nil, @@ -484,15 +429,13 @@ var ptests = []struct { name: "no timestamp", input: []byte("cpu value=42"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -504,15 +447,13 @@ var ptests = []struct { return time.Unix(42, 123456789) }, metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 123456789), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 123456789), ), }, err: nil, @@ -521,25 +462,21 @@ var ptests = []struct { name: "multiple lines", input: []byte("cpu value=42\ncpu value=42"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), ), }, err: nil, @@ -560,69 +497,67 @@ var ptests = []struct { name: "procstat", input: []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "procstat", - map[string]string{ - "exe": "bash", - "process_name": "bash", - }, - map[string]interface{}{ - "cpu_time": 0, - "cpu_time_guest": float64(0), - "cpu_time_guest_nice": float64(0), - "cpu_time_idle": float64(0), - "cpu_time_iowait": float64(0), - "cpu_time_irq": float64(0), - "cpu_time_nice": float64(0), - "cpu_time_soft_irq": float64(0), - "cpu_time_steal": float64(0), - "cpu_time_system": float64(0), - "cpu_time_user": float64(0.02), - "cpu_usage": float64(0), - "involuntary_context_switches": 2, - "memory_data": 1576960, - "memory_locked": 0, - "memory_rss": 5103616, - "memory_stack": 139264, - "memory_swap": 0, - "memory_vms": 21659648, - "nice_priority": 20, - "num_fds": 4, - "num_threads": 1, - "pid": 29417, - "read_bytes": 0, - "read_count": 259, - "realtime_priority": 0, - "rlimit_cpu_time_hard": 2147483647, - "rlimit_cpu_time_soft": 2147483647, - "rlimit_file_locks_hard": 2147483647, - "rlimit_file_locks_soft": 2147483647, - "rlimit_memory_data_hard": 2147483647, - "rlimit_memory_data_soft": 2147483647, - "rlimit_memory_locked_hard": 65536, - "rlimit_memory_locked_soft": 65536, - "rlimit_memory_rss_hard": 2147483647, - "rlimit_memory_rss_soft": 2147483647, - "rlimit_memory_stack_hard": 2147483647, - "rlimit_memory_stack_soft": 8388608, - "rlimit_memory_vms_hard": 2147483647, - "rlimit_memory_vms_soft": 2147483647, - "rlimit_nice_priority_hard": 0, - "rlimit_nice_priority_soft": 0, - "rlimit_num_fds_hard": 4096, - "rlimit_num_fds_soft": 1024, - "rlimit_realtime_priority_hard": 0, - "rlimit_realtime_priority_soft": 0, - "rlimit_signals_pending_hard": 78994, - "rlimit_signals_pending_soft": 78994, - "signals_pending": 0, - "voluntary_context_switches": 42, - "write_bytes": 106496, - "write_count": 35, - }, - time.Unix(0, 1517620624000000000), - ), + metric.New( + "procstat", + map[string]string{ + "exe": "bash", + "process_name": "bash", + }, + map[string]interface{}{ + "cpu_time": 0, + "cpu_time_guest": float64(0), + "cpu_time_guest_nice": float64(0), + "cpu_time_idle": float64(0), + "cpu_time_iowait": float64(0), + "cpu_time_irq": float64(0), + "cpu_time_nice": float64(0), + "cpu_time_soft_irq": float64(0), + "cpu_time_steal": float64(0), + "cpu_time_system": float64(0), + "cpu_time_user": float64(0.02), + "cpu_usage": float64(0), + "involuntary_context_switches": 2, + "memory_data": 1576960, + "memory_locked": 0, + "memory_rss": 5103616, + "memory_stack": 139264, + "memory_swap": 0, + "memory_vms": 21659648, + "nice_priority": 20, + "num_fds": 4, + "num_threads": 1, + "pid": 29417, + "read_bytes": 0, + "read_count": 259, + "realtime_priority": 0, + "rlimit_cpu_time_hard": 2147483647, + "rlimit_cpu_time_soft": 2147483647, + "rlimit_file_locks_hard": 2147483647, + "rlimit_file_locks_soft": 2147483647, + "rlimit_memory_data_hard": 2147483647, + "rlimit_memory_data_soft": 2147483647, + "rlimit_memory_locked_hard": 65536, + "rlimit_memory_locked_soft": 65536, + "rlimit_memory_rss_hard": 2147483647, + "rlimit_memory_rss_soft": 2147483647, + "rlimit_memory_stack_hard": 2147483647, + "rlimit_memory_stack_soft": 8388608, + "rlimit_memory_vms_hard": 2147483647, + "rlimit_memory_vms_soft": 2147483647, + "rlimit_nice_priority_hard": 0, + "rlimit_nice_priority_soft": 0, + "rlimit_num_fds_hard": 4096, + "rlimit_num_fds_soft": 1024, + "rlimit_realtime_priority_hard": 0, + "rlimit_realtime_priority_soft": 0, + "rlimit_signals_pending_hard": 78994, + "rlimit_signals_pending_soft": 78994, + "signals_pending": 0, + "voluntary_context_switches": 42, + "write_bytes": 106496, + "write_count": 35, + }, + time.Unix(0, 1517620624000000000), ), }, err: nil, @@ -712,13 +647,11 @@ func TestSeriesParser(t *testing.T) { name: "minimal", input: []byte("cpu"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{}, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, 0), ), }, }, @@ -726,16 +659,14 @@ func TestSeriesParser(t *testing.T) { name: "tags", input: []byte("cpu,a=x,b=y"), metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{ - "a": "x", - "b": "y", - }, - map[string]interface{}{}, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{ + "a": "x", + "b": "y", + }, + map[string]interface{}{}, + time.Unix(0, 0), ), }, }, diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index 1830e2a6a4d0e..a651ae5343770 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -142,11 +142,9 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ( } tags, nFields := p.switchFieldToTag(tags, f.Fields) - metric, err := metric.New(name, tags, nFields, timestamp) - if err != nil { - return nil, err - } - return []telegraf.Metric{metric}, nil + m := metric.New(name, tags, nFields, timestamp) + + return []telegraf.Metric{m}, nil } // will take in field map with strings and bools, diff --git a/plugins/parsers/logfmt/parser.go b/plugins/parsers/logfmt/parser.go index 603dbbae862b9..01da916a2850d 100644 --- a/plugins/parsers/logfmt/parser.go +++ b/plugins/parsers/logfmt/parser.go @@ -67,10 +67,7 @@ func (p *Parser) Parse(b []byte) ([]telegraf.Metric, error) { continue } - m, err := metric.New(p.MetricName, map[string]string{}, fields, p.Now()) - if err != nil { - return nil, err - } + m := metric.New(p.MetricName, map[string]string{}, fields, p.Now()) metrics = append(metrics, m) } diff --git a/plugins/parsers/logfmt/parser_test.go b/plugins/parsers/logfmt/parser_test.go index dfacd8c8fae0d..f2a7174891fa9 100644 --- a/plugins/parsers/logfmt/parser_test.go +++ b/plugins/parsers/logfmt/parser_test.go @@ -11,10 +11,8 @@ import ( func MustMetric(t *testing.T, m *testutil.Metric) telegraf.Metric { t.Helper() - v, err := metric.New(m.Measurement, m.Tags, m.Fields, m.Time) - if err != nil { - t.Fatal(err) - } + v := metric.New(m.Measurement, m.Tags, m.Fields, m.Time) + return v } diff --git a/plugins/parsers/nagios/parser.go b/plugins/parsers/nagios/parser.go index b347195ab3c37..81e116178bf2b 100644 --- a/plugins/parsers/nagios/parser.go +++ b/plugins/parsers/nagios/parser.go @@ -65,10 +65,8 @@ func TryAddState(runErr error, metrics []telegraf.Metric) ([]telegraf.Metric, er f := map[string]interface{}{ "state": state, } - m, err := metric.New("nagios_state", nil, f, ts) - if err != nil { - return metrics, err - } + m := metric.New("nagios_state", nil, f, ts) + metrics = append(metrics, m) return metrics, nil } @@ -166,12 +164,8 @@ func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) { fields["long_service_output"] = longmsg.String() } - m, err := metric.New("nagios_state", nil, fields, ts) - if err == nil { - metrics = append(metrics, m) - } else { - log.Printf("E! [parser.nagios] failed to add nagios_state: %s\n", err) - } + m := metric.New("nagios_state", nil, fields, ts) + metrics = append(metrics, m) return metrics, nil } @@ -247,12 +241,10 @@ func parsePerfData(perfdatas string, timestamp time.Time) ([]telegraf.Metric, er } // Create metric - metric, err := metric.New("nagios", tags, fields, timestamp) - if err != nil { - return nil, err - } + m := metric.New("nagios", tags, fields, timestamp) + // Add Metric - metrics = append(metrics, metric) + metrics = append(metrics, m) } return metrics, nil diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go index d269debd62ce7..2173af15214ba 100644 --- a/plugins/parsers/nagios/parser_test.go +++ b/plugins/parsers/nagios/parser_test.go @@ -74,10 +74,7 @@ func (b *metricBuilder) f(k string, v interface{}) *metricBuilder { } func (b *metricBuilder) b() telegraf.Metric { - m, err := metric.New(b.name, b.tags, b.fields, b.timestamp) - if err != nil { - panic(err) - } + m := metric.New(b.name, b.tags, b.fields, b.timestamp) return m } diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go index bf5ebf7f12f19..e55789f7957b4 100644 --- a/plugins/parsers/prometheus/parser.go +++ b/plugins/parsers/prometheus/parser.go @@ -4,13 +4,14 @@ import ( "bufio" "bytes" "fmt" - "github.com/matttproud/golang_protobuf_extensions/pbutil" "io" "math" "mime" "net/http" "time" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" @@ -80,10 +81,8 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { // converting to telegraf metric if len(fields) > 0 { t := getTimestamp(m, now) - metric, err := metric.New("prometheus", tags, fields, t, common.ValueType(mf.GetType())) - if err == nil { - metrics = append(metrics, metric) - } + m := metric.New("prometheus", tags, fields, t, common.ValueType(mf.GetType())) + metrics = append(metrics, m) } } } @@ -121,10 +120,8 @@ func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, met fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) - met, err := metric.New("prometheus", tags, fields, t, common.ValueType(metricType)) - if err == nil { - metrics = append(metrics, met) - } + met := metric.New("prometheus", tags, fields, t, common.ValueType(metricType)) + metrics = append(metrics, met) for _, q := range m.GetSummary().Quantile { newTags := tags @@ -133,10 +130,8 @@ func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, met newTags["quantile"] = fmt.Sprint(q.GetQuantile()) fields[metricName] = float64(q.GetValue()) - quantileMetric, err := metric.New("prometheus", newTags, fields, t, common.ValueType(metricType)) - if err == nil { - metrics = append(metrics, quantileMetric) - } + quantileMetric := metric.New("prometheus", newTags, fields, t, common.ValueType(metricType)) + metrics = append(metrics, quantileMetric) } return metrics } @@ -150,10 +145,8 @@ func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metri fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) - met, err := metric.New("prometheus", tags, fields, t, common.ValueType(metricType)) - if err == nil { - metrics = append(metrics, met) - } + met := metric.New("prometheus", tags, fields, t, common.ValueType(metricType)) + metrics = append(metrics, met) for _, b := range m.GetHistogram().Bucket { newTags := tags @@ -161,10 +154,8 @@ func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metri newTags["le"] = fmt.Sprint(b.GetUpperBound()) fields[metricName+"_bucket"] = float64(b.GetCumulativeCount()) - histogramMetric, err := metric.New("prometheus", newTags, fields, t, common.ValueType(metricType)) - if err == nil { - metrics = append(metrics, histogramMetric) - } + histogramMetric := metric.New("prometheus", newTags, fields, t, common.ValueType(metricType)) + metrics = append(metrics, histogramMetric) } return metrics } diff --git a/plugins/parsers/prometheusremotewrite/parser.go b/plugins/parsers/prometheusremotewrite/parser.go index 90921dfb14e7a..9f0a08a682a19 100644 --- a/plugins/parsers/prometheusremotewrite/parser.go +++ b/plugins/parsers/prometheusremotewrite/parser.go @@ -55,10 +55,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { if s.Timestamp > 0 { t = time.Unix(0, s.Timestamp*1000000) } - m, err := metric.New("prometheus_remote_write", tags, fields, t) - if err != nil { - return nil, fmt.Errorf("unable to convert to telegraf metric: %s", err) - } + m := metric.New("prometheus_remote_write", tags, fields, t) metrics = append(metrics, m) } } diff --git a/plugins/parsers/value/parser.go b/plugins/parsers/value/parser.go index d4a9046e020a9..dc496663e98d9 100644 --- a/plugins/parsers/value/parser.go +++ b/plugins/parsers/value/parser.go @@ -48,13 +48,10 @@ func (v *ValueParser) Parse(buf []byte) ([]telegraf.Metric, error) { } fields := map[string]interface{}{v.FieldName: value} - metric, err := metric.New(v.MetricName, v.DefaultTags, + m := metric.New(v.MetricName, v.DefaultTags, fields, time.Now().UTC()) - if err != nil { - return nil, err - } - return []telegraf.Metric{metric}, nil + return []telegraf.Metric{m}, nil } func (v *ValueParser) ParseLine(line string) (telegraf.Metric, error) { diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go index 5fba2d5ded74b..ad3e704c58390 100644 --- a/plugins/parsers/wavefront/parser.go +++ b/plugins/parsers/wavefront/parser.go @@ -152,10 +152,7 @@ func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.M } fields["value"] = v - m, err := metric.New(point.Name, tags, fields, time.Unix(point.Timestamp, 0)) - if err != nil { - return nil, err - } + m := metric.New(point.Name, tags, fields, time.Unix(point.Timestamp, 0)) metrics = append(metrics, m) } diff --git a/plugins/parsers/wavefront/parser_test.go b/plugins/parsers/wavefront/parser_test.go index 1f1801730c73b..0165b499946e0 100644 --- a/plugins/parsers/wavefront/parser_test.go +++ b/plugins/parsers/wavefront/parser_test.go @@ -14,77 +14,65 @@ func TestParse(t *testing.T) { parsedMetrics, err := parser.Parse([]byte("test.metric 1")) assert.NoError(t, err) - testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) - assert.NoError(t, err) + testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) assert.Equal(t, parsedMetrics[0].Name(), testMetric.Name()) assert.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields()) parsedMetrics, err = parser.Parse([]byte("\u2206test.delta 1 1530939936")) assert.NoError(t, err) - testMetric, err = metric.New("\u2206test.delta", map[string]string{}, + testMetric = metric.New("\u2206test.delta", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1 1530939936")) assert.NoError(t, err) - testMetric, err = metric.New("\u0394test.delta", map[string]string{}, + testMetric = metric.New("\u0394test.delta", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1.234 1530939936 source=\"mysource\" tag2=value2")) assert.NoError(t, err) - testMetric, err = metric.New("\u0394test.delta", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("\u0394test.delta", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" -1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": -1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": -1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e04 1530939936 \"source\"=\"mysource\" tag2=value2")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e04}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e04}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e-04 1530939936 \"source\"=\"mysource\" tag2=value2")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e-04}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e-04}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) } @@ -93,39 +81,33 @@ func TestParseLine(t *testing.T) { parsedMetric, err := parser.ParseLine("test.metric 1") assert.NoError(t, err) - testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) - assert.NoError(t, err) + testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) assert.Equal(t, parsedMetric.Name(), testMetric.Name()) assert.Equal(t, parsedMetric.Fields(), testMetric.Fields()) parsedMetric, err = parser.ParseLine("test.metric 1 1530939936") assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 source=mysource") assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 source=\"mysource\"") assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2") assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ") assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) } @@ -134,10 +116,8 @@ func TestParseMultiple(t *testing.T) { parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936")) assert.NoError(t, err) - testMetric1, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) - assert.NoError(t, err) - testMetric2, err := metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric1 := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) + testMetric2 := metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0)) testMetrics := []telegraf.Metric{testMetric1, testMetric2} assert.Equal(t, parsedMetrics[0].Name(), testMetrics[0].Name()) assert.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields()) @@ -145,30 +125,23 @@ func TestParseMultiple(t *testing.T) { parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) assert.NoError(t, err) - testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) - testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) testMetrics = []telegraf.Metric{testMetric1, testMetric2} assert.EqualValues(t, parsedMetrics, testMetrics) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\ntest.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")) assert.NoError(t, err) - testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) - testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) testMetrics = []telegraf.Metric{testMetric1, testMetric2} assert.EqualValues(t, parsedMetrics, testMetrics) parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"\ntest.metric3 333 1530939936 tagit=valueit")) assert.NoError(t, err) - testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) - testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) - testMetric3, err := metric.New("test.metric3", map[string]string{"tagit": "valueit"}, map[string]interface{}{"value": 333.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + testMetric3 := metric.New("test.metric3", map[string]string{"tagit": "valueit"}, map[string]interface{}{"value": 333.}, time.Unix(1530939936, 0)) testMetrics = []telegraf.Metric{testMetric1, testMetric2, testMetric3} assert.EqualValues(t, parsedMetrics, testMetrics) } @@ -178,14 +151,12 @@ func TestParseSpecial(t *testing.T) { parsedMetric, err := parser.ParseLine("\"test.metric\" 1 1530939936") assert.NoError(t, err) - testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 tag1=\"val\\\"ue1\"") assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"tag1": "val\\\"ue1"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"tag1": "val\\\"ue1"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetric, testMetric) } @@ -225,19 +196,16 @@ func TestParseDefaultTags(t *testing.T) { parsedMetrics, err := parser.Parse([]byte("test.metric 1 1530939936")) assert.NoError(t, err) - testMetric, err := metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric := metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2", "source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2", "source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\"")) assert.NoError(t, err) - testMetric, err = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.NoError(t, err) + testMetric = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) assert.EqualValues(t, parsedMetrics[0], testMetric) } diff --git a/plugins/parsers/xml/parser.go b/plugins/parsers/xml/parser.go index 75c79fbd71bae..9282aab1f2e25 100644 --- a/plugins/parsers/xml/parser.go +++ b/plugins/parsers/xml/parser.go @@ -292,7 +292,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c } } - return metric.New(metricname, tags, fields, timestamp) + return metric.New(metricname, tags, fields, timestamp), nil } func getNodePath(node, relativeTo *xmlquery.Node, sep string) string { diff --git a/plugins/processors/clone/clone_test.go b/plugins/processors/clone/clone_test.go index f1b8dc5b29c03..20bec925e7acb 100644 --- a/plugins/processors/clone/clone_test.go +++ b/plugins/processors/clone/clone_test.go @@ -10,12 +10,12 @@ import ( ) func createTestMetric() telegraf.Metric { - metric, _ := metric.New("m1", + m := metric.New("m1", map[string]string{"metric_tag": "from_metric"}, map[string]interface{}{"value": int64(1)}, time.Now(), ) - return metric + return m } func calculateProcessedTags(processor Clone, metric telegraf.Metric) map[string]string { diff --git a/plugins/processors/date/date_test.go b/plugins/processors/date/date_test.go index c6d98051e7dec..aa7efc64edbff 100644 --- a/plugins/processors/date/date_test.go +++ b/plugins/processors/date/date_test.go @@ -19,7 +19,7 @@ func MustMetric(name string, tags map[string]string, fields map[string]interface if fields == nil { fields = map[string]interface{}{} } - m, _ := metric.New(name, tags, fields, metricTime) + m := metric.New(name, tags, fields, metricTime) return m } diff --git a/plugins/processors/dedup/dedup_test.go b/plugins/processors/dedup/dedup_test.go index 80dde9057b0da..4f3d109345b32 100644 --- a/plugins/processors/dedup/dedup_test.go +++ b/plugins/processors/dedup/dedup_test.go @@ -14,7 +14,7 @@ import ( const metricName = "m1" func createMetric(value int64, when time.Time) telegraf.Metric { - m, _ := metric.New(metricName, + m := metric.New(metricName, map[string]string{"tag": "tag_value"}, map[string]interface{}{"value": value}, when, @@ -162,7 +162,7 @@ func TestSameTimestamp(t *testing.T) { var in telegraf.Metric var out []telegraf.Metric - in, _ = metric.New("metric", + in = metric.New("metric", map[string]string{"tag": "value"}, map[string]interface{}{"foo": 1}, // field now, @@ -170,7 +170,7 @@ func TestSameTimestamp(t *testing.T) { out = dedup.Apply(in) require.Equal(t, []telegraf.Metric{in}, out) // pass - in, _ = metric.New("metric", + in = metric.New("metric", map[string]string{"tag": "value"}, map[string]interface{}{"bar": 1}, // different field now, @@ -178,7 +178,7 @@ func TestSameTimestamp(t *testing.T) { out = dedup.Apply(in) require.Equal(t, []telegraf.Metric{in}, out) // pass - in, _ = metric.New("metric", + in = metric.New("metric", map[string]string{"tag": "value"}, map[string]interface{}{"bar": 2}, // same field different value now, @@ -186,7 +186,7 @@ func TestSameTimestamp(t *testing.T) { out = dedup.Apply(in) require.Equal(t, []telegraf.Metric{in}, out) // pass - in, _ = metric.New("metric", + in = metric.New("metric", map[string]string{"tag": "value"}, map[string]interface{}{"bar": 2}, // same field same value now, diff --git a/plugins/processors/enum/enum_test.go b/plugins/processors/enum/enum_test.go index fdf9131ef3d8d..f8e3a34d0381c 100644 --- a/plugins/processors/enum/enum_test.go +++ b/plugins/processors/enum/enum_test.go @@ -11,7 +11,7 @@ import ( ) func createTestMetric() telegraf.Metric { - metric, _ := metric.New("m1", + m := metric.New("m1", map[string]string{ "tag": "tag_value", "duplicate_tag": "tag_value", @@ -26,7 +26,7 @@ func createTestMetric() telegraf.Metric { }, time.Now(), ) - return metric + return m } func calculateProcessedValues(mapper EnumMapper, metric telegraf.Metric) map[string]interface{} { diff --git a/plugins/processors/execd/execd_test.go b/plugins/processors/execd/execd_test.go index 3cccc9fbb156e..c226725e1844e 100644 --- a/plugins/processors/execd/execd_test.go +++ b/plugins/processors/execd/execd_test.go @@ -34,7 +34,7 @@ func TestExternalProcessorWorks(t *testing.T) { orig := now metrics := []telegraf.Metric{} for i := 0; i < 10; i++ { - m, err := metric.New("test", + m := metric.New("test", map[string]string{ "city": "Toronto", }, @@ -43,7 +43,6 @@ func TestExternalProcessorWorks(t *testing.T) { "count": 1, }, now) - require.NoError(t, err) metrics = append(metrics, m) now = now.Add(1) @@ -96,7 +95,7 @@ func TestParseLinesWithNewLines(t *testing.T) { now := time.Now() orig := now - m, err := metric.New("test", + m := metric.New("test", map[string]string{ "author": "Mr. Gopher", }, @@ -106,8 +105,6 @@ func TestParseLinesWithNewLines(t *testing.T) { }, now) - require.NoError(t, err) - e.Add(m, acc) acc.Wait(1) diff --git a/plugins/processors/override/override_test.go b/plugins/processors/override/override_test.go index 433751af96255..5e3c118e8f268 100644 --- a/plugins/processors/override/override_test.go +++ b/plugins/processors/override/override_test.go @@ -10,12 +10,12 @@ import ( ) func createTestMetric() telegraf.Metric { - metric, _ := metric.New("m1", + m := metric.New("m1", map[string]string{"metric_tag": "from_metric"}, map[string]interface{}{"value": int64(1)}, time.Now(), ) - return metric + return m } func calculateProcessedTags(processor Override, metric telegraf.Metric) map[string]string { diff --git a/plugins/processors/parser/parser_test.go b/plugins/processors/parser/parser_test.go index ac042848f67ec..512a6118dd0f4 100644 --- a/plugins/processors/parser/parser_test.go +++ b/plugins/processors/parser/parser_test.go @@ -21,13 +21,6 @@ func compareMetrics(t *testing.T, expected, actual []telegraf.Metric) { } } -func Metric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - func TestApply(t *testing.T) { tests := []struct { name string @@ -51,18 +44,17 @@ func TestApply(t *testing.T) { "method", }, }, - input: Metric( - metric.New( - "singleField", - map[string]string{ - "some": "tag", - }, - map[string]interface{}{ - "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "singleField", map[string]string{ "ts": "2018-07-24T19:43:40.275Z", @@ -71,7 +63,7 @@ func TestApply(t *testing.T) { "method": "POST", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -88,18 +80,17 @@ func TestApply(t *testing.T) { "method", }, }, - input: Metric( - metric.New( - "singleField", - map[string]string{ - "some": "tag", - }, - map[string]interface{}{ - "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "singleField", map[string]string{ "some": "tag", @@ -111,7 +102,7 @@ func TestApply(t *testing.T) { map[string]interface{}{ "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -127,18 +118,17 @@ func TestApply(t *testing.T) { "method", }, }, - input: Metric( - metric.New( - "singleField", - map[string]string{ - "some": "tag", - }, - map[string]interface{}{ - "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "singleField", map[string]string{ "some": "tag", @@ -146,8 +136,8 @@ func TestApply(t *testing.T) { map[string]interface{}{ "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, }, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "singleField", map[string]string{ "ts": "2018-07-24T19:43:40.275Z", @@ -156,7 +146,7 @@ func TestApply(t *testing.T) { "method": "POST", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -166,23 +156,22 @@ func TestApply(t *testing.T) { DataFormat: "influx", }, dropOriginal: false, - input: Metric( - metric.New( - "influxField", - map[string]string{}, - map[string]interface{}{ - "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", - }, - time.Unix(0, 0))), + input: metric.New( + "influxField", + map[string]string{}, + map[string]interface{}{ + "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "influxField", map[string]string{}, map[string]interface{}{ "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", }, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "deal", map[string]string{ "computer_name": "hosta", @@ -190,7 +179,7 @@ func TestApply(t *testing.T) { map[string]interface{}{ "message": "stuff", }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -201,18 +190,17 @@ func TestApply(t *testing.T) { config: parsers.Config{ DataFormat: "influx", }, - input: Metric( - metric.New( - "influxField", - map[string]string{ - "some": "tag", - }, - map[string]interface{}{ - "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", - }, - time.Unix(0, 0))), + input: metric.New( + "influxField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "deal", map[string]string{ "computer_name": "hosta", @@ -221,7 +209,7 @@ func TestApply(t *testing.T) { map[string]interface{}{ "message": "stuff", }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -232,16 +220,15 @@ func TestApply(t *testing.T) { DataFormat: "grok", GrokPatterns: []string{"%{COMBINED_LOG_FORMAT}"}, }, - input: Metric( - metric.New( - "success", - map[string]string{}, - map[string]interface{}{ - "grokSample": "127.0.0.1 - - [11/Dec/2013:00:01:45 -0800] \"GET /xampp/status.php HTTP/1.1\" 200 3891 \"http://cadenza/xampp/navi.php\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0\"", - }, - time.Unix(0, 0))), + input: metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "grokSample": "127.0.0.1 - - [11/Dec/2013:00:01:45 -0800] \"GET /xampp/status.php HTTP/1.1\" 200 3891 \"http://cadenza/xampp/navi.php\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0\"", + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "success", map[string]string{ "resp_code": "200", @@ -257,7 +244,7 @@ func TestApply(t *testing.T) { "ident": "-", "http_version": float64(1.1), }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -268,30 +255,29 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl", "err"}, }, - input: Metric( - metric.New( - "bigMeasure", - map[string]string{}, - map[string]interface{}{ - "field_1": `{"lvl":"info","msg":"http request"}`, - "field_2": `{"err":"fatal","fatal":"security threat"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "bigMeasure", map[string]string{ "lvl": "info", }, map[string]interface{}{}, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "bigMeasure", map[string]string{ "err": "fatal", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -303,17 +289,16 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl", "msg", "err", "fatal"}, }, - input: Metric( - metric.New( - "bigMeasure", - map[string]string{}, - map[string]interface{}{ - "field_1": `{"lvl":"info","msg":"http request"}`, - "field_2": `{"err":"fatal","fatal":"security threat"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "bigMeasure", map[string]string{ "lvl": "info", @@ -325,7 +310,7 @@ func TestApply(t *testing.T) { "field_1": `{"lvl":"info","msg":"http request"}`, "field_2": `{"err":"fatal","fatal":"security threat"}`, }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -336,40 +321,39 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl", "msg", "err", "fatal"}, }, - input: Metric( - metric.New( - "bigMeasure", - map[string]string{}, - map[string]interface{}{ - "field_1": `{"lvl":"info","msg":"http request"}`, - "field_2": `{"err":"fatal","fatal":"security threat"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "bigMeasure", map[string]string{}, map[string]interface{}{ "field_1": `{"lvl":"info","msg":"http request"}`, "field_2": `{"err":"fatal","fatal":"security threat"}`, }, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "bigMeasure", map[string]string{ "lvl": "info", "msg": "http request", }, map[string]interface{}{}, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "bigMeasure", map[string]string{ "err": "fatal", "fatal": "security threat", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -380,31 +364,30 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl"}, }, - input: Metric( - metric.New( - "success", - map[string]string{}, - map[string]interface{}{ - "good": `{"lvl":"info"}`, - "bad": "why", - }, - time.Unix(0, 0))), + input: metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "success", map[string]string{}, map[string]interface{}{ "good": `{"lvl":"info"}`, "bad": "why", }, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "success", map[string]string{ "lvl": "info", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -415,18 +398,17 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl", "thing"}, }, - input: Metric( - metric.New( - "success", - map[string]string{}, - map[string]interface{}{ - "bad": "why", - "good": `{"lvl":"info"}`, - "ok": `{"thing":"thang"}`, - }, - time.Unix(0, 0))), + input: metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "bad": "why", + "good": `{"lvl":"info"}`, + "ok": `{"thing":"thang"}`, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "success", map[string]string{}, map[string]interface{}{ @@ -434,21 +416,21 @@ func TestApply(t *testing.T) { "good": `{"lvl":"info"}`, "ok": `{"thing":"thang"}`, }, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "success", map[string]string{ "lvl": "info", }, map[string]interface{}{}, - time.Unix(0, 0))), - Metric(metric.New( + time.Unix(0, 0)), + metric.New( "success", map[string]string{ "thing": "thang", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -460,19 +442,18 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl"}, }, - input: Metric( - metric.New( - "success", - map[string]string{ - "a": "tag", - }, - map[string]interface{}{ - "good": `{"lvl":"info"}`, - "bad": "why", - }, - time.Unix(0, 0))), + input: metric.New( + "success", + map[string]string{ + "a": "tag", + }, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "success", map[string]string{ "a": "tag", @@ -482,7 +463,7 @@ func TestApply(t *testing.T) { "good": `{"lvl":"info"}`, "bad": "why", }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -493,25 +474,24 @@ func TestApply(t *testing.T) { DataFormat: "json", TagKeys: []string{"lvl"}, }, - input: Metric( - metric.New( - "success", - map[string]string{ - "thing": "tag", - }, - map[string]interface{}{ - "good": `{"lvl":"info"}`, - "bad": "why", - }, - time.Unix(0, 0))), + input: metric.New( + "success", + map[string]string{ + "thing": "tag", + }, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "success", map[string]string{ "lvl": "info", }, map[string]interface{}{}, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, } @@ -546,22 +526,21 @@ func TestBadApply(t *testing.T) { config: parsers.Config{ DataFormat: "json", }, - input: Metric( - metric.New( - "bad", - map[string]string{}, - map[string]interface{}{ - "some_field": 5, - }, - time.Unix(0, 0))), + input: metric.New( + "bad", + map[string]string{}, + map[string]interface{}{ + "some_field": 5, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "bad", map[string]string{}, map[string]interface{}{ "some_field": 5, }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, { @@ -570,22 +549,21 @@ func TestBadApply(t *testing.T) { config: parsers.Config{ DataFormat: "json", }, - input: Metric( - metric.New( - "bad", - map[string]string{}, - map[string]interface{}{ - "some_field": 5, - }, - time.Unix(0, 0))), + input: metric.New( + "bad", + map[string]string{}, + map[string]interface{}{ + "some_field": 5, + }, + time.Unix(0, 0)), expected: []telegraf.Metric{ - Metric(metric.New( + metric.New( "bad", map[string]string{}, map[string]interface{}{ "some_field": 5, }, - time.Unix(0, 0))), + time.Unix(0, 0)), }, }, } @@ -626,7 +604,7 @@ func getMetricFieldList(metric telegraf.Metric) interface{} { } func BenchmarkFieldListing(b *testing.B) { - metric := Metric(metric.New( + m := metric.New( "test", map[string]string{ "some": "tag", @@ -640,15 +618,15 @@ func BenchmarkFieldListing(b *testing.B) { "field5": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, "field6": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, }, - time.Unix(0, 0))) + time.Unix(0, 0)) for n := 0; n < b.N; n++ { - getMetricFieldList(metric) + getMetricFieldList(m) } } func BenchmarkFields(b *testing.B) { - metric := Metric(metric.New( + m := metric.New( "test", map[string]string{ "some": "tag", @@ -662,9 +640,9 @@ func BenchmarkFields(b *testing.B) { "field5": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, "field6": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, }, - time.Unix(0, 0))) + time.Unix(0, 0)) for n := 0; n < b.N; n++ { - getMetricFields(metric) + getMetricFields(m) } } diff --git a/plugins/processors/regex/regex_test.go b/plugins/processors/regex/regex_test.go index b0ddf47d08a7b..2f8890bba7e9e 100644 --- a/plugins/processors/regex/regex_test.go +++ b/plugins/processors/regex/regex_test.go @@ -10,7 +10,7 @@ import ( ) func newM1() telegraf.Metric { - m1, _ := metric.New("access_log", + m1 := metric.New("access_log", map[string]string{ "verb": "GET", "resp_code": "200", @@ -24,7 +24,7 @@ func newM1() telegraf.Metric { } func newM2() telegraf.Metric { - m2, _ := metric.New("access_log", + m2 := metric.New("access_log", map[string]string{ "verb": "GET", "resp_code": "200", diff --git a/plugins/processors/rename/rename_test.go b/plugins/processors/rename/rename_test.go index 1f8e0b7db3a4a..36e8aaeed43a0 100644 --- a/plugins/processors/rename/rename_test.go +++ b/plugins/processors/rename/rename_test.go @@ -16,7 +16,7 @@ func newMetric(name string, tags map[string]string, fields map[string]interface{ if fields == nil { fields = map[string]interface{}{} } - m, _ := metric.New(name, tags, fields, time.Now()) + m := metric.New(name, tags, fields, time.Now()) return m } diff --git a/plugins/processors/reverse_dns/reversedns_test.go b/plugins/processors/reverse_dns/reversedns_test.go index 660c25e3015e8..5fcce5fb4725a 100644 --- a/plugins/processors/reverse_dns/reversedns_test.go +++ b/plugins/processors/reverse_dns/reversedns_test.go @@ -14,7 +14,7 @@ import ( func TestSimpleReverseLookup(t *testing.T) { now := time.Now() - m, _ := metric.New("name", map[string]string{ + m := metric.New("name", map[string]string{ "dest_ip": "8.8.8.8", }, map[string]interface{}{ "source_ip": "127.0.0.1", diff --git a/plugins/processors/starlark/builtins.go b/plugins/processors/starlark/builtins.go index 8537c92f34953..6876fe9636ab5 100644 --- a/plugins/processors/starlark/builtins.go +++ b/plugins/processors/starlark/builtins.go @@ -15,10 +15,7 @@ func newMetric(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwa return nil, err } - m, err := metric.New(string(name), nil, nil, time.Now()) - if err != nil { - return nil, err - } + m := metric.New(string(name), nil, nil, time.Now()) return &Metric{metric: m}, nil } diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go index 2c1be510ef9b6..40d798a6db0e3 100644 --- a/plugins/processors/strings/strings_test.go +++ b/plugins/processors/strings/strings_test.go @@ -12,7 +12,7 @@ import ( ) func newM1() telegraf.Metric { - m1, _ := metric.New("IIS_log", + m1 := metric.New("IIS_log", map[string]string{ "verb": "GET", "s-computername": "MIXEDCASE_hostname", @@ -27,7 +27,7 @@ func newM1() telegraf.Metric { } func newM2() telegraf.Metric { - m1, _ := metric.New("IIS_log", + m1 := metric.New("IIS_log", map[string]string{ "verb": "GET", "S-ComputerName": "MIXEDCASE_hostname", @@ -795,7 +795,7 @@ func TestMultipleConversions(t *testing.T) { }, } - m, _ := metric.New("IIS_log", + m := metric.New("IIS_log", map[string]string{ "verb": "GET", "resp_code": "200", @@ -856,7 +856,7 @@ func TestReadmeExample(t *testing.T) { }, } - m, _ := metric.New("iis_log", + m := metric.New("iis_log", map[string]string{ "verb": "get", "uri_stem": "/API/HealthCheck", @@ -895,7 +895,7 @@ func TestReadmeExample(t *testing.T) { func newMetric(name string) telegraf.Metric { tags := map[string]string{} fields := map[string]interface{}{} - m, _ := metric.New(name, tags, fields, time.Now()) + m := metric.New(name, tags, fields, time.Now()) return m } diff --git a/plugins/processors/tag_limit/tag_limit_test.go b/plugins/processors/tag_limit/tag_limit_test.go index 9412d866b78e8..d9c361ed07296 100644 --- a/plugins/processors/tag_limit/tag_limit_test.go +++ b/plugins/processors/tag_limit/tag_limit_test.go @@ -16,7 +16,7 @@ func MustMetric(name string, tags map[string]string, fields map[string]interface if fields == nil { fields = map[string]interface{}{} } - m, _ := metric.New(name, tags, fields, metricTime) + m := metric.New(name, tags, fields, metricTime) return m } diff --git a/plugins/processors/topk/test_sets.go b/plugins/processors/topk/test_sets.go index aea2c44c8052b..69f957817eca0 100644 --- a/plugins/processors/topk/test_sets.go +++ b/plugins/processors/topk/test_sets.go @@ -1,13 +1,14 @@ package topk import ( + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "time" ) ///// Test set 1 ///// -var metric11, _ = metric.New( +var metric11 = metric.New( "m1", map[string]string{"tag_name": "tag_value1"}, map[string]interface{}{ @@ -17,7 +18,7 @@ var metric11, _ = metric.New( time.Now(), ) -var metric12, _ = metric.New( +var metric12 = metric.New( "m1", map[string]string{"tag_name": "tag_value1"}, map[string]interface{}{ @@ -26,7 +27,7 @@ var metric12, _ = metric.New( time.Now(), ) -var metric13, _ = metric.New( +var metric13 = metric.New( "m1", map[string]string{"tag_name": "tag_value1"}, map[string]interface{}{ @@ -36,7 +37,7 @@ var metric13, _ = metric.New( time.Now(), ) -var metric14, _ = metric.New( +var metric14 = metric.New( "m1", map[string]string{"tag_name": "tag_value1"}, map[string]interface{}{ @@ -46,7 +47,7 @@ var metric14, _ = metric.New( time.Now(), ) -var metric15, _ = metric.New( +var metric15 = metric.New( "m1", map[string]string{"tag_name": "tag_value1"}, map[string]interface{}{ @@ -60,7 +61,7 @@ var metric15, _ = metric.New( var MetricsSet1 = []telegraf.Metric{metric11, metric12, metric13, metric14, metric15} ///// Test set 2 ///// -var metric21, _ = metric.New( +var metric21 = metric.New( "metric1", map[string]string{ "id": "1", @@ -77,7 +78,7 @@ var metric21, _ = metric.New( time.Now(), ) -var metric22, _ = metric.New( +var metric22 = metric.New( "metric1", map[string]string{ "id": "2", @@ -93,7 +94,7 @@ var metric22, _ = metric.New( time.Now(), ) -var metric23, _ = metric.New( +var metric23 = metric.New( "metric1", map[string]string{ "id": "3", @@ -110,7 +111,7 @@ var metric23, _ = metric.New( time.Now(), ) -var metric24, _ = metric.New( +var metric24 = metric.New( "metric2", map[string]string{ "id": "4", @@ -126,7 +127,7 @@ var metric24, _ = metric.New( time.Now(), ) -var metric25, _ = metric.New( +var metric25 = metric.New( "metric2", map[string]string{ "id": "5", @@ -143,7 +144,7 @@ var metric25, _ = metric.New( time.Now(), ) -var metric26, _ = metric.New( +var metric26 = metric.New( "metric2", map[string]string{ "id": "6", diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index d94d452ace262..b7c8f50d9dfde 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -312,10 +312,7 @@ func (t *TopK) push() []telegraf.Metric { result := make([]telegraf.Metric, 0, len(ret)) for _, m := range ret { - newMetric, err := metric.New(m.Name(), m.Tags(), m.Fields(), m.Time(), m.Type()) - if err != nil { - continue - } + newMetric := metric.New(m.Name(), m.Tags(), m.Fields(), m.Time(), m.Type()) result = append(result, newMetric) } diff --git a/plugins/serializers/carbon2/carbon2_test.go b/plugins/serializers/carbon2/carbon2_test.go index 4afc0932cc7ba..86f1b66db8932 100644 --- a/plugins/serializers/carbon2/carbon2_test.go +++ b/plugins/serializers/carbon2/carbon2_test.go @@ -12,13 +12,6 @@ import ( "github.com/influxdata/telegraf/metric" ) -func MustMetric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - func TestSerializeMetricFloat(t *testing.T) { now := time.Now() tags := map[string]string{ @@ -27,8 +20,7 @@ func TestSerializeMetricFloat(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - require.NoError(t, err) + m := metric.New("cpu", tags, fields, now) testcases := []struct { format format @@ -65,8 +57,7 @@ func TestSerializeMetricWithEmptyStringTag(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - require.NoError(t, err) + m := metric.New("cpu", tags, fields, now) testcases := []struct { format format @@ -103,8 +94,7 @@ func TestSerializeWithSpaces(t *testing.T) { fields := map[string]interface{}{ "usage_idle 1": float64(91.5), } - m, err := metric.New("cpu metric", tags, fields, now) - require.NoError(t, err) + m := metric.New("cpu metric", tags, fields, now) testcases := []struct { format format @@ -141,8 +131,7 @@ func TestSerializeMetricInt(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(90), } - m, err := metric.New("cpu", tags, fields, now) - require.NoError(t, err) + m := metric.New("cpu", tags, fields, now) testcases := []struct { format format @@ -179,8 +168,7 @@ func TestSerializeMetricString(t *testing.T) { fields := map[string]interface{}{ "usage_idle": "foobar", } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) testcases := []struct { format format @@ -218,8 +206,7 @@ func TestSerializeMetricBool(t *testing.T) { "java_lang_GarbageCollector_Valid": value, } - m, err := metric.New("cpu", tags, fields, tim) - require.NoError(t, err) + m := metric.New("cpu", tags, fields, tim) return m } @@ -267,15 +254,13 @@ func TestSerializeMetricBool(t *testing.T) { } func TestSerializeBatch(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ) metrics := []telegraf.Metric{m, m} @@ -315,14 +300,14 @@ func TestSerializeMetricIsProperlySanitized(t *testing.T) { now := time.Now() testcases := []struct { - metricFunc func() (telegraf.Metric, error) + metricFunc func() telegraf.Metric format format expected string replaceChar string expectedErr bool }{ { - metricFunc: func() (telegraf.Metric, error) { + metricFunc: func() telegraf.Metric { fields := map[string]interface{}{ "usage_idle": float64(91.5), } @@ -333,7 +318,7 @@ func TestSerializeMetricIsProperlySanitized(t *testing.T) { replaceChar: DefaultSanitizeReplaceChar, }, { - metricFunc: func() (telegraf.Metric, error) { + metricFunc: func() telegraf.Metric { fields := map[string]interface{}{ "usage_idle": float64(91.5), } @@ -344,7 +329,7 @@ func TestSerializeMetricIsProperlySanitized(t *testing.T) { replaceChar: "_", }, { - metricFunc: func() (telegraf.Metric, error) { + metricFunc: func() telegraf.Metric { fields := map[string]interface{}{ "usage_idle": float64(91.5), } @@ -355,7 +340,7 @@ func TestSerializeMetricIsProperlySanitized(t *testing.T) { replaceChar: DefaultSanitizeReplaceChar, }, { - metricFunc: func() (telegraf.Metric, error) { + metricFunc: func() telegraf.Metric { fields := map[string]interface{}{ "usage_idle": float64(91.5), } @@ -366,7 +351,7 @@ func TestSerializeMetricIsProperlySanitized(t *testing.T) { replaceChar: DefaultSanitizeReplaceChar, }, { - metricFunc: func() (telegraf.Metric, error) { + metricFunc: func() telegraf.Metric { fields := map[string]interface{}{ "usage_idle": float64(91.5), } @@ -377,7 +362,7 @@ func TestSerializeMetricIsProperlySanitized(t *testing.T) { replaceChar: DefaultSanitizeReplaceChar, }, { - metricFunc: func() (telegraf.Metric, error) { + metricFunc: func() telegraf.Metric { fields := map[string]interface{}{ "usage_idle": float64(91.5), } @@ -388,7 +373,7 @@ func TestSerializeMetricIsProperlySanitized(t *testing.T) { replaceChar: "_", }, { - metricFunc: func() (telegraf.Metric, error) { + metricFunc: func() telegraf.Metric { fields := map[string]interface{}{ "usage_idle": float64(91.5), } @@ -402,8 +387,7 @@ func TestSerializeMetricIsProperlySanitized(t *testing.T) { for _, tc := range testcases { t.Run(string(tc.format), func(t *testing.T) { - m, err := tc.metricFunc() - require.NoError(t, err) + m := tc.metricFunc() s, err := NewSerializer(string(tc.format), tc.replaceChar) if tc.expectedErr { diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go index 2b93b16df4e4d..0a2e0bd7beaa1 100644 --- a/plugins/serializers/graphite/graphite_test.go +++ b/plugins/serializers/graphite/graphite_test.go @@ -32,19 +32,19 @@ const ( ) func TestGraphiteTags(t *testing.T) { - m1, _ := metric.New( + m1 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m2, _ := metric.New( + m2 := metric.New( "mymeasurement", map[string]string{"host": "192.168.0.1", "afoo": "first", "bfoo": "second"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) - m3, _ := metric.New( + m3 := metric.New( "mymeasurement", map[string]string{"afoo": "first", "bfoo": "second"}, map[string]interface{}{"value": float64(3.14)}, @@ -70,13 +70,11 @@ func TestSerializeMetricNoHost(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu0.us-west-2.cpu.usage_idle 91.5 %d", now.Unix()), @@ -97,8 +95,7 @@ func TestSerializeMetricNoHostWithTagSupport(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -106,7 +103,6 @@ func TestSerializeMetricNoHostWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu.usage_idle;cpu=cpu0;datacenter=us-west-2 91.5 %d", now.Unix()), @@ -128,13 +124,11 @@ func TestSerializeMetricHost(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu.usage_idle 91.5 %d", now.Unix()), @@ -156,9 +150,8 @@ func TestSerializeMetricHostWithMultipleTemplates(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m1, err := metric.New("cpu", tags, fields, now) - m2, err := metric.New("new_cpu", tags, fields, now) - assert.NoError(t, err) + m1 := metric.New("cpu", tags, fields, now) + m2 := metric.New("new_cpu", tags, fields, now) templates, defaultTemplate, err := InitGraphiteTemplates([]string{ "cp* tags.measurement.host.field", @@ -201,9 +194,8 @@ func TestSerializeMetricHostWithMultipleTemplatesWithDefault(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m1, err := metric.New("cpu", tags, fields, now) - m2, err := metric.New("new_cpu", tags, fields, now) - assert.NoError(t, err) + m1 := metric.New("cpu", tags, fields, now) + m2 := metric.New("new_cpu", tags, fields, now) templates, defaultTemplate, err := InitGraphiteTemplates([]string{ "cp* tags.measurement.host.field", @@ -247,8 +239,7 @@ func TestSerializeMetricHostWithTagSupport(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -256,7 +247,6 @@ func TestSerializeMetricHostWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu.usage_idle;cpu=cpu0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), @@ -278,13 +268,11 @@ func TestSerializeValueField(t *testing.T) { fields := map[string]interface{}{ "value": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu 91.5 %d", now.Unix()), @@ -302,8 +290,7 @@ func TestSerializeValueFieldWithTagSupport(t *testing.T) { fields := map[string]interface{}{ "value": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -311,7 +298,6 @@ func TestSerializeValueFieldWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu;cpu=cpu0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), @@ -330,15 +316,13 @@ func TestSerializeValueField2(t *testing.T) { fields := map[string]interface{}{ "value": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: "host.field.tags.measurement", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu 91.5 %d", now.Unix()), @@ -356,15 +340,13 @@ func TestSerializeValueString(t *testing.T) { fields := map[string]interface{}{ "value": "asdasd", } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: "host.field.tags.measurement", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) assert.Equal(t, "", mS[0]) } @@ -378,8 +360,7 @@ func TestSerializeValueStringWithTagSupport(t *testing.T) { fields := map[string]interface{}{ "value": "asdasd", } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -387,7 +368,6 @@ func TestSerializeValueStringWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) assert.Equal(t, "", mS[0]) } @@ -402,15 +382,13 @@ func TestSerializeValueBoolean(t *testing.T) { "enabled": true, "disabled": false, } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: "host.field.tags.measurement", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.enabled.cpu0.us-west-2.cpu 1 %d", now.Unix()), @@ -432,8 +410,7 @@ func TestSerializeValueBooleanWithTagSupport(t *testing.T) { "enabled": true, "disabled": false, } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -441,7 +418,6 @@ func TestSerializeValueBooleanWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu.enabled;cpu=cpu0;datacenter=us-west-2;host=localhost 1 %d", now.Unix()), @@ -458,8 +434,7 @@ func TestSerializeValueUnsigned(t *testing.T) { fields := map[string]interface{}{ "free": uint64(42), } - m, err := metric.New("mem", tags, fields, now) - require.NoError(t, err) + m := metric.New("mem", tags, fields, now) s := GraphiteSerializer{} buf, err := s.Serialize(m) @@ -479,15 +454,13 @@ func TestSerializeFieldWithSpaces(t *testing.T) { fields := map[string]interface{}{ `field\ with\ spaces`: float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: "host.tags.measurement.field", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu.field_with_spaces 91.5 %d", now.Unix()), @@ -505,8 +478,7 @@ func TestSerializeFieldWithSpacesWithTagSupport(t *testing.T) { fields := map[string]interface{}{ `field\ with\ spaces`: float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -514,7 +486,6 @@ func TestSerializeFieldWithSpacesWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu.field_with_spaces;cpu=cpu0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), @@ -533,15 +504,13 @@ func TestSerializeTagWithSpaces(t *testing.T) { fields := map[string]interface{}{ `field_with_spaces`: float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: "host.tags.measurement.field", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.cpu_0.us-west-2.cpu.field_with_spaces 91.5 %d", now.Unix()), @@ -559,8 +528,7 @@ func TestSerializeTagWithSpacesWithTagSupport(t *testing.T) { fields := map[string]interface{}{ `field_with_spaces`: float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ TagSupport: true, @@ -568,7 +536,6 @@ func TestSerializeTagWithSpacesWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("cpu.field_with_spaces;cpu=cpu_0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), @@ -587,15 +554,13 @@ func TestSerializeValueField3(t *testing.T) { fields := map[string]interface{}{ "value": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: "field.host.tags.measurement", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu 91.5 %d", now.Unix()), @@ -614,15 +579,13 @@ func TestSerializeValueField5(t *testing.T) { fields := map[string]interface{}{ "value": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Template: template5, } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("localhost.us-west-2.cpu0.cpu 91.5 %d", now.Unix()), @@ -641,13 +604,11 @@ func TestSerializeMetricPrefix(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{Prefix: "prefix"} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("prefix.localhost.cpu0.us-west-2.cpu.usage_idle 91.5 %d", now.Unix()), @@ -669,8 +630,7 @@ func TestSerializeMetricPrefixWithTagSupport(t *testing.T) { "usage_idle": float64(91.5), "usage_busy": float64(8.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := GraphiteSerializer{ Prefix: "prefix", @@ -679,7 +639,6 @@ func TestSerializeMetricPrefixWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{ fmt.Sprintf("prefix.cpu.usage_idle;cpu=cpu0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), @@ -699,8 +658,7 @@ func TestSerializeBucketNameNoHost(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), "", "") @@ -713,8 +671,7 @@ func TestSerializeBucketNameHost(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), "", "") @@ -727,8 +684,7 @@ func TestSerializeBucketNamePrefix(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), "", "prefix") @@ -741,8 +697,7 @@ func TestTemplate1(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), template1, "") @@ -755,8 +710,7 @@ func TestTemplate2(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), template2, "") @@ -769,8 +723,7 @@ func TestTemplate3(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), template3, "") @@ -783,8 +736,7 @@ func TestTemplate4(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), template4, "") @@ -797,8 +749,7 @@ func TestTemplate6(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", defaultTags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", defaultTags, fields, now) mS := SerializeBucketName(m.Name(), m.Tags(), template6, "") @@ -890,8 +841,7 @@ func TestClean(t *testing.T) { s := GraphiteSerializer{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New(tt.metricName, tt.tags, tt.fields, now) - assert.NoError(t, err) + m := metric.New(tt.metricName, tt.tags, tt.fields, now) actual, _ := s.Serialize(m) require.Equal(t, tt.expected, string(actual)) }) @@ -985,8 +935,7 @@ func TestCleanWithTagsSupport(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New(tt.metricName, tt.tags, tt.fields, now) - assert.NoError(t, err) + m := metric.New(tt.metricName, tt.tags, tt.fields, now) actual, _ := s.Serialize(m) require.Equal(t, tt.expected, string(actual)) }) @@ -1014,8 +963,7 @@ func TestSerializeBatch(t *testing.T) { s := GraphiteSerializer{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New(tt.metricName, tt.tags, tt.fields, now) - assert.NoError(t, err) + m := metric.New(tt.metricName, tt.tags, tt.fields, now) actual, _ := s.SerializeBatch([]telegraf.Metric{m, m}) require.Equal(t, tt.expected, string(actual)) }) @@ -1046,8 +994,7 @@ func TestSerializeBatchWithTagsSupport(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m, err := metric.New(tt.metricName, tt.tags, tt.fields, now) - assert.NoError(t, err) + m := metric.New(tt.metricName, tt.tags, tt.fields, now) actual, _ := s.SerializeBatch([]telegraf.Metric{m, m}) require.Equal(t, tt.expected, string(actual)) }) diff --git a/plugins/serializers/influx/influx_test.go b/plugins/serializers/influx/influx_test.go index a86215d94bf4b..f80718b3aa8e6 100644 --- a/plugins/serializers/influx/influx_test.go +++ b/plugins/serializers/influx/influx_test.go @@ -10,13 +10,6 @@ import ( "github.com/stretchr/testify/require" ) -func MustMetric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - var tests = []struct { name string maxBytes int @@ -27,506 +20,446 @@ var tests = []struct { }{ { name: "minimal", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), output: []byte("cpu value=42 0\n"), }, { name: "multiple tags", - input: MustMetric( - metric.New( - "cpu", - map[string]string{ - "host": "localhost", - "cpu": "CPU0", - }, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{ + "host": "localhost", + "cpu": "CPU0", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), output: []byte("cpu,cpu=CPU0,host=localhost value=42 0\n"), }, { name: "multiple fields", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "x": 42.0, - "y": 42.0, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "x": 42.0, + "y": 42.0, + }, + time.Unix(0, 0), ), output: []byte("cpu x=42,y=42 0\n"), }, { name: "float NaN", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "x": math.NaN(), - "y": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "x": math.NaN(), + "y": 42, + }, + time.Unix(0, 0), ), output: []byte("cpu y=42i 0\n"), }, { name: "float NaN only", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": math.NaN(), - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": math.NaN(), + }, + time.Unix(0, 0), ), errReason: NoFields, }, { name: "float Inf", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": math.Inf(1), - "y": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": math.Inf(1), + "y": 42, + }, + time.Unix(0, 0), ), output: []byte("cpu y=42i 0\n"), }, { name: "integer field", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("cpu value=42i 0\n"), }, { name: "integer field 64-bit", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": int64(123456789012345), - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": int64(123456789012345), + }, + time.Unix(0, 0), ), output: []byte("cpu value=123456789012345i 0\n"), }, { name: "uint field", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": uint64(42), - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": uint64(42), + }, + time.Unix(0, 0), ), output: []byte("cpu value=42u 0\n"), typeSupport: UintSupport, }, { name: "uint field max value", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": uint64(18446744073709551615), - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": uint64(18446744073709551615), + }, + time.Unix(0, 0), ), output: []byte("cpu value=18446744073709551615u 0\n"), typeSupport: UintSupport, }, { name: "uint field no uint support", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": uint64(42), - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": uint64(42), + }, + time.Unix(0, 0), ), output: []byte("cpu value=42i 0\n"), }, { name: "uint field no uint support overflow", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": uint64(18446744073709551615), - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": uint64(18446744073709551615), + }, + time.Unix(0, 0), ), output: []byte("cpu value=9223372036854775807i 0\n"), }, { name: "bool field", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": true, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": true, + }, + time.Unix(0, 0), ), output: []byte("cpu value=true 0\n"), }, { name: "string field", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": "howdy", - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": "howdy", + }, + time.Unix(0, 0), ), output: []byte("cpu value=\"howdy\" 0\n"), }, { name: "timestamp", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(1519194109, 42), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(1519194109, 42), ), output: []byte("cpu value=42 1519194109000000042\n"), }, { name: "split fields exact", maxBytes: 33, - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "abc": 123, - "def": 456, - }, - time.Unix(1519194109, 42), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "abc": 123, + "def": 456, + }, + time.Unix(1519194109, 42), ), output: []byte("cpu abc=123i 1519194109000000042\ncpu def=456i 1519194109000000042\n"), }, { name: "split fields extra", maxBytes: 34, - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "abc": 123, - "def": 456, - }, - time.Unix(1519194109, 42), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "abc": 123, + "def": 456, + }, + time.Unix(1519194109, 42), ), output: []byte("cpu abc=123i 1519194109000000042\ncpu def=456i 1519194109000000042\n"), }, { name: "split_fields_overflow", maxBytes: 43, - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "abc": 123, - "def": 456, - "ghi": 789, - "jkl": 123, - }, - time.Unix(1519194109, 42), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "abc": 123, + "def": 456, + "ghi": 789, + "jkl": 123, + }, + time.Unix(1519194109, 42), ), output: []byte("cpu abc=123i,def=456i 1519194109000000042\ncpu ghi=789i,jkl=123i 1519194109000000042\n"), }, { name: "name newline", - input: MustMetric( - metric.New( - "c\npu", - map[string]string{}, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "c\npu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("c\\npu value=42i 0\n"), }, { name: "tag newline", - input: MustMetric( - metric.New( - "cpu", - map[string]string{ - "host": "x\ny", - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{ + "host": "x\ny", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("cpu,host=x\\ny value=42i 0\n"), }, { name: "empty tag value is removed", - input: MustMetric( - metric.New( - "cpu", - map[string]string{ - "host": "", - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{ + "host": "", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("cpu value=42i 0\n"), }, { name: "empty tag key is removed", - input: MustMetric( - metric.New( - "cpu", - map[string]string{ - "": "example.org", - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{ + "": "example.org", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("cpu value=42i 0\n"), }, { name: "tag value ends with backslash is trimmed", - input: MustMetric( - metric.New( - "disk", - map[string]string{ - "path": `C:\`, - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "disk", + map[string]string{ + "path": `C:\`, + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("disk,path=C: value=42i 0\n"), }, { name: "tag key ends with backslash is trimmed", - input: MustMetric( - metric.New( - "disk", - map[string]string{ - `path\`: "/", - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "disk", + map[string]string{ + `path\`: "/", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("disk,path=/ value=42i 0\n"), }, { name: "tag key backslash is trimmed and removed", - input: MustMetric( - metric.New( - "disk", - map[string]string{ - `\`: "example.org", - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "disk", + map[string]string{ + `\`: "example.org", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("disk value=42i 0\n"), }, { name: "tag value backslash is trimmed and removed", - input: MustMetric( - metric.New( - "disk", - map[string]string{ - "host": `\`, - }, - map[string]interface{}{ - "value": 42, - }, - time.Unix(0, 0), - ), + input: metric.New( + "disk", + map[string]string{ + "host": `\`, + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), ), output: []byte("disk value=42i 0\n"), }, { name: "string newline", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": "x\ny", - }, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": "x\ny", + }, + time.Unix(0, 0), ), output: []byte("cpu value=\"x\ny\" 0\n"), }, { name: "need more space", maxBytes: 32, - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "abc": 123, - "def": 456, - }, - time.Unix(1519194109, 42), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "abc": 123, + "def": 456, + }, + time.Unix(1519194109, 42), ), output: nil, errReason: NeedMoreSpace, }, { name: "no fields", - input: MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{}, - time.Unix(0, 0), - ), + input: metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, 0), ), errReason: NoFields, }, { name: "procstat", - input: MustMetric( - metric.New( - "procstat", - map[string]string{ - "exe": "bash", - "process_name": "bash", - }, - map[string]interface{}{ - "cpu_time": 0, - "cpu_time_guest": float64(0), - "cpu_time_guest_nice": float64(0), - "cpu_time_idle": float64(0), - "cpu_time_iowait": float64(0), - "cpu_time_irq": float64(0), - "cpu_time_nice": float64(0), - "cpu_time_soft_irq": float64(0), - "cpu_time_steal": float64(0), - "cpu_time_system": float64(0), - "cpu_time_user": float64(0.02), - "cpu_usage": float64(0), - "involuntary_context_switches": 2, - "memory_data": 1576960, - "memory_locked": 0, - "memory_rss": 5103616, - "memory_stack": 139264, - "memory_swap": 0, - "memory_vms": 21659648, - "nice_priority": 20, - "num_fds": 4, - "num_threads": 1, - "pid": 29417, - "read_bytes": 0, - "read_count": 259, - "realtime_priority": 0, - "rlimit_cpu_time_hard": 2147483647, - "rlimit_cpu_time_soft": 2147483647, - "rlimit_file_locks_hard": 2147483647, - "rlimit_file_locks_soft": 2147483647, - "rlimit_memory_data_hard": 2147483647, - "rlimit_memory_data_soft": 2147483647, - "rlimit_memory_locked_hard": 65536, - "rlimit_memory_locked_soft": 65536, - "rlimit_memory_rss_hard": 2147483647, - "rlimit_memory_rss_soft": 2147483647, - "rlimit_memory_stack_hard": 2147483647, - "rlimit_memory_stack_soft": 8388608, - "rlimit_memory_vms_hard": 2147483647, - "rlimit_memory_vms_soft": 2147483647, - "rlimit_nice_priority_hard": 0, - "rlimit_nice_priority_soft": 0, - "rlimit_num_fds_hard": 4096, - "rlimit_num_fds_soft": 1024, - "rlimit_realtime_priority_hard": 0, - "rlimit_realtime_priority_soft": 0, - "rlimit_signals_pending_hard": 78994, - "rlimit_signals_pending_soft": 78994, - "signals_pending": 0, - "voluntary_context_switches": 42, - "write_bytes": 106496, - "write_count": 35, - }, - time.Unix(0, 1517620624000000000), - ), + input: metric.New( + "procstat", + map[string]string{ + "exe": "bash", + "process_name": "bash", + }, + map[string]interface{}{ + "cpu_time": 0, + "cpu_time_guest": float64(0), + "cpu_time_guest_nice": float64(0), + "cpu_time_idle": float64(0), + "cpu_time_iowait": float64(0), + "cpu_time_irq": float64(0), + "cpu_time_nice": float64(0), + "cpu_time_soft_irq": float64(0), + "cpu_time_steal": float64(0), + "cpu_time_system": float64(0), + "cpu_time_user": float64(0.02), + "cpu_usage": float64(0), + "involuntary_context_switches": 2, + "memory_data": 1576960, + "memory_locked": 0, + "memory_rss": 5103616, + "memory_stack": 139264, + "memory_swap": 0, + "memory_vms": 21659648, + "nice_priority": 20, + "num_fds": 4, + "num_threads": 1, + "pid": 29417, + "read_bytes": 0, + "read_count": 259, + "realtime_priority": 0, + "rlimit_cpu_time_hard": 2147483647, + "rlimit_cpu_time_soft": 2147483647, + "rlimit_file_locks_hard": 2147483647, + "rlimit_file_locks_soft": 2147483647, + "rlimit_memory_data_hard": 2147483647, + "rlimit_memory_data_soft": 2147483647, + "rlimit_memory_locked_hard": 65536, + "rlimit_memory_locked_soft": 65536, + "rlimit_memory_rss_hard": 2147483647, + "rlimit_memory_rss_soft": 2147483647, + "rlimit_memory_stack_hard": 2147483647, + "rlimit_memory_stack_soft": 8388608, + "rlimit_memory_vms_hard": 2147483647, + "rlimit_memory_vms_soft": 2147483647, + "rlimit_nice_priority_hard": 0, + "rlimit_nice_priority_soft": 0, + "rlimit_num_fds_hard": 4096, + "rlimit_num_fds_soft": 1024, + "rlimit_realtime_priority_hard": 0, + "rlimit_realtime_priority_soft": 0, + "rlimit_signals_pending_hard": 78994, + "rlimit_signals_pending_soft": 78994, + "signals_pending": 0, + "voluntary_context_switches": 42, + "write_bytes": 106496, + "write_count": 35, + }, + time.Unix(0, 1517620624000000000), ), output: []byte("procstat,exe=bash,process_name=bash cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_system=0,cpu_time_user=0.02,cpu_usage=0,involuntary_context_switches=2i,memory_data=1576960i,memory_locked=0i,memory_rss=5103616i,memory_stack=139264i,memory_swap=0i,memory_vms=21659648i,nice_priority=20i,num_fds=4i,num_threads=1i,pid=29417i,read_bytes=0i,read_count=259i,realtime_priority=0i,rlimit_cpu_time_hard=2147483647i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_file_locks_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_data_soft=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_memory_locked_soft=65536i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_stack_hard=2147483647i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=4096i,rlimit_num_fds_soft=1024i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=78994i,rlimit_signals_pending_soft=78994i,signals_pending=0i,voluntary_context_switches=42i,write_bytes=106496i,write_count=35i 1517620624000000000\n"), }, @@ -565,15 +498,13 @@ func BenchmarkSerializer(b *testing.B) { } func TestSerialize_SerializeBatch(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ) metrics := []telegraf.Metric{m, m} diff --git a/plugins/serializers/influx/reader_test.go b/plugins/serializers/influx/reader_test.go index 1bb4a3b61cd4e..217660e43f4bd 100644 --- a/plugins/serializers/influx/reader_test.go +++ b/plugins/serializers/influx/reader_test.go @@ -24,15 +24,13 @@ func TestReader(t *testing.T) { maxLineBytes: 4096, bufferSize: 20, input: []telegraf.Metric{ - MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, expected: []byte("cpu value=42 0\n"), @@ -42,25 +40,21 @@ func TestReader(t *testing.T) { maxLineBytes: 4096, bufferSize: 20, input: []telegraf.Metric{ - MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), - MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, expected: []byte("cpu value=42 0\ncpu value=42 0\n"), @@ -70,15 +64,13 @@ func TestReader(t *testing.T) { maxLineBytes: 4096, bufferSize: 15, input: []telegraf.Metric{ - MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, expected: []byte("cpu value=42 0\n"), @@ -88,25 +80,21 @@ func TestReader(t *testing.T) { maxLineBytes: 4096, bufferSize: 15, input: []telegraf.Metric{ - MustMetric( - metric.New( - "", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), - MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, expected: []byte("cpu value=42 0\n"), @@ -116,25 +104,21 @@ func TestReader(t *testing.T) { maxLineBytes: 4096, bufferSize: 15, input: []telegraf.Metric{ - MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), - MustMetric( - metric.New( - "", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + metric.New( + "", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ), }, expected: []byte("cpu value=42 0\n"), @@ -169,15 +153,13 @@ func TestReader(t *testing.T) { } func TestZeroLengthBufferNoError(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ) serializer := NewSerializer() serializer.SetFieldSortOrder(SortFields) @@ -191,71 +173,68 @@ func TestZeroLengthBufferNoError(t *testing.T) { } func BenchmarkReader(b *testing.B) { - m := MustMetric( - metric.New( - "procstat", - map[string]string{ - "exe": "bash", - "process_name": "bash", - }, - map[string]interface{}{ - "cpu_time": 0, - "cpu_time_guest": float64(0), - "cpu_time_guest_nice": float64(0), - "cpu_time_idle": float64(0), - "cpu_time_iowait": float64(0), - "cpu_time_irq": float64(0), - "cpu_time_nice": float64(0), - "cpu_time_soft_irq": float64(0), - "cpu_time_steal": float64(0), - "cpu_time_system": float64(0), - "cpu_time_user": float64(0.02), - "cpu_usage": float64(0), - "involuntary_context_switches": 2, - "memory_data": 1576960, - "memory_locked": 0, - "memory_rss": 5103616, - "memory_stack": 139264, - "memory_swap": 0, - "memory_vms": 21659648, - "nice_priority": 20, - "num_fds": 4, - "num_threads": 1, - "pid": 29417, - "read_bytes": 0, - "read_count": 259, - "realtime_priority": 0, - "rlimit_cpu_time_hard": 2147483647, - "rlimit_cpu_time_soft": 2147483647, - "rlimit_file_locks_hard": 2147483647, - "rlimit_file_locks_soft": 2147483647, - "rlimit_memory_data_hard": 2147483647, - "rlimit_memory_data_soft": 2147483647, - "rlimit_memory_locked_hard": 65536, - "rlimit_memory_locked_soft": 65536, - "rlimit_memory_rss_hard": 2147483647, - "rlimit_memory_rss_soft": 2147483647, - "rlimit_memory_stack_hard": 2147483647, - "rlimit_memory_stack_soft": 8388608, - "rlimit_memory_vms_hard": 2147483647, - "rlimit_memory_vms_soft": 2147483647, - "rlimit_nice_priority_hard": 0, - "rlimit_nice_priority_soft": 0, - "rlimit_num_fds_hard": 4096, - "rlimit_num_fds_soft": 1024, - "rlimit_realtime_priority_hard": 0, - "rlimit_realtime_priority_soft": 0, - "rlimit_signals_pending_hard": 78994, - "rlimit_signals_pending_soft": 78994, - "signals_pending": 0, - "voluntary_context_switches": 42, - "write_bytes": 106496, - "write_count": 35, - }, - time.Unix(0, 1517620624000000000), - ), + m := metric.New( + "procstat", + map[string]string{ + "exe": "bash", + "process_name": "bash", + }, + map[string]interface{}{ + "cpu_time": 0, + "cpu_time_guest": float64(0), + "cpu_time_guest_nice": float64(0), + "cpu_time_idle": float64(0), + "cpu_time_iowait": float64(0), + "cpu_time_irq": float64(0), + "cpu_time_nice": float64(0), + "cpu_time_soft_irq": float64(0), + "cpu_time_steal": float64(0), + "cpu_time_system": float64(0), + "cpu_time_user": float64(0.02), + "cpu_usage": float64(0), + "involuntary_context_switches": 2, + "memory_data": 1576960, + "memory_locked": 0, + "memory_rss": 5103616, + "memory_stack": 139264, + "memory_swap": 0, + "memory_vms": 21659648, + "nice_priority": 20, + "num_fds": 4, + "num_threads": 1, + "pid": 29417, + "read_bytes": 0, + "read_count": 259, + "realtime_priority": 0, + "rlimit_cpu_time_hard": 2147483647, + "rlimit_cpu_time_soft": 2147483647, + "rlimit_file_locks_hard": 2147483647, + "rlimit_file_locks_soft": 2147483647, + "rlimit_memory_data_hard": 2147483647, + "rlimit_memory_data_soft": 2147483647, + "rlimit_memory_locked_hard": 65536, + "rlimit_memory_locked_soft": 65536, + "rlimit_memory_rss_hard": 2147483647, + "rlimit_memory_rss_soft": 2147483647, + "rlimit_memory_stack_hard": 2147483647, + "rlimit_memory_stack_soft": 8388608, + "rlimit_memory_vms_hard": 2147483647, + "rlimit_memory_vms_soft": 2147483647, + "rlimit_nice_priority_hard": 0, + "rlimit_nice_priority_soft": 0, + "rlimit_num_fds_hard": 4096, + "rlimit_num_fds_soft": 1024, + "rlimit_realtime_priority_hard": 0, + "rlimit_realtime_priority_soft": 0, + "rlimit_signals_pending_hard": 78994, + "rlimit_signals_pending_soft": 78994, + "signals_pending": 0, + "voluntary_context_switches": 42, + "write_bytes": 106496, + "write_count": 35, + }, + time.Unix(0, 1517620624000000000), ) - metrics := make([]telegraf.Metric, 1000) for i := range metrics { metrics[i] = m diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go index 9ea304c88eedb..74d7f94166621 100644 --- a/plugins/serializers/json/json_test.go +++ b/plugins/serializers/json/json_test.go @@ -28,12 +28,11 @@ func TestSerializeMetricFloat(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(0) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":91.5},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") assert.Equal(t, string(expS), string(buf)) @@ -78,15 +77,13 @@ func TestSerialize_TimestampUnits(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(1525478795, 123456789), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(1525478795, 123456789), ) s, _ := NewSerializer(tt.timestampUnits) actual, err := s.Serialize(m) @@ -104,12 +101,11 @@ func TestSerializeMetricInt(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(90), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(0) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":90},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") @@ -124,12 +120,11 @@ func TestSerializeMetricString(t *testing.T) { fields := map[string]interface{}{ "usage_idle": "foobar", } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(0) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":"foobar"},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") @@ -145,12 +140,11 @@ func TestSerializeMultiFields(t *testing.T) { "usage_idle": int64(90), "usage_total": 8559615, } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(0) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":90,"usage_total":8559615},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") @@ -165,8 +159,7 @@ func TestSerializeMetricWithEscapes(t *testing.T) { fields := map[string]interface{}{ "U,age=Idle": int64(90), } - m, err := metric.New("My CPU", tags, fields, now) - assert.NoError(t, err) + m := metric.New("My CPU", tags, fields, now) s, _ := NewSerializer(0) buf, err := s.Serialize(m) @@ -177,15 +170,13 @@ func TestSerializeMetricWithEscapes(t *testing.T) { } func TestSerializeBatch(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ) metrics := []telegraf.Metric{m, m} diff --git a/plugins/serializers/msgpack/msgpack_test.go b/plugins/serializers/msgpack/msgpack_test.go index a44ffae4515e3..36cc66ea52c59 100644 --- a/plugins/serializers/msgpack/msgpack_test.go +++ b/plugins/serializers/msgpack/msgpack_test.go @@ -10,8 +10,7 @@ import ( ) func toTelegrafMetric(m Metric) telegraf.Metric { - tm, _ := metric.New(m.Name, m.Tags, m.Fields, m.Time.time) - + tm := metric.New(m.Name, m.Tags, m.Fields, m.Time.time) return tm } diff --git a/plugins/serializers/nowmetric/nowmetric_test.go b/plugins/serializers/nowmetric/nowmetric_test.go index e49b81c2d232c..b9e7914a6adbf 100644 --- a/plugins/serializers/nowmetric/nowmetric_test.go +++ b/plugins/serializers/nowmetric/nowmetric_test.go @@ -27,12 +27,11 @@ func TestSerializeMetricFloat(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer() var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":91.5,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) assert.Equal(t, string(expS), string(buf)) @@ -67,15 +66,13 @@ func TestSerialize_TimestampUnits(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(1525478795, 123456789), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(1525478795, 123456789), ) s, _ := NewSerializer() actual, err := s.Serialize(m) @@ -93,12 +90,11 @@ func TestSerializeMetricInt(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(90), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer() var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) @@ -113,12 +109,11 @@ func TestSerializeMetricString(t *testing.T) { fields := map[string]interface{}{ "usage_idle": "foobar", } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer() var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) assert.Equal(t, "null", string(buf)) @@ -133,8 +128,7 @@ func TestSerializeMultiFields(t *testing.T) { "usage_idle": int64(90), "usage_total": 8559615, } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) // Sort for predictable field order sort.Slice(m.FieldList(), func(i, j int) bool { @@ -143,7 +137,7 @@ func TestSerializeMultiFields(t *testing.T) { s, _ := NewSerializer() var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"},{"metric_type":"usage_total","resource":"","node":"","value":8559615,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)), (now.UnixNano() / int64(time.Millisecond)))) @@ -158,8 +152,7 @@ func TestSerializeMetricWithEscapes(t *testing.T) { fields := map[string]interface{}{ "U,age=Idle": int64(90), } - m, err := metric.New("My CPU", tags, fields, now) - assert.NoError(t, err) + m := metric.New("My CPU", tags, fields, now) s, _ := NewSerializer() buf, err := s.Serialize(m) @@ -170,17 +163,14 @@ func TestSerializeMetricWithEscapes(t *testing.T) { } func TestSerializeBatch(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ) - metrics := []telegraf.Metric{m, m} s, _ := NewSerializer() buf, err := s.SerializeBatch(metrics) diff --git a/plugins/serializers/splunkmetric/splunkmetric_test.go b/plugins/serializers/splunkmetric/splunkmetric_test.go index f00d5d8da2176..c088d99f7f1a4 100644 --- a/plugins/serializers/splunkmetric/splunkmetric_test.go +++ b/plugins/serializers/splunkmetric/splunkmetric_test.go @@ -25,12 +25,11 @@ func TestSerializeMetricFloat(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(false, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":1529875740.819}` assert.Equal(t, expS, string(buf)) @@ -45,12 +44,11 @@ func TestSerializeMetricFloatHec(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(true, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"time":1529875740.819,"fields":{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` assert.Equal(t, expS, string(buf)) @@ -64,12 +62,11 @@ func TestSerializeMetricInt(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(90), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(false, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` @@ -84,12 +81,11 @@ func TestSerializeMetricIntHec(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(90), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(true, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"time":0,"fields":{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` @@ -104,12 +100,11 @@ func TestSerializeMetricBool(t *testing.T) { fields := map[string]interface{}{ "oomkiller": bool(true), } - m, err := metric.New("docker", tags, fields, now) - assert.NoError(t, err) + m := metric.New("docker", tags, fields, now) s, _ := NewSerializer(false, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"_value":1,"container-name":"telegraf-test","metric_name":"docker.oomkiller","time":0}` @@ -124,12 +119,11 @@ func TestSerializeMetricBoolHec(t *testing.T) { fields := map[string]interface{}{ "oomkiller": bool(false), } - m, err := metric.New("docker", tags, fields, now) - assert.NoError(t, err) + m := metric.New("docker", tags, fields, now) s, _ := NewSerializer(true, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"time":0,"fields":{"_value":0,"container-name":"telegraf-test","metric_name":"docker.oomkiller"}}` @@ -145,12 +139,11 @@ func TestSerializeMetricString(t *testing.T) { "processorType": "ARMv7 Processor rev 4 (v7l)", "usage_idle": int64(5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s, _ := NewSerializer(false, false) var buf []byte - buf, err = s.Serialize(m) + buf, err := s.Serialize(m) assert.NoError(t, err) expS := `{"_value":5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` @@ -159,25 +152,22 @@ func TestSerializeMetricString(t *testing.T) { } func TestSerializeBatch(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ) - n := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 92.0, - }, - time.Unix(0, 0), - ), + + n := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 92.0, + }, + time.Unix(0, 0), ) metrics := []telegraf.Metric{m, n} @@ -190,16 +180,14 @@ func TestSerializeBatch(t *testing.T) { } func TestSerializeMulti(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "user": 42.0, - "system": 8.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "user": 42.0, + "system": 8.0, + }, + time.Unix(0, 0), ) metrics := []telegraf.Metric{m} @@ -212,27 +200,22 @@ func TestSerializeMulti(t *testing.T) { } func TestSerializeBatchHec(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), ) - n := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 92.0, - }, - time.Unix(0, 0), - ), + n := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 92.0, + }, + time.Unix(0, 0), ) - metrics := []telegraf.Metric{m, n} s, _ := NewSerializer(true, false) buf, err := s.SerializeBatch(metrics) @@ -243,16 +226,14 @@ func TestSerializeBatchHec(t *testing.T) { } func TestSerializeMultiHec(t *testing.T) { - m := MustMetric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "usage": 42.0, - "system": 8.0, - }, - time.Unix(0, 0), - ), + m := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "usage": 42.0, + "system": 8.0, + }, + time.Unix(0, 0), ) metrics := []telegraf.Metric{m} diff --git a/plugins/serializers/wavefront/wavefront_test.go b/plugins/serializers/wavefront/wavefront_test.go index 548326e703e6c..ee653c62b4072 100755 --- a/plugins/serializers/wavefront/wavefront_test.go +++ b/plugins/serializers/wavefront/wavefront_test.go @@ -178,13 +178,11 @@ func TestSerializeMetricFloat(t *testing.T) { fields := map[string]interface{}{ "usage_idle": float64(91.5), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 91.500000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) @@ -199,13 +197,11 @@ func TestSerializeMetricInt(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(91), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) @@ -220,13 +216,11 @@ func TestSerializeMetricBoolTrue(t *testing.T) { fields := map[string]interface{}{ "usage_idle": true, } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 1.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) @@ -241,13 +235,11 @@ func TestSerializeMetricBoolFalse(t *testing.T) { fields := map[string]interface{}{ "usage_idle": false, } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 0.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) @@ -262,13 +254,11 @@ func TestSerializeMetricFieldValue(t *testing.T) { fields := map[string]interface{}{ "value": int64(91), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{fmt.Sprintf("\"cpu\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) @@ -283,13 +273,11 @@ func TestSerializeMetricPrefix(t *testing.T) { fields := map[string]interface{}{ "usage_idle": int64(91), } - m, err := metric.New("cpu", tags, fields, now) - assert.NoError(t, err) + m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{Prefix: "telegraf."} buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) expS := []string{fmt.Sprintf("\"telegraf.cpu.usage.idle\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} assert.Equal(t, expS, mS) @@ -306,10 +294,7 @@ func benchmarkMetrics(b *testing.B) [4]telegraf.Metric { fields := map[string]interface{}{ "usage_idle": v, } - m, err := metric.New("cpu", tags, fields, now) - if err != nil { - b.Fatal(err) - } + m := metric.New("cpu", tags, fields, now) return m } return [4]telegraf.Metric{ diff --git a/selfstat/selfstat.go b/selfstat/selfstat.go index bafd3bd129bd5..78107447d780c 100644 --- a/selfstat/selfstat.go +++ b/selfstat/selfstat.go @@ -7,7 +7,6 @@ package selfstat import ( "hash/fnv" - "log" "sort" "sync" "time" @@ -96,12 +95,8 @@ func Metrics() []telegraf.Metric { fields[fieldname] = stat.Get() j++ } - metric, err := metric.New(name, tags, fields, now) - if err != nil { - log.Printf("E! Error creating selfstat metric: %s", err) - continue - } - metrics[i] = metric + m := metric.New(name, tags, fields, now) + metrics[i] = m i++ } } diff --git a/testutil/metric.go b/testutil/metric.go index 1fb18991e1558..cd0a7ad25efc7 100644 --- a/testutil/metric.go +++ b/testutil/metric.go @@ -185,17 +185,11 @@ func MustMetric( tm time.Time, tp ...telegraf.ValueType, ) telegraf.Metric { - m, err := metric.New(name, tags, fields, tm, tp...) - if err != nil { - panic("MustMetric") - } + m := metric.New(name, tags, fields, tm, tp...) return m } func FromTestMetric(met *Metric) telegraf.Metric { - m, err := metric.New(met.Measurement, met.Tags, met.Fields, met.Time, met.Type) - if err != nil { - panic("MustMetric") - } + m := metric.New(met.Measurement, met.Tags, met.Fields, met.Time, met.Type) return m } diff --git a/testutil/metric_test.go b/testutil/metric_test.go index 0c999185a8fd4..e84fc569ed638 100644 --- a/testutil/metric_test.go +++ b/testutil/metric_test.go @@ -18,7 +18,7 @@ func TestRequireMetricEqual(t *testing.T) { { name: "equal metrics should be equal", got: func() telegraf.Metric { - m, _ := metric.New( + m := metric.New( "test", map[string]string{ "t1": "v1", @@ -34,7 +34,7 @@ func TestRequireMetricEqual(t *testing.T) { return m }(), want: func() telegraf.Metric { - m, _ := metric.New( + m := metric.New( "test", map[string]string{ "t1": "v1", diff --git a/testutil/testutil.go b/testutil/testutil.go index f2d95560d7fd6..1184aec05cb5b 100644 --- a/testutil/testutil.go +++ b/testutil/testutil.go @@ -56,7 +56,7 @@ func TestMetric(value interface{}, name ...string) telegraf.Metric { measurement = name[0] } tags := map[string]string{"tag1": "value1"} - pt, _ := metric.New( + pt := metric.New( measurement, tags, map[string]interface{}{"value": value}, From 66e12e062d0403c2f60044f5925d49168284b38f Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 13 Apr 2021 16:13:06 -0400 Subject: [PATCH 367/761] remove deprecation warning (#9125) --- etc/telegraf.conf | 4 ++-- plugins/inputs/prometheus/README.md | 2 +- plugins/inputs/prometheus/prometheus.go | 5 +---- plugins/outputs/prometheus_client/README.md | 2 +- plugins/outputs/prometheus_client/prometheus_client.go | 3 +-- 5 files changed, 6 insertions(+), 10 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index dae67c3bf6b59..32ccfd62b98c6 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1259,7 +1259,7 @@ # ## Prometheus format. When using the prometheus input, use the same value in # ## both plugins to ensure metrics are round-tripped without modification. # ## -# ## example: metric_version = 1; deprecated in 1.13 +# ## example: metric_version = 1; # ## metric_version = 2; recommended version # # metric_version = 1 # @@ -7577,7 +7577,7 @@ # ## value in both plugins to ensure metrics are round-tripped without # ## modification. # ## -# ## example: metric_version = 1; deprecated in 1.13 +# ## example: metric_version = 1; # ## metric_version = 2; recommended version # # metric_version = 1 # diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index 57a1753536888..ee49e047436e4 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -16,7 +16,7 @@ in Prometheus format. ## value in both plugins to ensure metrics are round-tripped without ## modification. ## - ## example: metric_version = 1; deprecated in 1.13 + ## example: metric_version = 1; ## metric_version = 2; recommended version # metric_version = 1 diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index cc894fc7a7f8d..01ebfb61b2a24 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -86,7 +86,7 @@ var sampleConfig = ` ## value in both plugins to ensure metrics are round-tripped without ## modification. ## - ## example: metric_version = 1; deprecated in 1.13 + ## example: metric_version = 1; ## metric_version = 2; recommended version # metric_version = 1 @@ -155,9 +155,6 @@ func (p *Prometheus) Description() string { } func (p *Prometheus) Init() error { - if p.MetricVersion != 2 { - p.Log.Warnf("Use of deprecated configuration: 'metric_version = 1'; please update to 'metric_version = 2'") - } // Config proccessing for node scrape scope for monitor_kubernetes_pods p.isNodeScrapeScope = strings.EqualFold(p.PodScrapeScope, "node") diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index 9beaa062da1eb..844cf3f2d1790 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -14,7 +14,7 @@ all metrics on `/metrics` (default) to be polled by a Prometheus server. ## Prometheus format. When using the prometheus input, use the same value in ## both plugins to ensure metrics are round-tripped without modification. ## - ## example: metric_version = 1; deprecated in 1.13 + ## example: metric_version = 1; ## metric_version = 2; recommended version # metric_version = 1 diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index a37404f268056..13ba74f822e8f 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -35,7 +35,7 @@ var sampleConfig = ` ## Prometheus format. When using the prometheus input, use the same value in ## both plugins to ensure metrics are round-tripped without modification. ## - ## example: metric_version = 1; deprecated in 1.13 + ## example: metric_version = 1; ## metric_version = 2; recommended version # metric_version = 1 @@ -133,7 +133,6 @@ func (p *PrometheusClient) Init() error { default: fallthrough case 1: - p.Log.Warnf("Use of deprecated configuration: metric_version = 1; please update to metric_version = 2") p.collector = v1.NewCollector(time.Duration(p.ExpirationInterval), p.StringAsLabel, p.Log) err := registry.Register(p.collector) if err != nil { From 5f2658258289da71d68dda27085090061ee067d6 Mon Sep 17 00:00:00 2001 From: Pontus Rydin Date: Tue, 13 Apr 2021 17:07:54 -0400 Subject: [PATCH 368/761] Added MetricLookback setting (#9045) * Added MetricLookback setting * Fixed go mod issue --- plugins/inputs/vsphere/README.md | 6 ++++++ plugins/inputs/vsphere/endpoint.go | 4 +--- plugins/inputs/vsphere/vsphere.go | 8 ++++++++ 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 108637bab05d7..9bb33211d29e4 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -181,6 +181,12 @@ vm_metric_exclude = [ "*" ] ## preserve the full precision when averaging takes place. # use_int_samples = true + ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In + ## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported + ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing + ## it too much may cause performance issues. + # metric_lookback = 3 + ## Custom attributes from vCenter can be very useful for queries in order to slice the ## metrics along different dimension and for forming ad-hoc relationships. They are disabled ## by default, since they can add a considerable amount of tags to the resulting metrics. To diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index bff3701653c8d..a9c226edf80bb 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -29,8 +29,6 @@ var isIPv4 = regexp.MustCompile("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$") var isIPv6 = regexp.MustCompile("^(?:[A-Fa-f0-9]{0,4}:){1,7}[A-Fa-f0-9]{1,4}$") -const metricLookback = 3 // Number of time periods to look back at for non-realtime metrics - const maxSampleConst = 10 // Absolute maximum number of samples regardless of period const maxMetadataSamples = 100 // Number of resources to sample for metric metadata @@ -901,7 +899,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim } start, ok := e.hwMarks.Get(object.ref.Value, metricName) if !ok { - start = latest.Add(time.Duration(-res.sampling) * time.Second * (metricLookback - 1)) + start = latest.Add(time.Duration(-res.sampling) * time.Second * (time.Duration(e.Parent.MetricLookback) - 1)) } start = start.Truncate(20 * time.Second) // Truncate to maximum resolution diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 7e688b73c55fc..b014f2f764c79 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -48,6 +48,7 @@ type VSphere struct { CustomAttributeExclude []string UseIntSamples bool IPAddresses []string + MetricLookback int MaxQueryObjects int MaxQueryMetrics int @@ -237,6 +238,12 @@ var sampleConfig = ` # custom_attribute_include = [] # custom_attribute_exclude = ["*"] + ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In + ## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported + ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing + ## it too much may cause performance issues. + # metric_lookback = 3 + ## Optional SSL Config # ssl_ca = "/path/to/cafile" # ssl_cert = "/path/to/certfile" @@ -363,6 +370,7 @@ func init() { MaxQueryMetrics: 256, CollectConcurrency: 1, DiscoverConcurrency: 1, + MetricLookback: 3, ForceDiscoverOnInit: true, ObjectDiscoveryInterval: config.Duration(time.Second * 300), Timeout: config.Duration(time.Second * 60), From f3229f5ec1ea3234b22ca2b895a1b602eae73efa Mon Sep 17 00:00:00 2001 From: Dominic Tootell Date: Tue, 13 Apr 2021 22:13:46 +0100 Subject: [PATCH 369/761] Change to NewStreamParser to accept larger inputs from scanner (#8892) * change to NewStreamParser to accept larger inputs from scanner * fmt changes --- plugins/common/shim/processor.go | 26 +++++++++++--------- plugins/common/shim/processor_test.go | 35 +++++++++++++++++++++------ 2 files changed, 42 insertions(+), 19 deletions(-) diff --git a/plugins/common/shim/processor.go b/plugins/common/shim/processor.go index 33dceba872759..d8f660b360cd6 100644 --- a/plugins/common/shim/processor.go +++ b/plugins/common/shim/processor.go @@ -1,14 +1,13 @@ package shim import ( - "bufio" "fmt" "sync" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" - "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/processors" ) @@ -37,12 +36,7 @@ func (s *Shim) RunProcessor() error { acc := agent.NewAccumulator(s, s.metricCh) acc.SetPrecision(time.Nanosecond) - parser, err := parsers.NewInfluxParser() - if err != nil { - return fmt.Errorf("Failed to create new parser: %w", err) - } - - err = s.Processor.Start(acc) + err := s.Processor.Start(acc) if err != nil { return fmt.Errorf("failed to start processor: %w", err) } @@ -54,13 +48,21 @@ func (s *Shim) RunProcessor() error { wg.Done() }() - scanner := bufio.NewScanner(s.stdin) - for scanner.Scan() { - m, err := parser.ParseLine(scanner.Text()) + parser := influx.NewStreamParser(s.stdin) + for { + m, err := parser.Next() if err != nil { - fmt.Fprintf(s.stderr, "Failed to parse metric: %s\b", err) + if err == influx.EOF { + break // stream ended + } + if parseErr, isParseError := err.(*influx.ParseError); isParseError { + fmt.Fprintf(s.stderr, "Failed to parse metric: %s\b", parseErr) + continue + } + fmt.Fprintf(s.stderr, "Failure during reading stdin: %s\b", err) continue } + s.Processor.Add(m, acc) } diff --git a/plugins/common/shim/processor_test.go b/plugins/common/shim/processor_test.go index 6126656b8fcc6..ea2e61a459469 100644 --- a/plugins/common/shim/processor_test.go +++ b/plugins/common/shim/processor_test.go @@ -4,6 +4,7 @@ import ( "bufio" "io" "io/ioutil" + "math/rand" "sync" "testing" "time" @@ -16,7 +17,21 @@ import ( ) func TestProcessorShim(t *testing.T) { - p := &testProcessor{} + testSendAndRecieve(t, "f1", "fv1") +} + +func TestProcessorShimWithLargerThanDefaultScannerBufferSize(t *testing.T) { + letters := []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]rune, bufio.MaxScanTokenSize*2) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + + testSendAndRecieve(t, "f1", string(b)) +} + +func testSendAndRecieve(t *testing.T, fieldKey string, fieldValue string) { + p := &testProcessor{"hi", "mom"} stdinReader, stdinWriter := io.Pipe() stdoutReader, stdoutWriter := io.Pipe() @@ -45,7 +60,8 @@ func TestProcessorShim(t *testing.T) { "a": "b", }, map[string]interface{}{ - "v": 1, + "v": 1, + fieldKey: fieldValue, }, time.Now(), ) @@ -62,19 +78,24 @@ func TestProcessorShim(t *testing.T) { mOut, err := parser.ParseLine(out) require.NoError(t, err) - val, ok := mOut.GetTag("hi") + val, ok := mOut.GetTag(p.tagName) require.True(t, ok) - require.Equal(t, "mom", val) - + require.Equal(t, p.tagValue, val) + val2, ok := mOut.Fields()[fieldKey] + require.True(t, ok) + require.Equal(t, fieldValue, val2) go ioutil.ReadAll(r) wg.Wait() } -type testProcessor struct{} +type testProcessor struct { + tagName string + tagValue string +} func (p *testProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { for _, metric := range in { - metric.AddTag("hi", "mom") + metric.AddTag(p.tagName, p.tagValue) } return in } From 7cbde183de4381ab0df2cb055cdcb57cda4304b4 Mon Sep 17 00:00:00 2001 From: Tuan Nguyen Huy Date: Wed, 14 Apr 2021 04:31:07 +0700 Subject: [PATCH 370/761] Support float64 in enum processor (#8911) --- plugins/processors/enum/README.md | 2 +- plugins/processors/enum/enum.go | 2 ++ plugins/processors/enum/enum_test.go | 16 +++++++--------- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/plugins/processors/enum/README.md b/plugins/processors/enum/README.md index 651e58e7d2fce..0aecaaa430474 100644 --- a/plugins/processors/enum/README.md +++ b/plugins/processors/enum/README.md @@ -2,7 +2,7 @@ The Enum Processor allows the configuration of value mappings for metric tags or fields. The main use-case for this is to rewrite status codes such as _red_, _amber_ and -_green_ by numeric values such as 0, 1, 2. The plugin supports string, int and bool +_green_ by numeric values such as 0, 1, 2. The plugin supports string, int, float64 and bool types for the field values. Multiple tags or fields can be configured with separate value mappings for each. Default mapping values can be configured to be used for all values, which are not contained in the value_mappings. The diff --git a/plugins/processors/enum/enum.go b/plugins/processors/enum/enum.go index 6a4a7f67afffd..f31987775b6a5 100644 --- a/plugins/processors/enum/enum.go +++ b/plugins/processors/enum/enum.go @@ -145,6 +145,8 @@ func adjustValue(in interface{}) interface{} { return strconv.FormatBool(val) case int64: return strconv.FormatInt(val, 10) + case float64: + return strconv.FormatFloat(val, 'f', -1, 64) case uint64: return strconv.FormatUint(val, 10) default: diff --git a/plugins/processors/enum/enum_test.go b/plugins/processors/enum/enum_test.go index f8e3a34d0381c..53603ae0153c7 100644 --- a/plugins/processors/enum/enum_test.go +++ b/plugins/processors/enum/enum_test.go @@ -63,6 +63,7 @@ func TestRetainsMetric(t *testing.T) { assertFieldValue(t, "test", "string_value", fields) assertFieldValue(t, 200, "int_value", fields) assertFieldValue(t, 500, "uint_value", fields) + assertFieldValue(t, float64(3.14), "float_value", fields) assertFieldValue(t, true, "true_value", fields) assert.Equal(t, "m1", target.Name()) assert.Equal(t, source.Tags(), target.Tags()) @@ -78,15 +79,6 @@ func TestMapsSingleStringValueTag(t *testing.T) { assertTagValue(t, "valuable", "tag", tags) } -func TestNoFailureOnMappingsOnNonSupportedValuedFields(t *testing.T) { - mapper := EnumMapper{Mappings: []Mapping{{Field: "float_value", ValueMappings: map[string]interface{}{"3.14": "pi"}}}} - err := mapper.Init() - require.Nil(t, err) - fields := calculateProcessedValues(mapper, createTestMetric()) - - assertFieldValue(t, float64(3.14), "float_value", fields) -} - func TestMappings(t *testing.T) { mappings := []map[string][]interface{}{ { @@ -113,6 +105,12 @@ func TestMappings(t *testing.T) { "mapped_value": []interface{}{"internal_error", 1, false, false, false, false}, "expected_value": []interface{}{"internal_error", 1, false, 500, 500, 500}, }, + { + "field_name": []interface{}{"float_value"}, + "target_value": []interface{}{"3.14", "3.14", "3.14", "3.14", "not_float", "5"}, + "mapped_value": []interface{}{"pi", 1, false, float64(100.2), float64(3.14), "pi"}, + "expected_value": []interface{}{"pi", 1, false, float64(100.2), float64(3.14), float64(3.14)}, + }, } for _, mapping := range mappings { From 4b2e2c5263bb8bd030d2ae101438810c1af61945 Mon Sep 17 00:00:00 2001 From: Thomas Casteleyn Date: Tue, 13 Apr 2021 23:46:34 +0200 Subject: [PATCH 371/761] Log snmpv3 auth failures (#8917) --- go.mod | 2 +- go.sum | 4 ++-- plugins/inputs/snmp/snmp.go | 13 ++++++++++++- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 91ec88fa382a6..523e4fbdf4b0a 100644 --- a/go.mod +++ b/go.mod @@ -66,7 +66,7 @@ require ( github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.13 github.com/gorilla/mux v1.7.3 - github.com/gosnmp/gosnmp v1.30.0 + github.com/gosnmp/gosnmp v1.31.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 diff --git a/go.sum b/go.sum index f2f65d843f712..795772ffccca6 100644 --- a/go.sum +++ b/go.sum @@ -590,8 +590,8 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gosnmp/gosnmp v1.30.0 h1:P6uUvPaoZCZh2EXvSUIgsxYZ1vdD/Sonl2BSVCGieG8= -github.com/gosnmp/gosnmp v1.30.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= +github.com/gosnmp/gosnmp v1.31.0 h1:l18tqymKfReKBPr3kMK4mMM+n3DHlIpsZbBBSy8nuko= +github.com/gosnmp/gosnmp v1.31.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 325121be4d1a8..ec881205c6f68 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "encoding/binary" + "errors" "fmt" "log" "math" @@ -434,7 +435,17 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { // empty string. This results in all the non-table fields sharing the same // index, and being added on the same row. if pkt, err := gs.Get([]string{oid}); err != nil { - return nil, fmt.Errorf("performing get on field %s: %w", f.Name, err) + if errors.Is(err, gosnmp.ErrUnknownSecurityLevel) { + return nil, fmt.Errorf("unknown security level (sec_level)") + } else if errors.Is(err, gosnmp.ErrUnknownUsername) { + return nil, fmt.Errorf("unknown username (sec_name)") + } else if errors.Is(err, gosnmp.ErrWrongDigest) { + return nil, fmt.Errorf("wrong digest (auth_protocol, auth_password)") + } else if errors.Is(err, gosnmp.ErrDecryption) { + return nil, fmt.Errorf("decryption error (priv_protocol, priv_password)") + } else { + return nil, fmt.Errorf("performing get on field %s: %w", f.Name, err) + } } else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance { ent := pkt.Variables[0] fv, err := fieldConvert(f.Conversion, ent.Value) From 91690803d63fe3dbf447c6f7e89766ed61f2bdf5 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 14 Apr 2021 09:48:56 -0400 Subject: [PATCH 372/761] Add custom docker image info to nvidia plugin (#9124) --- plugins/inputs/nvidia_smi/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index f147137f36b77..c889e016fc464 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -89,3 +89,5 @@ nvidia_smi,compute_mode=Default,host=8218cf,index=2,name=GeForce\ GTX\ 1080,psta ### Limitations Note that there seems to be an issue with getting current memory clock values when the memory is overclocked. This may or may not apply to everyone but it's confirmed to be an issue on an EVGA 2080 Ti. + +**NOTE:** For use with docker either generate your own custom docker image based on nvidia/cuda which also installs a telegraf package or use [volume mount binding](https://docs.docker.com/storage/bind-mounts/) to inject the required binary into the docker container. From 1fa9259392668fb1e9bead6c51ac92b7ae80b164 Mon Sep 17 00:00:00 2001 From: Wilfried OLLIVIER Date: Wed, 14 Apr 2021 20:43:07 +0200 Subject: [PATCH 373/761] Fix: sync nfsclient ops map with nfsclient struct (#9128) --- plugins/inputs/nfsclient/nfsclient.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/inputs/nfsclient/nfsclient.go b/plugins/inputs/nfsclient/nfsclient.go index 6b621e4bd2265..543ba759f772c 100644 --- a/plugins/inputs/nfsclient/nfsclient.go +++ b/plugins/inputs/nfsclient/nfsclient.go @@ -467,6 +467,9 @@ func (n *NFSClient) Init() error { } } + n.nfs3Ops = nfs3Ops + n.nfs4Ops = nfs4Ops + if len(n.IncludeMounts) > 0 { n.Log.Debugf("Including these mount patterns: %v", n.IncludeMounts) } else { From e5b7a094daffd9adacb8c71c09b738f3155fc2ab Mon Sep 17 00:00:00 2001 From: Gennady Date: Wed, 14 Apr 2021 14:13:32 -0500 Subject: [PATCH 374/761] Gather all mysql channels (#5517) --- plugins/inputs/mysql/README.md | 10 +++++++- plugins/inputs/mysql/mysql.go | 45 ++++++++++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index 43a6515b04e2c..0a96f9c9b1447 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -63,9 +63,15 @@ This plugin gathers the statistic data from MySQL server ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS # gather_innodb_metrics = false + ## gather metrics from all channels from SHOW SLAVE STATUS command output + # gather_all_slave_channels = false + ## gather metrics from SHOW SLAVE STATUS command output # gather_slave_status = false + ## use SHOW ALL SLAVES STATUS command output for MariaDB + # mariadb_dialect = false + ## gather metrics from SHOW BINARY LOGS command output # gather_binary_logs = false @@ -205,7 +211,9 @@ measurement name. * Slave status - metrics from `SHOW SLAVE STATUS` the metrics are gathered when the single-source replication is on. If the multi-source replication is set, then everything works differently, this metric does not work with multi-source -replication. +replication, unless you set `gather_all_slave_channels = true`. For MariaDB, +`mariadb_dialect = true` should be set to address the field names and commands +differences. * slave_[column name]() * Binary logs - all metrics including size and count of all binary files. Requires to be turned on in configuration. diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 5f8c2918abdd6..6e81b3df2f757 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -28,6 +28,8 @@ type Mysql struct { GatherInfoSchemaAutoInc bool `toml:"gather_info_schema_auto_inc"` GatherInnoDBMetrics bool `toml:"gather_innodb_metrics"` GatherSlaveStatus bool `toml:"gather_slave_status"` + GatherAllSlaveChannels bool `toml:"gather_all_slave_channels"` + MariadbDialect bool `toml:"mariadb_dialect"` GatherBinaryLogs bool `toml:"gather_binary_logs"` GatherTableIOWaits bool `toml:"gather_table_io_waits"` GatherTableLockWaits bool `toml:"gather_table_lock_waits"` @@ -47,6 +49,7 @@ type Mysql struct { lastT time.Time initDone bool scanIntervalSlow uint32 + getStatusQuery string } const sampleConfig = ` @@ -94,6 +97,12 @@ const sampleConfig = ` ## gather metrics from SHOW SLAVE STATUS command output # gather_slave_status = false + ## gather metrics from all channels from SHOW SLAVE STATUS command output + # gather_all_slave_channels = false + + ## use MariaDB dialect for all channels SHOW SLAVE STATUS + # mariadb_dialect = false + ## gather metrics from SHOW BINARY LOGS command output # gather_binary_logs = false @@ -166,6 +175,11 @@ func (m *Mysql) InitMysql() { m.scanIntervalSlow = uint32(interval.Seconds()) } } + if m.MariadbDialect { + m.getStatusQuery = slaveStatusQueryMariadb + } else { + m.getStatusQuery = slaveStatusQuery + } m.initDone = true } @@ -295,6 +309,7 @@ const ( globalStatusQuery = `SHOW GLOBAL STATUS` globalVariablesQuery = `SHOW GLOBAL VARIABLES` slaveStatusQuery = `SHOW SLAVE STATUS` + slaveStatusQueryMariadb = `SHOW ALL SLAVES STATUS` binaryLogsQuery = `SHOW BINARY LOGS` infoSchemaProcessListQuery = ` SELECT COALESCE(command,''),COALESCE(state,''),count(*) @@ -657,7 +672,10 @@ func (m *Mysql) parseGlobalVariables(key string, value sql.RawBytes) (interface{ // This code does not work with multi-source replication. func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumulator) error { // run query - rows, err := db.Query(slaveStatusQuery) + var rows *sql.Rows + var err error + + rows, err = db.Query(m.getStatusQuery) if err != nil { return err } @@ -668,9 +686,11 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu tags := map[string]string{"server": servtag} fields := make(map[string]interface{}) - // to save the column names as a field key - // scanning keys and values separately - if rows.Next() { + // for each channel record + for rows.Next() { + // to save the column names as a field key + // scanning keys and values separately + // get columns names, and create an array with its length cols, err := rows.Columns() if err != nil { @@ -689,11 +709,26 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu if m.MetricVersion >= 2 { col = strings.ToLower(col) } - if value, ok := m.parseValue(*vals[i].(*sql.RawBytes)); ok { + + if m.GatherAllSlaveChannels && + (strings.ToLower(col) == "channel_name" || strings.ToLower(col) == "connection_name") { + // Since the default channel name is empty, we need this block + channelName := "default" + if len(*vals[i].(*sql.RawBytes)) > 0 { + channelName = string(*vals[i].(*sql.RawBytes)) + } + tags["channel"] = channelName + } else if value, ok := m.parseValue(*vals[i].(*sql.RawBytes)); ok { fields["slave_"+col] = value } } acc.AddFields("mysql", fields, tags) + + // Only the first row is relevant if not all slave-channels should be gathered, + // so break here and skip the remaining rows + if !m.GatherAllSlaveChannels { + break + } } return nil From e9a69a0c6b94cb6a709d1aa8a9c8dd6dd1f48607 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Fri, 16 Apr 2021 13:35:13 -0500 Subject: [PATCH 375/761] Only run CI for certain file changes (#9076) Updated circle-ci config to check for certain patterns in the latest commit to see if the jobs should run. Credit to this blog post for the idea: https://dev.to/acro5piano/exit-circleci-jobs-if-changed-files-do-not-match-specific-pattern-mel --- .circleci/config.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index f14fa48e0a074..efbb07f986376 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -25,6 +25,14 @@ executors: GOFLAGS: -p=8 commands: + check-changed-files-or-halt: + steps: + - run: git diff master --name-only --no-color | egrep -e "^(\.circleci\/.*)|(.*\.(go|mod|sum))|Makefile$" || circleci step halt + check-changed-files-or-halt-windows: + steps: + - run: + command: git diff master --name-only --no-color | egrep -e "^(\.circleci\/.*)|(.*\.(go|mod|sum))|Makefile$" || circleci step halt + shell: bash.exe test-go: parameters: goarch: @@ -32,6 +40,7 @@ commands: default: "amd64" steps: - checkout + - check-changed-files-or-halt - attach_workspace: at: '/go' - run: 'GOARCH=<< parameters.goarch >> make' @@ -45,6 +54,7 @@ commands: default: false steps: - checkout + - check-changed-files-or-halt - attach_workspace: at: '/go' - when: @@ -70,6 +80,7 @@ jobs: - checkout - restore_cache: key: go-mod-v1-{{ checksum "go.sum" }} + - check-changed-files-or-halt - run: 'make deps' - run: 'make tidy' - save_cache: @@ -105,6 +116,7 @@ jobs: - checkout - restore_cache: key: mac-go-mod-v0-{{ checksum "go.sum" }} + - check-changed-files-or-halt - run: 'sh ./scripts/mac_installgo.sh' - save_cache: name: 'Saving cache' @@ -124,6 +136,7 @@ jobs: shell: powershell.exe steps: - checkout + - check-changed-files-or-halt-windows - run: choco upgrade golang --version=1.16.2 - run: choco install make - run: git config --system core.longpaths true @@ -148,6 +161,7 @@ jobs: shell: powershell.exe steps: - checkout + - check-changed-files-or-halt - attach_workspace: at: '/build' - run: @@ -172,6 +186,7 @@ jobs: shell: /bin/bash --login -o pipefail steps: - checkout + - check-changed-files-or-halt - attach_workspace: at: '.' - run: From 1a86fd1a2dd30dcd60d7a704873500acaa752f0e Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Fri, 16 Apr 2021 14:39:19 -0400 Subject: [PATCH 376/761] Config Option to Enable Logging with Local Time (#9123) * Configurable local time logging * make timezone configurable * Address linter feedback. * update with example --- cmd/telegraf/telegraf.go | 1 + config/config.go | 8 +++++++- docs/CONFIGURATION.md | 4 ++++ etc/telegraf.conf | 4 ++++ etc/telegraf_windows.conf | 4 ++++ logger/logger.go | 29 ++++++++++++++++++++++++----- logger/logger_test.go | 5 ++++- 7 files changed, 48 insertions(+), 7 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index f7df792728b23..60001fb60e064 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -174,6 +174,7 @@ func runAgent(ctx context.Context, RotationInterval: ag.Config.Agent.LogfileRotationInterval, RotationMaxSize: ag.Config.Agent.LogfileRotationMaxSize, RotationMaxArchives: ag.Config.Agent.LogfileRotationMaxArchives, + LogWithTimezone: ag.Config.Agent.LogWithTimezone, } logger.SetupLogging(logConfig) diff --git a/config/config.go b/config/config.go index 610f79cc3fd7e..c1bf9235f1583 100644 --- a/config/config.go +++ b/config/config.go @@ -188,6 +188,9 @@ type AgentConfig struct { // If set to -1, no archives are removed. LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"` + // Pick a timezone to use when logging or type 'local' for local time. + LogWithTimezone string `toml:"log_with_timezone"` + Hostname string OmitHostname bool } @@ -356,11 +359,14 @@ var agentConfig = ` ## If set to -1, no archives are removed. # logfile_rotation_max_archives = 5 + ## Pick a timezone to use when logging or type 'local' for local time. + ## Example: America/Chicago + # log_with_timezone = "" + ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do no set the "host" tag in the telegraf agent. omit_hostname = false - ` var outputHeader = ` diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index d97c86ba082d3..4965a4337f8d8 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -219,6 +219,10 @@ The agent table configures Telegraf and the defaults used across all plugins. Maximum number of rotated archives to keep, any older logs are deleted. If set to -1, no archives are removed. +- **log_with_timezone**: + Pick a timezone to use when logging or type 'local' for local time. Example: 'America/Chicago'. + [See this page for options/formats.](https://socketloop.com/tutorials/golang-display-list-of-timezones-with-gmt) + - **hostname**: Override default hostname, if empty use os.Hostname() - **omit_hostname**: diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 32ccfd62b98c6..9e02cc4c38ca0 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -90,6 +90,10 @@ ## If set to -1, no archives are removed. # logfile_rotation_max_archives = 5 + ## Pick a timezone to use when logging or type 'local' for local time. Example: 'America/Chicago'. + ## See https://socketloop.com/tutorials/golang-display-list-of-timezones-with-gmt for timezone formatting options. + # log_with_timezone = "" + ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do no set the "host" tag in the telegraf agent. diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 5b70928994158..5b7ca95057444 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -90,6 +90,10 @@ ## If set to -1, no archives are removed. # logfile_rotation_max_archives = 5 + ## Pick a timezone to use when logging or type 'local' for local time. Example: 'America/Chicago'. + ## See https://socketloop.com/tutorials/golang-display-list-of-timezones-with-gmt for timezone formatting options. + # log_with_timezone = "" + ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do no set the "host" tag in the telegraf agent. diff --git a/logger/logger.go b/logger/logger.go index c365c057304aa..27e3c79f1fa06 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -6,6 +6,7 @@ import ( "log" "os" "regexp" + "strings" "time" "github.com/influxdata/telegraf/config" @@ -38,6 +39,8 @@ type LogConfig struct { RotationMaxSize config.Size // maximum rotated files to keep (older ones will be deleted) RotationMaxArchives int + // pick a timezone to use when logging. or type 'local' for local time. + LogWithTimezone string } type LoggerCreator interface { @@ -56,15 +59,19 @@ func registerLogger(name string, loggerCreator LoggerCreator) { type telegrafLog struct { writer io.Writer internalWriter io.Writer + timezone *time.Location } func (t *telegrafLog) Write(b []byte) (n int, err error) { var line []byte + timeToPrint := time.Now().In(t.timezone) + if !prefixRegex.Match(b) { - line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" I! "), b...) + line = append([]byte(timeToPrint.Format(time.RFC3339)+" I! "), b...) } else { - line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...) + line = append([]byte(timeToPrint.Format(time.RFC3339)+" "), b...) } + return t.writer.Write(line) } @@ -82,11 +89,23 @@ func (t *telegrafLog) Close() error { } // newTelegrafWriter returns a logging-wrapped writer. -func newTelegrafWriter(w io.Writer) io.Writer { +func newTelegrafWriter(w io.Writer, c LogConfig) (io.Writer, error) { + timezoneName := c.LogWithTimezone + + if strings.ToLower(timezoneName) == "local" { + timezoneName = "Local" + } + + tz, err := time.LoadLocation(timezoneName) + if err != nil { + return nil, errors.New("error while setting logging timezone: " + err.Error()) + } + return &telegrafLog{ writer: wlog.NewWriter(w), internalWriter: w, - } + timezone: tz, + }, nil } // SetupLogging configures the logging output. @@ -119,7 +138,7 @@ func (t *telegrafLogCreator) CreateLogger(config LogConfig) (io.Writer, error) { writer = defaultWriter } - return newTelegrafWriter(writer), nil + return newTelegrafWriter(writer, config) } // Keep track what is actually set as a log output, because log package doesn't provide a getter. diff --git a/logger/logger_test.go b/logger/logger_test.go index 8bb01e8e5328b..d2c699da52644 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -137,7 +137,10 @@ func TestLogTargetSettings(t *testing.T) { func BenchmarkTelegrafLogWrite(b *testing.B) { var msg = []byte("test") var buf bytes.Buffer - w := newTelegrafWriter(&buf) + w, err := newTelegrafWriter(&buf, LogConfig{}) + if err != nil { + panic("Unable to create log writer.") + } for i := 0; i < b.N; i++ { buf.Reset() w.Write(msg) From 9d163f6a8326d0a64594a675a48dfd4d72a95c07 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Fri, 16 Apr 2021 16:24:50 -0500 Subject: [PATCH 377/761] Fix CI config to check if branch is master before skipping (#9140) Related to: https://github.com/influxdata/telegraf/pull/9076 In order to support skipping a job depending on file changes, only works on non-master branches. This pull requests updates the config to check the current branch. --- .circleci/config.yml | 4 ++-- scripts/check-file-changes.sh | 7 +++++++ 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100755 scripts/check-file-changes.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index efbb07f986376..5496f67875e65 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -27,11 +27,11 @@ executors: commands: check-changed-files-or-halt: steps: - - run: git diff master --name-only --no-color | egrep -e "^(\.circleci\/.*)|(.*\.(go|mod|sum))|Makefile$" || circleci step halt + - run: ./scripts/check-file-changes.sh check-changed-files-or-halt-windows: steps: - run: - command: git diff master --name-only --no-color | egrep -e "^(\.circleci\/.*)|(.*\.(go|mod|sum))|Makefile$" || circleci step halt + command: ./scripts/check-file-changes.sh shell: bash.exe test-go: parameters: diff --git a/scripts/check-file-changes.sh b/scripts/check-file-changes.sh new file mode 100755 index 0000000000000..4897f2d3fd73f --- /dev/null +++ b/scripts/check-file-changes.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +BRANCH="$(git rev-parse --abbrev-ref HEAD)" +echo $BRANCH +if [[ "$BRANCH" != "master" ]]; then + git diff master --name-only --no-color | egrep -e "^(\.circleci\/.*)|(.*\.(go|mod|sum))|Makefile$" || circleci step halt; +fi From da5991d16c104c811184cfb7b29b10b2d08b580e Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Mon, 19 Apr 2021 11:14:53 -0400 Subject: [PATCH 378/761] Add time.now() starlark processor example test. (#9133) --- plugins/processors/starlark/starlark.go | 15 ++- plugins/processors/starlark/starlark_test.go | 124 ++++++++++++------ .../starlark/testdata/compare_metrics.star | 1 + .../processors/starlark/testdata/iops.star | 3 +- .../starlark/testdata/json_nested.star | 3 +- .../starlark/testdata/multiple_metrics.star | 2 +- .../testdata/multiple_metrics_with_json.star | 7 +- .../processors/starlark/testdata/pivot.star | 4 +- .../rename_prometheus_remote_write.star | 4 +- .../starlark/testdata/schema_sizing.star | 2 +- .../starlark/testdata/time_set_timestamp.star | 15 +++ .../starlark/testdata/value_filter.star | 6 +- 12 files changed, 125 insertions(+), 61 deletions(-) create mode 100644 plugins/processors/starlark/testdata/time_set_timestamp.star diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go index dceee7bfb5f12..44f78fa6b6988 100644 --- a/plugins/processors/starlark/starlark.go +++ b/plugins/processors/starlark/starlark.go @@ -46,10 +46,11 @@ type Starlark struct { Log telegraf.Logger `toml:"-"` - thread *starlark.Thread - applyFunc *starlark.Function - args starlark.Tuple - results []telegraf.Metric + thread *starlark.Thread + applyFunc *starlark.Function + args starlark.Tuple + results []telegraf.Metric + starlarkLoadFunc func(module string, logger telegraf.Logger) (starlark.StringDict, error) } func (s *Starlark) Init() error { @@ -63,7 +64,7 @@ func (s *Starlark) Init() error { s.thread = &starlark.Thread{ Print: func(_ *starlark.Thread, msg string) { s.Log.Debug(msg) }, Load: func(thread *starlark.Thread, module string) (starlark.StringDict, error) { - return loadFunc(module, s.Log) + return s.starlarkLoadFunc(module, s.Log) }, } @@ -240,7 +241,9 @@ func init() { func init() { processors.AddStreaming("starlark", func() telegraf.StreamingProcessor { - return &Starlark{} + return &Starlark{ + starlarkLoadFunc: loadFunc, + } }) } diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index f506e26ecfa0b..15152a2f349c3 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -15,6 +15,9 @@ import ( "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + starlarktime "go.starlark.net/lib/time" + "go.starlark.net/starlark" + "go.starlark.net/starlarkstruct" ) // Tests for runtime errors in the processors Init function. @@ -26,8 +29,9 @@ func TestInitError(t *testing.T) { { name: "source must define apply", plugin: &Starlark{ - Source: "", - Log: testutil.Logger{}, + Source: "", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, { @@ -36,7 +40,8 @@ func TestInitError(t *testing.T) { Source: ` apply = 42 `, - Log: testutil.Logger{}, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, { @@ -46,7 +51,8 @@ apply = 42 def apply(): pass `, - Log: testutil.Logger{}, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, { @@ -55,13 +61,15 @@ def apply(): Source: ` for `, - Log: testutil.Logger{}, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, { name: "no source no script", plugin: &Starlark{ - Log: testutil.Logger{}, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, { @@ -71,15 +79,17 @@ for def apply(): pass `, - Script: "testdata/ratio.star", - Log: testutil.Logger{}, + Script: "testdata/ratio.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, { name: "script file not found", plugin: &Starlark{ - Script: "testdata/file_not_found.star", - Log: testutil.Logger{}, + Script: "testdata/file_not_found.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, }, } @@ -219,8 +229,9 @@ def apply(metric): for _, tt := range applyTests { t.Run(tt.name, func(t *testing.T) { plugin := &Starlark{ - Source: tt.source, - Log: testutil.Logger{}, + Source: tt.source, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, } err := plugin.Init() require.NoError(t, err) @@ -2476,9 +2487,10 @@ def apply(metric): for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { plugin := &Starlark{ - Source: tt.source, - Log: testutil.Logger{}, - Constants: tt.constants, + Source: tt.source, + Log: testutil.Logger{}, + Constants: tt.constants, + starlarkLoadFunc: testLoadFunc, } err := plugin.Init() require.NoError(t, err) @@ -2618,8 +2630,9 @@ func TestScript(t *testing.T) { { name: "rename", plugin: &Starlark{ - Script: "testdata/rename.star", - Log: testutil.Logger{}, + Script: "testdata/rename.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, input: []telegraf.Metric{ testutil.MustMetric("cpu", @@ -2645,8 +2658,9 @@ func TestScript(t *testing.T) { { name: "drop fields by type", plugin: &Starlark{ - Script: "testdata/drop_string_fields.star", - Log: testutil.Logger{}, + Script: "testdata/drop_string_fields.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, input: []telegraf.Metric{ testutil.MustMetric("device", @@ -2676,8 +2690,9 @@ func TestScript(t *testing.T) { { name: "drop fields with unexpected type", plugin: &Starlark{ - Script: "testdata/drop_fields_with_unexpected_type.star", - Log: testutil.Logger{}, + Script: "testdata/drop_fields_with_unexpected_type.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, input: []telegraf.Metric{ testutil.MustMetric("device", @@ -2710,8 +2725,9 @@ func TestScript(t *testing.T) { { name: "scale", plugin: &Starlark{ - Script: "testdata/scale.star", - Log: testutil.Logger{}, + Script: "testdata/scale.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, input: []telegraf.Metric{ testutil.MustMetric("cpu", @@ -2731,8 +2747,9 @@ func TestScript(t *testing.T) { { name: "ratio", plugin: &Starlark{ - Script: "testdata/ratio.star", - Log: testutil.Logger{}, + Script: "testdata/ratio.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, input: []telegraf.Metric{ testutil.MustMetric("mem", @@ -2759,8 +2776,9 @@ func TestScript(t *testing.T) { { name: "logging", plugin: &Starlark{ - Script: "testdata/logging.star", - Log: testutil.Logger{}, + Script: "testdata/logging.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, input: []telegraf.Metric{ testutil.MustMetric("log", @@ -2784,8 +2802,9 @@ func TestScript(t *testing.T) { { name: "multiple_metrics", plugin: &Starlark{ - Script: "testdata/multiple_metrics.star", - Log: testutil.Logger{}, + Script: "testdata/multiple_metrics.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, input: []telegraf.Metric{ testutil.MustMetric("mm", @@ -2816,8 +2835,9 @@ func TestScript(t *testing.T) { { name: "multiple_metrics_with_json", plugin: &Starlark{ - Script: "testdata/multiple_metrics_with_json.star", - Log: testutil.Logger{}, + Script: "testdata/multiple_metrics_with_json.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, input: []telegraf.Metric{ testutil.MustMetric("json", @@ -2825,7 +2845,7 @@ func TestScript(t *testing.T) { map[string]interface{}{ "value": "[{\"label\": \"hello\"}, {\"label\": \"world\"}]", }, - time.Unix(0, 0), + time.Unix(1618488000, 999), ), }, expected: []telegraf.Metric{ @@ -2834,22 +2854,23 @@ func TestScript(t *testing.T) { map[string]interface{}{ "value": "hello", }, - time.Unix(0, 0), + time.Unix(1618488000, 999), ), testutil.MustMetric("json", map[string]string{}, map[string]interface{}{ "value": "world", }, - time.Unix(0, 0), + time.Unix(1618488000, 999), ), }, }, { name: "fail", plugin: &Starlark{ - Script: "testdata/fail.star", - Log: testutil.Logger{}, + Script: "testdata/fail.star", + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, }, input: []telegraf.Metric{ testutil.MustMetric("fail", @@ -3137,8 +3158,9 @@ def apply(metric): for _, tt := range tests { b.Run(tt.name, func(b *testing.B) { plugin := &Starlark{ - Source: tt.source, - Log: testutil.Logger{}, + Source: tt.source, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, } err := plugin.Init() @@ -3182,8 +3204,9 @@ func TestAllScriptTestData(t *testing.T) { outputMetrics = parseMetricsFrom(t, lines, "Example Output:") } plugin := &Starlark{ - Script: fn, - Log: testutil.Logger{}, + Script: fn, + Log: testutil.Logger{}, + starlarkLoadFunc: testLoadFunc, } require.NoError(t, plugin.Init()) @@ -3204,7 +3227,7 @@ func TestAllScriptTestData(t *testing.T) { err = plugin.Stop() require.NoError(t, err) - testutil.RequireMetricsEqual(t, outputMetrics, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) + testutil.RequireMetricsEqual(t, outputMetrics, acc.GetTelegrafMetrics(), testutil.SortMetrics()) }) return nil }) @@ -3256,3 +3279,22 @@ func parseErrorMessage(t *testing.T, lines []string, header string) string { require.True(t, startIdx < len(lines), fmt.Sprintf("Expected to find the error message after %q, but found none", header)) return strings.TrimLeft(lines[startIdx], "# ") } + +func testLoadFunc(module string, logger telegraf.Logger) (starlark.StringDict, error) { + result, err := loadFunc(module, logger) + if err != nil { + return nil, err + } + + if module == "time.star" { + customModule := result["time"].(*starlarkstruct.Module) + customModule.Members["now"] = starlark.NewBuiltin("now", testNow) + result["time"] = customModule + } + + return result, nil +} + +func testNow(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { + return starlarktime.Time(time.Date(2021, 4, 15, 12, 0, 0, 999, time.UTC)), nil +} diff --git a/plugins/processors/starlark/testdata/compare_metrics.star b/plugins/processors/starlark/testdata/compare_metrics.star index 79555729d1814..5e855df443be8 100644 --- a/plugins/processors/starlark/testdata/compare_metrics.star +++ b/plugins/processors/starlark/testdata/compare_metrics.star @@ -22,4 +22,5 @@ def apply(metric): result = Metric("cpu_diff") # Set the field "value" to the difference between the value of the last metric and the current one result.fields["value"] = last.fields["value"] - metric.fields["value"] + result.time = metric.time return result diff --git a/plugins/processors/starlark/testdata/iops.star b/plugins/processors/starlark/testdata/iops.star index e92b79e0ab782..fad572f27b77d 100644 --- a/plugins/processors/starlark/testdata/iops.star +++ b/plugins/processors/starlark/testdata/iops.star @@ -41,7 +41,8 @@ def apply(metric): diskiops.fields["iops"] = ( io / interval_seconds ) diskiops.tags["name"] = disk_name diskiops.tags["host"] = metric.tags["host"] - return [diskiops] + diskiops.time = metric.time + return diskiops # This could be aggregated to obtain max IOPS using: # diff --git a/plugins/processors/starlark/testdata/json_nested.star b/plugins/processors/starlark/testdata/json_nested.star index 3ffa20d0cbfb2..cc391d6a5f91b 100644 --- a/plugins/processors/starlark/testdata/json_nested.star +++ b/plugins/processors/starlark/testdata/json_nested.star @@ -27,7 +27,7 @@ # json value="[{\"fields\": {\"LogEndOffset\": 339238, \"LogStartOffset\": 339238, \"NumLogSegments\": 1, \"Size\": 0, \"UnderReplicatedPartitions\": 0}, \"name\": \"partition\", \"tags\": {\"host\": \"CUD1-001559\", \"jolokia_agent_url\": \"http://localhost:7777/jolokia\", \"partition\": \"1\", \"topic\": \"qa-kafka-connect-logs\"}, \"timestamp\": 1591124461}]" # Example Output: -# partition,host=CUD1-001559,jolokia_agent_url=http://localhost:7777/jolokia,partition=1,topic=qa-kafka-connect-logs LogEndOffset=339238i,LogStartOffset=339238i,NumLogSegments=1i,Size=0i,UnderReplicatedPartitions=0i 1610056029037925000 +# partition,host=CUD1-001559,jolokia_agent_url=http://localhost:7777/jolokia,partition=1,topic=qa-kafka-connect-logs LogEndOffset=339238i,LogStartOffset=339238i,NumLogSegments=1i,Size=0i,UnderReplicatedPartitions=0i 1591124461000000000 load("json.star", "json") @@ -41,5 +41,6 @@ def apply(metric): new_metric.tags[str(tag[0])] = tag[1] for field in obj["fields"].items(): # 5 Fields to iterate through new_metric.fields[str(field[0])] = field[1] + new_metric.time = int(obj["timestamp"] * 1e9) metrics.append(new_metric) return metrics diff --git a/plugins/processors/starlark/testdata/multiple_metrics.star b/plugins/processors/starlark/testdata/multiple_metrics.star index 3d2e3d85f9e57..6abf567f66c97 100644 --- a/plugins/processors/starlark/testdata/multiple_metrics.star +++ b/plugins/processors/starlark/testdata/multiple_metrics.star @@ -15,7 +15,7 @@ def apply(metric): # Set the field "value" to b metric2.fields["value"] = "b" # Reset the time (only needed for testing purpose) - metric2.time = 0 + metric2.time = metric.time # Add metric2 to the list of metrics metrics.append(metric2) # Rename the original metric to "mm1" diff --git a/plugins/processors/starlark/testdata/multiple_metrics_with_json.star b/plugins/processors/starlark/testdata/multiple_metrics_with_json.star index 78f318e62c7ac..fa4dfcc483e1b 100644 --- a/plugins/processors/starlark/testdata/multiple_metrics_with_json.star +++ b/plugins/processors/starlark/testdata/multiple_metrics_with_json.star @@ -4,11 +4,12 @@ # json value="[{\"label\": \"hello\"}, {\"label\": \"world\"}]" # # Example Output: -# json value="hello" 1465839830100400201 -# json value="world" 1465839830100400201 +# json value="hello" 1618488000000000999 +# json value="world" 1618488000000000999 # loads json.encode(), json.decode(), json.indent() load("json.star", "json") +load("time.star", "time") def apply(metric): # Initialize a list of metrics @@ -20,7 +21,7 @@ def apply(metric): # Set the field "value" to the label extracted from the current json object current_metric.fields["value"] = obj["label"] # Reset the time (only needed for testing purpose) - current_metric.time = 0 + current_metric.time = time.now().unix_nano # Add metric to the list of metrics metrics.append(current_metric) return metrics diff --git a/plugins/processors/starlark/testdata/pivot.star b/plugins/processors/starlark/testdata/pivot.star index f32ebf45d9763..c57d13d5fa420 100644 --- a/plugins/processors/starlark/testdata/pivot.star +++ b/plugins/processors/starlark/testdata/pivot.star @@ -4,10 +4,10 @@ In this example it pivots the value of key `sensor` to be the key of the value in key `value` Example Input: -temperature sensor="001A0",value=111.48 +temperature sensor="001A0",value=111.48 1618488000000000999 Example Output: -temperature 001A0=111.48 +temperature 001A0=111.48 1618488000000000999 ''' def apply(metric): diff --git a/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star b/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star index cee49196c48ff..87c4e764bf678 100644 --- a/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star +++ b/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star @@ -2,10 +2,10 @@ # Assumes there is only one field as is the case for prometheus remote write. # # Example Input: -# prometheus_remote_write,instance=localhost:9090,job=prometheus,quantile=0.99 go_gc_duration_seconds=4.63 1614889298859000000 +# prometheus_remote_write,instance=localhost:9090,job=prometheus,quantile=0.99 go_gc_duration_seconds=4.63 1618488000000000999 # # Example Output: -# go_gc_duration_seconds,instance=localhost:9090,job=prometheus,quantile=0.99 value=4.63 1614889299000000000 +# go_gc_duration_seconds,instance=localhost:9090,job=prometheus,quantile=0.99 value=4.63 1618488000000000999 def apply(metric): if metric.name == "prometheus_remote_write": diff --git a/plugins/processors/starlark/testdata/schema_sizing.star b/plugins/processors/starlark/testdata/schema_sizing.star index d382749cb06a5..c716a153c7a23 100644 --- a/plugins/processors/starlark/testdata/schema_sizing.star +++ b/plugins/processors/starlark/testdata/schema_sizing.star @@ -51,7 +51,7 @@ def apply(metric): produce_pairs(new_metric, str_keys, "str", key=True) produce_pairs(new_metric, str_vals, "str") - + new_metric.time = metric.time return new_metric def produce_pairs(metric, li, field_type, key=False): diff --git a/plugins/processors/starlark/testdata/time_set_timestamp.star b/plugins/processors/starlark/testdata/time_set_timestamp.star new file mode 100644 index 0000000000000..bc64457dce880 --- /dev/null +++ b/plugins/processors/starlark/testdata/time_set_timestamp.star @@ -0,0 +1,15 @@ +# Example of setting the metric timestamp to the current time. +# +# Example Input: +# time result="OK" 1515581000000000000 +# +# Example Output: +# time result="OK" 1618488000000000999 + +load('time.star', 'time') + +def apply(metric): + # You can set the timestamp by using the current time. + metric.time = time.now().unix_nano + + return metric \ No newline at end of file diff --git a/plugins/processors/starlark/testdata/value_filter.star b/plugins/processors/starlark/testdata/value_filter.star index eeb2432f6679f..a4ceb28a68a72 100644 --- a/plugins/processors/starlark/testdata/value_filter.star +++ b/plugins/processors/starlark/testdata/value_filter.star @@ -4,11 +4,11 @@ In this example we look at the `value` field of the metric. If the value is zeor, we delete all the fields, effectively dropping the metric. Example Input: -temperature sensor="001A0",value=111.48 -temperature sensor="001B0",value=0.0 +temperature sensor="001A0",value=111.48 1618488000000000999 +temperature sensor="001B0",value=0.0 1618488000000000999 Example Output: -temperature sensor="001A0",value=111.48 +temperature sensor="001A0",value=111.48 1618488000000000999 ''' def apply(metric): From 916ac99675a321ba679bd6f0b4f3188770b92c0c Mon Sep 17 00:00:00 2001 From: reimda Date: Mon, 19 Apr 2021 16:21:06 -0600 Subject: [PATCH 379/761] Add doc on supported platforms (#9100) --- docs/SUPPORTED_PLATFORMS.md | 199 ++++++++++++++++++++++++++++++++++++ 1 file changed, 199 insertions(+) create mode 100644 docs/SUPPORTED_PLATFORMS.md diff --git a/docs/SUPPORTED_PLATFORMS.md b/docs/SUPPORTED_PLATFORMS.md new file mode 100644 index 0000000000000..9df5dfa2cbaf9 --- /dev/null +++ b/docs/SUPPORTED_PLATFORMS.md @@ -0,0 +1,199 @@ +# Supported Platforms + +Telegraf is a cross-platform application. This doc helps define which +operating systems, distributions, and releases Telegraf supports. + +Telegraf is supported on Linux, FreeBSD, Windows, and macOS. It is +written in Go which supports these operating systems and +more. Telegraf may work on Go's other operating systems and users are +welcome to build their own binaries for them. Bug reports should be +submitted only for supported platforms. + +Golang.org has a [table][go-table] of valid OS and architecture +combinations and the golang wiki has more specific [minimum +requirements][go-reqs] for Go itself. + +[go-table]: https://golang.org/doc/install/source#environment +[go-reqs]: https://github.com/golang/go/wiki/MinimumRequirements#operating-systems + +## Linux + +Telegraf intent: *Support latest versions of major linux +distributions* + +Telegraf supports RHEL, Fedora, Debian, and Ubuntu. InfluxData +provides package repositories for these distributions. Instructions +for using the package repositories can be found on +[docs.influxdata.com][repo-docs]. Bug reports should be submitted only +for supported distributions and releases. + +Telegraf's Debian or Ubuntu packages are likely to work on other +Debian-based distributions although these are not +supported. Similarly, Telegraf's Fedora and RHEL packages are likely +to work on other Redhat-based distributions although again these are +not supported. + +Telegraf releases include .tar.gz packages for use with other +distributions, for building container images, or for installation +without a package manager. As part of telegraf's release process we +publish [official images][docker-hub] to Docker Hub. + +Distrowatch lists [major distributions][dw-major] and tracks +[popularity][dw-pop] of distributions. Wikipedia lists [linux +distributions][wp-distro] by the major distribution they're based on. + +[repo-docs]: https://docs.influxdata.com/telegraf/latest/introduction/installation/ +[docker-hub]: https://hub.docker.com/_/telegraf +[dw-major]: https://distrowatch.com/dwres.php?resource=major +[dw-pop]: https://distrowatch.com/dwres.php?resource=popularity +[wp-distro]: https://en.wikipedia.org/wiki/List_of_Linux_distributions + +### RHEL + +Red Hat makes a major release every four to five years and supports +each release in production for ten years. Extended support is +available for three or more years. + +Telegraf intent: *Support releases in RHEL production, but not in +extended support.* + +Redhat publishes [release history][rh-history] and wikipedia has a +[summary timeline][wp-rhel]. + +As of April 2021, 7 and 8 are production releases. + +[rh-history]: https://access.redhat.com/articles/3078 +[wp-rhel]: https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Version_history_and_timeline + +### Ubuntu + +Ubuntu makes two releases a year. Every two years one of the releases +is an LTS (long-term support) release. Interim (non-LTS) releases are +in standard support for nine months. LTS releases are in maintenance +for five years, then in extended security maintenance for up to three +more years. + +Telegraf intent: *Support interim releases and LTS releases in Ubuntu +maintenance, but not in extended security maintenance.* + +Ubuntu publishes [release history][ub-history] and wikipedia has a +[table][wp-ub] of all releases and support status. + +As of April 2021, Ubuntu 20.10 is in standard support. Ubuntu 18.04 +LTS and 20.04 LTS are in maintenance. + +[ub-history]: https://ubuntu.com/about/release-cycle +[wp-ub]: https://en.wikipedia.org/wiki/Ubuntu_version_history#Table_of_versions + +### Debian + +Debian generally makes major releases every two years and provides +security support for each release for three years. After security +support expires the release enters long term support (LTS) until at +least five years after release. + +Telegraf intent: *Support releases under Debian security support* + +Debian publishes [releases and support status][deb-history] and +wikipedia has a [summary table][wp-deb]. + +As of April 2021, Debian 10 is in security support. + +[deb-history]: https://www.debian.org/releases/ +[wp-deb]: https://en.wikipedia.org/wiki/Debian_version_history#Release_table + +### Fedora + +Fedora makes two releases a year and supports each release for a year. + +Telegraf intent: *Support releases supported by Fedora* + +Fedora publishes [release history][fed-history] and wikipedia has a +[summary table][wp-fed]. + +[fed-history]: https://fedoraproject.org/wiki/Releases +[wp-fed]: https://en.wikipedia.org/wiki/Fedora_version_history#Version_history + +## FreeBSD + +FreeBSD makes major releases about every two years. Releases reach end +of life after five years. + +Telegraf intent: *Support releases under FreeBSD security support* + +FreeBSD publishes [release history][freebsd-history] and wikipedia has +a [summary table][wp-freebsd]. + +As of April 2021, releases 11 and 12 are under security support. + +[freebsd-history]: https://www.freebsd.org/security/#sup +[wp-freebsd]: https://en.wikipedia.org/wiki/FreeBSD#Version_history + +## Windows + +Telegraf intent: *Support current versions of Windows and Windows +Server* + +Microsoft has two release channels, the semi-annual channel (SAC) and +the long-term servicing channel (LTSC). The semi-annual channel is for +mainstream feature releases. + +Microsoft publishes [lifecycle policy by release][ms-lifecycle] and a +[product lifecycle faq][ms-lifecycle-faq]. + +[ms-lifecycle]: https://docs.microsoft.com/en-us/lifecycle/products/?terms=windows +[ms-lifecycle-faq]: https://docs.microsoft.com/en-us/lifecycle/faq/windows + +### Windows 10 + +Windows 10 makes SAC releases twice a year and supports those releases +for [18 or 30 months][w10-timeline]. They also make LTSC releases +which are supported for 10 years but are intended only for medical or +industrial devices that require a static feature set. + +Telegraf intent: *Support semi-annual channel releases supported by +Microsoft* + +Microsoft publishes Windows 10 [release information][w10-history], and +[servicing channels][w10-channels]. Wikipedia has a [summary +table][wp-w10] of support status. + +As of April 2021, versions 19H2, 20H1, and 20H2 are supported. + +[w10-timeline]: https://docs.microsoft.com/en-us/lifecycle/faq/windows#what-is-the-servicing-timeline-for-a-version-feature-update-of-windows-10 +[w10-history]: https://docs.microsoft.com/en-us/windows/release-health/release-information +[w10-channels]: https://docs.microsoft.com/en-us/windows/deployment/update/get-started-updates-channels-tools +[wp-w10]: https://en.wikipedia.org/wiki/Windows_10_version_history#Channels + +### Windows Server + +Windows Server makes SAC releases for that are supported for 18 months +and LTSC releases that are supported for five years under mainstream +support and five more years under extended support. + +Telegraf intent: *Support current semi-annual channel releases +supported by Microsoft and long-term releases under mainstream +support* + +Microsoft publishes Windows Server [release information][ws-history] +and [servicing channels][ws-channels]. + +As of April 2021, Server 2016 (version 1607) and Server 2019 (version +1809) are LTSC releases under mainstream support and versions 1909, +2004, and 20H2 are supported SAC releases. + +[ws-history]: https://docs.microsoft.com/en-us/windows-server/get-started/windows-server-release-info +[ws-channels]: https://docs.microsoft.com/en-us/windows-server/get-started-19/servicing-channels-19 + +## macOS + +MacOS makes one major release a year and provides support for each +release for three years. + +Telegraf intent: *Support releases supported by Apple* + +Release history is available from [wikipedia][wp-macos]. + +As of April 2021, 10.14, 10.15, and 11 are supported. + +[wp-macos]: https://en.wikipedia.org/wiki/MacOS#Release_history From 1cfe8da93f87c7b14a91088da5187ff1fbd9e165 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 20 Apr 2021 11:24:24 -0400 Subject: [PATCH 380/761] clarify pollIntervalDisabled message (#9158) closes #9155 --- plugins/common/shim/example/cmd/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/common/shim/example/cmd/main.go b/plugins/common/shim/example/cmd/main.go index 4f51f7f878fb3..7326cc492476c 100644 --- a/plugins/common/shim/example/cmd/main.go +++ b/plugins/common/shim/example/cmd/main.go @@ -13,7 +13,7 @@ import ( ) var pollInterval = flag.Duration("poll_interval", 1*time.Second, "how often to send metrics") -var pollIntervalDisabled = flag.Bool("poll_interval_disabled", false, "how often to send metrics") +var pollIntervalDisabled = flag.Bool("poll_interval_disabled", false, "set to true to disable polling. You want to use this when you are sending metrics on your own schedule") var configFile = flag.String("config", "", "path to the config file for this plugin") var err error From 243488c266f1b42c9dc4711e87d6539d96b75eb5 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 20 Apr 2021 14:26:15 -0700 Subject: [PATCH 381/761] add starlark current timestamp example --- plugins/processors/starlark/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index 6372aedcea3b9..c573521bd2f04 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -232,6 +232,7 @@ def apply(metric): - [time duration](/plugins/processors/starlark/testdata/time_duration.star) - Parse a duration and convert it into a total amount of seconds. - [time timestamp](/plugins/processors/starlark/testdata/time_timestamp.star) - Filter metrics based on the timestamp in seconds. - [time timestamp nanoseconds](/plugins/processors/starlark/testdata/time_timestamp_nanos.star) - Filter metrics based on the timestamp with nanoseconds. +- [time timestamp current](/plugins/processors/starlark/testdata/time_set_timestamp.star) - Setting the metric timestamp to the current/local time. - [value filter](/plugins/processors/starlark/testdata/value_filter.star) - Remove a metric based on a field value. - [logging](/plugins/processors/starlark/testdata/logging.star) - Log messages with the logger of Telegraf - [multiple metrics](/plugins/processors/starlark/testdata/multiple_metrics.star) - Return multiple metrics by using [a list](https://docs.bazel.build/versions/master/skylark/lib/list.html) of metrics. From e29bca741909f9266d80540f4ecf1676e1087461 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Tue, 20 Apr 2021 17:29:58 -0400 Subject: [PATCH 382/761] Add Glob / Wildcard support to Cloudwatch input for 'Dimensions' configuration (#9136) I believe this will resolve #4046 --- plugins/inputs/cloudwatch/README.md | 1 + plugins/inputs/cloudwatch/cloudwatch.go | 22 ++++++++++++++++---- plugins/inputs/cloudwatch/cloudwatch_test.go | 6 ++++-- 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index c86e66e674c6d..d7c803c8c83b9 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -101,6 +101,7 @@ API endpoint. In the following order the plugin will attempt to authenticate. # # ## Dimension filters for Metric. All dimensions defined for the metric names # ## must be specified in order to retrieve the metric statistics. + # ## 'value' has wildcard / 'glob' matching support such as `p-*`. # [[inputs.cloudwatch.metrics.dimensions]] # name = "LoadBalancerName" # value = "p-example" diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 22fdcab38e0b6..f108aceb68f44 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -66,8 +66,9 @@ type Metric struct { // Dimension defines a simplified Cloudwatch dimension (provides metric filtering). type Dimension struct { - Name string `toml:"name"` - Value string `toml:"value"` + Name string `toml:"name"` + Value string `toml:"value"` + valueMatcher filter.Filter } // metricCache caches metrics, their filters, and generated queries. @@ -170,6 +171,7 @@ func (c *CloudWatch) SampleConfig() string { # # ## Dimension filters for Metric. All dimensions defined for the metric names # ## must be specified in order to retrieve the metric statistics. + # ## 'value' has wildcard / 'glob' matching support. such as 'p-*'. # [[inputs.cloudwatch.metrics.dimensions]] # name = "LoadBalancerName" # value = "p-example" @@ -294,6 +296,18 @@ func (c *CloudWatch) initializeCloudWatch() error { loglevel := aws.LogOff c.client = cloudwatch.New(configProvider, cfg.WithLogLevel(loglevel)) + // Initialize regex matchers for each Dimension value. + for _, m := range c.Metrics { + for _, dimension := range m.Dimensions { + matcher, err := filter.NewIncludeExcludeFilter([]string{dimension.Value}, nil) + if err != nil { + return err + } + + dimension.valueMatcher = matcher + } + } + return nil } @@ -633,7 +647,7 @@ func (f *metricCache) isValid() bool { func hasWildcard(dimensions []*Dimension) bool { for _, d := range dimensions { - if d.Value == "" || d.Value == "*" { + if d.Value == "" || strings.ContainsAny(d.Value, "*?[") { return true } } @@ -651,7 +665,7 @@ func isSelected(name string, metric *cloudwatch.Metric, dimensions []*Dimension) selected := false for _, d2 := range metric.Dimensions { if d.Name == *d2.Name { - if d.Value == "" || d.Value == "*" || d.Value == *d2.Value { + if d.Value == "" || d.valueMatcher.Match(*d2.Value) { selected = true } } diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 43fb01f058821..56aee346886e2 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -201,16 +201,18 @@ func TestSelectMetrics(t *testing.T) { Dimensions: []*Dimension{ { Name: "LoadBalancerName", - Value: "*", + Value: "lb*", }, { Name: "AvailabilityZone", - Value: "*", + Value: "us-east*", }, }, }, }, } + err := c.initializeCloudWatch() + assert.NoError(t, err) c.client = &mockSelectMetricsCloudWatchClient{} filtered, err := getFilteredMetrics(c) // We've asked for 2 (out of 4) metrics, over all 3 load balancers in all 2 From 4336dae3b59e5eb04a06734a0b662354f23dced1 Mon Sep 17 00:00:00 2001 From: Julien Riou Date: Tue, 20 Apr 2021 23:47:14 +0200 Subject: [PATCH 383/761] Make JSON format compatible with nulls (#9110) --- plugins/parsers/json/parser.go | 9 +++++++-- plugins/parsers/json/parser_test.go | 12 ++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index a651ae5343770..7e138e33adf5c 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -193,10 +193,13 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { if p.query != "" { result := gjson.GetBytes(buf, p.query) buf = []byte(result.Raw) - if !result.IsArray() && !result.IsObject() { - err := fmt.Errorf("E! Query path must lead to a JSON object or array of objects, but lead to: %v", result.Type) + if !result.IsArray() && !result.IsObject() && result.Type != gjson.Null { + err := fmt.Errorf("E! Query path must lead to a JSON object, array of objects or null, but lead to: %v", result.Type) return nil, err } + if result.Type == gjson.Null { + return nil, nil + } } buf = bytes.TrimSpace(buf) @@ -217,6 +220,8 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { return p.parseObject(v, timestamp) case []interface{}: return p.parseArray(v, timestamp) + case nil: + return nil, nil default: return nil, ErrWrongType } diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index 9abe853eca0c5..1010d7971249d 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -892,6 +892,18 @@ func TestParse(t *testing.T) { input: []byte(`[]`), expected: []telegraf.Metric{}, }, + { + name: "parse null", + config: &Config{}, + input: []byte(`null`), + expected: []telegraf.Metric{}, + }, + { + name: "parse null with query", + config: &Config{Query: "result.data"}, + input: []byte(`{"error":null,"result":{"data":null,"items_per_page":10,"total_items":0,"total_pages":0}}`), + expected: []telegraf.Metric{}, + }, { name: "parse simple array", config: &Config{ From f39d68d1faf4f688d24657b589ba01c9ffb93a18 Mon Sep 17 00:00:00 2001 From: Avinash Nigam <56562150+avinash-nigam@users.noreply.github.com> Date: Wed, 21 Apr 2021 09:02:07 -0700 Subject: [PATCH 384/761] SQL Server input plugin - Enable Azure Active Directory (AAD) authentication support (#8822) ### Required for all PRs: - [ ] Associated README.md updated. - [ ] Has appropriate unit tests. Associated to feature request - [Azure Active Directory (AAD) authentication support in SQL Server input plugin](https://github.com/influxdata/telegraf/issues/8808#issue-801695311) Co-authored-by: Sven Rebhan <36194019+srebhan@users.noreply.github.com> --- go.mod | 1 + go.sum | 5 +- plugins/inputs/sqlserver/README.md | 28 ++++ plugins/inputs/sqlserver/sqlserver.go | 148 +++++++++++++++++++-- plugins/inputs/sqlserver/sqlserver_test.go | 4 +- 5 files changed, 172 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index 523e4fbdf4b0a..57ee3c129648b 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/Azure/azure-event-hubs-go/v3 v3.2.0 github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 github.com/Azure/go-autorest/autorest v0.11.17 + github.com/Azure/go-autorest/autorest/adal v0.9.10 github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 github.com/BurntSushi/toml v0.3.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee diff --git a/go.sum b/go.sum index 795772ffccca6..f29d3c36bd0a9 100644 --- a/go.sum +++ b/go.sum @@ -352,6 +352,7 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -518,6 +519,7 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -895,6 +897,7 @@ github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -1039,7 +1042,6 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= @@ -1403,7 +1405,6 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index ee2dc52c369ca..d5ad22ee7a204 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -172,10 +172,38 @@ GO ## - VolumeSpace ## - PerformanceMetrics +``` + +### Support for Azure Active Directory (AAD) authentication using [Managed Identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) + +Azure SQL Database supports 2 main methods of authentication: [SQL authentication and AAD authentication](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). The recommended practice is to [use AAD authentication when possible](https://docs.microsoft.com/en-us/azure/azure-sql/database/authentication-aad-overview). +AAD is a more modern authentication protocol, allows for easier credential/role management, and can eliminate the need to include passwords in a connection string. +To enable support for AAD authentication, we leverage the existing AAD authentication support in the [SQL Server driver for Go](https://github.com/denisenkom/go-mssqldb#azure-active-directory-authentication---preview) +#### How to use AAD Auth with MSI + +- Configure "system-assigned managed identity" for Azure resources on the Monitoring VM (the VM that'd connect to the SQL server/database) [using the Azure portal](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm). +- On the database being monitored, create/update a USER with the name of the Monitoring VM as the principal using the below script. This might require allow-listing the client machine's IP address (from where the below SQL script is being run) on the SQL Server resource. +```sql +EXECUTE ('IF EXISTS(SELECT * FROM sys.database_principals WHERE name = '''') + BEGIN + DROP USER [] + END') +EXECUTE ('CREATE USER [] FROM EXTERNAL PROVIDER') +EXECUTE ('GRANT VIEW DATABASE STATE TO []') +``` +- On the SQL Server resource of the database(s) being monitored, go to "Firewalls and Virtual Networks" tab and allowlist the monitoring VM IP address. +- On the Monitoring VM, update the telegraf config file with the database connection string in the following format. Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). +- On the Monitoring VM, update the telegraf config file with the database connection string in the following format. +- On the Monitoring VM, update the telegraf config file with the database connection string in the following format. The connection string only provides the server and database name, but no password (since the VM's system-assigned managed identity would be used for authentication). +```toml + servers = [ + "Server=.database.windows.net;Port=1433;Database=;app name=telegraf;log=1;", + ] ``` +- Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). ### Metrics: To provide backwards compatibility, this plugin support two versions of metrics queries. diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index db499a7472578..7da1218c084ae 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -2,12 +2,15 @@ package sqlserver import ( "database/sql" + "errors" "fmt" "log" + "regexp" "sync" "time" - _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization + "github.com/Azure/go-autorest/autorest/adal" + mssql "github.com/denisenkom/go-mssqldb" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" @@ -24,6 +27,8 @@ type SQLServer struct { HealthMetric bool `toml:"health_metric"` pools []*sql.DB queries MapQuery + adalToken *adal.Token + muCacheLock sync.RWMutex } // Query struct @@ -60,6 +65,9 @@ const ( healthMetricDatabaseType = "database_type" ) +// resource id for Azure SQL Database +const sqlAzureResourceID = "https://database.windows.net/" + const sampleConfig = ` ## Specify instances to monitor with a list of connection strings. ## All connection parameters are optional. @@ -272,15 +280,48 @@ func (s *SQLServer) Start(acc telegraf.Accumulator) error { return err } - if len(s.Servers) == 0 { - s.Servers = append(s.Servers, defaultServer) - } + // initialize mutual exclusion lock + s.muCacheLock = sync.RWMutex{} for _, serv := range s.Servers { - pool, err := sql.Open("mssql", serv) - if err != nil { - acc.AddError(err) - return err + var pool *sql.DB + + // setup connection based on authentication + rx := regexp.MustCompile(`\b(?:(Password=((?:&(?:[a-z]+|#[0-9]+);|[^;]){0,})))\b`) + + // when password is provided in connection string, use SQL auth + if rx.MatchString(serv) { + var err error + pool, err = sql.Open("mssql", serv) + + if err != nil { + acc.AddError(err) + continue + } + } else { + // otherwise assume AAD Auth with system-assigned managed identity (MSI) + + // AAD Auth is only supported for Azure SQL Database or Azure SQL Managed Instance + if s.DatabaseType == "SQLServer" { + err := errors.New("database connection failed : AAD auth is not supported for SQL VM i.e. DatabaseType=SQLServer") + acc.AddError(err) + continue + } + + // get token from in-memory cache variable or from Azure Active Directory + tokenProvider, err := s.getTokenProvider() + if err != nil { + acc.AddError(fmt.Errorf("error creating AAD token provider for system assigned Azure managed identity : %s", err.Error())) + continue + } + + connector, err := mssql.NewAccessTokenConnector(serv, tokenProvider) + if err != nil { + acc.AddError(fmt.Errorf("error creating the SQL connector : %s", err.Error())) + continue + } + + pool = sql.OpenDB(connector) } s.pools = append(s.pools, pool) @@ -300,8 +341,7 @@ func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumul // execute query rows, err := pool.Query(query.Script) if err != nil { - return fmt.Errorf("Script %s failed: %w", query.ScriptName, err) - //return err + return fmt.Errorf("script %s failed: %w", query.ScriptName, err) } defer rows.Close() @@ -423,6 +463,94 @@ func (s *SQLServer) Init() error { return nil } +// Get Token Provider by loading cached token or refreshed token +func (s *SQLServer) getTokenProvider() (func() (string, error), error) { + var tokenString string + + // load token + s.muCacheLock.RLock() + token, err := s.loadToken() + s.muCacheLock.RUnlock() + + // if there's error while loading token or found an expired token, refresh token and save it + if err != nil || token.IsExpired() { + // refresh token within a write-lock + s.muCacheLock.Lock() + defer s.muCacheLock.Unlock() + + // load token again, in case it's been refreshed by another thread + token, err = s.loadToken() + + // check loaded token's error/validity, then refresh/save token + if err != nil || token.IsExpired() { + // get new token + spt, err := s.refreshToken() + if err != nil { + return nil, err + } + + // use the refreshed token + tokenString = spt.OAuthToken() + } else { + // use locally cached token + tokenString = token.OAuthToken() + } + } else { + // use locally cached token + tokenString = token.OAuthToken() + } + + // return acquired token + return func() (string, error) { + return tokenString, nil + }, nil +} + +// Load token from in-mem cache +func (s *SQLServer) loadToken() (*adal.Token, error) { + // This method currently does a simplistic task of reading a from variable (in-mem cache), + // however it's been structured here to allow extending the cache mechanism to a different approach in future + + if s.adalToken == nil { + return nil, fmt.Errorf("token is nil or failed to load existing token") + } + + return s.adalToken, nil +} + +// Refresh token for the resource, and save to in-mem cache +func (s *SQLServer) refreshToken() (*adal.Token, error) { + // get MSI endpoint to get a token + msiEndpoint, err := adal.GetMSIVMEndpoint() + if err != nil { + return nil, err + } + + // get new token for the resource id + spt, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, sqlAzureResourceID) + if err != nil { + return nil, err + } + + // ensure token is fresh + if err := spt.EnsureFresh(); err != nil { + return nil, err + } + + // save token to local in-mem cache + s.adalToken = &adal.Token{ + AccessToken: spt.Token().AccessToken, + RefreshToken: spt.Token().RefreshToken, + ExpiresIn: spt.Token().ExpiresIn, + ExpiresOn: spt.Token().ExpiresOn, + NotBefore: spt.Token().NotBefore, + Resource: spt.Token().Resource, + Type: spt.Token().Type, + } + + return s.adalToken, nil +} + func init() { inputs.Add("sqlserver", func() telegraf.Input { return &SQLServer{Servers: []string{defaultServer}} diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index 580bfe5ee9e9d..3d1ddd3094025 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -184,8 +184,8 @@ func TestSqlServer_MultipleInstanceWithHealthMetricIntegration(t *testing.T) { } func TestSqlServer_HealthMetric(t *testing.T) { - fakeServer1 := "localhost\\fakeinstance1;Database=fakedb1" - fakeServer2 := "localhost\\fakeinstance2;Database=fakedb2" + fakeServer1 := "localhost\\fakeinstance1;Database=fakedb1;Password=ABCabc01;" + fakeServer2 := "localhost\\fakeinstance2;Database=fakedb2;Password=ABCabc01;" s1 := &SQLServer{ Servers: []string{fakeServer1, fakeServer2}, From ad942052f06bb13ce8dad899cba9066e9ad307ed Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Wed, 21 Apr 2021 13:32:34 -0400 Subject: [PATCH 385/761] Speed up package step by running in parallel. (#9096) * test * more test * Updated config.yml * Updated config.yml * Updated config.yml * Update Makefile * Update Makefile * Update Makefile * Update Makefile * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml * Updated config.yml --- .circleci/config.yml | 100 +++++++++++++++++++++++++++++++++++++------ Makefile | 14 ++++++ 2 files changed, 102 insertions(+), 12 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 5496f67875e65..690eaa9150989 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -47,25 +47,35 @@ commands: - run: 'GOARCH=<< parameters.goarch >> make check' - run: 'GOARCH=<< parameters.goarch >> make check-deps' - run: 'GOARCH=<< parameters.goarch >> make test' - package: + package-build: parameters: + release: + type: boolean + default: false nightly: type: boolean default: false + type: + type: string + default: "" steps: - checkout - check-changed-files-or-halt - attach_workspace: at: '/go' + - when: + condition: << parameters.release >> + steps: + - run: 'debian=1 centos=1 mac=1 freebsd=1 linux=1 windows=1 make package' - when: condition: << parameters.nightly >> steps: - - run: 'NIGHTLY=1 make package' + - run: 'debian=1 centos=1 mac=1 freebsd=1 linux=1 windows=1 NIGHTLY=1 make package' - run: 'make upload-nightly' - unless: condition: << parameters.nightly >> steps: - - run: 'make package' + - run: '<< parameters.type >>=1 make package' - store_artifacts: path: './build/dist' destination: 'build/dist' @@ -142,19 +152,57 @@ jobs: - run: git config --system core.longpaths true - run: make test-windows - package: + windows-package: + executor: go-1_16 + steps: + - package-build: + type: windows + debian-package: + executor: go-1_16 + steps: + - package-build: + type: debian + centos-package: executor: go-1_16 steps: - - package + - package-build: + type: centos + mac-package: + executor: go-1_16 + steps: + - package-build: + type: mac + freebsd-package: + executor: go-1_16 + steps: + - package-build: + type: freebsd + linux-package: + executor: go-1_16 + steps: + - package-build: + type: linux + release: executor: go-1_16 steps: - - package + - package-build: + release: true nightly: executor: go-1_16 steps: - - package: + - package-build: nightly: true + package-consolidate: + executor: + name: win/default + shell: powershell.exe + steps: + - attach_workspace: + at: '/build' + - store_artifacts: + path: './build/dist' + destination: 'build/dist' package-sign-windows: executor: name: win/default @@ -195,6 +243,12 @@ jobs: - store_artifacts: path: './dist' destination: 'build/dist' + test-awaiter: + executor: go-1_16 + steps: + - run: + command: | + echo "Go tests complete." share-artifacts: executor: aws-cli/default steps: @@ -244,17 +298,38 @@ workflows: filters: tags: only: /.*/ - - 'package': + - 'test-awaiter': requires: - - 'test-go-windows' - - 'test-go-mac' - 'test-go-1_15' - 'test-go-1_15-386' - 'test-go-1_16' - 'test-go-1_16-386' + - 'windows-package': + requires: + - 'test-go-windows' + - 'debian-package': + requires: + - 'test-awaiter' + - 'centos-package': + requires: + - 'test-awaiter' + - 'mac-package': + requires: + - 'test-go-mac' + - 'freebsd-package': + requires: + - 'test-awaiter' + - 'linux-package': + requires: + - 'test-awaiter' - 'share-artifacts': requires: - - 'package' + - 'linux-package' + - 'freebsd-package' + - 'mac-package' + - 'centos-package' + - 'debian-package' + - 'windows-package' filters: branches: ignore: @@ -284,6 +359,7 @@ workflows: filters: tags: only: /.*/ + nightly: jobs: - 'deps' @@ -315,4 +391,4 @@ workflows: filters: branches: only: - - master + - master \ No newline at end of file diff --git a/Makefile b/Makefile index 7f090ca574514..2e3e7e3554e25 100644 --- a/Makefile +++ b/Makefile @@ -228,6 +228,7 @@ $(buildbin): @mkdir -pv $(dir $@) go build -o $(dir $@) -ldflags "$(LDFLAGS)" ./cmd/telegraf +ifdef debian debs := telegraf_$(deb_version)_amd64.deb debs += telegraf_$(deb_version)_arm64.deb debs += telegraf_$(deb_version)_armel.deb @@ -237,7 +238,9 @@ debs += telegraf_$(deb_version)_mips.deb debs += telegraf_$(deb_version)_mipsel.deb debs += telegraf_$(deb_version)_s390x.deb debs += telegraf_$(deb_version)_ppc64el.deb +endif +ifdef centos rpms += telegraf-$(rpm_version).aarch64.rpm rpms += telegraf-$(rpm_version).armel.rpm rpms += telegraf-$(rpm_version).armv6hl.rpm @@ -245,10 +248,18 @@ rpms += telegraf-$(rpm_version).i386.rpm rpms += telegraf-$(rpm_version).s390x.rpm rpms += telegraf-$(rpm_version).ppc64le.rpm rpms += telegraf-$(rpm_version).x86_64.rpm +endif +ifdef mac tars += telegraf-$(tar_version)_darwin_amd64.tar.gz +endif + +ifdef freebsd tars += telegraf-$(tar_version)_freebsd_amd64.tar.gz tars += telegraf-$(tar_version)_freebsd_i386.tar.gz +endif + +ifdef linux tars += telegraf-$(tar_version)_linux_amd64.tar.gz tars += telegraf-$(tar_version)_linux_arm64.tar.gz tars += telegraf-$(tar_version)_linux_armel.tar.gz @@ -259,9 +270,12 @@ tars += telegraf-$(tar_version)_linux_mipsel.tar.gz tars += telegraf-$(tar_version)_linux_s390x.tar.gz tars += telegraf-$(tar_version)_linux_ppc64le.tar.gz tars += telegraf-$(tar_version)_static_linux_amd64.tar.gz +endif +ifdef windows zips += telegraf-$(tar_version)_windows_amd64.zip zips += telegraf-$(tar_version)_windows_i386.zip +endif dists := $(debs) $(rpms) $(tars) $(zips) From 4d00e216307ec653b6a0d07e5a534556e0feaec5 Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Wed, 21 Apr 2021 21:16:02 +0200 Subject: [PATCH 386/761] Add time.star and math.star to Starlark readme lib section (#9167) --- plugins/processors/starlark/README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index c573521bd2f04..9ca231c5aeb8b 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -102,8 +102,10 @@ While Starlark is similar to Python, there are important differences to note: The ability to load external scripts other than your own is pretty limited. The following libraries are available for loading: -* json: `load("json.star", "json")` provides the following functions: `json.encode()`, `json.decode()`, `json.indent()`. See [json.star](/plugins/processors/starlark/testdata/json.star) for an example. -* log: `load("logging.star", "log")` provides the following functions: `log.debug()`, `log.info()`, `log.warn()`, `log.error()`. See [logging.star](/plugins/processors/starlark/testdata/logging.star) for an example. +* json: `load("json.star", "json")` provides the following functions: `json.encode()`, `json.decode()`, `json.indent()`. See [json.star](/plugins/processors/starlark/testdata/json.star) for an example. For more details about the functions, please refer to [the documentation of this library](https://pkg.go.dev/go.starlark.net/lib/json). +* log: `load("logging.star", "log")` provides the following functions: `log.debug()`, `log.info()`, `log.warn()`, `log.error()`. See [logging.star](/plugins/processors/starlark/testdata/logging.star) for an example. +* math: `load("math.star", "math")` provides [the following functions and constants](https://pkg.go.dev/go.starlark.net/lib/math). See [math.star](/plugins/processors/starlark/testdata/math.star) for an example. +* time: `load("time.star", "time")` provides the following functions: `time.from_timestamp()`, `time.is_valid_timezone()`, `time.now()`, `time.parse_duration()`, `time.parseTime()`, `time.time()`. See [time_date.star](/plugins/processors/starlark/testdata/time_date.star), [time_duration.star](/plugins/processors/starlark/testdata/time_duration.star) and/or [time_timestamp.star](/plugins/processors/starlark/testdata/time_timestamp.star) for an example. For more details about the functions, please refer to [the documentation of this library](https://pkg.go.dev/go.starlark.net/lib/time). If you would like to see support for something else here, please open an issue. From 03b2daeb1bf1ed7e791ed075fe1294e4daa43087 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 22 Apr 2021 23:08:03 +0200 Subject: [PATCH 387/761] Linter fixes (unhandled errors) -- Part 2 (#9122) --- plugins/inputs/activemq/activemq_test.go | 21 +-- plugins/inputs/apache/apache_test.go | 3 +- plugins/inputs/apcupsd/apcupsd_test.go | 2 +- plugins/inputs/aurora/aurora.go | 4 +- plugins/inputs/aurora/aurora_test.go | 24 ++-- plugins/inputs/bcache/bcache.go | 7 +- plugins/inputs/beanstalkd/beanstalkd.go | 5 +- plugins/inputs/beanstalkd/beanstalkd_test.go | 44 +++++-- plugins/inputs/beat/beat_test.go | 80 ++++-------- plugins/inputs/bind/json_stats.go | 8 +- plugins/inputs/bind/xml_stats_v2.go | 4 +- plugins/inputs/bind/xml_stats_v3.go | 8 +- plugins/inputs/bond/bond_test.go | 5 +- plugins/inputs/ceph/ceph.go | 2 + plugins/inputs/ceph/ceph_test.go | 95 +++++++------- plugins/inputs/chrony/chrony_test.go | 3 + .../cisco_telemetry_mdt.go | 32 ++++- .../cisco_telemetry_mdt_test.go | 121 +++++++++++------- plugins/inputs/clickhouse/clickhouse_test.go | 72 +++++++---- .../inputs/cloud_pubsub_push/pubsub_push.go | 9 +- .../cloud_pubsub_push/pubsub_push_test.go | 1 + plugins/inputs/cloudwatch/cloudwatch.go | 4 +- plugins/inputs/cloudwatch/cloudwatch_test.go | 4 +- plugins/inputs/conntrack/conntrack_test.go | 23 ++-- plugins/inputs/couchdb/couchdb.go | 10 +- plugins/inputs/dcos/client.go | 1 + .../directory_monitor/directory_monitor.go | 16 ++- .../directory_monitor_test.go | 19 ++- plugins/inputs/diskio/diskio_linux.go | 5 +- plugins/inputs/diskio/diskio_linux_test.go | 27 ++-- plugins/inputs/disque/disque.go | 15 ++- plugins/inputs/disque/disque_test.go | 16 ++- plugins/inputs/docker/docker_test.go | 2 +- plugins/inputs/docker_log/docker_log.go | 3 + plugins/inputs/docker_log/docker_log_test.go | 4 +- plugins/inputs/dovecot/dovecot.go | 12 +- plugins/inputs/ecs/ecs_test.go | 8 +- .../elasticsearch/elasticsearch_test.go | 75 +++-------- plugins/inputs/exec/exec.go | 1 + plugins/inputs/exec/exec_test.go | 19 ++- plugins/inputs/execd/execd_posix.go | 12 +- plugins/inputs/execd/execd_test.go | 17 ++- plugins/inputs/execd/shim/goshim.go | 17 ++- plugins/inputs/execd/shim/shim_posix_test.go | 4 +- plugins/inputs/execd/shim/shim_test.go | 11 +- plugins/inputs/fail2ban/fail2ban_test.go | 6 + plugins/inputs/fibaro/fibaro_test.go | 6 +- plugins/inputs/file/file_test.go | 21 ++- plugins/inputs/filecount/filecount_test.go | 8 +- plugins/inputs/filestat/filestat_test.go | 27 ++-- plugins/inputs/fireboard/fireboard_test.go | 3 +- plugins/inputs/fluentd/fluentd_test.go | 27 ++-- plugins/inputs/gnmi/gnmi.go | 52 ++++++-- plugins/inputs/gnmi/gnmi_test.go | 29 +++-- plugins/inputs/haproxy/haproxy_test.go | 34 ++--- plugins/inputs/internal/internal_test.go | 12 +- 56 files changed, 640 insertions(+), 460 deletions(-) diff --git a/plugins/inputs/activemq/activemq_test.go b/plugins/inputs/activemq/activemq_test.go index 0cec2e3306a46..1e733a4eed201 100644 --- a/plugins/inputs/activemq/activemq_test.go +++ b/plugins/inputs/activemq/activemq_test.go @@ -30,7 +30,7 @@ func TestGatherQueuesMetrics(t *testing.T) { queues := Queues{} - xml.Unmarshal([]byte(s), &queues) + require.NoError(t, xml.Unmarshal([]byte(s), &queues)) records := make(map[string]interface{}) tags := make(map[string]string) @@ -49,7 +49,7 @@ func TestGatherQueuesMetrics(t *testing.T) { activeMQ := new(ActiveMQ) activeMQ.Server = "localhost" activeMQ.Port = 8161 - activeMQ.Init() + require.NoError(t, activeMQ.Init()) activeMQ.GatherQueuesMetrics(&acc, queues) acc.AssertContainsTaggedFields(t, "activemq_queues", records, tags) @@ -76,7 +76,7 @@ func TestGatherTopicsMetrics(t *testing.T) { topics := Topics{} - xml.Unmarshal([]byte(s), &topics) + require.NoError(t, xml.Unmarshal([]byte(s), &topics)) records := make(map[string]interface{}) tags := make(map[string]string) @@ -95,7 +95,7 @@ func TestGatherTopicsMetrics(t *testing.T) { activeMQ := new(ActiveMQ) activeMQ.Server = "localhost" activeMQ.Port = 8161 - activeMQ.Init() + require.NoError(t, activeMQ.Init()) activeMQ.GatherTopicsMetrics(&acc, topics) acc.AssertContainsTaggedFields(t, "activemq_topics", records, tags) @@ -110,7 +110,7 @@ func TestGatherSubscribersMetrics(t *testing.T) { subscribers := Subscribers{} - xml.Unmarshal([]byte(s), &subscribers) + require.NoError(t, xml.Unmarshal([]byte(s), &subscribers)) records := make(map[string]interface{}) tags := make(map[string]string) @@ -135,7 +135,7 @@ func TestGatherSubscribersMetrics(t *testing.T) { activeMQ := new(ActiveMQ) activeMQ.Server = "localhost" activeMQ.Port = 8161 - activeMQ.Init() + require.NoError(t, activeMQ.Init()) activeMQ.GatherSubscribersMetrics(&acc, subscribers) acc.AssertContainsTaggedFields(t, "activemq_subscribers", records, tags) @@ -149,13 +149,16 @@ func TestURLs(t *testing.T) { switch r.URL.Path { case "/admin/xml/queues.jsp": w.WriteHeader(http.StatusOK) - w.Write([]byte("")) + _, err := w.Write([]byte("")) + require.NoError(t, err) case "/admin/xml/topics.jsp": w.WriteHeader(http.StatusOK) - w.Write([]byte("")) + _, err := w.Write([]byte("")) + require.NoError(t, err) case "/admin/xml/subscribers.jsp": w.WriteHeader(http.StatusOK) - w.Write([]byte("")) + _, err := w.Write([]byte("")) + require.NoError(t, err) default: w.WriteHeader(http.StatusNotFound) t.Fatalf("unexpected path: " + r.URL.Path) diff --git a/plugins/inputs/apache/apache_test.go b/plugins/inputs/apache/apache_test.go index ca8f4733c6bc5..534f6f9e1f7e9 100644 --- a/plugins/inputs/apache/apache_test.go +++ b/plugins/inputs/apache/apache_test.go @@ -31,7 +31,8 @@ Scoreboard: WW_____W_RW_R_W__RRR____WR_W___WW________W_WW_W_____R__R_WR__WRWR_RR func TestHTTPApache(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, apacheStatus) + _, err := fmt.Fprintln(w, apacheStatus) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go index 3cd90812bba15..dd3c986afdc79 100644 --- a/plugins/inputs/apcupsd/apcupsd_test.go +++ b/plugins/inputs/apcupsd/apcupsd_test.go @@ -43,7 +43,7 @@ func listen(ctx context.Context, t *testing.T, out [][]byte) (string, error) { continue } defer conn.Close() - conn.SetReadDeadline(time.Now().Add(time.Second)) + require.NoError(t, conn.SetReadDeadline(time.Now().Add(time.Second))) in := make([]byte, 128) n, err := conn.Read(in) diff --git a/plugins/inputs/aurora/aurora.go b/plugins/inputs/aurora/aurora.go index 04737adbabd86..45a2fabb6249a 100644 --- a/plugins/inputs/aurora/aurora.go +++ b/plugins/inputs/aurora/aurora.go @@ -190,7 +190,9 @@ func (a *Aurora) gatherRole(ctx context.Context, origin *url.URL) (RoleType, err if err != nil { return Unknown, err } - resp.Body.Close() + if err := resp.Body.Close(); err != nil { + return Unknown, fmt.Errorf("closing body failed: %v", err) + } switch resp.StatusCode { case http.StatusOK: diff --git a/plugins/inputs/aurora/aurora_test.go b/plugins/inputs/aurora/aurora_test.go index 6e2c004f2e7b3..e22488929e545 100644 --- a/plugins/inputs/aurora/aurora_test.go +++ b/plugins/inputs/aurora/aurora_test.go @@ -46,7 +46,8 @@ func TestAurora(t *testing.T) { "variable_scrape_micros_total_per_sec": 1485.0 }` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -86,7 +87,8 @@ func TestAurora(t *testing.T) { }, varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte("{}")) + _, err := w.Write([]byte("{}")) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -104,7 +106,8 @@ func TestAurora(t *testing.T) { "foo": "bar" }` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -123,7 +126,8 @@ func TestAurora(t *testing.T) { "foo": 1e309 }` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -142,7 +146,8 @@ func TestAurora(t *testing.T) { "foo": 9223372036854775808 }` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -158,7 +163,8 @@ func TestAurora(t *testing.T) { varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) { body := `{]` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -176,7 +182,8 @@ func TestAurora(t *testing.T) { "value": 42 }` w.WriteHeader(http.StatusServiceUnavailable) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -244,7 +251,8 @@ func TestBasicAuth(t *testing.T) { require.Equal(t, tt.username, username) require.Equal(t, tt.password, password) w.WriteHeader(http.StatusOK) - w.Write([]byte("{}")) + _, err := w.Write([]byte("{}")) + require.NoError(t, err) }) var acc testutil.Accumulator diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index c94af73f93dd4..8c21c701f3da3 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -6,6 +6,7 @@ package bcache import ( "errors" + "fmt" "io/ioutil" "os" "path/filepath" @@ -128,7 +129,7 @@ func (b *Bcache) Gather(acc telegraf.Accumulator) error { } bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*") if len(bdevs) < 1 { - return errors.New("Can't find any bcache device") + return errors.New("can't find any bcache device") } for _, bdev := range bdevs { if restrictDevs { @@ -137,7 +138,9 @@ func (b *Bcache) Gather(acc telegraf.Accumulator) error { continue } } - b.gatherBcache(bdev, acc) + if err := b.gatherBcache(bdev, acc); err != nil { + return fmt.Errorf("gathering bcache failed: %v", err) + } } return nil } diff --git a/plugins/inputs/beanstalkd/beanstalkd.go b/plugins/inputs/beanstalkd/beanstalkd.go index fa6075589dabf..b8a5c97974eef 100644 --- a/plugins/inputs/beanstalkd/beanstalkd.go +++ b/plugins/inputs/beanstalkd/beanstalkd.go @@ -62,7 +62,10 @@ func (b *Beanstalkd) Gather(acc telegraf.Accumulator) error { for _, tube := range tubes { wg.Add(1) go func(tube string) { - b.gatherTubeStats(connection, tube, acc) + err := b.gatherTubeStats(connection, tube, acc) + if err != nil { + acc.AddError(err) + } wg.Done() }(tube) } diff --git a/plugins/inputs/beanstalkd/beanstalkd_test.go b/plugins/inputs/beanstalkd/beanstalkd_test.go index 92c108e06aa91..9d97a682c4873 100644 --- a/plugins/inputs/beanstalkd/beanstalkd_test.go +++ b/plugins/inputs/beanstalkd/beanstalkd_test.go @@ -22,6 +22,7 @@ func TestBeanstalkd(t *testing.T) { tubesConfig []string expectedTubes []tubeStats notExpectedTubes []tubeStats + expectedError string }{ { name: "All tubes stats", @@ -50,15 +51,14 @@ func TestBeanstalkd(t *testing.T) { {name: "default", fields: defaultTubeFields}, {name: "test", fields: testTubeFields}, }, + expectedError: "input does not match format", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { server, err := startTestServer(t) - if err != nil { - t.Fatalf("Unable to create test server") - } + require.NoError(t, err, "Unable to create test server") defer server.Close() serverAddress := server.Addr().String() @@ -68,8 +68,13 @@ func TestBeanstalkd(t *testing.T) { } var acc testutil.Accumulator - require.NoError(t, acc.GatherError(plugin.Gather)) - + err = acc.GatherError(plugin.Gather) + if test.expectedError == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Equal(t, test.expectedError, err.Error()) + } acc.AssertContainsTaggedFields(t, "beanstalkd_overview", overviewFields, getOverviewTags(serverAddress), @@ -110,8 +115,8 @@ func startTestServer(t *testing.T) (net.Listener, error) { tp := textproto.NewConn(connection) defer tp.Close() - sendSuccessResponse := func(body string) { - tp.PrintfLine("OK %d\r\n%s", len(body), body) + sendSuccessResponse := func(body string) error { + return tp.PrintfLine("OK %d\r\n%s", len(body), body) } for { @@ -125,15 +130,30 @@ func startTestServer(t *testing.T) (net.Listener, error) { switch cmd { case "list-tubes": - sendSuccessResponse(listTubesResponse) + if err := sendSuccessResponse(listTubesResponse); err != nil { + t.Logf("sending response %q failed: %v", listTubesResponse, err) + return + } case "stats": - sendSuccessResponse(statsResponse) + if err := sendSuccessResponse(statsResponse); err != nil { + t.Logf("sending response %q failed: %v", statsResponse, err) + return + } case "stats-tube default": - sendSuccessResponse(statsTubeDefaultResponse) + if err := sendSuccessResponse(statsTubeDefaultResponse); err != nil { + t.Logf("sending response %q failed: %v", statsTubeDefaultResponse, err) + return + } case "stats-tube test": - sendSuccessResponse(statsTubeTestResponse) + if err := sendSuccessResponse(statsTubeTestResponse); err != nil { + t.Logf("sending response %q failed: %v", statsTubeTestResponse, err) + return + } case "stats-tube unknown": - tp.PrintfLine("NOT_FOUND") + if err := tp.PrintfLine("NOT_FOUND"); err != nil { + t.Logf("sending response %q failed: %v", "NOT_FOUND", err) + return + } default: t.Log("Test server: unknown command") } diff --git a/plugins/inputs/beat/beat_test.go b/plugins/inputs/beat/beat_test.go index 777fe6b98175c..8f2c5c9c2fbee 100644 --- a/plugins/inputs/beat/beat_test.go +++ b/plugins/inputs/beat/beat_test.go @@ -10,18 +10,15 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func Test_BeatStats(test *testing.T) { +func Test_BeatStats(t *testing.T) { var beat6StatsAccumulator testutil.Accumulator var beatTest = NewBeat() // System stats are disabled by default beatTest.Includes = []string{"beat", "libbeat", "system", "filebeat"} - err := beatTest.Init() - if err != nil { - panic(fmt.Sprintf("could not init beat: %s", err)) - } + require.NoError(t, beatTest.Init()) fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, request *http.Request) { var jsonFilePath string @@ -31,35 +28,26 @@ func Test_BeatStats(test *testing.T) { case suffixStats: jsonFilePath = "beat6_stats.json" default: - panic("Cannot handle request") + require.FailNow(t, "cannot handle request") } data, err := ioutil.ReadFile(jsonFilePath) - - if err != nil { - panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) - } - w.Write(data) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) + _, err = w.Write(data) + require.NoError(t, err, "could not write data") })) requestURL, err := url.Parse(beatTest.URL) - if err != nil { - test.Logf("Can't parse URL %s", beatTest.URL) - } + require.NoErrorf(t, err, "can't parse URL %s", beatTest.URL) fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) - if err != nil { - test.Logf("Can't listen for %s: %v", requestURL, err) - } + require.NoErrorf(t, err, "can't listen for %s: %v", requestURL, err) fakeServer.Start() defer fakeServer.Close() - err = beatTest.Gather(&beat6StatsAccumulator) - if err != nil { - test.Logf("Can't gather stats") - } + require.NoError(t, err, beatTest.Gather(&beat6StatsAccumulator)) beat6StatsAccumulator.AssertContainsTaggedFields( - test, + t, "beat", map[string]interface{}{ "cpu_system_ticks": float64(626970), @@ -85,7 +73,7 @@ func Test_BeatStats(test *testing.T) { }, ) beat6StatsAccumulator.AssertContainsTaggedFields( - test, + t, "beat_filebeat", map[string]interface{}{ "events_active": float64(0), @@ -108,7 +96,7 @@ func Test_BeatStats(test *testing.T) { }, ) beat6StatsAccumulator.AssertContainsTaggedFields( - test, + t, "beat_libbeat", map[string]interface{}{ "config_module_running": float64(0), @@ -148,7 +136,7 @@ func Test_BeatStats(test *testing.T) { }, ) beat6StatsAccumulator.AssertContainsTaggedFields( - test, + t, "beat_system", map[string]interface{}{ "cpu_cores": float64(32), @@ -169,15 +157,12 @@ func Test_BeatStats(test *testing.T) { ) } -func Test_BeatRequest(test *testing.T) { +func Test_BeatRequest(t *testing.T) { var beat6StatsAccumulator testutil.Accumulator beatTest := NewBeat() // System stats are disabled by default beatTest.Includes = []string{"beat", "libbeat", "system", "filebeat"} - err := beatTest.Init() - if err != nil { - panic(fmt.Sprintf("could not init beat: %s", err)) - } + require.NoError(t, beatTest.Init()) fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, request *http.Request) { var jsonFilePath string @@ -187,30 +172,24 @@ func Test_BeatRequest(test *testing.T) { case suffixStats: jsonFilePath = "beat6_stats.json" default: - panic("Cannot handle request") + require.FailNow(t, "cannot handle request") } data, err := ioutil.ReadFile(jsonFilePath) - - if err != nil { - panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) - } - assert.Equal(test, request.Host, "beat.test.local") - assert.Equal(test, request.Method, "POST") - assert.Equal(test, request.Header.Get("Authorization"), "Basic YWRtaW46UFdE") - assert.Equal(test, request.Header.Get("X-Test"), "test-value") - - w.Write(data) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) + require.Equal(t, request.Host, "beat.test.local") + require.Equal(t, request.Method, "POST") + require.Equal(t, request.Header.Get("Authorization"), "Basic YWRtaW46UFdE") + require.Equal(t, request.Header.Get("X-Test"), "test-value") + + _, err = w.Write(data) + require.NoError(t, err, "could not write data") })) requestURL, err := url.Parse(beatTest.URL) - if err != nil { - test.Logf("Can't parse URL %s", beatTest.URL) - } + require.NoErrorf(t, err, "can't parse URL %s", beatTest.URL) fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) - if err != nil { - test.Logf("Can't listen for %s: %v", requestURL, err) - } + require.NoErrorf(t, err, "can't listen for %s: %v", requestURL, err) fakeServer.Start() defer fakeServer.Close() @@ -220,8 +199,5 @@ func Test_BeatRequest(test *testing.T) { beatTest.Username = "admin" beatTest.Password = "PWD" - err = beatTest.Gather(&beat6StatsAccumulator) - if err != nil { - test.Logf("Can't gather stats") - } + require.NoError(t, beatTest.Gather(&beat6StatsAccumulator)) } diff --git a/plugins/inputs/bind/json_stats.go b/plugins/inputs/bind/json_stats.go index 06c21008a5364..96a5a9b6ec9e6 100644 --- a/plugins/inputs/bind/json_stats.go +++ b/plugins/inputs/bind/json_stats.go @@ -58,7 +58,9 @@ func addJSONCounter(acc telegraf.Accumulator, commonTags map[string]string, stat tags[k] = v } - grouper.Add("bind_counter", tags, ts, name, value) + if err := grouper.Add("bind_counter", tags, ts, name, value); err != nil { + acc.AddError(fmt.Errorf("adding field %q to group failed: %v", name, err)) + } } //Add grouped metrics @@ -133,7 +135,9 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st "type": cntrType, } - grouper.Add("bind_counter", tags, ts, cntrName, value) + if err := grouper.Add("bind_counter", tags, ts, cntrName, value); err != nil { + acc.AddError(fmt.Errorf("adding tags %q to group failed: %v", tags, err)) + } } } } diff --git a/plugins/inputs/bind/xml_stats_v2.go b/plugins/inputs/bind/xml_stats_v2.go index 5e0d976afa8fc..ce98b2ddc90e0 100644 --- a/plugins/inputs/bind/xml_stats_v2.go +++ b/plugins/inputs/bind/xml_stats_v2.go @@ -75,7 +75,9 @@ func addXMLv2Counter(acc telegraf.Accumulator, commonTags map[string]string, sta tags[k] = v } - grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + if err := grouper.Add("bind_counter", tags, ts, c.Name, c.Value); err != nil { + acc.AddError(fmt.Errorf("adding field %q to group failed: %v", c.Name, err)) + } } //Add grouped metrics diff --git a/plugins/inputs/bind/xml_stats_v3.go b/plugins/inputs/bind/xml_stats_v3.go index 448360caf28b0..c4fe7e1992674 100644 --- a/plugins/inputs/bind/xml_stats_v3.go +++ b/plugins/inputs/bind/xml_stats_v3.go @@ -81,7 +81,9 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s tags := map[string]string{"url": hostPort, "source": host, "port": port, "type": cg.Type} - grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + if err := grouper.Add("bind_counter", tags, ts, c.Name, c.Value); err != nil { + acc.AddError(fmt.Errorf("adding tags %q to group failed: %v", tags, err)) + } } } @@ -118,7 +120,9 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s "type": cg.Type, } - grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + if err := grouper.Add("bind_counter", tags, ts, c.Name, c.Value); err != nil { + acc.AddError(fmt.Errorf("adding tags %q to group failed: %v", tags, err)) + } } } } diff --git a/plugins/inputs/bond/bond_test.go b/plugins/inputs/bond/bond_test.go index c07224350352c..342a3f4eb831d 100644 --- a/plugins/inputs/bond/bond_test.go +++ b/plugins/inputs/bond/bond_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var sampleTest802 = ` @@ -65,12 +66,12 @@ func TestGatherBondInterface(t *testing.T) { var acc testutil.Accumulator bond := &Bond{} - bond.gatherBondInterface("bond802", sampleTest802, &acc) + require.NoError(t, bond.gatherBondInterface("bond802", sampleTest802, &acc)) acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bond802"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth1"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 3, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth2"}) - bond.gatherBondInterface("bondAB", sampleTestAB, &acc) + require.NoError(t, bond.gatherBondInterface("bondAB", sampleTestAB, &acc)) acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"active_slave": "eth2", "status": 1}, map[string]string{"bond": "bondAB"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 0}, map[string]string{"bond": "bondAB", "interface": "eth3"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bondAB", "interface": "eth2"}) diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index b4e83844fcfcb..3445b2d12ed42 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -253,8 +253,10 @@ func (m *metric) name() string { buf := bytes.Buffer{} for i := len(m.pathStack) - 1; i >= 0; i-- { if buf.Len() > 0 { + //nolint:errcheck,revive // should never return an error buf.WriteString(".") } + //nolint:errcheck,revive // should never return an error buf.WriteString(m.pathStack[i]) } return buf.String() diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index 08075fd03be49..5cb120e578b18 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( @@ -25,41 +25,41 @@ type expectedResult struct { func TestParseSockId(t *testing.T) { s := parseSockID(sockFile(osdPrefix, 1), osdPrefix, sockSuffix) - assert.Equal(t, s, "1") + require.Equal(t, s, "1") } func TestParseMonDump(t *testing.T) { dump, err := parseDump(monPerfDump) - assert.NoError(t, err) - assert.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon) - assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon) + require.NoError(t, err) + require.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon) + require.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon) } func TestParseOsdDump(t *testing.T) { dump, err := parseDump(osdPerfDump) - assert.NoError(t, err) - assert.InEpsilon(t, 552132.109360000, dump["filestore"]["commitcycle_interval.sum"], epsilon) - assert.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"]) + require.NoError(t, err) + require.InEpsilon(t, 552132.109360000, dump["filestore"]["commitcycle_interval.sum"], epsilon) + require.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"]) } func TestParseMdsDump(t *testing.T) { dump, err := parseDump(mdsPerfDump) - assert.NoError(t, err) - assert.InEpsilon(t, 2408386.600934982, dump["mds"]["reply_latency.sum"], epsilon) - assert.Equal(t, float64(0), dump["throttle-write_buf_throttle"]["wait.avgcount"]) + require.NoError(t, err) + require.InEpsilon(t, 2408386.600934982, dump["mds"]["reply_latency.sum"], epsilon) + require.Equal(t, float64(0), dump["throttle-write_buf_throttle"]["wait.avgcount"]) } func TestParseRgwDump(t *testing.T) { dump, err := parseDump(rgwPerfDump) - assert.NoError(t, err) - assert.InEpsilon(t, 0.002219876, dump["rgw"]["get_initial_lat.sum"], epsilon) - assert.Equal(t, float64(0), dump["rgw"]["put_initial_lat.avgcount"]) + require.NoError(t, err) + require.InEpsilon(t, 0.002219876, dump["rgw"]["get_initial_lat.sum"], epsilon) + require.Equal(t, float64(0), dump["rgw"]["put_initial_lat.avgcount"]) } func TestDecodeStatus(t *testing.T) { acc := &testutil.Accumulator{} err := decodeStatus(acc, clusterStatusDump) - assert.NoError(t, err) + require.NoError(t, err) for _, r := range cephStatusResults { acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) @@ -69,7 +69,7 @@ func TestDecodeStatus(t *testing.T) { func TestDecodeDf(t *testing.T) { acc := &testutil.Accumulator{} err := decodeDf(acc, cephDFDump) - assert.NoError(t, err) + require.NoError(t, err) for _, r := range cephDfResults { acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) @@ -79,14 +79,14 @@ func TestDecodeDf(t *testing.T) { func TestDecodeOSDPoolStats(t *testing.T) { acc := &testutil.Accumulator{} err := decodeOsdPoolStats(acc, cephODSPoolStatsDump) - assert.NoError(t, err) + require.NoError(t, err) for _, r := range cephOSDPoolStatsResults { acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) } } -func TestGather(_ *testing.T) { +func TestGather(t *testing.T) { saveFind := findSockets saveDump := perfDump defer func() { @@ -104,15 +104,15 @@ func TestGather(_ *testing.T) { acc := &testutil.Accumulator{} c := &Ceph{} - c.Gather(acc) + require.NoError(t, c.Gather(acc)) } func TestFindSockets(t *testing.T) { tmpdir, err := ioutil.TempDir("", "socktest") - assert.NoError(t, err) + require.NoError(t, err) defer func() { err := os.Remove(tmpdir) - assert.NoError(t, err) + require.NoError(t, err) }() c := &Ceph{ CephBinary: "foo", @@ -129,10 +129,10 @@ func TestFindSockets(t *testing.T) { } for _, st := range sockTestParams { - createTestFiles(tmpdir, st) + require.NoError(t, createTestFiles(tmpdir, st)) sockets, err := findSockets(c) - assert.NoError(t, err) + require.NoError(t, err) for i := 1; i <= st.osds; i++ { assertFoundSocket(t, tmpdir, typeOsd, i, sockets) @@ -147,7 +147,7 @@ func TestFindSockets(t *testing.T) { for i := 1; i <= st.rgws; i++ { assertFoundSocket(t, tmpdir, typeRgw, i, sockets) } - cleanupTestFiles(tmpdir, st) + require.NoError(t, cleanupTestFiles(tmpdir, st)) } } @@ -165,54 +165,61 @@ func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*soc expected := filepath.Join(dir, sockFile(prefix, i)) found := false for _, s := range sockets { - fmt.Printf("Checking %s\n", s.socket) + _, err := fmt.Printf("Checking %s\n", s.socket) + require.NoError(t, err) if s.socket == expected { found = true - assert.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s) - assert.Equal(t, s.sockID, strconv.Itoa(i)) + require.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s) + require.Equal(t, s.sockID, strconv.Itoa(i)) } } - assert.True(t, found, "Did not find socket: %s", expected) + require.True(t, found, "Did not find socket: %s", expected) } func sockFile(prefix string, i int) string { return strings.Join([]string{prefix, strconv.Itoa(i), sockSuffix}, ".") } -func createTestFiles(dir string, st *SockTest) { - writeFile := func(prefix string, i int) { +func createTestFiles(dir string, st *SockTest) error { + writeFile := func(prefix string, i int) error { f := sockFile(prefix, i) fpath := filepath.Join(dir, f) - ioutil.WriteFile(fpath, []byte(""), 0777) + return ioutil.WriteFile(fpath, []byte(""), 0777) } - tstFileApply(st, writeFile) + return tstFileApply(st, writeFile) } -func cleanupTestFiles(dir string, st *SockTest) { - rmFile := func(prefix string, i int) { +func cleanupTestFiles(dir string, st *SockTest) error { + rmFile := func(prefix string, i int) error { f := sockFile(prefix, i) fpath := filepath.Join(dir, f) - err := os.Remove(fpath) - if err != nil { - fmt.Printf("Error removing test file %s: %v\n", fpath, err) - } + return os.Remove(fpath) } - tstFileApply(st, rmFile) + return tstFileApply(st, rmFile) } -func tstFileApply(st *SockTest, fn func(prefix string, i int)) { +func tstFileApply(st *SockTest, fn func(string, int) error) error { for i := 1; i <= st.osds; i++ { - fn(osdPrefix, i) + if err := fn(osdPrefix, i); err != nil { + return err + } } for i := 1; i <= st.mons; i++ { - fn(monPrefix, i) + if err := fn(monPrefix, i); err != nil { + return err + } } for i := 1; i <= st.mdss; i++ { - fn(mdsPrefix, i) + if err := fn(mdsPrefix, i); err != nil { + return err + } } for i := 1; i <= st.rgws; i++ { - fn(rgwPrefix, i) + if err := fn(rgwPrefix, i); err != nil { + return err + } } + return nil } type SockTest struct { diff --git a/plugins/inputs/chrony/chrony_test.go b/plugins/inputs/chrony/chrony_test.go index 7c614dbbc75ce..60cb69da79933 100644 --- a/plugins/inputs/chrony/chrony_test.go +++ b/plugins/inputs/chrony/chrony_test.go @@ -94,11 +94,14 @@ Leap status : Not synchronized if cmd == "chronyc" { if args[0] == "tracking" { + //nolint:errcheck,revive // test will fail anyway fmt.Fprint(os.Stdout, lookup+mockData) } else { + //nolint:errcheck,revive // test will fail anyway fmt.Fprint(os.Stdout, noLookup+mockData) } } else { + //nolint:errcheck,revive // test will fail anyway fmt.Fprint(os.Stdout, "command not found") os.Exit(1) } diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index 6dad06061f1cd..20c5362b3e692 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -152,6 +152,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { var opts []grpc.ServerOption tlsConfig, err := c.ServerConfig.TLSConfig() if err != nil { + //nolint:errcheck,revive // we cannot do anything if the closing fails c.listener.Close() return err } else if tlsConfig != nil { @@ -167,11 +168,14 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { c.wg.Add(1) go func() { - c.grpcServer.Serve(c.listener) + if err := c.grpcServer.Serve(c.listener); err != nil { + c.Log.Errorf("serving GRPC server failed: %v", err) + } c.wg.Done() }() default: + //nolint:errcheck,revive // we cannot do anything if the closing fails c.listener.Close() return fmt.Errorf("invalid Cisco MDT transport: %s", c.Transport) } @@ -210,7 +214,9 @@ func (c *CiscoTelemetryMDT) acceptTCPClients() { delete(clients, conn) mutex.Unlock() - conn.Close() + if err := conn.Close(); err != nil { + c.Log.Warnf("closing connection failed: %v", err) + } c.wg.Done() }() } @@ -295,7 +301,9 @@ func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutS if packet.TotalSize == 0 { c.handleTelemetry(packet.Data) } else if int(packet.TotalSize) <= c.MaxMsgSize { - chunkBuffer.Write(packet.Data) + if _, err := chunkBuffer.Write(packet.Data); err != nil { + c.acc.AddError(fmt.Errorf("writing packet %q failed: %v", packet.Data, err)) + } if chunkBuffer.Len() >= int(packet.TotalSize) { c.handleTelemetry(chunkBuffer.Bytes()) chunkBuffer.Reset() @@ -460,7 +468,9 @@ func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telem tags[subfield.Name] = decodeTag(subfield) } if value := decodeValue(subfield); value != nil { - grouper.Add(measurement, tags, timestamp, subfield.Name, value) + if err := grouper.Add(measurement, tags, timestamp, subfield.Name, value); err != nil { + c.Log.Errorf("adding field %q to group failed: %v", subfield.Name, err) + } } if subfield.Name != "nextHop" { continue @@ -475,7 +485,9 @@ func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telem } if value := decodeValue(ff); value != nil { name := "nextHop/" + ff.Name - grouper.Add(measurement, tags, timestamp, name, value) + if err := grouper.Add(measurement, tags, timestamp, name, value); err != nil { + c.Log.Errorf("adding field %q to group failed: %v", name, err) + } } } } @@ -540,9 +552,13 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie } if val := c.nxosValueXform(field, value, encodingPath); val != nil { - grouper.Add(measurement, tags, timestamp, name, val) + if err := grouper.Add(measurement, tags, timestamp, name, val); err != nil { + c.Log.Errorf("adding field %q to group failed: %v", name, err) + } } else { - grouper.Add(measurement, tags, timestamp, name, value) + if err := grouper.Add(measurement, tags, timestamp, name, value); err != nil { + c.Log.Errorf("adding field %q to group failed: %v", name, err) + } } return } @@ -652,9 +668,11 @@ func (c *CiscoTelemetryMDT) Address() net.Addr { func (c *CiscoTelemetryMDT) Stop() { if c.grpcServer != nil { // Stop server and terminate all running dialout routines + //nolint:errcheck,revive // we cannot do anything if the stopping fails c.grpcServer.Stop() } if c.listener != nil { + //nolint:errcheck,revive // we cannot do anything if the closing fails c.listener.Close() } c.wg.Wait() diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index 0a9bde81acaeb..69b2fd1159637 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/binary" "errors" + "io" "net" "testing" @@ -78,7 +79,8 @@ func TestHandleTelemetryTwoSimple(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -149,7 +151,8 @@ func TestHandleTelemetrySingleNested(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -218,7 +221,8 @@ func TestHandleEmbeddedTags(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -306,7 +310,8 @@ func TestHandleNXAPI(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -382,7 +387,8 @@ func TestHandleNXAPIXformNXAPI(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -467,7 +473,8 @@ func TestHandleNXXformMulti(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -539,7 +546,8 @@ func TestHandleNXDME(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -566,9 +574,10 @@ func TestTCPDialoutOverflow(t *testing.T) { addr := c.Address() conn, err := net.Dial(addr.Network(), addr.String()) require.NoError(t, err) - binary.Write(conn, binary.BigEndian, hdr) - conn.Read([]byte{0}) - conn.Close() + require.NoError(t, binary.Write(conn, binary.BigEndian, hdr)) + _, err = conn.Read([]byte{0}) + require.True(t, err == nil || err == io.EOF) + require.NoError(t, conn.Close()) c.Stop() @@ -629,32 +638,42 @@ func TestTCPDialoutMultiple(t *testing.T) { conn, err := net.Dial(addr.Network(), addr.String()) require.NoError(t, err) - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) hdr.MsgLen = uint32(len(data)) - binary.Write(conn, binary.BigEndian, hdr) - conn.Write(data) + require.NoError(t, binary.Write(conn, binary.BigEndian, hdr)) + _, err = conn.Write(data) + require.NoError(t, err) conn2, err := net.Dial(addr.Network(), addr.String()) require.NoError(t, err) telemetry.EncodingPath = "type:model/parallel/path" - data, _ = proto.Marshal(telemetry) + data, err = proto.Marshal(telemetry) + require.NoError(t, err) hdr.MsgLen = uint32(len(data)) - binary.Write(conn2, binary.BigEndian, hdr) - conn2.Write(data) - conn2.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) - conn2.Read([]byte{0}) - conn2.Close() + require.NoError(t, binary.Write(conn2, binary.BigEndian, hdr)) + _, err = conn2.Write(data) + require.NoError(t, err) + _, err = conn2.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) + require.NoError(t, err) + _, err = conn2.Read([]byte{0}) + require.True(t, err == nil || err == io.EOF) + require.NoError(t, conn2.Close()) telemetry.EncodingPath = "type:model/other/path" - data, _ = proto.Marshal(telemetry) + data, err = proto.Marshal(telemetry) + require.NoError(t, err) hdr.MsgLen = uint32(len(data)) - binary.Write(conn, binary.BigEndian, hdr) - conn.Write(data) - conn.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) - conn.Read([]byte{0}) + require.NoError(t, binary.Write(conn, binary.BigEndian, hdr)) + _, err = conn.Write(data) + require.NoError(t, err) + _, err = conn.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) + require.NoError(t, err) + _, err = conn.Read([]byte{0}) + require.True(t, err == nil || err == io.EOF) c.Stop() - conn.Close() + require.NoError(t, conn.Close()) // We use the invalid dialout flags to let the server close the connection require.Equal(t, acc.Errors, []error{errors.New("invalid dialout flags: 257"), errors.New("invalid dialout flags: 257")}) @@ -679,15 +698,18 @@ func TestGRPCDialoutError(t *testing.T) { require.NoError(t, err) addr := c.Address() - conn, _ := grpc.Dial(addr.String(), grpc.WithInsecure()) + conn, err := grpc.Dial(addr.String(), grpc.WithInsecure()) + require.NoError(t, err) client := dialout.NewGRPCMdtDialoutClient(conn) - stream, _ := client.MdtDialout(context.Background()) + stream, err := client.MdtDialout(context.Background()) + require.NoError(t, err) args := &dialout.MdtDialoutArgs{Errors: "foobar"} - stream.Send(args) + require.NoError(t, stream.Send(args)) // Wait for the server to close - stream.Recv() + _, err = stream.Recv() + require.True(t, err == nil || err == io.EOF) c.Stop() require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: foobar")}) @@ -702,35 +724,44 @@ func TestGRPCDialoutMultiple(t *testing.T) { telemetry := mockTelemetryMessage() addr := c.Address() - conn, _ := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + require.NoError(t, err) client := dialout.NewGRPCMdtDialoutClient(conn) - stream, _ := client.MdtDialout(context.TODO()) + stream, err := client.MdtDialout(context.TODO()) + require.NoError(t, err) - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) args := &dialout.MdtDialoutArgs{Data: data, ReqId: 456} - stream.Send(args) + require.NoError(t, stream.Send(args)) - conn2, _ := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + conn2, err := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + require.NoError(t, err) client2 := dialout.NewGRPCMdtDialoutClient(conn2) - stream2, _ := client2.MdtDialout(context.TODO()) + stream2, err := client2.MdtDialout(context.TODO()) + require.NoError(t, err) telemetry.EncodingPath = "type:model/parallel/path" - data, _ = proto.Marshal(telemetry) + data, err = proto.Marshal(telemetry) + require.NoError(t, err) args = &dialout.MdtDialoutArgs{Data: data} - stream2.Send(args) - stream2.Send(&dialout.MdtDialoutArgs{Errors: "testclose"}) - stream2.Recv() - conn2.Close() + require.NoError(t, stream2.Send(args)) + require.NoError(t, stream2.Send(&dialout.MdtDialoutArgs{Errors: "testclose"})) + _, err = stream2.Recv() + require.True(t, err == nil || err == io.EOF) + require.NoError(t, conn2.Close()) telemetry.EncodingPath = "type:model/other/path" - data, _ = proto.Marshal(telemetry) + data, err = proto.Marshal(telemetry) + require.NoError(t, err) args = &dialout.MdtDialoutArgs{Data: data} - stream.Send(args) - stream.Send(&dialout.MdtDialoutArgs{Errors: "testclose"}) - stream.Recv() + require.NoError(t, stream.Send(args)) + require.NoError(t, stream.Send(&dialout.MdtDialoutArgs{Errors: "testclose"})) + _, err = stream.Recv() + require.True(t, err == nil || err == io.EOF) c.Stop() - conn.Close() + require.NoError(t, conn.Close()) require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: testclose"), errors.New("GRPC dialout error: testclose")}) diff --git a/plugins/inputs/clickhouse/clickhouse_test.go b/plugins/inputs/clickhouse/clickhouse_test.go index bf53fdae007d8..d6dcf44221252 100644 --- a/plugins/inputs/clickhouse/clickhouse_test.go +++ b/plugins/inputs/clickhouse/clickhouse_test.go @@ -57,7 +57,7 @@ func TestGather(t *testing.T) { enc := json.NewEncoder(w) switch query := r.URL.Query().Get("query"); { case strings.Contains(query, "system.parts"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Database string `json:"database"` Table string `json:"table"` @@ -74,8 +74,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "system.events"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Metric string `json:"metric"` Value chUInt64 `json:"value"` @@ -90,8 +91,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "system.metrics"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Metric string `json:"metric"` Value chUInt64 `json:"value"` @@ -106,8 +108,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "system.asynchronous_metrics"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Metric string `json:"metric"` Value chUInt64 `json:"value"` @@ -122,8 +125,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "zk_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ZkExists chUInt64 `json:"zk_exists"` }{ @@ -132,8 +136,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "zk_root_nodes"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ZkRootNodes chUInt64 `json:"zk_root_nodes"` }{ @@ -142,8 +147,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "replication_queue_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ReplicationQueueExists chUInt64 `json:"replication_queue_exists"` }{ @@ -152,8 +158,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "replication_too_many_tries_replicas"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { TooManyTriesReplicas chUInt64 `json:"replication_too_many_tries_replicas"` NumTriesReplicas chUInt64 `json:"replication_num_tries_replicas"` @@ -164,8 +171,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "system.detached_parts"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { DetachedParts chUInt64 `json:"detached_parts"` }{ @@ -174,8 +182,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "system.dictionaries"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Origin string `json:"origin"` Status string `json:"status"` @@ -188,8 +197,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "system.mutations"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Failed chUInt64 `json:"failed"` Completed chUInt64 `json:"completed"` @@ -202,8 +212,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "system.disks"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Name string `json:"name"` Path string `json:"path"` @@ -218,8 +229,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "system.processes"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { QueryType string `json:"query_type"` Percentile50 float64 `json:"p50"` @@ -246,8 +258,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "text_log_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { TextLogExists chUInt64 `json:"text_log_exists"` }{ @@ -256,8 +269,9 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "system.text_log"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Level string `json:"level"` LastMessagesLast10Min chUInt64 `json:"messages_last_10_min"` @@ -284,6 +298,7 @@ func TestGather(t *testing.T) { }, }, }) + assert.NoError(t, err) } })) ch = &ClickHouse{ @@ -294,7 +309,7 @@ func TestGather(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - ch.Gather(acc) + assert.NoError(t, ch.Gather(acc)) acc.AssertContainsTaggedFields(t, "clickhouse_tables", map[string]interface{}{ @@ -427,7 +442,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { enc := json.NewEncoder(w) switch query := r.URL.Query().Get("query"); { case strings.Contains(query, "zk_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ZkExists chUInt64 `json:"zk_exists"` }{ @@ -436,8 +451,9 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "replication_queue_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ReplicationQueueExists chUInt64 `json:"replication_queue_exists"` }{ @@ -446,8 +462,9 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { }, }, }) + assert.NoError(t, err) case strings.Contains(query, "text_log_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { TextLogExists chUInt64 `json:"text_log_exists"` }{ @@ -456,6 +473,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { }, }, }) + assert.NoError(t, err) } })) ch = &ClickHouse{ @@ -467,7 +485,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - ch.Gather(acc) + assert.NoError(t, ch.Gather(acc)) acc.AssertDoesNotContainMeasurement(t, "clickhouse_zookeeper") acc.AssertDoesNotContainMeasurement(t, "clickhouse_replication_queue") @@ -482,9 +500,10 @@ func TestWrongJSONMarshalling(t *testing.T) { } enc := json.NewEncoder(w) //wrong data section json - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct{}{}, }) + assert.NoError(t, err) })) ch = &ClickHouse{ Servers: []string{ @@ -495,7 +514,7 @@ func TestWrongJSONMarshalling(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - ch.Gather(acc) + assert.NoError(t, ch.Gather(acc)) assert.Equal(t, 0, len(acc.Metrics)) allMeasurements := []string{ @@ -528,7 +547,7 @@ func TestOfflineServer(t *testing.T) { }, } ) - ch.Gather(acc) + assert.NoError(t, ch.Gather(acc)) assert.Equal(t, 0, len(acc.Metrics)) allMeasurements := []string{ @@ -548,7 +567,7 @@ func TestOfflineServer(t *testing.T) { assert.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) } -func TestAutoDiscovery(_ *testing.T) { +func TestAutoDiscovery(t *testing.T) { var ( ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { type result struct { @@ -557,7 +576,7 @@ func TestAutoDiscovery(_ *testing.T) { enc := json.NewEncoder(w) switch query := r.URL.Query().Get("query"); { case strings.Contains(query, "system.clusters"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Cluster string `json:"test"` Hostname string `json:"localhost"` @@ -570,6 +589,7 @@ func TestAutoDiscovery(_ *testing.T) { }, }, }) + assert.NoError(t, err) } })) ch = &ClickHouse{ @@ -582,5 +602,5 @@ func TestAutoDiscovery(_ *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - ch.Gather(acc) + assert.NoError(t, ch.Gather(acc)) } diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push.go b/plugins/inputs/cloud_pubsub_push/pubsub_push.go index 5b434599a986f..ef43a3d5eb161 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push.go @@ -169,9 +169,13 @@ func (p *PubSubPush) Start(acc telegraf.Accumulator) error { go func() { defer p.wg.Done() if tlsConf != nil { - p.server.ListenAndServeTLS("", "") + if err := p.server.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed { + p.Log.Errorf("listening and serving TLS failed: %v", err) + } } else { - p.server.ListenAndServe() + if err := p.server.ListenAndServe(); err != nil { + p.Log.Errorf("listening and serving TLS failed: %v", err) + } } }() @@ -181,6 +185,7 @@ func (p *PubSubPush) Start(acc telegraf.Accumulator) error { // Stop cleans up all resources func (p *PubSubPush) Stop() { p.cancel() + //nolint:errcheck,revive // we cannot do anything if the shutdown fails p.server.Shutdown(p.ctx) p.wg.Wait() } diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go index bd958e961dd0a..0523375229429 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go @@ -156,6 +156,7 @@ func TestServeHTTP(t *testing.T) { defer wg.Done() for m := range d { ro.AddMetric(m) + //nolint:errcheck,revive // test will fail anyway if the write fails ro.Write() } }(dst) diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index f108aceb68f44..34088110ea398 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -584,7 +584,9 @@ func (c *CloudWatch) aggregateMetrics( tags["region"] = c.Region for i := range result.Values { - grouper.Add(namespace, tags, *result.Timestamps[i], *result.Label, *result.Values[i]) + if err := grouper.Add(namespace, tags, *result.Timestamps[i], *result.Label, *result.Values[i]); err != nil { + acc.AddError(err) + } } } diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 56aee346886e2..158f29a1bc26a 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -241,7 +241,7 @@ func TestGenerateStatisticsInputParams(t *testing.T) { Period: internalDuration, } - c.initializeCloudWatch() + require.NoError(t, c.initializeCloudWatch()) now := time.Now() @@ -278,7 +278,7 @@ func TestGenerateStatisticsInputParamsFiltered(t *testing.T) { Period: internalDuration, } - c.initializeCloudWatch() + require.NoError(t, c.initializeCloudWatch()) now := time.Now() diff --git a/plugins/inputs/conntrack/conntrack_test.go b/plugins/inputs/conntrack/conntrack_test.go index 9c144afe84e53..e554f4e90d262 100644 --- a/plugins/inputs/conntrack/conntrack_test.go +++ b/plugins/inputs/conntrack/conntrack_test.go @@ -11,7 +11,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func restoreDflts(savedFiles, savedDirs []string) { @@ -28,18 +28,18 @@ func TestNoFilesFound(t *testing.T) { acc := &testutil.Accumulator{} err := c.Gather(acc) - assert.EqualError(t, err, "Conntrack input failed to collect metrics. "+ + require.EqualError(t, err, "Conntrack input failed to collect metrics. "+ "Is the conntrack kernel module loaded?") } func TestDefaultsUsed(t *testing.T) { defer restoreDflts(dfltFiles, dfltDirs) tmpdir, err := ioutil.TempDir("", "tmp1") - assert.NoError(t, err) + require.NoError(t, err) defer os.Remove(tmpdir) tmpFile, err := ioutil.TempFile(tmpdir, "ip_conntrack_count") - assert.NoError(t, err) + require.NoError(t, err) defer os.Remove(tmpFile.Name()) dfltDirs = []string{tmpdir} @@ -47,11 +47,11 @@ func TestDefaultsUsed(t *testing.T) { dfltFiles = []string{fname} count := 1234321 - ioutil.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0660) + require.NoError(t, ioutil.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0660)) c := &Conntrack{} acc := &testutil.Accumulator{} - c.Gather(acc) + require.NoError(t, c.Gather(acc)) acc.AssertContainsFields(t, inputName, map[string]interface{}{ fname: float64(count)}) } @@ -59,12 +59,13 @@ func TestDefaultsUsed(t *testing.T) { func TestConfigsUsed(t *testing.T) { defer restoreDflts(dfltFiles, dfltDirs) tmpdir, err := ioutil.TempDir("", "tmp1") - assert.NoError(t, err) + require.NoError(t, err) defer os.Remove(tmpdir) cntFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_count") + require.NoError(t, err) maxFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_max") - assert.NoError(t, err) + require.NoError(t, err) defer os.Remove(cntFile.Name()) defer os.Remove(maxFile.Name()) @@ -75,12 +76,12 @@ func TestConfigsUsed(t *testing.T) { count := 1234321 max := 9999999 - ioutil.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0660) - ioutil.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0660) + require.NoError(t, ioutil.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0660)) + require.NoError(t, ioutil.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0660)) c := &Conntrack{} acc := &testutil.Accumulator{} - c.Gather(acc) + require.NoError(t, c.Gather(acc)) fix := func(s string) string { return strings.Replace(s, "nf_", "ip_", 1) diff --git a/plugins/inputs/couchdb/couchdb.go b/plugins/inputs/couchdb/couchdb.go index d96c73f836977..fc165f4cf676c 100644 --- a/plugins/inputs/couchdb/couchdb.go +++ b/plugins/inputs/couchdb/couchdb.go @@ -140,9 +140,9 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri req.SetBasicAuth(c.BasicUsername, c.BasicPassword) } - response, error := c.client.Do(req) - if error != nil { - return error + response, err := c.client.Do(req) + if err != nil { + return err } defer response.Body.Close() @@ -152,7 +152,9 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri stats := Stats{} decoder := json.NewDecoder(response.Body) - decoder.Decode(&stats) + if err := decoder.Decode(&stats); err != nil { + return fmt.Errorf("failed to decode stats from couchdb: HTTP body %q", response.Body) + } fields := map[string]interface{}{} diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index f31c4588ba555..534c2fcb1eab7 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -292,6 +292,7 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er return err } defer func() { + //nolint:errcheck,revive // we cannot do anything if the closing fails resp.Body.Close() <-c.semaphore }() diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go index 06ac3ea9568f5..d8ed8acf04764 100644 --- a/plugins/inputs/directory_monitor/directory_monitor.go +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -39,7 +39,7 @@ const sampleConfig = ` ## The amount of time a file is allowed to sit in the directory before it is picked up. ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow, ## set this higher so that the plugin will wait until the file is fully copied to the directory. - # directory_duration_threshold = "50ms" + # directory_duration_threshold = "50ms" # ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested. # files_to_monitor = ["^.*\.csv"] @@ -118,6 +118,7 @@ func (monitor *DirectoryMonitor) Gather(_ telegraf.Accumulator) error { // We've been cancelled via Stop(). if monitor.context.Err() != nil { + //nolint:nilerr // context cancelation is not an error return nil } @@ -266,7 +267,9 @@ func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Read firstLine = false } - monitor.sendMetrics(metrics) + if err := monitor.sendMetrics(metrics); err != nil { + return err + } } return nil @@ -295,13 +298,16 @@ func (monitor *DirectoryMonitor) parseLine(parser parsers.Parser, line []byte, f } } -func (monitor *DirectoryMonitor) sendMetrics(metrics []telegraf.Metric) { +func (monitor *DirectoryMonitor) sendMetrics(metrics []telegraf.Metric) error { // Report the metrics for the file. for _, m := range metrics { // Block until metric can be written. - monitor.sem.Acquire(monitor.context, 1) + if err := monitor.sem.Acquire(monitor.context, 1); err != nil { + return err + } monitor.acc.AddTrackingMetricGroup([]telegraf.Metric{m}) } + return nil } func (monitor *DirectoryMonitor) moveFile(filePath string, directory string) { @@ -344,7 +350,7 @@ func (monitor *DirectoryMonitor) SetParserFunc(fn parsers.ParserFunc) { func (monitor *DirectoryMonitor) Init() error { if monitor.Directory == "" || monitor.FinishedDirectory == "" { - return errors.New("Missing one of the following required config options: directory, finished_directory.") + return errors.New("missing one of the following required config options: directory, finished_directory") } if monitor.FileQueueSize <= 0 { diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go index e74a1b27667de..3cad4ee6857b9 100644 --- a/plugins/inputs/directory_monitor/directory_monitor_test.go +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -49,15 +49,20 @@ func TestCSVGZImport(t *testing.T) { // Write csv file to process into the 'process' directory. f, err := os.Create(filepath.Join(processDirectory, testCsvFile)) require.NoError(t, err) - f.WriteString("thing,color\nsky,blue\ngrass,green\nclifford,red\n") - f.Close() + _, err = f.WriteString("thing,color\nsky,blue\ngrass,green\nclifford,red\n") + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) // Write csv.gz file to process into the 'process' directory. var b bytes.Buffer w := gzip.NewWriter(&b) - w.Write([]byte("thing,color\nsky,blue\ngrass,green\nclifford,red\n")) - w.Close() + _, err = w.Write([]byte("thing,color\nsky,blue\ngrass,green\nclifford,red\n")) + require.NoError(t, err) + err = w.Close() + require.NoError(t, err) err = ioutil.WriteFile(filepath.Join(processDirectory, testCsvGzFile), b.Bytes(), 0666) + require.NoError(t, err) // Start plugin before adding file. err = r.Start(&acc) @@ -112,8 +117,10 @@ func TestMultipleJSONFileImports(t *testing.T) { // Write csv file to process into the 'process' directory. f, err := os.Create(filepath.Join(processDirectory, testJSONFile)) require.NoError(t, err) - f.WriteString("{\"Name\": \"event1\",\"Speed\": 100.1,\"Length\": 20.1}\n{\"Name\": \"event2\",\"Speed\": 500,\"Length\": 1.4}\n{\"Name\": \"event3\",\"Speed\": 200,\"Length\": 10.23}\n{\"Name\": \"event4\",\"Speed\": 80,\"Length\": 250}\n{\"Name\": \"event5\",\"Speed\": 120.77,\"Length\": 25.97}") - f.Close() + _, err = f.WriteString("{\"Name\": \"event1\",\"Speed\": 100.1,\"Length\": 20.1}\n{\"Name\": \"event2\",\"Speed\": 500,\"Length\": 1.4}\n{\"Name\": \"event3\",\"Speed\": 200,\"Length\": 10.23}\n{\"Name\": \"event4\",\"Speed\": 80,\"Length\": 250}\n{\"Name\": \"event5\",\"Speed\": 120.77,\"Length\": 25.97}") + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) err = r.Start(&acc) r.Log = testutil.Logger{} diff --git a/plugins/inputs/diskio/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go index 01ca7055e3db4..c356d49cb7b68 100644 --- a/plugins/inputs/diskio/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -58,10 +58,10 @@ func (d *DiskIO) diskInfo(devName string) (map[string]string, error) { } // Final open of the confirmed (or the previously detected/used) udev file f, err := os.Open(udevDataPath) - defer f.Close() if err != nil { return nil, err } + defer f.Close() di := map[string]string{} @@ -80,9 +80,12 @@ func (d *DiskIO) diskInfo(devName string) (map[string]string, error) { } if l[:2] == "S:" { if devlinks.Len() > 0 { + //nolint:errcheck,revive // this will never fail devlinks.WriteString(" ") } + //nolint:errcheck,revive // this will never fail devlinks.WriteString("/dev/") + //nolint:errcheck,revive // this will never fail devlinks.WriteString(l[2:]) continue } diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index 8895afeec1563..222cb783f1870 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -7,7 +7,6 @@ import ( "os" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -19,7 +18,7 @@ S:foo/bar/devlink1 `) // setupNullDisk sets up fake udev info as if /dev/null were a disk. -func setupNullDisk(t *testing.T, s *DiskIO, devName string) func() error { +func setupNullDisk(t *testing.T, s *DiskIO, devName string) func() { td, err := ioutil.TempFile("", ".telegraf.DiskInfoTest") require.NoError(t, err) @@ -37,9 +36,10 @@ func setupNullDisk(t *testing.T, s *DiskIO, devName string) func() error { } origUdevPath := ic.udevDataPath - cleanFunc := func() error { + cleanFunc := func() { ic.udevDataPath = origUdevPath - return os.Remove(td.Name()) + //nolint:errcheck,revive // we cannot do anything if file cannot be removed + os.Remove(td.Name()) } ic.udevDataPath = td.Name() @@ -58,19 +58,18 @@ func TestDiskInfo(t *testing.T) { defer clean() di, err := s.diskInfo("null") require.NoError(t, err) - assert.Equal(t, "myval1", di["MY_PARAM_1"]) - assert.Equal(t, "myval2", di["MY_PARAM_2"]) - assert.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) + require.Equal(t, "myval1", di["MY_PARAM_1"]) + require.Equal(t, "myval2", di["MY_PARAM_2"]) + require.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) // test that data is cached - err = clean() - require.NoError(t, err) + clean() di, err = s.diskInfo("null") require.NoError(t, err) - assert.Equal(t, "myval1", di["MY_PARAM_1"]) - assert.Equal(t, "myval2", di["MY_PARAM_2"]) - assert.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) + require.Equal(t, "myval1", di["MY_PARAM_1"]) + require.Equal(t, "myval2", di["MY_PARAM_2"]) + require.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) // unfortunately we can't adjust mtime on /dev/null to test cache invalidation } @@ -98,7 +97,7 @@ func TestDiskIOStats_diskName(t *testing.T) { } defer setupNullDisk(t, &s, "null")() name, _ := s.diskName("null") - assert.Equal(t, tc.expected, name, "Templates: %#v", tc.templates) + require.Equal(t, tc.expected, name, "Templates: %#v", tc.templates) } } @@ -110,5 +109,5 @@ func TestDiskIOStats_diskTags(t *testing.T) { } defer setupNullDisk(t, s, "null")() dt := s.diskTags("null") - assert.Equal(t, map[string]string{"MY_PARAM_2": "myval2"}, dt) + require.Equal(t, map[string]string{"MY_PARAM_2": "myval2"}, dt) } diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index 8ae098011b0a1..6c2606af4ad94 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -68,8 +68,7 @@ func (d *Disque) Gather(acc telegraf.Accumulator) error { url := &url.URL{ Host: ":7711", } - d.gatherServer(url, acc) - return nil + return d.gatherServer(url, acc) } var wg sync.WaitGroup @@ -114,7 +113,9 @@ func (d *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { if addr.User != nil { pwd, set := addr.User.Password() if set && pwd != "" { - c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd))) + if _, err := c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd))); err != nil { + return err + } r := bufio.NewReader(c) @@ -132,9 +133,13 @@ func (d *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { } // Extend connection - d.c.SetDeadline(time.Now().Add(defaultTimeout)) + if err := d.c.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return err + } - d.c.Write([]byte("info\r\n")) + if _, err := d.c.Write([]byte("info\r\n")); err != nil { + return err + } r := bufio.NewReader(d.c) diff --git a/plugins/inputs/disque/disque_test.go b/plugins/inputs/disque/disque_test.go index e215e78a5f777..4eacbd76c6a1e 100644 --- a/plugins/inputs/disque/disque_test.go +++ b/plugins/inputs/disque/disque_test.go @@ -38,8 +38,12 @@ func TestDisqueGeneratesMetricsIntegration(t *testing.T) { return } - fmt.Fprintf(c, "$%d\n", len(testOutput)) - c.Write([]byte(testOutput)) + if _, err := fmt.Fprintf(c, "$%d\n", len(testOutput)); err != nil { + return + } + if _, err := c.Write([]byte(testOutput)); err != nil { + return + } } }() @@ -104,8 +108,12 @@ func TestDisqueCanPullStatsFromMultipleServersIntegration(t *testing.T) { return } - fmt.Fprintf(c, "$%d\n", len(testOutput)) - c.Write([]byte(testOutput)) + if _, err := fmt.Fprintf(c, "$%d\n", len(testOutput)); err != nil { + return + } + if _, err := c.Write([]byte(testOutput)); err != nil { + return + } } }() diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index c9c19da3c2f6e..88adc600e77eb 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -911,7 +911,7 @@ func TestDockerGatherSwarmInfo(t *testing.T) { err := acc.GatherError(d.Gather) require.NoError(t, err) - d.gatherSwarmInfo(&acc) + require.NoError(t, d.gatherSwarmInfo(&acc)) // test docker_container_net measurement acc.AssertContainsTaggedFields(t, diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index 73bcefb3d887c..f877961ba2676 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -398,8 +398,11 @@ func tailMultiplexed( }() _, err := stdcopy.StdCopy(outWriter, errWriter, src) + //nolint:errcheck,revive // we cannot do anything if the closing fails outWriter.Close() + //nolint:errcheck,revive // we cannot do anything if the closing fails errWriter.Close() + //nolint:errcheck,revive // we cannot do anything if the closing fails src.Close() wg.Wait() return err diff --git a/plugins/inputs/docker_log/docker_log_test.go b/plugins/inputs/docker_log/docker_log_test.go index 6d92b73ee6d41..49a73ebe9f1bb 100644 --- a/plugins/inputs/docker_log/docker_log_test.go +++ b/plugins/inputs/docker_log/docker_log_test.go @@ -138,8 +138,8 @@ func Test(t *testing.T) { ContainerLogsF: func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { var buf bytes.Buffer w := stdcopy.NewStdWriter(&buf, stdcopy.Stdout) - w.Write([]byte("2020-04-28T18:42:16.432691200Z hello from stdout")) - return &Response{Reader: &buf}, nil + _, err := w.Write([]byte("2020-04-28T18:42:16.432691200Z hello from stdout")) + return &Response{Reader: &buf}, err }, }, expected: []telegraf.Metric{ diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index be2ea49d48134..94c941655ccc8 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -90,7 +90,9 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype stri defer c.Close() // Extend connection - c.SetDeadline(time.Now().Add(defaultTimeout)) + if err := c.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return fmt.Errorf("setting deadline failed for dovecot server '%s': %s", addr, err) + } msg := fmt.Sprintf("EXPORT\t%s", qtype) if len(filter) > 0 { @@ -98,9 +100,13 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype stri } msg += "\n" - c.Write([]byte(msg)) + if _, err := c.Write([]byte(msg)); err != nil { + return fmt.Errorf("writing message %q failed for dovecot server '%s': %s", msg, addr, err) + } var buf bytes.Buffer - io.Copy(&buf, c) + if _, err := io.Copy(&buf, c); err != nil { + return fmt.Errorf("copying message failed for dovecot server '%s': %s", addr, err) + } host, _, _ := net.SplitHostPort(addr) diff --git a/plugins/inputs/ecs/ecs_test.go b/plugins/inputs/ecs/ecs_test.go index 5d64fef01efad..5a837d1ae4517 100644 --- a/plugins/inputs/ecs/ecs_test.go +++ b/plugins/inputs/ecs/ecs_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/docker/docker/api/types" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // codified golden objects for tests @@ -800,10 +800,10 @@ func TestResolveEndpoint(t *testing.T) { { name: "Endpoint is not set, ECS_CONTAINER_METADATA_URI is set => use v3 metadata", preF: func() { - os.Setenv("ECS_CONTAINER_METADATA_URI", "v3-endpoint.local") + require.NoError(t, os.Setenv("ECS_CONTAINER_METADATA_URI", "v3-endpoint.local")) }, afterF: func() { - os.Unsetenv("ECS_CONTAINER_METADATA_URI") + require.NoError(t, os.Unsetenv("ECS_CONTAINER_METADATA_URI")) }, given: Ecs{ EndpointURL: "", @@ -825,7 +825,7 @@ func TestResolveEndpoint(t *testing.T) { act := tt.given resolveEndpoint(&act) - assert.Equal(t, tt.exp, act) + require.Equal(t, tt.exp, act) }) } } diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index 4a02e927678da..1a24d3caaf66e 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -8,7 +8,6 @@ import ( "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -49,14 +48,7 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { return res, nil } -func (t *transportMock) CancelRequest(_ *http.Request) { -} - -func checkIsMaster(es *Elasticsearch, server string, expected bool, t *testing.T) { - if es.serverInfo[server].isMaster() != expected { - assert.Fail(t, "IsMaster set incorrectly") - } -} +func (t *transportMock) CancelRequest(_ *http.Request) {} func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) { tags := defaultTags() @@ -79,11 +71,8 @@ func TestGather(t *testing.T) { es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := acc.GatherError(es.Gather); err != nil { - t.Fatal(err) - } - - checkIsMaster(es, es.Servers[0], false, t) + require.NoError(t, acc.GatherError(es.Gather)) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") checkNodeStatsResult(t, &acc) } @@ -96,11 +85,8 @@ func TestGatherIndividualStats(t *testing.T) { es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := acc.GatherError(es.Gather); err != nil { - t.Fatal(err) - } - - checkIsMaster(es, es.Servers[0], false, t) + require.NoError(t, acc.GatherError(es.Gather)) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") tags := defaultTags() acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags) @@ -122,11 +108,8 @@ func TestGatherNodeStats(t *testing.T) { es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := es.gatherNodeStats("junk", &acc); err != nil { - t.Fatal(err) - } - - checkIsMaster(es, es.Servers[0], false, t) + require.NoError(t, es.gatherNodeStats("junk", &acc)) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") checkNodeStatsResult(t, &acc) } @@ -141,8 +124,7 @@ func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) { var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - - checkIsMaster(es, es.Servers[0], false, t) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, @@ -168,8 +150,7 @@ func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) { var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - - checkIsMaster(es, es.Servers[0], false, t) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, @@ -195,8 +176,7 @@ func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) { var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - - checkIsMaster(es, es.Servers[0], false, t) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, @@ -227,19 +207,14 @@ func TestGatherClusterStatsMaster(t *testing.T) { es.serverInfo["http://example.com:9200"] = info IsMasterResultTokens := strings.Split(string(IsMasterResult), " ") - if masterID != IsMasterResultTokens[0] { - assert.Fail(t, "catmaster is incorrect") - } + require.Equal(t, masterID, IsMasterResultTokens[0], "catmaster is incorrect") // now get node status, which determines whether we're master var acc testutil.Accumulator es.Local = true es.client.Transport = newTransportMock(nodeStatsResponse) - if err := es.gatherNodeStats("junk", &acc); err != nil { - t.Fatal(err) - } - - checkIsMaster(es, es.Servers[0], true, t) + require.NoError(t, es.gatherNodeStats("junk", &acc)) + require.True(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") checkNodeStatsResult(t, &acc) // now test the clusterstats method @@ -270,20 +245,16 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { require.NoError(t, err) IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ") - if masterID != IsNotMasterResultTokens[0] { - assert.Fail(t, "catmaster is incorrect") - } + require.Equal(t, masterID, IsNotMasterResultTokens[0], "catmaster is incorrect") // now get node status, which determines whether we're master var acc testutil.Accumulator es.Local = true es.client.Transport = newTransportMock(nodeStatsResponse) - if err := es.gatherNodeStats("junk", &acc); err != nil { - t.Fatal(err) - } + require.NoError(t, es.gatherNodeStats("junk", &acc)) // ensure flag is clear so Cluster Stats would not be done - checkIsMaster(es, es.Servers[0], false, t) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") checkNodeStatsResult(t, &acc) } @@ -296,9 +267,7 @@ func TestGatherClusterIndicesStats(t *testing.T) { es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := es.gatherIndicesStats("junk", &acc); err != nil { - t.Fatal(err) - } + require.NoError(t, es.gatherIndicesStats("junk", &acc)) acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", clusterIndicesExpected, @@ -313,12 +282,10 @@ func TestGatherDateStampedIndicesStats(t *testing.T) { es.client.Transport = newTransportMock(dateStampedIndicesResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() - es.Init() + require.NoError(t, es.Init()) var acc testutil.Accumulator - if err := es.gatherIndicesStats(es.Servers[0]+"/"+strings.Join(es.IndicesInclude, ",")+"/_stats", &acc); err != nil { - t.Fatal(err) - } + require.NoError(t, es.gatherIndicesStats(es.Servers[0]+"/"+strings.Join(es.IndicesInclude, ",")+"/_stats", &acc)) // includes 2 most recent indices for "twitter", only expect the most recent two. acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", @@ -357,9 +324,7 @@ func TestGatherClusterIndiceShardsStats(t *testing.T) { es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := es.gatherIndicesStats("junk", &acc); err != nil { - t.Fatal(err) - } + require.NoError(t, es.gatherIndicesStats("junk", &acc)) acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", clusterIndicesExpected, diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 3cd8beb029a7f..afc6beb6a7a80 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -111,6 +111,7 @@ func (c CommandRunner) truncate(buf bytes.Buffer) bytes.Buffer { buf.Truncate(i) } if didTruncate { + //nolint:errcheck,revive // Will always return nil or panic buf.WriteString("...") } return buf diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index 67609bf64af9e..bdd11433d1ab6 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -200,12 +200,14 @@ func TestTruncate(t *testing.T) { name: "should not truncate", bufF: func() *bytes.Buffer { var b bytes.Buffer - b.WriteString("hello world") + _, err := b.WriteString("hello world") + require.NoError(t, err) return &b }, expF: func() *bytes.Buffer { var b bytes.Buffer - b.WriteString("hello world") + _, err := b.WriteString("hello world") + require.NoError(t, err) return &b }, }, @@ -213,12 +215,14 @@ func TestTruncate(t *testing.T) { name: "should truncate up to the new line", bufF: func() *bytes.Buffer { var b bytes.Buffer - b.WriteString("hello world\nand all the people") + _, err := b.WriteString("hello world\nand all the people") + require.NoError(t, err) return &b }, expF: func() *bytes.Buffer { var b bytes.Buffer - b.WriteString("hello world...") + _, err := b.WriteString("hello world...") + require.NoError(t, err) return &b }, }, @@ -227,16 +231,17 @@ func TestTruncate(t *testing.T) { bufF: func() *bytes.Buffer { var b bytes.Buffer for i := 0; i < 2*MaxStderrBytes; i++ { - b.WriteByte('b') + require.NoError(t, b.WriteByte('b')) } return &b }, expF: func() *bytes.Buffer { var b bytes.Buffer for i := 0; i < MaxStderrBytes; i++ { - b.WriteByte('b') + require.NoError(t, b.WriteByte('b')) } - b.WriteString("...") + _, err := b.WriteString("...") + require.NoError(t, err) return &b }, }, diff --git a/plugins/inputs/execd/execd_posix.go b/plugins/inputs/execd/execd_posix.go index 08275c62db5be..9593aaba0af29 100644 --- a/plugins/inputs/execd/execd_posix.go +++ b/plugins/inputs/execd/execd_posix.go @@ -23,17 +23,19 @@ func (e *Execd) Gather(_ telegraf.Accumulator) error { } switch e.Signal { case "SIGHUP": - osProcess.Signal(syscall.SIGHUP) + return osProcess.Signal(syscall.SIGHUP) case "SIGUSR1": - osProcess.Signal(syscall.SIGUSR1) + return osProcess.Signal(syscall.SIGUSR1) case "SIGUSR2": - osProcess.Signal(syscall.SIGUSR2) + return osProcess.Signal(syscall.SIGUSR2) case "STDIN": if osStdin, ok := e.process.Stdin.(*os.File); ok { - osStdin.SetWriteDeadline(time.Now().Add(1 * time.Second)) + if err := osStdin.SetWriteDeadline(time.Now().Add(1 * time.Second)); err != nil { + return fmt.Errorf("setting write deadline failed: %s", err) + } } if _, err := io.WriteString(e.process.Stdin, "\n"); err != nil { - return fmt.Errorf("Error writing to stdin: %s", err) + return fmt.Errorf("writing to stdin failed: %s", err) } case "none": default: diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index e95ed133f9cba..72c84e1d12cc6 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -153,19 +153,22 @@ var counter = flag.Bool("counter", false, func TestMain(m *testing.M) { flag.Parse() if *counter { - runCounterProgram() + if err := runCounterProgram(); err != nil { + os.Exit(1) + } os.Exit(0) } code := m.Run() os.Exit(code) } -func runCounterProgram() { +func runCounterProgram() error { i := 0 serializer, err := serializers.NewInfluxSerializer() if err != nil { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintln(os.Stderr, "ERR InfluxSerializer failed to load") - os.Exit(1) + return err } scanner := bufio.NewScanner(os.Stdin) @@ -181,9 +184,13 @@ func runCounterProgram() { b, err := serializer.Serialize(m) if err != nil { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintf(os.Stderr, "ERR %v\n", err) - os.Exit(1) + return err + } + if _, err := fmt.Fprint(os.Stdout, string(b)); err != nil { + return err } - fmt.Fprint(os.Stdout, string(b)) } + return nil } diff --git a/plugins/inputs/execd/shim/goshim.go b/plugins/inputs/execd/shim/goshim.go index 920d40f8dfddf..075d2cf55ab62 100644 --- a/plugins/inputs/execd/shim/goshim.go +++ b/plugins/inputs/execd/shim/goshim.go @@ -57,8 +57,7 @@ var ( // New creates a new shim interface func New() *Shim { - fmt.Fprintf(os.Stderr, "%s is deprecated; please change your import to %s\n", - oldpkg, newpkg) + _, _ = fmt.Fprintf(os.Stderr, "%s is deprecated; please change your import to %s\n", oldpkg, newpkg) return &Shim{ stdin: os.Stdin, stdout: os.Stdout, @@ -155,7 +154,9 @@ loop: return fmt.Errorf("failed to serialize metric: %s", err) } // Write this to stdout - fmt.Fprint(s.stdout, string(b)) + if _, err := fmt.Fprint(s.stdout, string(b)); err != nil { + return fmt.Errorf("failed to write %q to stdout: %s", string(b), err) + } } } @@ -232,11 +233,17 @@ func (s *Shim) startGathering(ctx context.Context, input telegraf.Input, acc tel return case <-gatherPromptCh: if err := input.Gather(acc); err != nil { - fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err) + if _, perr := fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err); perr != nil { + acc.AddError(err) + acc.AddError(perr) + } } case <-t.C: if err := input.Gather(acc); err != nil { - fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err) + if _, perr := fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err); perr != nil { + acc.AddError(err) + acc.AddError(perr) + } } } } diff --git a/plugins/inputs/execd/shim/shim_posix_test.go b/plugins/inputs/execd/shim/shim_posix_test.go index 594985d23ffc1..75484c45c78a0 100644 --- a/plugins/inputs/execd/shim/shim_posix_test.go +++ b/plugins/inputs/execd/shim/shim_posix_test.go @@ -37,7 +37,7 @@ func TestShimUSR1SignalingWorks(t *testing.T) { return // test is done default: // test isn't done, keep going. - process.Signal(syscall.SIGUSR1) + require.NoError(t, process.Signal(syscall.SIGUSR1)) time.Sleep(200 * time.Millisecond) } } @@ -51,7 +51,7 @@ func TestShimUSR1SignalingWorks(t *testing.T) { require.NoError(t, err) require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) - stdinWriter.Close() + require.NoError(t, stdinWriter.Close()) readUntilEmpty(r) <-exited diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go index 07afde130e04c..396928ff44036 100644 --- a/plugins/inputs/execd/shim/shim_test.go +++ b/plugins/inputs/execd/shim/shim_test.go @@ -36,7 +36,8 @@ func TestShimStdinSignalingWorks(t *testing.T) { metricProcessed, exited := runInputPlugin(t, 40*time.Second, stdinReader, stdoutWriter, nil) - stdinWriter.Write([]byte("\n")) + _, err := stdinWriter.Write([]byte("\n")) + require.NoError(t, err) <-metricProcessed @@ -45,7 +46,7 @@ func TestShimStdinSignalingWorks(t *testing.T) { require.NoError(t, err) require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) - stdinWriter.Close() + require.NoError(t, stdinWriter.Close()) readUntilEmpty(r) @@ -71,7 +72,7 @@ func runInputPlugin(t *testing.T, interval time.Duration, stdin io.Reader, stdou shim.stderr = stderr } - shim.AddInput(inp) + require.NoError(t, shim.AddInput(inp)) go func() { err := shim.Run(interval) require.NoError(t, err) @@ -112,8 +113,8 @@ func (i *testInput) Stop() { } func TestLoadConfig(t *testing.T) { - os.Setenv("SECRET_TOKEN", "xxxxxxxxxx") - os.Setenv("SECRET_VALUE", `test"\test`) + require.NoError(t, os.Setenv("SECRET_TOKEN", "xxxxxxxxxx")) + require.NoError(t, os.Setenv("SECRET_VALUE", `test"\test`)) inputs.Add("test", func() telegraf.Input { return &serviceInput{} diff --git a/plugins/inputs/fail2ban/fail2ban_test.go b/plugins/inputs/fail2ban/fail2ban_test.go index ecb539acd3166..8ec313a1fbdda 100644 --- a/plugins/inputs/fail2ban/fail2ban_test.go +++ b/plugins/inputs/fail2ban/fail2ban_test.go @@ -101,25 +101,31 @@ func TestHelperProcess(_ *testing.T) { cmd, args := args[3], args[4:] if !strings.HasSuffix(cmd, "fail2ban-client") { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, "command not found") os.Exit(1) } if len(args) == 1 && args[0] == "status" { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusOutput) os.Exit(0) } else if len(args) == 2 && args[0] == "status" { if args[1] == "sshd" { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusSshdOutput) os.Exit(0) } else if args[1] == "postfix" { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusPostfixOutput) os.Exit(0) } else if args[1] == "dovecot" { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusDovecotOutput) os.Exit(0) } } + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, "invalid argument") os.Exit(1) } diff --git a/plugins/inputs/fibaro/fibaro_test.go b/plugins/inputs/fibaro/fibaro_test.go index 32a1447e3ef4d..dac8bc6fdf47a 100644 --- a/plugins/inputs/fibaro/fibaro_test.go +++ b/plugins/inputs/fibaro/fibaro_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -162,7 +161,8 @@ func TestJSONSuccess(t *testing.T) { payload = devicesJSON } w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, payload) + _, err := fmt.Fprintln(w, payload) + require.NoError(t, err) })) defer ts.Close() @@ -178,7 +178,7 @@ func TestJSONSuccess(t *testing.T) { require.NoError(t, err) // Gather should add 5 metrics - assert.Equal(t, uint64(5), acc.NMetrics()) + require.Equal(t, uint64(5), acc.NMetrics()) // Ensure fields / values are correct - Device 1 tags := map[string]string{"deviceId": "1", "section": "Section 1", "room": "Room 1", "name": "Device 1", "type": "com.fibaro.binarySwitch"} diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go index a5cacec21a03c..f8f7d773f719d 100644 --- a/plugins/inputs/file/file_test.go +++ b/plugins/inputs/file/file_test.go @@ -15,7 +15,6 @@ import ( "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -29,7 +28,7 @@ func TestRefreshFilePaths(t *testing.T) { err = r.refreshFilePaths() require.NoError(t, err) - assert.Equal(t, 2, len(r.filenames)) + require.Equal(t, 2, len(r.filenames)) } func TestFileTag(t *testing.T) { @@ -47,7 +46,7 @@ func TestFileTag(t *testing.T) { DataFormat: "json", } nParser, err := parsers.NewParser(&parserConfig) - assert.NoError(t, err) + require.NoError(t, err) r.parser = nParser err = r.Gather(&acc) @@ -55,8 +54,8 @@ func TestFileTag(t *testing.T) { for _, m := range acc.Metrics { for key, value := range m.Tags { - assert.Equal(t, r.FileTag, key) - assert.Equal(t, filepath.Base(r.Files[0]), value) + require.Equal(t, r.FileTag, key) + require.Equal(t, filepath.Base(r.Files[0]), value) } } } @@ -74,12 +73,12 @@ func TestJSONParserCompile(t *testing.T) { TagKeys: []string{"parent_ignored_child"}, } nParser, err := parsers.NewParser(&parserConfig) - assert.NoError(t, err) + require.NoError(t, err) r.parser = nParser - r.Gather(&acc) - assert.Equal(t, map[string]string{"parent_ignored_child": "hi"}, acc.Metrics[0].Tags) - assert.Equal(t, 5, len(acc.Metrics[0].Fields)) + require.NoError(t, r.Gather(&acc)) + require.Equal(t, map[string]string{"parent_ignored_child": "hi"}, acc.Metrics[0].Tags) + require.Equal(t, 5, len(acc.Metrics[0].Fields)) } func TestGrokParser(t *testing.T) { @@ -98,10 +97,10 @@ func TestGrokParser(t *testing.T) { nParser, err := parsers.NewParser(&parserConfig) r.parser = nParser - assert.NoError(t, err) + require.NoError(t, err) err = r.Gather(&acc) - assert.Equal(t, len(acc.Metrics), 2) + require.Equal(t, len(acc.Metrics), 2) } func TestCharacterEncoding(t *testing.T) { diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 35bb0f080c73a..74a3e2ec391c5 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -35,7 +35,7 @@ func TestNoFiltersOnChildDir(t *testing.T) { tags := map[string]string{"directory": getTestdataDir() + "/subdir"} acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) + require.NoError(t, acc.GatherError(fc.Gather)) require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(600))) } @@ -48,7 +48,7 @@ func TestNoRecursiveButSuperMeta(t *testing.T) { tags := map[string]string{"directory": getTestdataDir() + "/subdir"} acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) + require.NoError(t, acc.GatherError(fc.Gather)) require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(200))) @@ -77,7 +77,7 @@ func TestDoubleAndSimpleStar(t *testing.T) { tags := map[string]string{"directory": getTestdataDir() + "/subdir/nested2"} acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) + require.NoError(t, acc.GatherError(fc.Gather)) require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(400))) @@ -235,7 +235,7 @@ func getFakeFileSystem(basePath string) fakeFileSystem { func fileCountEquals(t *testing.T, fc FileCount, expectedCount int, expectedSize int) { tags := map[string]string{"directory": getTestdataDir()} acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) + require.NoError(t, acc.GatherError(fc.Gather)) require.True(t, acc.HasPoint("filecount", tags, "count", int64(expectedCount))) require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(expectedSize))) } diff --git a/plugins/inputs/filestat/filestat_test.go b/plugins/inputs/filestat/filestat_test.go index f0b843dcbc3b4..1c827f8dbe9ea 100644 --- a/plugins/inputs/filestat/filestat_test.go +++ b/plugins/inputs/filestat/filestat_test.go @@ -10,7 +10,6 @@ import ( "path/filepath" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" @@ -30,7 +29,7 @@ func TestGatherNoMd5(t *testing.T) { } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ "file": filepath.Join(testdataDir, "log1.log"), @@ -61,7 +60,7 @@ func TestGatherExplicitFiles(t *testing.T) { } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ "file": filepath.Join(testdataDir, "log1.log"), @@ -94,10 +93,10 @@ func TestNonExistentFile(t *testing.T) { require.NoError(t, acc.GatherError(fs.Gather)) acc.AssertContainsFields(t, "filestat", map[string]interface{}{"exists": int64(0)}) - assert.False(t, acc.HasField("filestat", "error")) - assert.False(t, acc.HasField("filestat", "md5_sum")) - assert.False(t, acc.HasField("filestat", "size_bytes")) - assert.False(t, acc.HasField("filestat", "modification_time")) + require.False(t, acc.HasField("filestat", "error")) + require.False(t, acc.HasField("filestat", "md5_sum")) + require.False(t, acc.HasField("filestat", "size_bytes")) + require.False(t, acc.HasField("filestat", "modification_time")) } func TestGatherGlob(t *testing.T) { @@ -109,7 +108,7 @@ func TestGatherGlob(t *testing.T) { } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ "file": filepath.Join(testdataDir, "log1.log"), @@ -135,7 +134,7 @@ func TestGatherSuperAsterisk(t *testing.T) { } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ "file": filepath.Join(testdataDir, "log1.log"), @@ -167,7 +166,7 @@ func TestModificationTime(t *testing.T) { } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ "file": filepath.Join(testdataDir, "log1.log"), @@ -185,7 +184,7 @@ func TestNoModificationTime(t *testing.T) { } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ "file": filepath.Join(testdataDir, "non_existent_file"), @@ -196,11 +195,11 @@ func TestNoModificationTime(t *testing.T) { func TestGetMd5(t *testing.T) { md5, err := getMd5(filepath.Join(testdataDir, "test.conf")) - assert.NoError(t, err) - assert.Equal(t, "5a7e9b77fa25e7bb411dbd17cf403c1f", md5) + require.NoError(t, err) + require.Equal(t, "5a7e9b77fa25e7bb411dbd17cf403c1f", md5) md5, err = getMd5("/tmp/foo/bar/fooooo") - assert.Error(t, err) + require.Error(t, err) } func getTestdataDir() string { diff --git a/plugins/inputs/fireboard/fireboard_test.go b/plugins/inputs/fireboard/fireboard_test.go index a5e93a4533e59..8fe1c21bd757d 100644 --- a/plugins/inputs/fireboard/fireboard_test.go +++ b/plugins/inputs/fireboard/fireboard_test.go @@ -16,7 +16,8 @@ func TestFireboard(t *testing.T) { // Create a test server with the const response JSON ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, response) + _, err := fmt.Fprintln(w, response) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/fluentd/fluentd_test.go b/plugins/inputs/fluentd/fluentd_test.go index 41166085a8876..61cd6576ec648 100644 --- a/plugins/inputs/fluentd/fluentd_test.go +++ b/plugins/inputs/fluentd/fluentd_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // sampleJSON from fluentd version '0.14.9' @@ -122,7 +122,8 @@ func Test_Gather(t *testing.T) { ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") - fmt.Fprintf(w, "%s", string(sampleJSON)) + _, err := fmt.Fprintf(w, "%s", string(sampleJSON)) + require.NoError(t, err) })) requestURL, err := url.Parse(fluentdTest.Endpoint) @@ -144,15 +145,15 @@ func Test_Gather(t *testing.T) { t.Errorf("acc.HasMeasurement: expected fluentd") } - assert.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"]) - assert.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"]) - assert.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"]) - assert.Equal(t, *expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"]) - - assert.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"]) - assert.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"]) - assert.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"]) - assert.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"]) - assert.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"]) - assert.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"]) + require.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"]) + require.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"]) + require.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"]) + require.Equal(t, *expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"]) + + require.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"]) + require.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"]) + require.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"]) + require.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"]) + require.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"]) + require.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"]) } diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 19207717a5582..34bea672d7925 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -114,8 +114,14 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { return err } - longPath, _ := c.handlePath(gnmiLongPath, nil, "") - shortPath, _ := c.handlePath(gnmiShortPath, nil, "") + longPath, _, err := c.handlePath(gnmiLongPath, nil, "") + if err != nil { + return fmt.Errorf("handling long-path failed: %v", err) + } + shortPath, _, err := c.handlePath(gnmiShortPath, nil, "") + if err != nil { + return fmt.Errorf("handling short-path failed: %v", err) + } name := subscription.Name // If the user didn't provide a measurement name, use last path element @@ -257,7 +263,10 @@ func (c *GNMI) handleSubscribeResponseUpdate(address string, response *gnmi.Subs prefixTags := make(map[string]string) if response.Update.Prefix != nil { - prefix, prefixAliasPath = c.handlePath(response.Update.Prefix, prefixTags, "") + var err error + if prefix, prefixAliasPath, err = c.handlePath(response.Update.Prefix, prefixTags, ""); err != nil { + c.Log.Errorf("handling path %q failed: %v", response.Update.Prefix, err) + } } prefixTags["source"], _, _ = net.SplitHostPort(address) prefixTags["path"] = prefix @@ -307,7 +316,9 @@ func (c *GNMI) handleSubscribeResponseUpdate(address string, response *gnmi.Subs } } - grouper.Add(name, tags, timestamp, key, v) + if err := grouper.Add(name, tags, timestamp, key, v); err != nil { + c.Log.Errorf("cannot add to grouper: %v", err) + } } lastAliasPath = aliasPath @@ -321,14 +332,17 @@ func (c *GNMI) handleSubscribeResponseUpdate(address string, response *gnmi.Subs // HandleTelemetryField and add it to a measurement func (c *GNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, prefix string) (string, map[string]interface{}) { - path, aliasPath := c.handlePath(update.Path, tags, prefix) + gpath, aliasPath, err := c.handlePath(update.Path, tags, prefix) + if err != nil { + c.Log.Errorf("handling path %q failed: %v", update.Path, err) + } var value interface{} var jsondata []byte // Make sure a value is actually set if update.Val == nil || update.Val.Value == nil { - c.Log.Infof("Discarded empty or legacy type value with path: %q", path) + c.Log.Infof("Discarded empty or legacy type value with path: %q", gpath) return aliasPath, nil } @@ -355,7 +369,7 @@ func (c *GNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, jsondata = val.JsonVal } - name := strings.Replace(path, "-", "_", -1) + name := strings.Replace(gpath, "-", "_", -1) fields := make(map[string]interface{}) if value != nil { fields[name] = value @@ -364,28 +378,38 @@ func (c *GNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, c.acc.AddError(fmt.Errorf("failed to parse JSON value: %v", err)) } else { flattener := jsonparser.JSONFlattener{Fields: fields} - flattener.FullFlattenJSON(name, value, true, true) + if err := flattener.FullFlattenJSON(name, value, true, true); err != nil { + c.acc.AddError(fmt.Errorf("failed to flatten JSON: %v", err)) + } } } return aliasPath, fields } // Parse path to path-buffer and tag-field -func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string) (string, string) { +func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string) (string, string, error) { var aliasPath string builder := bytes.NewBufferString(prefix) // Prefix with origin if len(path.Origin) > 0 { - builder.WriteString(path.Origin) - builder.WriteRune(':') + if _, err := builder.WriteString(path.Origin); err != nil { + return "", "", err + } + if _, err := builder.WriteRune(':'); err != nil { + return "", "", err + } } // Parse generic keys from prefix for _, elem := range path.Elem { if len(elem.Name) > 0 { - builder.WriteRune('/') - builder.WriteString(elem.Name) + if _, err := builder.WriteRune('/'); err != nil { + return "", "", err + } + if _, err := builder.WriteString(elem.Name); err != nil { + return "", "", err + } } name := builder.String() @@ -407,7 +431,7 @@ func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string } } - return builder.String(), aliasPath + return builder.String(), aliasPath, nil } //ParsePath from XPath-like string to gNMI path structure diff --git a/plugins/inputs/gnmi/gnmi_test.go b/plugins/inputs/gnmi/gnmi_test.go index 25840db46ef2f..cfc43e8246186 100644 --- a/plugins/inputs/gnmi/gnmi_test.go +++ b/plugins/inputs/gnmi/gnmi_test.go @@ -231,13 +231,18 @@ func TestNotification(t *testing.T) { server: &MockServer{ SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { notification := mockGNMINotification() - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}) + err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + if err != nil { + return err + } + err = server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}) + if err != nil { + return err + } notification.Prefix.Elem[0].Key["foo"] = "bar2" notification.Update[0].Path.Elem[1].Key["name"] = "str2" notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - return nil + return server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) }, }, expected: []telegraf.Metric{ @@ -348,8 +353,7 @@ func TestNotification(t *testing.T) { }, }, } - server.Send(response) - return nil + return server.Send(response) }, }, expected: []telegraf.Metric{ @@ -419,10 +423,9 @@ func TestSubscribeResponseError(t *testing.T) { var mc uint32 = 7 ml := &MockLogger{} plugin := &GNMI{Log: ml} - errorResponse := &gnmi.SubscribeResponse_Error{ - Error: &gnmi.Error{Message: me, Code: mc}} - plugin.handleSubscribeResponse( - "127.0.0.1:0", &gnmi.SubscribeResponse{Response: errorResponse}) + // TODO: FIX SA1019: gnmi.Error is deprecated: Do not use. + errorResponse := &gnmi.SubscribeResponse_Error{Error: &gnmi.Error{Message: me, Code: mc}} + plugin.handleSubscribeResponse("127.0.0.1:0", &gnmi.SubscribeResponse{Response: errorResponse}) require.NotEmpty(t, ml.lastFormat) require.Equal(t, ml.lastArgs, []interface{}{mc, me}) } @@ -442,8 +445,7 @@ func TestRedial(t *testing.T) { gnmiServer := &MockServer{ SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { notification := mockGNMINotification() - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - return nil + return server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) }, GRPCServer: grpcServer, } @@ -476,8 +478,7 @@ func TestRedial(t *testing.T) { notification.Prefix.Elem[0].Key["foo"] = "bar2" notification.Update[0].Path.Elem[1].Key["name"] = "str2" notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: false}} - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - return nil + return server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) }, GRPCServer: grpcServer, } diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index 0a360c351a644..c5c06e930c15c 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -13,7 +13,6 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -27,13 +26,15 @@ func (s statServer) serverSocket(l net.Listener) { } go func(c net.Conn) { + defer c.Close() + buf := make([]byte, 1024) n, _ := c.Read(buf) data := buf[:n] if string(data) == "show stat\n" { + //nolint:errcheck,revive // we return anyway c.Write([]byte(csvOutputSample)) - c.Close() } }(conn) } @@ -45,15 +46,18 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) { username, password, ok := r.BasicAuth() if !ok { w.WriteHeader(http.StatusNotFound) - fmt.Fprint(w, "Unauthorized") + _, err := fmt.Fprint(w, "Unauthorized") + require.NoError(t, err) return } if username == "user" && password == "password" { - fmt.Fprint(w, csvOutputSample) + _, err := fmt.Fprint(w, csvOutputSample) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) - fmt.Fprint(w, "Unauthorized") + _, err := fmt.Fprint(w, "Unauthorized") + require.NoError(t, err) } })) defer ts.Close() @@ -83,13 +87,14 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) { Servers: []string{ts.URL}, } - r.Gather(&acc) + require.NoError(t, r.Gather(&acc)) require.NotEmpty(t, acc.Errors) } func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, csvOutputSample) + _, err := fmt.Fprint(w, csvOutputSample) + require.NoError(t, err) })) defer ts.Close() @@ -99,8 +104,7 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) { var acc testutil.Accumulator - err := r.Gather(&acc) - require.NoError(t, err) + require.NoError(t, r.Gather(&acc)) tags := map[string]string{ "server": ts.Listener.Addr().String(), @@ -121,7 +125,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { _badmask := filepath.Join(os.TempDir(), "test-fail-haproxy*.sock") for i := 0; i < 5; i++ { - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) sockname := filepath.Join(os.TempDir(), fmt.Sprintf("test-haproxy%d.sock", randomNumber)) sock, err := net.Listen("unix", sockname) @@ -161,7 +165,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { // This mask should not match any socket r.Servers = []string{_badmask} - r.Gather(&acc) + require.NoError(t, r.Gather(&acc)) require.NotEmpty(t, acc.Errors) } @@ -174,12 +178,13 @@ func TestHaproxyDefaultGetFromLocalhost(t *testing.T) { err := r.Gather(&acc) require.Error(t, err) - assert.Contains(t, err.Error(), "127.0.0.1:1936/haproxy?stats/;csv") + require.Contains(t, err.Error(), "127.0.0.1:1936/haproxy?stats/;csv") } func TestHaproxyKeepFieldNames(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, csvOutputSample) + _, err := fmt.Fprint(w, csvOutputSample) + require.NoError(t, err) })) defer ts.Close() @@ -190,8 +195,7 @@ func TestHaproxyKeepFieldNames(t *testing.T) { var acc testutil.Accumulator - err := r.Gather(&acc) - require.NoError(t, err) + require.NoError(t, r.Gather(&acc)) tags := map[string]string{ "server": ts.Listener.Addr().String(), diff --git a/plugins/inputs/internal/internal_test.go b/plugins/inputs/internal/internal_test.go index 4cdba9099edf0..0b89a974a0a74 100644 --- a/plugins/inputs/internal/internal_test.go +++ b/plugins/inputs/internal/internal_test.go @@ -6,21 +6,21 @@ import ( "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestSelfPlugin(t *testing.T) { s := NewSelf() acc := &testutil.Accumulator{} - s.Gather(acc) - assert.True(t, acc.HasMeasurement("internal_memstats")) + require.NoError(t, s.Gather(acc)) + require.True(t, acc.HasMeasurement("internal_memstats")) // test that a registered stat is incremented stat := selfstat.Register("mytest", "test", map[string]string{"test": "foo"}) stat.Incr(1) stat.Incr(2) - s.Gather(acc) + require.NoError(t, s.Gather(acc)) acc.AssertContainsTaggedFields(t, "internal_mytest", map[string]interface{}{ "test": int64(3), @@ -34,7 +34,7 @@ func TestSelfPlugin(t *testing.T) { // test that a registered stat is set properly stat.Set(101) - s.Gather(acc) + require.NoError(t, s.Gather(acc)) acc.AssertContainsTaggedFields(t, "internal_mytest", map[string]interface{}{ "test": int64(101), @@ -51,7 +51,7 @@ func TestSelfPlugin(t *testing.T) { timing := selfstat.RegisterTiming("mytest", "test_ns", map[string]string{"test": "foo"}) timing.Incr(100) timing.Incr(200) - s.Gather(acc) + require.NoError(t, s.Gather(acc)) acc.AssertContainsTaggedFields(t, "internal_mytest", map[string]interface{}{ "test": int64(101), From 1bc87ccc3cae59182ef6c019c69ed7668ff53e03 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 22 Apr 2021 16:51:21 -0700 Subject: [PATCH 388/761] Move wiki information to docs (#9126) --- CONTRIBUTING.md | 19 +++ docs/developers/CODE_STYLE.md | 7 ++ docs/developers/DEPRECATION.md | 88 ++++++++++++++ docs/developers/LOGGING.md | 75 ++++++++++++ docs/developers/METRIC_FORMAT_CHANGES.md | 42 +++++++ docs/developers/PACKAGING.md | 44 +++++++ docs/developers/PROFILING.md | 55 +++++++++ docs/developers/REVIEWS.md | 147 +++++++++++++++++++++++ docs/developers/SAMPLE_CONFIG.md | 76 ++++++++++++ docs/maintainers/CHANGELOG.md | 43 +++++++ docs/maintainers/LABELS.md | 34 ++++++ docs/maintainers/PULL_REQUESTS.md | 67 +++++++++++ docs/maintainers/RELEASES.md | 97 +++++++++++++++ 13 files changed, 794 insertions(+) create mode 100644 docs/developers/CODE_STYLE.md create mode 100644 docs/developers/DEPRECATION.md create mode 100644 docs/developers/LOGGING.md create mode 100644 docs/developers/METRIC_FORMAT_CHANGES.md create mode 100644 docs/developers/PACKAGING.md create mode 100644 docs/developers/PROFILING.md create mode 100644 docs/developers/REVIEWS.md create mode 100644 docs/developers/SAMPLE_CONFIG.md create mode 100644 docs/maintainers/CHANGELOG.md create mode 100644 docs/maintainers/LABELS.md create mode 100644 docs/maintainers/PULL_REQUESTS.md create mode 100644 docs/maintainers/RELEASES.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a5ff6b2977560..2ada24a762335 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -74,6 +74,17 @@ make test-all Use `make docker-kill` to stop the containers. +### For more developer resources +- [Code Style][codestyle] +- [Deprecation][deprecation] +- [Logging][logging] +- [Metric Format Changes][metricformat] +- [Packaging][packaging] +- [Logging][logging] +- [Packaging][packaging] +- [Profiling][profiling] +- [Reviews][reviews] +- [Sample Config][sample config] [cla]: https://www.influxdata.com/legal/cla/ [new issue]: https://github.com/influxdata/telegraf/issues/new/choose @@ -82,3 +93,11 @@ Use `make docker-kill` to stop the containers. [processors]: /docs/PROCESSORS.md [aggregators]: /docs/AGGREGATORS.md [outputs]: /docs/OUTPUTS.md +[codestyle]: /docs/developers/CODE_STYLE.md +[deprecation]: /docs/developers/DEPRECATION.md +[logging]: /docs/developers/LOGGING.md +[metricformat]: /docs/developers/METRIC_FORMAT_CHANGES.md +[packaging]: /docs/developers/PACKAGING.md +[profiling]: /docs/developers/PROFILING.md +[reviews]: /docs/developers/REVIEWS.md +[sample config]: /docs/developers/SAMPLE_CONFIG.md diff --git a/docs/developers/CODE_STYLE.md b/docs/developers/CODE_STYLE.md new file mode 100644 index 0000000000000..1bbb2b14d84c4 --- /dev/null +++ b/docs/developers/CODE_STYLE.md @@ -0,0 +1,7 @@ +# Code Style +Code is required to be formatted using `gofmt`, this covers most code style +requirements. It is also highly recommended to use `goimports` to +automatically order imports. + +Please try to keep lines length under 80 characters, the exact number of +characters is not strict but it generally helps with readability. diff --git a/docs/developers/DEPRECATION.md b/docs/developers/DEPRECATION.md new file mode 100644 index 0000000000000..a3da79a5ac8e8 --- /dev/null +++ b/docs/developers/DEPRECATION.md @@ -0,0 +1,88 @@ +# Deprecation +Deprecation is the primary tool for making changes in Telegraf. A deprecation +indicates that the community should move away from using a feature, and +documents that the feature will be removed in the next major update (2.0). + +Key to deprecation is that the feature remains in Telegraf and the behavior is +not changed. + +We do not have a strict definition of a breaking change. All code changes +change behavior, the decision to deprecate or make the change immediately is +decided based on the impact. + +## Deprecate plugins + +Add a comment to the plugin's sample config, include the deprecation version +and any replacement. + +```toml +[[inputs.logparser]] + ## DEPRECATED: The 'logparser' plugin is deprecated in 1.10. Please use the + ## 'tail' plugin with the grok data_format as a replacement. +``` + +Add the deprecation warning to the plugin's README: + +```markdown +# Logparser Input Plugin + +### **Deprecated in 1.10**: Please use the [tail][] plugin along with the +`grok` [data format][]. + +[tail]: /plugins/inputs/tail/README.md +[data formats]: /docs/DATA_FORMATS_INPUT.md +``` + +Log a warning message if the plugin is used. If the plugin is a +ServiceInput, place this in the `Start()` function, for regular Input's log it only the first +time the `Gather` function is called. +```go +log.Println("W! [inputs.logparser] The logparser plugin is deprecated in 1.10. " + + "Please use the tail plugin with the grok data_format as a replacement.") +``` +## Deprecate options + +Mark the option as deprecated in the sample config, include the deprecation +version and any replacement. +```toml + ## Broker URL + ## deprecated in 1.7; use the brokers option + # url = "amqp://localhost:5672/influxdb" +``` + +In the plugins configuration struct, mention that the option is deprecated: + +```go +type AMQPConsumer struct { + URL string `toml:"url"` // deprecated in 1.7; use brokers +} +``` + +Finally, use the plugin's `Init() error` method to display a log message at warn level. The message should include the offending configuration option and any suggested replacement: +```go +func (a *AMQPConsumer) Init() error { + if p.URL != "" { + p.Log.Warnf("Use of deprecated configuration: 'url'; please use the 'brokers' option") + } + return nil +} +``` + +## Deprecate metrics + +In the README document the metric as deprecated. If there is a replacement field, +tag, or measurement then mention it. + +```markdown +- system + - fields: + - uptime_format (string, deprecated in 1.10: use `uptime` field) +``` + +Add filtering to the sample config, leave it commented out. + +```toml +[[inputs.system]] + ## Uncomment to remove deprecated metrics. + # fielddrop = ["uptime_format"] +``` diff --git a/docs/developers/LOGGING.md b/docs/developers/LOGGING.md new file mode 100644 index 0000000000000..60de15699a6e8 --- /dev/null +++ b/docs/developers/LOGGING.md @@ -0,0 +1,75 @@ +# Logging + +## Plugin Logging + +You can access the Logger for a plugin by defining a field named `Log`. This +`Logger` is configured internally with the plugin name and alias so they do not +need to be specified for each log call. + +```go +type MyPlugin struct { + Log telegraf.Logger `toml:"-"` +} +``` + +You can then use this Logger in the plugin. Use the method corresponding to +the log level of the message. +```go +p.Log.Errorf("Unable to write to file: %v", err) +``` + +## Agent Logging + +In other sections of the code it is required to add the log level and module +manually: +```go +log.Printf("E! [agent] Error writing to %s: %v", output.LogName(), err) +``` + +## When to Log + +Log a message if an error occurs but the plugin can continue working. For +example if the plugin handles several servers and only one of them has a fatal +error, it can be logged as an error. + +Use logging judiciously for debug purposes. Since Telegraf does not currently +support setting the log level on a per module basis, it is especially important +to not over do it with debug logging. + +If the plugin is listening on a socket, log a message with the address of the socket: +```go +p.log.InfoF("Listening on %s://%s", protocol, l.Addr()) +``` + +## When not to Log + +Don't use logging to emit performance data or other meta data about the plugin, +instead use the `internal` plugin and the `selfstats` package. + +Don't log fatal errors in the plugin that require the plugin to return, instead +return them from the function and Telegraf will handle the logging. + +Don't log for static configuration errors, check for them in a plugin `Init()` +function and return an error there. + +Don't log a warning every time a plugin is called for situations that are +normal on some systems. + +## Log Level + +The log level is indicated by a single character at the start of the log +message. Adding this prefix is not required when using the Plugin Logger. +- `D!` Debug +- `I!` Info +- `W!` Warning +- `E!` Error + +## Style + +Log messages should be capitalized and be a single line. + +If it includes data received from another system or process, such as the text +of an error message, the text should be quoted with `%q`. + +Use the `%v` format for the Go error type instead of `%s` to ensure a nil error +is printed. diff --git a/docs/developers/METRIC_FORMAT_CHANGES.md b/docs/developers/METRIC_FORMAT_CHANGES.md new file mode 100644 index 0000000000000..32bfe0a2db5a7 --- /dev/null +++ b/docs/developers/METRIC_FORMAT_CHANGES.md @@ -0,0 +1,42 @@ +# Metric Format Changes + +When making changes to an existing input plugin, care must be taken not to change the metric format in ways that will cause trouble for existing users. This document helps developers understand how to make metric format changes safely. + +## Changes can cause incompatibilities +If the metric format changes, data collected in the new format can be incompatible with data in the old format. Database queries designed around the old format may not work with the new format. This can cause application failures. + +Some metric format changes don't cause incompatibilities. Also, some unsafe changes are necessary. How do you know what changes are safe and what to do if your change isn't safe? + +## Guidelines +The main guideline is just to keep compatibility in mind when making changes. Often developers are focused on making a change that fixes their particular problem and they forget that many people use the existing code and will upgrade. When you're coding, keep existing users and applications in mind. + +### Renaming, removing, reusing +Database queries refer to the metric and its tags and fields by name. Any Telegraf code change that changes those names has the potential to break an existing query. Similarly, removing tags or fields can break queries. + +Changing the meaning of an existing tag value or field value or reusing an existing one in a new way isn't safe. Although queries that use these tags/field may not break, they will not work as they did before the change. + +Adding a field doesn't break existing queries. Queries that select all fields and/or tags (like "select * from") will return an extra series but this is often useful. + +### Performance and storage +Time series databases can store large amounts of data but many of them don't perform well on high cardinality data. If a metric format change includes a new tag that holds high cardinality data, database performance could be reduced enough to cause existing applications not to work as they previously did. Metric format changes that dramatically increase the number of tags or fields of a metric can increase database storage requirements unexpectedly. Both of these types of changes are unsafe. + +### Make unsafe changes opt-in +If your change has the potential to seriously affect existing users, the change must be opt-in. To do this, add a plugin configuration setting that lets the user select the metric format. Make the setting's default value select the old metric format. When new users add the plugin they can choose the new format and get its benefits. When existing users upgrade, their config files won't have the new setting so the default will ensure that there is no change. + +When adding a setting, avoid using a boolean and consider instead a string or int for future flexibility. A boolean can only handle two formats but a string can handle many. For example, compare use_new_format=true and features=["enable_foo_fields"]; the latter is much easier to extend and still very descriptive. + +If you want to encourage existing users to use the new format you can log a warning once on startup when the old format is selected. The warning should tell users in a gentle way that they can upgrade to a better metric format. If it doesn't make sense to maintain multiple metric formats forever, you can change the default on a major release or even remove the old format completely. See [[Deprecation]] for details. + +### Utility +Changes should be useful to many or most users. A change that is only useful for a small number of users may not accepted, even if it's off by default. + +## Summary table + +| | delete | rename | add | +| ------- | ------ | ------ | --- | +| metric | unsafe | unsafe | safe | +| tag | unsafe | unsafe | be careful with cardinality | +| field | unsafe | unsafe | ok as long as it's useful for existing users and is worth the added space | + +## References +InfluxDB Documentation: "Schema and data layout" diff --git a/docs/developers/PACKAGING.md b/docs/developers/PACKAGING.md new file mode 100644 index 0000000000000..836fd01973d3f --- /dev/null +++ b/docs/developers/PACKAGING.md @@ -0,0 +1,44 @@ +# Packaging + +## Package using Docker + +This packaging method uses the CI images, and is very similar to how the +official packages are created on release. This is the recommended method for +building the rpm/deb as it is less system dependent. + +Pull the CI images from quay, the version corresponds to the version of Go +that is used to build the binary: +``` +docker pull quay.io/influxdb/telegraf-ci:1.9.7 +``` + +Start a shell in the container: +``` +docker run -ti quay.io/influxdb/telegraf-ci:1.9.7 /bin/bash +``` + +From within the container: +``` +go get -d github.com/influxdata/telegraf +cd /go/src/github.com/influxdata/telegraf + +# Use tag of Telegraf version you would like to build +git checkout release-1.10 +git reset --hard 1.10.2 +make deps + +# This builds _all_ platforms and architectures; will take a long time +./scripts/build.py --release --package +``` + +If you would like to only build a subset of the packages run this: + +``` +# Use the platform and arch arguments to skip unwanted packages: +./scripts/build.py --release --package --platform=linux --arch=amd64 +``` + +From the host system, copy the build artifacts out of the container: +``` +docker cp romantic_ptolemy:/go/src/github.com/influxdata/telegraf/build/telegraf-1.10.2-1.x86_64.rpm . +``` diff --git a/docs/developers/PROFILING.md b/docs/developers/PROFILING.md new file mode 100644 index 0000000000000..81cdf1980304d --- /dev/null +++ b/docs/developers/PROFILING.md @@ -0,0 +1,55 @@ +# Profiling +This article describes how to collect performance traces and memory profiles +from Telegraf. If you are submitting this for an issue, please include the +version.txt generated below. + +Use the `--pprof-addr` option to enable the profiler, the easiest way to do +this may be to add this line to `/etc/default/telegraf`: +``` +TELEGRAF_OPTS="--pprof-addr localhost:6060" +``` + +Restart Telegraf to activate the profile address. + +#### Trace Profile +Collect a trace during the time where the performance issue is occurring. This +example collects a 10 second trace and runs for 10 seconds: +``` +curl 'http://localhost:6060/debug/pprof/trace?seconds=10' > trace.bin +telegraf --version > version.txt +go env GOOS GOARCH >> version.txt +``` + +The `trace.bin` and `version.txt` files can be sent in for analysis or, if desired, you can +analyze the trace with: +``` +go tool trace trace.bin +``` + +#### Memory Profile +Collect a heap memory profile: +``` +curl 'http://localhost:6060/debug/pprof/heap' > mem.prof +telegraf --version > version.txt +go env GOOS GOARCH >> version.txt +``` + +Analyze: +``` +$ go tool pprof mem.prof +(pprof) top5 +``` + +#### CPU Profile +Collect a 30s CPU profile: +``` +curl 'http://localhost:6060/debug/pprof/profile' > cpu.prof +telegraf --version > version.txt +go env GOOS GOARCH >> version.txt +``` + +Analyze: +``` +go tool pprof cpu.prof +(pprof) top5 +``` diff --git a/docs/developers/REVIEWS.md b/docs/developers/REVIEWS.md new file mode 100644 index 0000000000000..d97fe16cdc772 --- /dev/null +++ b/docs/developers/REVIEWS.md @@ -0,0 +1,147 @@ +# Reviews + +Expect several rounds of back and forth on reviews, non-trivial changes are +rarely accepted on the first pass. + +While review cannot be exhaustively documented, there are several things that +should always be double checked. + +All pull requests should follow the style and best practices in the +[CONTRIBUTING.md](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md) +document. + +## Reviewing Plugin Code + +- Avoid variables scoped to the package. Everything should be scoped to the plugin struct, since multiple instances of the same plugin are allowed and package-level variables will cause race conditions. +- SampleConfig must match the readme, but not include the plugin name. +- structs should include toml tags for fields that are expected to be editable from the config. eg `toml:"command"` (snake_case) +- plugins that want to log should declare the Telegraf logger, not use the log package. eg: +```Go + Log telegraf.Logger `toml:"-"` +``` +(in tests, you can do `myPlugin.Log = testutil.Logger{}`) +- Initialization and config checking should be done on the `Init() error` function, not in the Connect, Gather, or Start functions. +- plugins should avoid synchronization code if they are not starting goroutines. Plugin functions are never called in parallel. +- avoid goroutines when you don't need them and removing them would simplify the code +- errors should almost always be checked. +- avoid boolean fields when a string or enumerated type would be better for future extension. Lots of boolean fields also make the code difficult to maintain. +- use config.Duration instead of internal.Duration +- compose tls.ClientConfig as opposed to specifying all the TLS fields manually +- http.Client should be declared once on `Init() error` and reused, (or better yet, on the package if there's no client-specific configuration). http.Client has built-in concurrency protection and reuses connections transparently when possible. +- avoid doing network calls in loops where possible, as this has a large performance cost. This isn't always possible to avoid. +- when processing batches of records with multiple network requests (some outputs that need to partition writes do this), return an error when you want the whole batch to be retried, log the error when you want the batch to continue without the record +- consider using the StreamingProcessor interface instead of the (legacy) Processor interface +- avoid network calls in processors when at all possible. If it's necessary, it's possible, but complicated (see processor.reversedns). +- avoid dependencies when: + - they require cgo + - they pull in massive projects instead of small libraries + - they could be replaced by a simple http call + - they seem unnecessary, superfluous, or gratuitous +- consider adding build tags if plugins have OS-specific considerations +- use the right logger log levels so that Telegraf is normally quiet eg `plugin.Log.Debugf()` only shows up when running Telegraf with `--debug` +- consistent field types: dynamically setting the type of a field should be strongly avoided as it causes problems that are difficult to solve later, made worse by having to worry about backwards compatibility in future changes. For example, if an numeric value comes from a string field and it is not clear if the field can sometimes be a float, the author should pick either a float or an int, and parse that field consistently every time. Better to sometimes truncate a float, or to always store ints as floats, rather than changing the field type, which causes downstream problems with output databases. +- backwards compatibility: We work hard not to break existing configurations during new changes. Upgrading Telegraf should be a seamless transition. Possible tools to make this transition smooth are: + - enumerable type fields that allow you to customize behavior (avoid boolean feature flags) + - version fields that can be used to opt in to newer changed behavior without breaking old (see inputs.mysql for example) + - a new version of the plugin if it has changed significantly (eg outputs.influxdb and outputs.influxdb_v2) + - Logger and README deprecation warnings + - changing the default value of a field can be okay, but will affect users who have not specified the field and should be approached cautiously. + - The general rule here is "don't surprise me": users should not be caught off-guard by unexpected or breaking changes. + + +## Testing + +Sufficient unit tests must be created. New plugins must always contain +some unit tests. Bug fixes and enhancements should include new tests, but +they can be allowed if the reviewer thinks it would not be worth the effort. + +[Table Driven Tests](https://github.com/golang/go/wiki/TableDrivenTests) are +encouraged to reduce boiler plate in unit tests. + +The [stretchr/testify](https://github.com/stretchr/testify) library should be +used for assertions within the tests when possible, with preference towards +github.com/stretchr/testify/require. + +Primarily use the require package to avoid cascading errors: +```go +assert.Equal(t, lhs, rhs) # avoid +require.Equal(t, lhs, rhs) # good +``` + +## Configuration + +The config file is the primary interface and should be carefully scrutinized. + +Ensure the [[SampleConfig]] and +[README](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md) +match with the current standards. + +READMEs should: +- be spaces, not tabs +- be indented consistently, matching other READMEs +- have two `#` for comments +- have one `#` for defaults, which should always match the default value of the plugin +- include all appropriate types as a list for enumerable field types +- include a useful example, avoiding "example", "test", etc. +- include tips for any common problems +- include example output from the plugin, if input/processor/aggregator/parser/serializer + +## Metric Schema + +Telegraf metrics are heavily based on InfluxDB points, but have some +extensions to support other outputs and metadata. + +New metrics must follow the recommended +[schema design](https://docs.influxdata.com/influxdb/latest/concepts/schema_and_data_layout/). +Each metric should be evaluated for _series cardinality_, proper use of tags vs +fields, and should use existing patterns for encoding metrics. + +Metrics use `snake_case` naming style. + +### Enumerations + +Generally enumeration data should be encoded as a tag. In some cases it may +be desirable to also include the data as an integer field: +``` +net_response,result=success result_code=0i +``` + +### Histograms + +Use tags for each range with the `le` tag, and `+Inf` for the values out of +range. This format is inspired by the Prometheus project: +``` +cpu,le=0.0 usage_idle_bucket=0i 1486998330000000000 +cpu,le=50.0 usage_idle_bucket=2i 1486998330000000000 +cpu,le=100.0 usage_idle_bucket=2i 1486998330000000000 +cpu,le=+Inf usage_idle_bucket=2i 1486998330000000000 +``` + +### Lists + +Lists are tricky, but the general technique is to encode using a tag, creating +one series be item in the list. + +### Counters + +Counters retrieved from other projects often are in one of two styles, +monotonically increasing without reset and reset on each interval. No attempt +should be made to switch between these two styles but if given the option it +is preferred to use the non-reseting variant. This style is more resilient in +the face of downtime and does not contain a fixed time element. + +## Go Best Practices + +In general code should follow best practice describe in [Code Review +Comments](https://github.com/golang/go/wiki/CodeReviewComments). + +### Networking + +All network operations should have appropriate timeouts. The ability to +cancel the option, preferably using a context, is desirable but not always +worth the implementation complexity. + +### Channels + +Channels should be used in judiciously as they often complicate the design and +can easily be used improperly. Only use them when they are needed. diff --git a/docs/developers/SAMPLE_CONFIG.md b/docs/developers/SAMPLE_CONFIG.md new file mode 100644 index 0000000000000..f6202145d27b6 --- /dev/null +++ b/docs/developers/SAMPLE_CONFIG.md @@ -0,0 +1,76 @@ +# Sample Configuration + +The sample config file is generated from a results of the `SampleConfig()` and +`Description()` functions of the plugins. + +You can generate a full sample +config: +``` +telegraf config +``` + +You can also generate the config for a particular plugin using the `-usage` +option: +``` +telegraf --usage influxdb +``` + +## Style + +In the config file we use 2-space indention. Since the config is +[TOML](https://github.com/toml-lang/toml) the indention has no meaning. + +Documentation is double commented, full sentences, and ends with a period. +```toml + ## This text describes what an the exchange_type option does. + # exchange_type = "topic" +``` + +Try to give every parameter a default value whenever possible. If an +parameter does not have a default or must frequently be changed then have it +uncommented. +```toml + ## Brokers are the AMQP brokers to connect to. + brokers = ["amqp://localhost:5672"] +``` + + +Options where the default value is usually sufficient are normally commented +out. The commented out value is the default. +```toml + ## What an exchange type is. + # exchange_type = "topic" +``` + +If you want to show an example of a possible setting filled out that is +different from the default, show both: +```toml + ## Static routing key. Used when no routing_tag is set or as a fallback + ## when the tag specified in routing tag is not found. + ## example: routing_key = "telegraf" + # routing_key = "" +``` + +Unless parameters are closely related, add a space between them. Usually +parameters is closely related have a single description. +```toml + ## If true, queue will be declared as an exclusive queue. + # queue_exclusive = false + + ## If true, queue will be declared as an auto deleted queue. + # queue_auto_delete = false + + ## Authentication credentials for the PLAIN auth_method. + # username = "" + # password = "" +``` + +An parameters should usually be describable in a few sentences. If it takes +much more than this, try to provide a shorter explanation and provide a more +complex description in the Configuration section of the plugins +[README](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md) + +Boolean parameters should be used judiciously. You should try to think of +something better since they don't scale well, things are often not truly +boolean, and frequently end up with implicit dependencies: this option does +something if this and this are also set. diff --git a/docs/maintainers/CHANGELOG.md b/docs/maintainers/CHANGELOG.md new file mode 100644 index 0000000000000..8935ad70ca74e --- /dev/null +++ b/docs/maintainers/CHANGELOG.md @@ -0,0 +1,43 @@ +# Changelog + +The changelog contains the list of changes by version in addition to release +notes. The file is updated immediately after adding a change that impacts +users. Changes that don't effect the functionality of Telegraf, such as +refactoring code, are not included. + +The changelog entries are added by a maintainer after merging a pull request. +We experimented with requiring the pull request contributor to add the entry, +which had a nice side-effect of reducing the number of changelog only commits +in the log history, however this had several drawbacks: + +- The entry often needed reworded. +- Entries frequently caused merge conflicts. +- Required contributor to know which version a change was accepted into. +- Merge conflicts made it more time consuming to backport changes. + +Changes are added only to the first version a change is added in. For +example, a change backported to 1.7.2 would only appear under 1.7.2 and not in +1.8.0. This may become confusing if we begin supporting more than one +previous version but works well for now. + +## Updating + +If the change resulted in deprecation, mention the deprecation in the Release +Notes section of the version. In general all changes that require or +recommend the user to perform an action when upgrading should be mentioned in +the release notes. + +If a new plugin has been added, include it in a section based on the type of +the plugin. + +All user facing changes, including those already mentioned in the release +notes or new plugin sections, should be added to either the Features or +Bugfixes section. + +Features should generally link to the pull request since this describes the +actual implementation. Bug fixes should link to the issue instead of the pull +request since this describes the problem, if a bug has been fixed but does not +have an issue then it is okay to link to the pull request. + +It is usually okay to just use the shortlog commit message, but if needed +it can differ or be further clarified in the changelog. diff --git a/docs/maintainers/LABELS.md b/docs/maintainers/LABELS.md new file mode 100644 index 0000000000000..72840394a94bb --- /dev/null +++ b/docs/maintainers/LABELS.md @@ -0,0 +1,34 @@ +# Labels + +This page describes the meaning of the various +[labels](https://github.com/influxdata/telegraf/labels) we use on the Github +issue tracker. + +## Categories + +New issues are usually labeled one of `feature request`, `bug`, or `question`. +If you are unsure what label to apply you can use the `need more info` label +and if there is another issue you can add the duplicate label and close the +new issue. + +New pull requests are usually labeled one of `enhancement`, `bugfix` or `new +plugin`. + +## Additional Labels + +Apply any of the `area/*` labels that match. If an area doesn't exist, new +ones can be added but it is not a goal to have an area for all issues. + +If the issue only applies to one platform, you can use a `platform/*` label. +These are only applied to single platform issues which are not on Linux. + +The `breaking change` label can be added to issues and pull requests that +would result in a breaking change. + +Apply `performance` to issues and pull requests that address performance +issues. + +For bugs you may want to add `panic`, `regression`, or `upstream` to provide +further detail. + +Labels starting with `pm` or `vert` are not applied by maintainers. diff --git a/docs/maintainers/PULL_REQUESTS.md b/docs/maintainers/PULL_REQUESTS.md new file mode 100644 index 0000000000000..c41e4dd138788 --- /dev/null +++ b/docs/maintainers/PULL_REQUESTS.md @@ -0,0 +1,67 @@ +# Pull Requests + +## Before Review + +Ensure that the CLA is signed. The only exemption would be non-copyrightable +changes such as fixing a typo. + +Check that all tests are passing. Due to intermittent errors in the CI tests +it may be required to check the cause of test failures and restart failed +tests and/or create new issues to fix intermittent test failures. + +Ensure that PR is opened against the master branch as all changes are merged +to master initially. It is possible to change the branch a pull request is +opened against but it often results in many conflicts, change it before +reviewing and then if needed ask the contributor to rebase. + +Ensure there are no merge conflicts. If there are conflicts, ask the +contributor to merge or rebase. + +## Review + +[Review the pull request](Review). + +## Merge + +Determine what release the change will be applied to. New features should +be added only to master, and will be released in the next minor version (1.x). +Bug fixes can be backported to the current release branch to go out with the +next patch release (1.7.x) unless the bug is too risky to backport or there is +an easy workaround. Set the correct milestone on the pull request and any +associated issue. + +All pull requests are merged using the "Squash and Merge" strategy on Github. +This method is used because many pull requests do not have a clean change +history and this method allows us to normalize commit messages as well as +simplifies backporting. + +After selecting "Squash and Merge" you may need to rewrite the commit message. +Usually the body of the commit messages should be cleared as well, unless it +is well written and applies to the entire changeset. Use imperative present +tense for the first line of the message: instead of "I added tests for" or +"Adding tests for," use "Add tests for.". The default merge commit messages +include the PR number at the end of the commit message, keep this in the final +message. If applicable mention the plugin in the message. + +**Example Enhancement:** + +> Add user tag to procstat input (#4386) + +**Example Bug Fix:** + +> Fix output format of printer processor (#4417) + +## After Merge + +[Update the Changelog](Changelog). + +If required, backport the patch and the changelog update to the current +release branch. Usually this can be done by cherry picking the commits: +``` +git cherry-pick -x aaaaaaaa bbbbbbbb +``` + +Backporting changes to the changelog often pulls in unwanted changes. After +cherry picking commits, double check that the only the expected lines are +modified and if needed clean up the changelog and amend the change. Push the +new master and release branch to Github. diff --git a/docs/maintainers/RELEASES.md b/docs/maintainers/RELEASES.md new file mode 100644 index 0000000000000..3c05cdf968715 --- /dev/null +++ b/docs/maintainers/RELEASES.md @@ -0,0 +1,97 @@ +# Releases + +## Release Branch + +On master, update `etc/telegraf.conf` and commit: +```sh +./telegraf config > etc/telegraf.conf +``` + +Create the new release branch: +```sh +git checkout -b release-1.15 +``` + +Push the changes: +```sh +git push origin release-1.15 master +``` + +Update next version strings on master: +```sh +git checkout master +echo 1.16.0 > build_version.txt +``` + +## Release Candidate + +Release candidates are created only for new minor releases (ex: 1.15.0). Tags +are created but some of the other tasks, such as adding a changelog entry are +skipped. Packages are added to the github release page and posted to +community but are not posted to package repos or docker hub. +```sh +git checkout release-1.15 +git commit --allow-empty -m "Telegraf 1.15.0-rc1" +git tag -s v1.15.0-rc1 -m "Telegraf 1.15.0-rc1" +git push origin release-1.15 v1.15.0-rc1 +``` + +## Release + +On master, set the release date in the changelog and cherry-pick the change +back: +```sh +git checkout master +vi CHANGELOG.md +git commit -m "Set 1.8.0 release date" +git checkout release-1.8 +git cherry-pick -x +``` + +Double check that the changelog was applied as desired, or fix it up and +amend the change before pushing. + +Tag the release: +```sh +git checkout release-1.8 +# This just improves the `git show 1.8.0` output +git commit --allow-empty -m "Telegraf 1.8.0" +git tag -s v1.8.0 -m "Telegraf 1.8.0" +``` + +Check that the version was set correctly, the tag can always be altered if a +mistake is made but only before you push it to Github: +```sh +make +./telegraf --version +Telegraf v1.8.0 (git: release-1.8 aaaaaaaa) +``` + +When you push a branch with a tag to Github, CircleCI will be triggered to +build the packages. +```sh +git push origin master release-1.8 v1.8.0 +``` + +Set the release notes on Github. + +Update webpage download links. + +Update apt and yum repositories hosted at repos.influxdata.com. + +Update the package signatures on S3, these are used primarily by the docker images. + +Update docker image [influxdata/influxdata-docker](https://github.com/influxdata/influxdata-docker): +```sh +cd influxdata-docker +git co master +git pull +git co -b telegraf-1.8.0 +telegraf/1.8/Dockerfile +telegraf/1.8/alpine/Dockerfile +git commit -am "telegraf 1.8.0" +``` + +Official company post to RSS/community. + +Update documentation on docs.influxdata.com From 8bb388584d333d78ea0181b23cf6045f62237dc1 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Fri, 23 Apr 2021 09:37:27 -0400 Subject: [PATCH 389/761] Add OAuth2 to HTTP input (#9138) * add oauth2 to http input * linter fixes * add http config to common plugin * address linter changes * Update README.md * add log for user if fields are missing * add correct logger * alter output plugin as well * fix formatting * add oauth2 separate package * fix package naming * remove unnecessary logger --- plugins/common/http/config.go | 54 ++++++++++++++++++++ plugins/common/oauth/config.go | 32 ++++++++++++ plugins/inputs/http/README.md | 6 +++ plugins/inputs/http/http.go | 39 +++++---------- plugins/inputs/http/http_test.go | 82 +++++++++++++++++++++++++++++++ plugins/outputs/http/http.go | 51 ++----------------- plugins/outputs/http/http_test.go | 16 ++++-- 7 files changed, 203 insertions(+), 77 deletions(-) create mode 100644 plugins/common/http/config.go create mode 100644 plugins/common/oauth/config.go diff --git a/plugins/common/http/config.go b/plugins/common/http/config.go new file mode 100644 index 0000000000000..b61a346be7868 --- /dev/null +++ b/plugins/common/http/config.go @@ -0,0 +1,54 @@ +package httpconfig + +import ( + "context" + "net/http" + "time" + + "github.com/influxdata/telegraf/config" + oauthConfig "github.com/influxdata/telegraf/plugins/common/oauth" + "github.com/influxdata/telegraf/plugins/common/proxy" + "github.com/influxdata/telegraf/plugins/common/tls" +) + +// Common HTTP client struct. +type HTTPClientConfig struct { + Timeout config.Duration `toml:"timeout"` + IdleConnTimeout config.Duration `toml:"idle_conn_timeout"` + + proxy.HTTPProxy + tls.ClientConfig + oauthConfig.OAuth2Config +} + +func (h *HTTPClientConfig) CreateClient(ctx context.Context) (*http.Client, error) { + tlsCfg, err := h.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + prox, err := h.HTTPProxy.Proxy() + if err != nil { + return nil, err + } + + transport := &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: prox, + IdleConnTimeout: time.Duration(h.IdleConnTimeout), + } + + timeout := h.Timeout + if timeout == 0 { + timeout = config.Duration(time.Second * 5) + } + + client := &http.Client{ + Transport: transport, + Timeout: time.Duration(timeout), + } + + client = h.OAuth2Config.CreateOauth2Client(ctx, client) + + return client, nil +} diff --git a/plugins/common/oauth/config.go b/plugins/common/oauth/config.go new file mode 100644 index 0000000000000..aa42a7a65569a --- /dev/null +++ b/plugins/common/oauth/config.go @@ -0,0 +1,32 @@ +package oauth + +import ( + "context" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" +) + +type OAuth2Config struct { + // OAuth2 Credentials + ClientID string `toml:"client_id"` + ClientSecret string `toml:"client_secret"` + TokenURL string `toml:"token_url"` + Scopes []string `toml:"scopes"` +} + +func (o *OAuth2Config) CreateOauth2Client(ctx context.Context, client *http.Client) *http.Client { + if o.ClientID != "" && o.ClientSecret != "" && o.TokenURL != "" { + oauthConfig := clientcredentials.Config{ + ClientID: o.ClientID, + ClientSecret: o.ClientSecret, + TokenURL: o.TokenURL, + Scopes: o.Scopes, + } + ctx = context.WithValue(ctx, oauth2.HTTPClient, client) + client = oauthConfig.Client(ctx) + } + + return client +} diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md index a9c554cadbfb4..4b799043b5edc 100644 --- a/plugins/inputs/http/README.md +++ b/plugins/inputs/http/README.md @@ -34,6 +34,12 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The # username = "username" # password = "pa$$word" + ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. + # client_id = "clientid" + # client_secret = "secret" + # token_url = "https://indentityprovider/oauth2/v1/token" + # scopes = ["urn:opc:idm:__myscopes__"] + ## HTTP Proxy support # http_proxy_url = "" diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index 3c5b80a8e3f95..a0cffd07d6486 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -1,19 +1,17 @@ package http import ( + "context" "fmt" "io" "io/ioutil" "net/http" "strings" "sync" - "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/common/proxy" - "github.com/influxdata/telegraf/plugins/common/tls" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) @@ -29,18 +27,14 @@ type HTTP struct { // HTTP Basic Auth Credentials Username string `toml:"username"` Password string `toml:"password"` - tls.ClientConfig - - proxy.HTTPProxy // Absolute path to file with Bearer token BearerToken string `toml:"bearer_token"` SuccessStatusCodes []int `toml:"success_status_codes"` - Timeout config.Duration `toml:"timeout"` - client *http.Client + httpconfig.HTTPClientConfig // The parser will automatically be set by Telegraf core code because // this plugin implements the ParserInput interface (i.e. the SetParser method) @@ -77,6 +71,12 @@ var sampleConfig = ` ## HTTP Proxy support # http_proxy_url = "" + ## OAuth2 Client Credentials Grant + # client_id = "clientid" + # client_secret = "secret" + # token_url = "https://indentityprovider/oauth2/v1/token" + # scopes = ["urn:opc:idm:__myscopes__"] + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -108,25 +108,13 @@ func (*HTTP) Description() string { } func (h *HTTP) Init() error { - tlsCfg, err := h.ClientConfig.TLSConfig() - if err != nil { - return err - } - - proxy, err := h.HTTPProxy.Proxy() + ctx := context.Background() + client, err := h.HTTPClientConfig.CreateClient(ctx) if err != nil { return err } - transport := &http.Transport{ - TLSClientConfig: tlsCfg, - Proxy: proxy, - } - - h.client = &http.Client{ - Transport: transport, - Timeout: time.Duration(h.Timeout), - } + h.client = client // Set default as [200] if len(h.SuccessStatusCodes) == 0 { @@ -262,8 +250,7 @@ func makeRequestBodyReader(contentEncoding, body string) (io.ReadCloser, error) func init() { inputs.Add("http", func() telegraf.Input { return &HTTP{ - Timeout: config.Duration(time.Second * 5), - Method: "GET", + Method: "GET", } }) } diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index edd0b2004a0d1..02351effc71b9 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -6,8 +6,11 @@ import ( "io/ioutil" "net/http" "net/http/httptest" + "net/url" "testing" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" + oauth "github.com/influxdata/telegraf/plugins/common/oauth" plugin "github.com/influxdata/telegraf/plugins/inputs/http" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -252,3 +255,82 @@ func TestBodyAndContentEncoding(t *testing.T) { }) } } + +type TestHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) + +func TestOAuthClientCredentialsGrant(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + var token = "2YotnFZFEjr1zCsicMWpAA" + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *plugin.HTTP + tokenHandler TestHandlerFunc + handler TestHandlerFunc + }{ + { + name: "no credentials", + plugin: &plugin.HTTP{ + URLs: []string{u.String()}, + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Len(t, r.Header["Authorization"], 0) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "success", + plugin: &plugin.HTTP{ + URLs: []string{u.String() + "/write"}, + HTTPClientConfig: httpconfig.HTTPClientConfig{ + OAuth2Config: oauth.OAuth2Config{ + ClientID: "howdy", + ClientSecret: "secret", + TokenURL: u.String() + "/token", + Scopes: []string{"urn:opc:idm:__myscopes__"}, + }, + }, + }, + tokenHandler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + values := url.Values{} + values.Add("access_token", token) + values.Add("token_type", "bearer") + values.Add("expires_in", "3600") + _, err := w.Write([]byte(values.Encode())) + require.NoError(t, err) + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, []string{"Bearer " + token}, r.Header["Authorization"]) + w.WriteHeader(http.StatusOK) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + tt.handler(t, w, r) + case "/token": + tt.tokenHandler(t, w, r) + } + }) + + parser, _ := parsers.NewValueParser("metric", "string", "", nil) + tt.plugin.SetParser(parser) + err = tt.plugin.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + err = tt.plugin.Gather(&acc) + require.NoError(t, err) + }) + } +} diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 82ae230eceb0f..5da273f2d40a6 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -11,13 +11,11 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - "golang.org/x/oauth2" - "golang.org/x/oauth2/clientcredentials" ) const ( @@ -80,18 +78,13 @@ const ( type HTTP struct { URL string `toml:"url"` - Timeout config.Duration `toml:"timeout"` Method string `toml:"method"` Username string `toml:"username"` Password string `toml:"password"` Headers map[string]string `toml:"headers"` - ClientID string `toml:"client_id"` - ClientSecret string `toml:"client_secret"` - TokenURL string `toml:"token_url"` - Scopes []string `toml:"scopes"` ContentEncoding string `toml:"content_encoding"` - IdleConnTimeout config.Duration `toml:"idle_conn_timeout"` tls.ClientConfig + httpconfig.HTTPClientConfig client *http.Client serializer serializers.Serializer @@ -101,35 +94,6 @@ func (h *HTTP) SetSerializer(serializer serializers.Serializer) { h.serializer = serializer } -func (h *HTTP) createClient(ctx context.Context) (*http.Client, error) { - tlsCfg, err := h.ClientConfig.TLSConfig() - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsCfg, - Proxy: http.ProxyFromEnvironment, - IdleConnTimeout: time.Duration(h.IdleConnTimeout), - }, - Timeout: time.Duration(h.Timeout), - } - - if h.ClientID != "" && h.ClientSecret != "" && h.TokenURL != "" { - oauthConfig := clientcredentials.Config{ - ClientID: h.ClientID, - ClientSecret: h.ClientSecret, - TokenURL: h.TokenURL, - Scopes: h.Scopes, - } - ctx = context.WithValue(ctx, oauth2.HTTPClient, client) - client = oauthConfig.Client(ctx) - } - - return client, nil -} - func (h *HTTP) Connect() error { if h.Method == "" { h.Method = http.MethodPost @@ -139,12 +103,8 @@ func (h *HTTP) Connect() error { return fmt.Errorf("invalid method [%s] %s", h.URL, h.Method) } - if h.Timeout == 0 { - h.Timeout = config.Duration(defaultClientTimeout) - } - ctx := context.Background() - client, err := h.createClient(ctx) + client, err := h.HTTPClientConfig.CreateClient(ctx) if err != nil { return err } @@ -229,9 +189,8 @@ func (h *HTTP) write(reqBody []byte) error { func init() { outputs.Add("http", func() telegraf.Output { return &HTTP{ - Timeout: config.Duration(defaultClientTimeout), - Method: defaultMethod, - URL: defaultURL, + Method: defaultMethod, + URL: defaultURL, } }) } diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index a09f7dd7ea28f..8089f45f59f2e 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -13,6 +13,8 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" + oauth "github.com/influxdata/telegraf/plugins/common/oauth" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/stretchr/testify/require" ) @@ -379,11 +381,15 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { { name: "success", plugin: &HTTP{ - URL: u.String() + "/write", - ClientID: "howdy", - ClientSecret: "secret", - TokenURL: u.String() + "/token", - Scopes: []string{"urn:opc:idm:__myscopes__"}, + URL: u.String() + "/write", + HTTPClientConfig: httpconfig.HTTPClientConfig{ + OAuth2Config: oauth.OAuth2Config{ + ClientID: "howdy", + ClientSecret: "secret", + TokenURL: u.String() + "/token", + Scopes: []string{"urn:opc:idm:__myscopes__"}, + }, + }, }, tokenHandler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) From 9ff3fe5aa49add9de9ec416a3b1e8951e4953a10 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Mon, 26 Apr 2021 09:18:58 -0400 Subject: [PATCH 390/761] Parallelize PR builds by Architecture (#9172) * add oauth2 to http input * stuff * Updated config.yml * Update README.md * Update http_test.go * Update Makefile * Updated config.yml * Update http_test.go * Update http.go * Update http.go * Update http.go * Update http.go * Updated config.yml --- .circleci/config.yml | 100 +++++++++++++++++++++++++++++++++---------- Makefile | 74 ++++++++++++++++++++------------ 2 files changed, 123 insertions(+), 51 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 690eaa9150989..26758b951a26e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -66,11 +66,11 @@ commands: - when: condition: << parameters.release >> steps: - - run: 'debian=1 centos=1 mac=1 freebsd=1 linux=1 windows=1 make package' + - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 make package' - when: condition: << parameters.nightly >> steps: - - run: 'debian=1 centos=1 mac=1 freebsd=1 linux=1 windows=1 NIGHTLY=1 make package' + - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 NIGHTLY=1 make package' - run: 'make upload-nightly' - unless: condition: << parameters.nightly >> @@ -157,31 +157,61 @@ jobs: steps: - package-build: type: windows - debian-package: + darwin-package: executor: go-1_16 steps: - package-build: - type: debian - centos-package: + type: darwin + i386-package: executor: go-1_16 steps: - package-build: - type: centos - mac-package: + type: i386 + ppc641e-package: executor: go-1_16 steps: - package-build: - type: mac - freebsd-package: + type: ppc641e + s390x-package: executor: go-1_16 steps: - package-build: - type: freebsd - linux-package: + type: s390x + armel-package: executor: go-1_16 steps: - package-build: - type: linux + type: armel + amd64-package: + executor: go-1_16 + steps: + - package-build: + type: amd64 + arm64-package: + executor: go-1_16 + steps: + - package-build: + type: arm64 + mipsel-package: + executor: go-1_16 + steps: + - package-build: + type: mipsel + mips-package: + executor: go-1_16 + steps: + - package-build: + type: mips + static-package: + executor: go-1_16 + steps: + - package-build: + type: static + armhf-package: + executor: go-1_16 + steps: + - package-build: + type: armhf release: executor: go-1_16 @@ -307,29 +337,53 @@ workflows: - 'windows-package': requires: - 'test-go-windows' - - 'debian-package': + - 'darwin-package': + requires: + - 'test-go-mac' + - 'i386-package': requires: - 'test-awaiter' - - 'centos-package': + - 'ppc641e-package': requires: - 'test-awaiter' - - 'mac-package': + - 's390x-package': requires: - - 'test-go-mac' - - 'freebsd-package': + - 'test-awaiter' + - 'armel-package': + requires: + - 'test-awaiter' + - 'amd64-package': + requires: + - 'test-awaiter' + - 'arm64-package': + requires: + - 'test-awaiter' + - 'armhf-package': + requires: + - 'test-awaiter' + - 'static-package': + requires: + - 'test-awaiter' + - 'mipsel-package': requires: - 'test-awaiter' - - 'linux-package': + - 'mips-package': requires: - 'test-awaiter' - 'share-artifacts': requires: - - 'linux-package' - - 'freebsd-package' - - 'mac-package' - - 'centos-package' - - 'debian-package' + - 'i386-package' + - 'ppc641e-package' + - 's390x-package' + - 'armel-package' + - 'amd64-package' + - 'mipsel-package' + - 'mips-package' + - 'darwin-package' - 'windows-package' + - 'static-package' + - 'arm64-package' + - 'armhf-package' filters: branches: ignore: diff --git a/Makefile b/Makefile index 2e3e7e3554e25..f0b92fc35958f 100644 --- a/Makefile +++ b/Makefile @@ -228,53 +228,71 @@ $(buildbin): @mkdir -pv $(dir $@) go build -o $(dir $@) -ldflags "$(LDFLAGS)" ./cmd/telegraf -ifdef debian -debs := telegraf_$(deb_version)_amd64.deb -debs += telegraf_$(deb_version)_arm64.deb -debs += telegraf_$(deb_version)_armel.deb -debs += telegraf_$(deb_version)_armhf.deb -debs += telegraf_$(deb_version)_i386.deb +ifdef mips debs += telegraf_$(deb_version)_mips.deb +tars += telegraf-$(tar_version)_linux_mips.tar.gz +endif + +ifdef mipsel debs += telegraf_$(deb_version)_mipsel.deb -debs += telegraf_$(deb_version)_s390x.deb -debs += telegraf_$(deb_version)_ppc64el.deb +tars += telegraf-$(tar_version)_linux_mipsel.tar.gz endif -ifdef centos +ifdef arm64 +tars += telegraf-$(tar_version)_linux_arm64.tar.gz +debs += telegraf_$(deb_version)_arm64.deb rpms += telegraf-$(rpm_version).aarch64.rpm -rpms += telegraf-$(rpm_version).armel.rpm -rpms += telegraf-$(rpm_version).armv6hl.rpm -rpms += telegraf-$(rpm_version).i386.rpm -rpms += telegraf-$(rpm_version).s390x.rpm -rpms += telegraf-$(rpm_version).ppc64le.rpm -rpms += telegraf-$(rpm_version).x86_64.rpm endif -ifdef mac -tars += telegraf-$(tar_version)_darwin_amd64.tar.gz +ifdef amd64 +tars += telegraf-$(tar_version)_freebsd_amd64.tar.gz +tars += telegraf-$(tar_version)_linux_amd64.tar.gz +debs := telegraf_$(deb_version)_amd64.deb +rpms += telegraf-$(rpm_version).x86_64.rpm endif -ifdef freebsd -tars += telegraf-$(tar_version)_freebsd_amd64.tar.gz -tars += telegraf-$(tar_version)_freebsd_i386.tar.gz +ifdef static +tars += telegraf-$(tar_version)_static_linux_amd64.tar.gz endif -ifdef linux -tars += telegraf-$(tar_version)_linux_amd64.tar.gz -tars += telegraf-$(tar_version)_linux_arm64.tar.gz +ifdef armel tars += telegraf-$(tar_version)_linux_armel.tar.gz +rpms += telegraf-$(rpm_version).armel.rpm +debs += telegraf_$(deb_version)_armel.deb +endif + +ifdef armhf tars += telegraf-$(tar_version)_linux_armhf.tar.gz -tars += telegraf-$(tar_version)_linux_i386.tar.gz -tars += telegraf-$(tar_version)_linux_mips.tar.gz -tars += telegraf-$(tar_version)_linux_mipsel.tar.gz +debs += telegraf_$(deb_version)_armhf.deb +rpms += telegraf-$(rpm_version).armv6hl.rpm +endif + +ifdef s390x tars += telegraf-$(tar_version)_linux_s390x.tar.gz +debs += telegraf_$(deb_version)_s390x.deb +rpms += telegraf-$(rpm_version).s390x.rpm +endif + +ifdef ppc641e tars += telegraf-$(tar_version)_linux_ppc64le.tar.gz -tars += telegraf-$(tar_version)_static_linux_amd64.tar.gz +rpms += telegraf-$(rpm_version).ppc64le.rpm +debs += telegraf_$(deb_version)_ppc64el.deb +endif + +ifdef i386 +tars += telegraf-$(tar_version)_freebsd_i386.tar.gz +debs += telegraf_$(deb_version)_i386.deb +tars += telegraf-$(tar_version)_linux_i386.tar.gz +rpms += telegraf-$(rpm_version).i386.rpm endif ifdef windows -zips += telegraf-$(tar_version)_windows_amd64.zip zips += telegraf-$(tar_version)_windows_i386.zip +zips += telegraf-$(tar_version)_windows_amd64.zip +endif + +ifdef darwin +tars += telegraf-$(tar_version)_darwin_amd64.tar.gz endif dists := $(debs) $(rpms) $(tars) $(zips) From 83e7c3ec6ab5da728b778bf378d69b915aacc08f Mon Sep 17 00:00:00 2001 From: machinly Date: Tue, 27 Apr 2021 03:24:15 +0800 Subject: [PATCH 391/761] add dnsmasq plugin in external plugin (#9187) --- EXTERNAL_PLUGINS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index 1bf0d2f1dd371..aa6fa8a40b4e5 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -16,6 +16,7 @@ Pull requests welcome. - [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts - [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels - [Big Blue Button](https://github.com/SLedunois/bigbluebutton-telegraf-plugin) - Gather meetings information from [Big Blue Button](https://bigbluebutton.org/) server +- [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - Gather dnsmasq statistics from dnsmasq ## Outputs - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. From e058f3641c8da784a5b0036261ea5fab7625572e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Mon, 26 Apr 2021 22:57:05 +0200 Subject: [PATCH 392/761] Linter fixes for plugins/inputs/[ab]* (#9191) --- plugins/inputs/aerospike/README.md | 11 +- plugins/inputs/aerospike/aerospike.go | 80 +++++++------- plugins/inputs/aerospike/aerospike_test.go | 100 ++++++++--------- plugins/inputs/aliyuncms/aliyuncms.go | 13 +-- plugins/inputs/aliyuncms/aliyuncms_test.go | 7 +- plugins/inputs/aliyuncms/discovery.go | 87 ++++++++------- plugins/inputs/amqp_consumer/README.md | 2 +- plugins/inputs/amqp_consumer/amqp_consumer.go | 60 ++++------- plugins/inputs/apache/apache.go | 44 ++++---- plugins/inputs/apcupsd/apcupsd.go | 102 ++++++++++-------- plugins/inputs/apcupsd/apcupsd_test.go | 59 +++++----- plugins/inputs/beat/README.md | 2 +- plugins/inputs/beat/beat.go | 13 ++- plugins/inputs/bind/json_stats.go | 38 ++++--- plugins/inputs/bind/xml_stats_v2.go | 6 +- plugins/inputs/bind/xml_stats_v3.go | 34 +++--- plugins/inputs/burrow/burrow_test.go | 5 +- 17 files changed, 343 insertions(+), 320 deletions(-) diff --git a/plugins/inputs/aerospike/README.md b/plugins/inputs/aerospike/README.md index 66fbbe12ec8f0..59ff6ed702db7 100644 --- a/plugins/inputs/aerospike/README.md +++ b/plugins/inputs/aerospike/README.md @@ -28,18 +28,17 @@ All metrics are attempted to be cast to integers, then booleans, then strings. # tls_key = "/etc/telegraf/key.pem" ## If false, skip chain & host verification # insecure_skip_verify = true - + # Feature Options # Add namespace variable to limit the namespaces executed on # Leave blank to do all # disable_query_namespaces = true # default false # namespaces = ["namespace1", "namespace2"] - # Enable set level telmetry + # Enable set level telemetry # query_sets = true # default: false # Add namespace set combinations to limit sets executed on - # Leave blank to do all - # sets = ["namespace1/set1", "namespace1/set2"] + # Leave blank to do all sets # sets = ["namespace1/set1", "namespace1/set2", "namespace3"] # Histograms @@ -48,12 +47,10 @@ All metrics are attempted to be cast to integers, then booleans, then strings. # by default, aerospike produces a 100 bucket histogram # this is not great for most graphing tools, this will allow - # the ability to squash this to a smaller number of buckets + # the ability to squash this to a smaller number of buckets # To have a balanced histogram, the number of buckets chosen # should divide evenly into 100. # num_histogram_buckets = 100 # default: 10 - - ``` ### Measurements: diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index 38674d89a7595..dd2ff32df975f 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -10,11 +10,11 @@ import ( "sync" "time" + as "github.com/aerospike/aerospike-client-go" + "github.com/influxdata/telegraf" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - - as "github.com/aerospike/aerospike-client-go" ) type Aerospike struct { @@ -65,7 +65,7 @@ var sampleConfig = ` # disable_query_namespaces = true # default false # namespaces = ["namespace1", "namespace2"] - # Enable set level telmetry + # Enable set level telemetry # query_sets = true # default: false # Add namespace set combinations to limit sets executed on # Leave blank to do all sets @@ -77,7 +77,9 @@ var sampleConfig = ` # by default, aerospike produces a 100 bucket histogram # this is not great for most graphing tools, this will allow - # the ability to squash this to a smaller number of buckets + # the ability to squash this to a smaller number of buckets + # To have a balanced histogram, the number of buckets chosen + # should divide evenly into 100. # num_histogram_buckets = 100 # default: 10 ` @@ -119,7 +121,7 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error { } if len(a.Servers) == 0 { - return a.gatherServer("127.0.0.1:3000", acc) + return a.gatherServer(acc, "127.0.0.1:3000") } var wg sync.WaitGroup @@ -127,7 +129,7 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error { for _, server := range a.Servers { go func(serv string) { defer wg.Done() - acc.AddError(a.gatherServer(serv, acc)) + acc.AddError(a.gatherServer(acc, serv)) }(server) } @@ -135,7 +137,7 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error { return nil } -func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) error { +func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) error { host, port, err := net.SplitHostPort(hostPort) if err != nil { return err @@ -162,7 +164,7 @@ func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) erro if err != nil { return err } - a.parseNodeInfo(stats, hostPort, n.GetName(), acc) + a.parseNodeInfo(acc, stats, hostPort, n.GetName()) namespaces, err := a.getNamespaces(n) if err != nil { @@ -176,18 +178,17 @@ func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) erro if err != nil { continue - } else { - a.parseNamespaceInfo(stats, hostPort, namespace, n.GetName(), acc) } + a.parseNamespaceInfo(acc, stats, hostPort, namespace, n.GetName()) if a.EnableTTLHistogram { - err = a.getTTLHistogram(hostPort, namespace, "", n, acc) + err = a.getTTLHistogram(acc, hostPort, namespace, "", n) if err != nil { continue } } if a.EnableObjectSizeLinearHistogram { - err = a.getObjectSizeLinearHistogram(hostPort, namespace, "", n, acc) + err = a.getObjectSizeLinearHistogram(acc, hostPort, namespace, "", n) if err != nil { continue } @@ -200,24 +201,22 @@ func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) erro if err == nil { for _, namespaceSet := range namespaceSets { namespace, set := splitNamespaceSet(namespaceSet) - stats, err := a.getSetInfo(namespaceSet, n) if err != nil { continue - } else { - a.parseSetInfo(stats, hostPort, namespaceSet, n.GetName(), acc) } + a.parseSetInfo(acc, stats, hostPort, namespaceSet, n.GetName()) if a.EnableTTLHistogram { - err = a.getTTLHistogram(hostPort, namespace, set, n, acc) + err = a.getTTLHistogram(acc, hostPort, namespace, set, n) if err != nil { continue } } if a.EnableObjectSizeLinearHistogram { - err = a.getObjectSizeLinearHistogram(hostPort, namespace, set, n, acc) + err = a.getObjectSizeLinearHistogram(acc, hostPort, namespace, set, n) if err != nil { continue } @@ -238,7 +237,7 @@ func (a *Aerospike) getNodeInfo(n *as.Node) (map[string]string, error) { return stats, nil } -func (a *Aerospike) parseNodeInfo(stats map[string]string, hostPort string, nodeName string, acc telegraf.Accumulator) { +func (a *Aerospike) parseNodeInfo(acc telegraf.Accumulator, stats map[string]string, hostPort string, nodeName string) { tags := map[string]string{ "aerospike_host": hostPort, "node_name": nodeName, @@ -275,7 +274,7 @@ func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node) (map[string]s return stats, err } -func (a *Aerospike) parseNamespaceInfo(stats map[string]string, hostPort string, namespace string, nodeName string, acc telegraf.Accumulator) { +func (a *Aerospike) parseNamespaceInfo(acc telegraf.Accumulator, stats map[string]string, hostPort string, namespace string, nodeName string) { nTags := map[string]string{ "aerospike_host": hostPort, "node_name": nodeName, @@ -341,7 +340,7 @@ func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node) (map[string]stri return stats, nil } -func (a *Aerospike) parseSetInfo(stats map[string]string, hostPort string, namespaceSet string, nodeName string, acc telegraf.Accumulator) { +func (a *Aerospike) parseSetInfo(acc telegraf.Accumulator, stats map[string]string, hostPort string, namespaceSet string, nodeName string) { stat := strings.Split( strings.TrimSuffix( stats[fmt.Sprintf("sets/%s", namespaceSet)], ";"), ":") @@ -363,22 +362,26 @@ func (a *Aerospike) parseSetInfo(stats map[string]string, hostPort string, names acc.AddFields("aerospike_set", nFields, nTags, time.Now()) } -func (a *Aerospike) getTTLHistogram(hostPort string, namespace string, set string, n *as.Node, acc telegraf.Accumulator) error { +func (a *Aerospike) getTTLHistogram(acc telegraf.Accumulator, hostPort string, namespace string, set string, n *as.Node) error { stats, err := a.getHistogram(namespace, set, "ttl", n) if err != nil { return err } - a.parseHistogram(stats, hostPort, namespace, set, "ttl", n.GetName(), acc) + + nTags := createTags(hostPort, n.GetName(), namespace, set) + a.parseHistogram(acc, stats, nTags, "ttl") return nil } -func (a *Aerospike) getObjectSizeLinearHistogram(hostPort string, namespace string, set string, n *as.Node, acc telegraf.Accumulator) error { +func (a *Aerospike) getObjectSizeLinearHistogram(acc telegraf.Accumulator, hostPort string, namespace string, set string, n *as.Node) error { stats, err := a.getHistogram(namespace, set, "object-size-linear", n) if err != nil { return err } - a.parseHistogram(stats, hostPort, namespace, set, "object-size-linear", n.GetName(), acc) + + nTags := createTags(hostPort, n.GetName(), namespace, set) + a.parseHistogram(acc, stats, nTags, "object-size-linear") return nil } @@ -398,17 +401,7 @@ func (a *Aerospike) getHistogram(namespace string, set string, histogramType str return stats, nil } -func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, namespace string, set string, histogramType string, nodeName string, acc telegraf.Accumulator) { - nTags := map[string]string{ - "aerospike_host": hostPort, - "node_name": nodeName, - "namespace": namespace, - } - - if len(set) > 0 { - nTags["set"] = set - } - +func (a *Aerospike) parseHistogram(acc telegraf.Accumulator, stats map[string]string, nTags map[string]string, histogramType string) { nFields := make(map[string]interface{}) for _, stat := range stats { @@ -421,7 +414,7 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam if pieces[0] == "buckets" { buckets := strings.Split(pieces[1], ",") - // Normalize incase of less buckets than expected + // Normalize in case of less buckets than expected numRecordsPerBucket := 1 if len(buckets) > a.NumberHistogramBuckets { numRecordsPerBucket = int(math.Ceil(float64(len(buckets)) / float64(a.NumberHistogramBuckets))) @@ -458,7 +451,7 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam acc.AddFields(fmt.Sprintf("aerospike_histogram_%v", strings.Replace(histogramType, "-", "_", -1)), nFields, nTags, time.Now()) } -func splitNamespaceSet(namespaceSet string) (string, string) { +func splitNamespaceSet(namespaceSet string) (namespace string, set string) { split := strings.Split(namespaceSet, "/") return split[0], split[1] } @@ -478,6 +471,19 @@ func parseAerospikeValue(key string, v string) interface{} { } } +func createTags(hostPort string, nodeName string, namespace string, set string) map[string]string { + nTags := map[string]string{ + "aerospike_host": hostPort, + "node_name": nodeName, + "namespace": namespace, + } + + if len(set) > 0 { + nTags["set"] = set + } + return nTags +} + func init() { inputs.Add("aerospike", func() telegraf.Input { return &Aerospike{} diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index 57d37a06c5d4d..ab93d4e2a185f 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -4,9 +4,9 @@ import ( "testing" as "github.com/aerospike/aerospike-client-go" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestAerospikeStatisticsIntegration(t *testing.T) { @@ -23,14 +23,14 @@ func TestAerospikeStatisticsIntegration(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.True(t, acc.HasTag("aerospike_node", "node_name")) - assert.True(t, acc.HasMeasurement("aerospike_namespace")) - assert.True(t, acc.HasTag("aerospike_namespace", "node_name")) - assert.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.True(t, acc.HasTag("aerospike_node", "node_name")) + require.True(t, acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasTag("aerospike_namespace", "node_name")) + require.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error")) namespaceName := acc.TagValue("aerospike_namespace", "namespace") - assert.Equal(t, namespaceName, "test") + require.Equal(t, "test", namespaceName) } func TestAerospikeStatisticsPartialErrIntegration(t *testing.T) { @@ -50,14 +50,14 @@ func TestAerospikeStatisticsPartialErrIntegration(t *testing.T) { require.Error(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.True(t, acc.HasMeasurement("aerospike_namespace")) - assert.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.True(t, acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error")) namespaceName := acc.TagSetValue("aerospike_namespace", "namespace") - assert.Equal(t, namespaceName, "test") + require.Equal(t, "test", namespaceName) } -func TestSelectNamepsacesIntegration(t *testing.T) { +func TestSelectNamespacesIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } @@ -73,10 +73,10 @@ func TestSelectNamepsacesIntegration(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.True(t, acc.HasTag("aerospike_node", "node_name")) - assert.True(t, acc.HasMeasurement("aerospike_namespace")) - assert.True(t, acc.HasTag("aerospike_namespace", "node_name")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.True(t, acc.HasTag("aerospike_node", "node_name")) + require.True(t, acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasTag("aerospike_namespace", "node_name")) // Expect only 1 namespace count := 0 @@ -85,10 +85,10 @@ func TestSelectNamepsacesIntegration(t *testing.T) { count++ } } - assert.Equal(t, count, 1) + require.Equal(t, 1, count) // expect namespace to have no fields as nonexistent - assert.False(t, acc.HasInt64Field("aerospke_namespace", "appeals_tx_remaining")) + require.False(t, acc.HasInt64Field("aerospke_namespace", "appeals_tx_remaining")) } func TestDisableQueryNamespacesIntegration(t *testing.T) { @@ -107,15 +107,15 @@ func TestDisableQueryNamespacesIntegration(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.False(t, acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.False(t, acc.HasMeasurement("aerospike_namespace")) a.DisableQueryNamespaces = false err = acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.True(t, acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.True(t, acc.HasMeasurement("aerospike_namespace")) } func TestQuerySetsIntegration(t *testing.T) { @@ -127,6 +127,7 @@ func TestQuerySetsIntegration(t *testing.T) { // test is the default namespace from aerospike policy := as.NewClientPolicy() client, err := as.NewClientWithPolicy(policy, testutil.GetLocalHost(), 3000) + require.NoError(t, err) key, err := as.NewKey("test", "foo", 123) require.NoError(t, err) @@ -158,12 +159,12 @@ func TestQuerySetsIntegration(t *testing.T) { err = acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo")) - assert.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar")) + require.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo")) + require.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar")) - assert.True(t, acc.HasMeasurement("aerospike_set")) - assert.True(t, acc.HasTag("aerospike_set", "set")) - assert.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes")) + require.True(t, acc.HasMeasurement("aerospike_set")) + require.True(t, acc.HasTag("aerospike_set", "set")) + require.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes")) } func TestSelectQuerySetsIntegration(t *testing.T) { @@ -175,6 +176,7 @@ func TestSelectQuerySetsIntegration(t *testing.T) { // test is the default namespace from aerospike policy := as.NewClientPolicy() client, err := as.NewClientWithPolicy(policy, testutil.GetLocalHost(), 3000) + require.NoError(t, err) key, err := as.NewKey("test", "foo", 123) require.NoError(t, err) @@ -207,12 +209,12 @@ func TestSelectQuerySetsIntegration(t *testing.T) { err = acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo")) - assert.False(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar")) + require.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo")) + require.False(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar")) - assert.True(t, acc.HasMeasurement("aerospike_set")) - assert.True(t, acc.HasTag("aerospike_set", "set")) - assert.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes")) + require.True(t, acc.HasMeasurement("aerospike_set")) + require.True(t, acc.HasTag("aerospike_set", "set")) + require.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes")) } func TestDisableTTLHistogramIntegration(t *testing.T) { @@ -233,7 +235,7 @@ func TestDisableTTLHistogramIntegration(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.False(t, acc.HasMeasurement("aerospike_histogram_ttl")) + require.False(t, acc.HasMeasurement("aerospike_histogram_ttl")) } func TestTTLHistogramIntegration(t *testing.T) { if testing.Short() { @@ -250,7 +252,7 @@ func TestTTLHistogramIntegration(t *testing.T) { } /* Produces histogram - Measurment exists + Measurement exists Has appropriate tags (node name etc) Has appropriate keys (time:value) may be able to leverage histogram plugin @@ -259,8 +261,8 @@ func TestTTLHistogramIntegration(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_histogram_ttl")) - assert.True(t, FindTagValue(&acc, "aerospike_histogram_ttl", "namespace", "test")) + require.True(t, acc.HasMeasurement("aerospike_histogram_ttl")) + require.True(t, FindTagValue(&acc, "aerospike_histogram_ttl", "namespace", "test")) } func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) { if testing.Short() { @@ -280,7 +282,7 @@ func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.False(t, acc.HasMeasurement("aerospike_histogram_object_size_linear")) + require.False(t, acc.HasMeasurement("aerospike_histogram_object_size_linear")) } func TestObjectSizeLinearHistogramIntegration(t *testing.T) { if testing.Short() { @@ -297,7 +299,7 @@ func TestObjectSizeLinearHistogramIntegration(t *testing.T) { } /* Produces histogram - Measurment exists + Measurement exists Has appropriate tags (node name etc) Has appropriate keys (time:value) @@ -305,8 +307,8 @@ func TestObjectSizeLinearHistogramIntegration(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_histogram_object_size_linear")) - assert.True(t, FindTagValue(&acc, "aerospike_histogram_object_size_linear", "namespace", "test")) + require.True(t, acc.HasMeasurement("aerospike_histogram_object_size_linear")) + require.True(t, FindTagValue(&acc, "aerospike_histogram_object_size_linear", "namespace", "test")) } func TestParseNodeInfo(t *testing.T) { @@ -330,7 +332,7 @@ func TestParseNodeInfo(t *testing.T) { "node_name": "TestNodeName", } - a.parseNodeInfo(stats, "127.0.0.1:3000", "TestNodeName", &acc) + a.parseNodeInfo(&acc, stats, "127.0.0.1:3000", "TestNodeName") acc.AssertContainsTaggedFields(t, "aerospike_node", expectedFields, expectedTags) } @@ -356,7 +358,7 @@ func TestParseNamespaceInfo(t *testing.T) { "namespace": "test", } - a.parseNamespaceInfo(stats, "127.0.0.1:3000", "test", "TestNodeName", &acc) + a.parseNamespaceInfo(&acc, stats, "127.0.0.1:3000", "test", "TestNodeName") acc.AssertContainsTaggedFields(t, "aerospike_namespace", expectedFields, expectedTags) } @@ -380,7 +382,7 @@ func TestParseSetInfo(t *testing.T) { "node_name": "TestNodeName", "set": "test/foo", } - a.parseSetInfo(stats, "127.0.0.1:3000", "test/foo", "TestNodeName", &acc) + a.parseSetInfo(&acc, stats, "127.0.0.1:3000", "test/foo", "TestNodeName") acc.AssertContainsTaggedFields(t, "aerospike_set", expectedFields, expectedTags) } @@ -412,7 +414,8 @@ func TestParseHistogramSet(t *testing.T) { "set": "foo", } - a.parseHistogram(stats, "127.0.0.1:3000", "test", "foo", "object-size-linear", "TestNodeName", &acc) + nTags := createTags("127.0.0.1:3000", "TestNodeName", "test", "foo") + a.parseHistogram(&acc, stats, nTags, "object-size-linear") acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags) } func TestParseHistogramNamespace(t *testing.T) { @@ -442,16 +445,17 @@ func TestParseHistogramNamespace(t *testing.T) { "namespace": "test", } - a.parseHistogram(stats, "127.0.0.1:3000", "test", "", "object-size-linear", "TestNodeName", &acc) + nTags := createTags("127.0.0.1:3000", "TestNodeName", "test", "") + a.parseHistogram(&acc, stats, nTags, "object-size-linear") acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags) } func TestAerospikeParseValue(t *testing.T) { // uint64 with value bigger than int64 max val := parseAerospikeValue("", "18446744041841121751") - require.Equal(t, val, uint64(18446744041841121751)) + require.Equal(t, uint64(18446744041841121751), val) val = parseAerospikeValue("", "true") - require.Equal(t, val, true) + require.Equal(t, true, val) // int values val = parseAerospikeValue("", "42") diff --git a/plugins/inputs/aliyuncms/aliyuncms.go b/plugins/inputs/aliyuncms/aliyuncms.go index 3b521579b12de..ac70b9a441597 100644 --- a/plugins/inputs/aliyuncms/aliyuncms.go +++ b/plugins/inputs/aliyuncms/aliyuncms.go @@ -11,13 +11,14 @@ import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers" "github.com/aliyun/alibaba-cloud-sdk-go/services/cms" + "github.com/jmespath/go-jmespath" + "github.com/pkg/errors" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/limiter" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/jmespath/go-jmespath" - "github.com/pkg/errors" ) const ( @@ -161,7 +162,7 @@ type ( dtLock sync.Mutex //Guard for discoveryTags & dimensions discoveryTags map[string]map[string]string //Internal data structure that can enrich metrics with tags dimensionsUdObj map[string]string - dimensionsUdArr []map[string]string //Parsed Dimesnsions JSON string (unmarshalled) + dimensionsUdArr []map[string]string //Parsed Dimensions JSON string (unmarshalled) requestDimensions []map[string]string //this is the actual dimensions list that would be used in API request requestDimensionsStr string //String representation of the above @@ -239,7 +240,7 @@ func (s *AliyunCMS) Init() error { //Init discovery... if s.dt == nil { //Support for tests - s.dt, err = NewDiscoveryTool(s.DiscoveryRegions, s.Project, s.Log, credential, int(float32(s.RateLimit)*0.2), time.Duration(s.DiscoveryInterval)) + s.dt, err = newDiscoveryTool(s.DiscoveryRegions, s.Project, s.Log, credential, int(float32(s.RateLimit)*0.2), time.Duration(s.DiscoveryInterval)) if err != nil { s.Log.Errorf("Discovery tool is not activated: %v", err) s.dt = nil @@ -395,8 +396,8 @@ func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, me } //Tag helper -func parseTag(tagSpec string, data interface{}) (string, string, error) { - tagKey := tagSpec +func parseTag(tagSpec string, data interface{}) (tagKey string, tagValue string, err error) { + tagKey = tagSpec queryPath := tagSpec //Split query path to tagKey and query path diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go index b9028c8ba22a0..a2bae5d0d58a2 100644 --- a/plugins/inputs/aliyuncms/aliyuncms_test.go +++ b/plugins/inputs/aliyuncms/aliyuncms_test.go @@ -12,12 +12,13 @@ import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" "github.com/aliyun/alibaba-cloud-sdk-go/services/cms" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" ) const inputTitle = "inputs.aliyuncms" @@ -95,7 +96,7 @@ func getDiscoveryTool(project string, discoverRegions []string) (*discoveryTool, return nil, errors.Errorf("failed to retrieve credential: %v", err) } - dt, err := NewDiscoveryTool(discoverRegions, project, testutil.Logger{Name: inputTitle}, credential, 1, time.Minute*2) + dt, err := newDiscoveryTool(discoverRegions, project, testutil.Logger{Name: inputTitle}, credential, 1, time.Minute*2) if err != nil { return nil, errors.Errorf("Can't create discovery tool object: %v", err) diff --git a/plugins/inputs/aliyuncms/discovery.go b/plugins/inputs/aliyuncms/discovery.go index 7e33d7f92c64a..c3f35c78aa3ec 100644 --- a/plugins/inputs/aliyuncms/discovery.go +++ b/plugins/inputs/aliyuncms/discovery.go @@ -2,7 +2,6 @@ package aliyuncms import ( "encoding/json" - "github.com/influxdata/telegraf" "reflect" "regexp" "strconv" @@ -17,8 +16,10 @@ import ( "github.com/aliyun/alibaba-cloud-sdk-go/services/rds" "github.com/aliyun/alibaba-cloud-sdk-go/services/slb" "github.com/aliyun/alibaba-cloud-sdk-go/services/vpc" - "github.com/influxdata/telegraf/internal/limiter" "github.com/pkg/errors" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/limiter" ) // https://www.alibabacloud.com/help/doc-detail/40654.htm?gclid=Cj0KCQjw4dr0BRCxARIsAKUNjWTAMfyVUn_Y3OevFBV3CMaazrhq0URHsgE7c0m0SeMQRKlhlsJGgIEaAviyEALw_wcB @@ -69,6 +70,13 @@ type discoveryTool struct { lg telegraf.Logger //Telegraf logger (should be provided) } +type response struct { + discData []interface{} + totalCount int + pageSize int + pageNumber int +} + //getRPCReqFromDiscoveryRequest - utility function to map between aliyun request primitives //discoveryRequest represents different type of discovery requests func getRPCReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, error) { @@ -97,13 +105,13 @@ func getRPCReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, return nil, errors.Errorf("Didn't find *requests.RpcRequest embedded struct in %q", ptrV.Type()) } -//NewDiscoveryTool function returns discovery tool object. +//newDiscoveryTool function returns discovery tool object. //The object is used to periodically get data about aliyun objects and send this //data into channel. The intention is to enrich reported metrics with discovery data. //Discovery is supported for a limited set of object types (defined by project) and can be extended in future. //Discovery can be limited by region if not set, then all regions is queried. //Request against API can inquire additional costs, consult with aliyun API documentation. -func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, credential auth.Credential, rateLimit int, discoveryInterval time.Duration) (*discoveryTool, error) { +func newDiscoveryTool(regions []string, project string, lg telegraf.Logger, credential auth.Credential, rateLimit int, discoveryInterval time.Duration) (*discoveryTool, error) { var ( dscReq = map[string]discoveryRequest{} cli = map[string]aliyunSdkClient{} @@ -292,22 +300,22 @@ func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred }, nil } -func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) (discData []interface{}, totalCount int, pageSize int, pageNumber int, err error) { +func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) (discoveryResponse *response, err error) { var ( - fullOutput = map[string]interface{}{} - data []byte - foundDataItem bool - foundRootKey bool + fullOutput = map[string]interface{}{} + foundDataItem, foundRootKey bool + discData []interface{} + totalCount, pageSize, pageNumber int ) - data = resp.GetHttpContentBytes() + data := resp.GetHttpContentBytes() if data == nil { //No data - return nil, 0, 0, 0, errors.Errorf("No data in response to be parsed") + return nil, errors.Errorf("No data in response to be parsed") } err = json.Unmarshal(data, &fullOutput) if err != nil { - return nil, 0, 0, 0, errors.Errorf("Can't parse JSON from discovery response: %v", err) + return nil, errors.Errorf("Can't parse JSON from discovery response: %v", err) } for key, val := range fullOutput { @@ -316,7 +324,7 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) foundRootKey = true rootKeyVal, ok := val.(map[string]interface{}) if !ok { - return nil, 0, 0, 0, errors.Errorf("Content of root key %q, is not an object: %v", key, val) + return nil, errors.Errorf("Content of root key %q, is not an object: %v", key, val) } //It should contain the array with discovered data @@ -326,7 +334,7 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) } } if !foundDataItem { - return nil, 0, 0, 0, errors.Errorf("Didn't find array item in root key %q", key) + return nil, errors.Errorf("Didn't find array item in root key %q", key) } case "TotalCount": totalCount = int(val.(float64)) @@ -337,55 +345,54 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) } } if !foundRootKey { - return nil, 0, 0, 0, errors.Errorf("Didn't find root key %q in discovery response", dt.respRootKey) + return nil, errors.Errorf("Didn't find root key %q in discovery response", dt.respRootKey) } - return + return &response{ + discData: discData, + totalCount: totalCount, + pageSize: pageSize, + pageNumber: pageNumber, + }, nil } -func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.CommonRequest, limiter chan bool) (map[string]interface{}, error) { - var ( - err error - resp *responses.CommonResponse - data []interface{} - discoveryData []interface{} - totalCount int - pageNumber int - ) +func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.CommonRequest, limiterChan chan bool) (map[string]interface{}, error) { + var discoveryData []interface{} + defer delete(req.QueryParams, "PageNumber") for { - if limiter != nil { - <-limiter //Rate limiting + if limiterChan != nil { + <-limiterChan //Rate limiting } - resp, err = cli.ProcessCommonRequest(req) + resp, err := cli.ProcessCommonRequest(req) if err != nil { return nil, err } - data, totalCount, _, pageNumber, err = dt.parseDiscoveryResponse(resp) + discoveryResponse, err := dt.parseDiscoveryResponse(resp) if err != nil { return nil, err } - discoveryData = append(discoveryData, data...) + discoveryData = append(discoveryData, discoveryResponse.discData...) //Pagination - pageNumber++ - req.QueryParams["PageNumber"] = strconv.Itoa(pageNumber) + discoveryResponse.pageNumber++ + req.QueryParams["PageNumber"] = strconv.Itoa(discoveryResponse.pageNumber) - if len(discoveryData) == totalCount { //All data received + if len(discoveryData) == discoveryResponse.totalCount { //All data received //Map data to appropriate shape before return preparedData := map[string]interface{}{} for _, raw := range discoveryData { - if elem, ok := raw.(map[string]interface{}); ok { - if objectID, ok := elem[dt.respObjectIDKey].(string); ok { - preparedData[objectID] = elem - } - } else { + elem, ok := raw.(map[string]interface{}) + if !ok { return nil, errors.Errorf("Can't parse input data element, not a map[string]interface{} type") } + if objectID, ok := elem[dt.respObjectIDKey].(string); ok { + preparedData[objectID] = elem + } } return preparedData, nil @@ -393,7 +400,7 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com } } -func (dt *discoveryTool) getDiscoveryDataAllRegions(limiter chan bool) (map[string]interface{}, error) { +func (dt *discoveryTool) getDiscoveryDataAllRegions(limiterChan chan bool) (map[string]interface{}, error) { var ( data map[string]interface{} resultData = map[string]interface{}{} @@ -424,7 +431,7 @@ func (dt *discoveryTool) getDiscoveryDataAllRegions(limiter chan bool) (map[stri commonRequest.TransToAcsRequest() //Get discovery data using common request - data, err = dt.getDiscoveryData(cli, commonRequest, limiter) + data, err = dt.getDiscoveryData(cli, commonRequest, limiterChan) if err != nil { return nil, err } diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md index 8ef6d6fe2a8e9..ff417eb26b67c 100644 --- a/plugins/inputs/amqp_consumer/README.md +++ b/plugins/inputs/amqp_consumer/README.md @@ -43,7 +43,7 @@ The following defaults are known to work with RabbitMQ: # exchange_arguments = { } # exchange_arguments = {"hash_property" = "timestamp"} - ## AMQP queue name + ## AMQP queue name. queue = "telegraf" ## AMQP queue durability can be "transient" or "durable". diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index 39bfeeaede0b3..abe86bc385515 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -9,12 +9,13 @@ import ( "sync" "time" + "github.com/streadway/amqp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/streadway/amqp" ) const ( @@ -183,7 +184,7 @@ func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error { func (a *AMQPConsumer) createConfig() (*amqp.Config, error) { // make new tls config - tls, err := a.ClientConfig.TLSConfig() + tlsCfg, err := a.ClientConfig.TLSConfig() if err != nil { return nil, err } @@ -201,7 +202,7 @@ func (a *AMQPConsumer) createConfig() (*amqp.Config, error) { } config := amqp.Config{ - TLSClientConfig: tls, + TLSClientConfig: tlsCfg, SASL: auth, // if nil, it will be PLAIN } return &config, nil @@ -292,12 +293,9 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err } if a.Exchange != "" { - var exchangeDurable = true - switch a.ExchangeDurability { - case "transient": + exchangeDurable := true + if a.ExchangeDurability == "transient" { exchangeDurable = false - default: - exchangeDurable = true } exchangeArgs := make(amqp.Table, len(a.ExchangeArguments)) @@ -305,11 +303,8 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err exchangeArgs[k] = v } - err = declareExchange( + err = a.declareExchange( ch, - a.Exchange, - a.ExchangeType, - a.ExchangePassive, exchangeDurable, exchangeArgs) if err != nil { @@ -317,11 +312,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err } } - q, err := declareQueue( - ch, - a.Queue, - a.QueueDurability, - a.QueuePassive) + q, err := a.declareQueue(ch) if err != nil { return nil, err } @@ -364,19 +355,16 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err return msgs, err } -func declareExchange( +func (a *AMQPConsumer) declareExchange( channel *amqp.Channel, - exchangeName string, - exchangeType string, - exchangePassive bool, exchangeDurable bool, exchangeArguments amqp.Table, ) error { var err error - if exchangePassive { + if a.ExchangePassive { err = channel.ExchangeDeclarePassive( - exchangeName, - exchangeType, + a.Exchange, + a.ExchangeType, exchangeDurable, false, // delete when unused false, // internal @@ -385,8 +373,8 @@ func declareExchange( ) } else { err = channel.ExchangeDeclare( - exchangeName, - exchangeType, + a.Exchange, + a.ExchangeType, exchangeDurable, false, // delete when unused false, // internal @@ -400,26 +388,18 @@ func declareExchange( return nil } -func declareQueue( - channel *amqp.Channel, - queueName string, - queueDurability string, - queuePassive bool, -) (*amqp.Queue, error) { +func (a *AMQPConsumer) declareQueue(channel *amqp.Channel) (*amqp.Queue, error) { var queue amqp.Queue var err error - var queueDurable = true - switch queueDurability { - case "transient": + queueDurable := true + if a.QueueDurability == "transient" { queueDurable = false - default: - queueDurable = true } - if queuePassive { + if a.QueuePassive { queue, err = channel.QueueDeclarePassive( - queueName, // queue + a.Queue, // queue queueDurable, // durable false, // delete when unused false, // exclusive @@ -428,7 +408,7 @@ func declareQueue( ) } else { queue, err = channel.QueueDeclare( - queueName, // queue + a.Queue, // queue queueDurable, // durable false, // delete when unused false, // exclusive diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index 429d1cb9e69cc..9b9059ac8d48a 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -158,31 +158,31 @@ func (n *Apache) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { } func (n *Apache) gatherScores(data string) map[string]interface{} { - var waiting, open int = 0, 0 - var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0 + var waiting, open = 0, 0 + var s, r, w, k, d, c, l, g, i = 0, 0, 0, 0, 0, 0, 0, 0, 0 - for _, s := range strings.Split(data, "") { - switch s { + for _, str := range strings.Split(data, "") { + switch str { case "_": waiting++ case "S": - S++ + s++ case "R": - R++ + r++ case "W": - W++ + w++ case "K": - K++ + k++ case "D": - D++ + d++ case "C": - C++ + c++ case "L": - L++ + l++ case "G": - G++ + g++ case "I": - I++ + i++ case ".": open++ } @@ -190,15 +190,15 @@ func (n *Apache) gatherScores(data string) map[string]interface{} { fields := map[string]interface{}{ "scboard_waiting": float64(waiting), - "scboard_starting": float64(S), - "scboard_reading": float64(R), - "scboard_sending": float64(W), - "scboard_keepalive": float64(K), - "scboard_dnslookup": float64(D), - "scboard_closing": float64(C), - "scboard_logging": float64(L), - "scboard_finishing": float64(G), - "scboard_idle_cleanup": float64(I), + "scboard_starting": float64(s), + "scboard_reading": float64(r), + "scboard_sending": float64(w), + "scboard_keepalive": float64(k), + "scboard_dnslookup": float64(d), + "scboard_closing": float64(c), + "scboard_logging": float64(l), + "scboard_finishing": float64(g), + "scboard_idle_cleanup": float64(i), "scboard_open": float64(open), } return fields diff --git a/plugins/inputs/apcupsd/apcupsd.go b/plugins/inputs/apcupsd/apcupsd.go index b41a91b829af2..2cb7522984119 100644 --- a/plugins/inputs/apcupsd/apcupsd.go +++ b/plugins/inputs/apcupsd/apcupsd.go @@ -7,10 +7,11 @@ import ( "strings" "time" + apcupsdClient "github.com/mdlayher/apcupsd" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/mdlayher/apcupsd" ) const defaultAddress = "tcp://127.0.0.1:3551" @@ -42,60 +43,67 @@ func (*ApcUpsd) SampleConfig() string { func (h *ApcUpsd) Gather(acc telegraf.Accumulator) error { ctx := context.Background() - for _, addr := range h.Servers { - addrBits, err := url.Parse(addr) - if err != nil { - return err - } - if addrBits.Scheme == "" { - addrBits.Scheme = "tcp" - } - - ctx, cancel := context.WithTimeout(ctx, time.Duration(h.Timeout)) - defer cancel() + for _, server := range h.Servers { + err := func(address string) error { + addrBits, err := url.Parse(address) + if err != nil { + return err + } + if addrBits.Scheme == "" { + addrBits.Scheme = "tcp" + } + + ctx, cancel := context.WithTimeout(ctx, time.Duration(h.Timeout)) + defer cancel() + + status, err := fetchStatus(ctx, addrBits) + if err != nil { + return err + } + + tags := map[string]string{ + "serial": status.SerialNumber, + "ups_name": status.UPSName, + "status": status.Status, + "model": status.Model, + } + + flags, err := strconv.ParseUint(strings.Fields(status.StatusFlags)[0], 0, 64) + if err != nil { + return err + } + + fields := map[string]interface{}{ + "status_flags": flags, + "input_voltage": status.LineVoltage, + "load_percent": status.LoadPercent, + "battery_charge_percent": status.BatteryChargePercent, + "time_left_ns": status.TimeLeft.Nanoseconds(), + "output_voltage": status.OutputVoltage, + "internal_temp": status.InternalTemp, + "battery_voltage": status.BatteryVoltage, + "input_frequency": status.LineFrequency, + "time_on_battery_ns": status.TimeOnBattery.Nanoseconds(), + "nominal_input_voltage": status.NominalInputVoltage, + "nominal_battery_voltage": status.NominalBatteryVoltage, + "nominal_power": status.NominalPower, + "firmware": status.Firmware, + "battery_date": status.BatteryDate, + } + + acc.AddFields("apcupsd", fields, tags) + return nil + }(server) - status, err := fetchStatus(ctx, addrBits) if err != nil { return err } - - tags := map[string]string{ - "serial": status.SerialNumber, - "ups_name": status.UPSName, - "status": status.Status, - "model": status.Model, - } - - flags, err := strconv.ParseUint(strings.Fields(status.StatusFlags)[0], 0, 64) - if err != nil { - return err - } - - fields := map[string]interface{}{ - "status_flags": flags, - "input_voltage": status.LineVoltage, - "load_percent": status.LoadPercent, - "battery_charge_percent": status.BatteryChargePercent, - "time_left_ns": status.TimeLeft.Nanoseconds(), - "output_voltage": status.OutputVoltage, - "internal_temp": status.InternalTemp, - "battery_voltage": status.BatteryVoltage, - "input_frequency": status.LineFrequency, - "time_on_battery_ns": status.TimeOnBattery.Nanoseconds(), - "nominal_input_voltage": status.NominalInputVoltage, - "nominal_battery_voltage": status.NominalBatteryVoltage, - "nominal_power": status.NominalPower, - "firmware": status.Firmware, - "battery_date": status.BatteryDate, - } - - acc.AddFields("apcupsd", fields, tags) } return nil } -func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsd.Status, error) { - client, err := apcupsd.DialContext(ctx, addr.Scheme, addr.Host) +func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsdClient.Status, error) { + client, err := apcupsdClient.DialContext(ctx, addr.Scheme, addr.Host) if err != nil { return nil, err } diff --git a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go index dd3c986afdc79..d2baca29646a9 100644 --- a/plugins/inputs/apcupsd/apcupsd_test.go +++ b/plugins/inputs/apcupsd/apcupsd_test.go @@ -7,9 +7,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestApcupsdDocs(_ *testing.T) { @@ -35,31 +36,33 @@ func listen(ctx context.Context, t *testing.T, out [][]byte) (string, error) { } go func() { - for ctx.Err() == nil { - defer ln.Close() - - conn, err := ln.Accept() - if err != nil { - continue - } - defer conn.Close() - require.NoError(t, conn.SetReadDeadline(time.Now().Add(time.Second))) - - in := make([]byte, 128) - n, err := conn.Read(in) - require.NoError(t, err, "failed to read from connection") - - status := []byte{0, 6, 's', 't', 'a', 't', 'u', 's'} - want, got := status, in[:n] - require.Equal(t, want, got) + defer ln.Close() - // Run against test function and append EOF to end of output bytes - out = append(out, []byte{0, 0}) - - for _, o := range out { - _, err := conn.Write(o) - require.NoError(t, err, "failed to write to connection") - } + for ctx.Err() == nil { + func() { + conn, err := ln.Accept() + if err != nil { + return + } + defer conn.Close() + require.NoError(t, conn.SetReadDeadline(time.Now().Add(time.Second))) + + in := make([]byte, 128) + n, err := conn.Read(in) + require.NoError(t, err, "failed to read from connection") + + status := []byte{0, 6, 's', 't', 'a', 't', 'u', 's'} + want, got := status, in[:n] + require.Equal(t, want, got) + + // Run against test function and append EOF to end of output bytes + out = append(out, []byte{0, 0}) + + for _, o := range out { + _, err := conn.Write(o) + require.NoError(t, err, "failed to write to connection") + } + }() } }() @@ -137,9 +140,9 @@ func TestApcupsdGather(t *testing.T) { "time_on_battery_ns": int64(0), "nominal_input_voltage": float64(230), "nominal_battery_voltage": float64(12), - "nominal_power": int(865), - "firmware": string("857.L3 .I USB FW:L3"), - "battery_date": string("2016-09-06"), + "nominal_power": 865, + "firmware": "857.L3 .I USB FW:L3", + "battery_date": "2016-09-06", }, out: genOutput, }, diff --git a/plugins/inputs/beat/README.md b/plugins/inputs/beat/README.md index a3ef9b1b8c7cd..d819b5ab950b8 100644 --- a/plugins/inputs/beat/README.md +++ b/plugins/inputs/beat/README.md @@ -3,7 +3,7 @@ The Beat plugin will collect metrics from the given Beat instances. It is known to work with Filebeat and Kafkabeat. ### Configuration: ```toml - ## An URL from which to read beat-formatted JSON + ## An URL from which to read Beat-formatted JSON ## Default is "http://127.0.0.1:5066". url = "http://127.0.0.1:5066" diff --git a/plugins/inputs/beat/beat.go b/plugins/inputs/beat/beat.go index 2d57a6deadfca..08b5c3851d6c0 100644 --- a/plugins/inputs/beat/beat.go +++ b/plugins/inputs/beat/beat.go @@ -12,7 +12,6 @@ import ( "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" ) @@ -55,7 +54,7 @@ const description = "Read metrics exposed by Beat" const suffixInfo = "/" const suffixStats = "/stats" -type BeatInfo struct { +type Info struct { Beat string `json:"beat"` Hostname string `json:"hostname"` Name string `json:"name"` @@ -63,7 +62,7 @@ type BeatInfo struct { Version string `json:"version"` } -type BeatStats struct { +type Stats struct { Beat map[string]interface{} `json:"beat"` FileBeat interface{} `json:"filebeat"` Libbeat interface{} `json:"libbeat"` @@ -140,8 +139,8 @@ func (beat *Beat) createHTTPClient() (*http.Client, error) { } // gatherJSONData query the data source and parse the response JSON -func (beat *Beat) gatherJSONData(url string, value interface{}) error { - request, err := http.NewRequest(beat.Method, url, nil) +func (beat *Beat) gatherJSONData(address string, value interface{}) error { + request, err := http.NewRequest(beat.Method, address, nil) if err != nil { return err } @@ -167,8 +166,8 @@ func (beat *Beat) gatherJSONData(url string, value interface{}) error { } func (beat *Beat) Gather(accumulator telegraf.Accumulator) error { - beatStats := &BeatStats{} - beatInfo := &BeatInfo{} + beatStats := &Stats{} + beatInfo := &Info{} infoURL, err := url.Parse(beat.URL + suffixInfo) if err != nil { diff --git a/plugins/inputs/bind/json_stats.go b/plugins/inputs/bind/json_stats.go index 96a5a9b6ec9e6..61307683aac35 100644 --- a/plugins/inputs/bind/json_stats.go +++ b/plugins/inputs/bind/json_stats.go @@ -64,8 +64,8 @@ func addJSONCounter(acc telegraf.Accumulator, commonTags map[string]string, stat } //Add grouped metrics - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } } @@ -144,8 +144,8 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st } //Add grouped metrics - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } } @@ -157,21 +157,29 @@ func (b *Bind) readStatsJSON(addr *url.URL, acc telegraf.Accumulator) error { // Progressively build up full jsonStats struct by parsing the individual HTTP responses for _, suffix := range [...]string{"/server", "/net", "/mem"} { - scrapeURL := addr.String() + suffix + err := func() error { + scrapeURL := addr.String() + suffix - resp, err := b.client.Get(scrapeURL) - if err != nil { - return err - } + resp, err := b.client.Get(scrapeURL) + if err != nil { + return err + } - defer resp.Body.Close() + defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status: %s", scrapeURL, resp.Status) - } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status: %s", scrapeURL, resp.Status) + } - if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil { - return fmt.Errorf("Unable to decode JSON blob: %s", err) + if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil { + return fmt.Errorf("unable to decode JSON blob: %s", err) + } + + return nil + }() + + if err != nil { + return err } } diff --git a/plugins/inputs/bind/xml_stats_v2.go b/plugins/inputs/bind/xml_stats_v2.go index ce98b2ddc90e0..5a0092c5af7cc 100644 --- a/plugins/inputs/bind/xml_stats_v2.go +++ b/plugins/inputs/bind/xml_stats_v2.go @@ -81,8 +81,8 @@ func addXMLv2Counter(acc telegraf.Accumulator, commonTags map[string]string, sta } //Add grouped metrics - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } } @@ -103,7 +103,7 @@ func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error { } if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { - return fmt.Errorf("Unable to decode XML document: %s", err) + return fmt.Errorf("unable to decode XML document: %s", err) } tags := map[string]string{"url": addr.Host} diff --git a/plugins/inputs/bind/xml_stats_v3.go b/plugins/inputs/bind/xml_stats_v3.go index c4fe7e1992674..ef303f4bf052c 100644 --- a/plugins/inputs/bind/xml_stats_v3.go +++ b/plugins/inputs/bind/xml_stats_v3.go @@ -129,8 +129,8 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s } //Add grouped metrics - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } } @@ -142,21 +142,29 @@ func (b *Bind) readStatsXMLv3(addr *url.URL, acc telegraf.Accumulator) error { // Progressively build up full v3Stats struct by parsing the individual HTTP responses for _, suffix := range [...]string{"/server", "/net", "/mem"} { - scrapeURL := addr.String() + suffix + err := func() error { + scrapeURL := addr.String() + suffix - resp, err := b.client.Get(scrapeURL) - if err != nil { - return err - } + resp, err := b.client.Get(scrapeURL) + if err != nil { + return err + } - defer resp.Body.Close() + defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status: %s", scrapeURL, resp.Status) - } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status: %s", scrapeURL, resp.Status) + } - if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { - return fmt.Errorf("Unable to decode XML document: %s", err) + if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { + return fmt.Errorf("unable to decode XML document: %s", err) + } + + return nil + }() + + if err != nil { + return err } } diff --git a/plugins/inputs/burrow/burrow_test.go b/plugins/inputs/burrow/burrow_test.go index de0b56692e11a..d9df7be31d27e 100644 --- a/plugins/inputs/burrow/burrow_test.go +++ b/plugins/inputs/burrow/burrow_test.go @@ -9,8 +9,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) // remap uri to json file, eg: /v3/kafka -> ./testdata/v3_kafka.json @@ -49,7 +50,7 @@ func getHTTPServerBasicAuth() *httptest.Server { w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) username, password, authOK := r.BasicAuth() - if authOK == false { + if !authOK { http.Error(w, "Not authorized", 401) return } From 598990b46956dd281d232750a9f5a288f9262cbc Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 26 Apr 2021 16:12:58 -0500 Subject: [PATCH 393/761] Private linter for Telegraf (#9168) * Custom linter * Telegraf private linter Co-authored-by: Bas <3441183+BattleBas@users.noreply.github.com> --- .golangci.yml | 5 +++ go.mod | 3 +- go.sum | 6 ++-- telegraflinter/README.md | 31 ++++++++++++++++++ telegraflinter/telegraflinter.go | 54 ++++++++++++++++++++++++++++++++ 5 files changed, 96 insertions(+), 3 deletions(-) create mode 100644 telegraflinter/README.md create mode 100644 telegraflinter/telegraflinter.go diff --git a/.golangci.yml b/.golangci.yml index ffef28f505900..23218a5c7ff1b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,7 @@ linters: disable-all: true enable: + # - telegraflinter - bodyclose - dogsled - errcheck @@ -20,6 +21,10 @@ linters: - varcheck linters-settings: + # custom: + # telegraflinter: + # path: telegraflinter.so + # description: "Find Telegraf specific review criteria, more info: https://github.com/influxdata/telegraf/wiki/Review" revive: rules: - name: argument-limit diff --git a/go.mod b/go.mod index 57ee3c129648b..0541f5f8833eb 100644 --- a/go.mod +++ b/go.mod @@ -130,8 +130,9 @@ require ( golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 - golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 + golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 golang.org/x/text v0.3.4 + golang.org/x/tools v0.1.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.29.0 google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70 diff --git a/go.sum b/go.sum index f29d3c36bd0a9..d3e5f8c26b5a6 100644 --- a/go.sum +++ b/go.sum @@ -1360,8 +1360,9 @@ golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= @@ -1438,8 +1439,9 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9 h1:sEvmEcJVKBNUvgCUClbUQeHOAa9U0I2Ce1BooMvVCY4= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/telegraflinter/README.md b/telegraflinter/README.md new file mode 100644 index 0000000000000..b049cf6446bc6 --- /dev/null +++ b/telegraflinter/README.md @@ -0,0 +1,31 @@ +# Private linter for Telegraf + +The purpose of this linter is to enforce the review criteria for the Telegraf project, outlined here: https://github.com/influxdata/telegraf/wiki/Review. This is currently not compatible with the linter running in the CI and can only be ran locally. + +## Running it locally + +To use the Telegraf linter, you need a binary of golangci-lint that was compiled with CGO enabled. Currently no release is provided with it enabled, therefore you will need to clone the source code and compile it yourself. You can run the following commands to achieve this: + +1. `git clone https://github.com/sspaink/golangci-lint.git` +2. `cd golangci-lint` +3. `git checkout tags/v1.39.0 -b 1390` +4. `CGO_ENABLED=true go build -o golangci-lint-cgo ./cmd/golangci-lint` + +You will now have the binary you need to run the Telegraf linter. The Telegraf linter will now need to be compiled as a plugin to get a *.so file. [Currently plugins are only supported on Linux, FreeBSD, and macOS](https://golang.org/pkg/plugin/). From the root of the Telegraf project, you can run the following commands to compile the linter and run it: + +1. `CGO_ENABLED=true go build -buildmode=plugin telegraflinter/telegraflinter.go` +2. In the .golanci-lint file: + * uncomment the `custom` section under the `linters-settings` section + * uncomment `telegraflinter` under the `enable` section +3. `golanci-lint-cgo run` + +*Note:* If you made a change to the telegraf linter and want to run it again, be sure to clear the [cache directory](https://golang.org/pkg/os/#UserCacheDir). On unix systems you can run `rm -rf ~/.cache/golangci-lint` otherwise it will seem like nothing changed. + +## Requirement + +This linter lives in the Telegraf repository and is compiled to become a Go plugin, any packages used in the linter *MUST* match the version in the golanci-lint otherwise there will be issues. For example the import `golang.org/x/tools v0.1.0` needs to match what golangci-lint is using. + +## Useful references + +* https://golangci-lint.run/contributing/new-linters/#how-to-add-a-private-linter-to-golangci-lint +* https://github.com/golangci/example-plugin-linter diff --git a/telegraflinter/telegraflinter.go b/telegraflinter/telegraflinter.go new file mode 100644 index 0000000000000..b295327f8eed5 --- /dev/null +++ b/telegraflinter/telegraflinter.go @@ -0,0 +1,54 @@ +// This must be package main +package main + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" +) + +type analyzerPlugin struct{} + +// This must be implemented +func (*analyzerPlugin) GetAnalyzers() []*analysis.Analyzer { + return []*analysis.Analyzer{ + TelegrafAnalyzer, + } +} + +// This must be defined and named 'AnalyzerPlugin' +var AnalyzerPlugin analyzerPlugin + +var TelegrafAnalyzer = &analysis.Analyzer{ + Name: "telegraflinter", + Doc: "Find Telegraf specific review criteria, more info: https://github.com/influxdata/telegraf/wiki/Review", + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + ast.Inspect(file, func(n ast.Node) bool { + checkLogImport(n, pass) + return true + }) + } + return nil, nil +} + +func checkLogImport(n ast.Node, pass *analysis.Pass) { + if !strings.HasPrefix(pass.Pkg.Path(), "github.com/influxdata/telegraf/plugins/") { + return + } + if importSpec, ok := n.(*ast.ImportSpec); ok { + if importSpec.Path != nil && strings.HasPrefix(importSpec.Path.Value, "\"log\"") { + pass.Report(analysis.Diagnostic{ + Pos: importSpec.Pos(), + End: 0, + Category: "log", + Message: "Don't use log package in plugin, use the Telegraf logger.", + SuggestedFixes: nil, + }) + } + } +} From fe352db59078576f9866671591bf708242ce11ce Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 27 Apr 2021 11:01:28 -0400 Subject: [PATCH 394/761] Remote write compliance updates (#9196) --- go.mod | 2 +- go.sum | 3 ++- .../prometheusremotewrite.go | 5 +++++ .../prometheusremotewrite_test.go | 22 +++++++++++++++++++ 4 files changed, 30 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 0541f5f8833eb..799181e7d7416 100644 --- a/go.mod +++ b/go.mod @@ -99,7 +99,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.7.1 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.13.0 + github.com/prometheus/common v0.15.0 github.com/prometheus/procfs v0.1.3 github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 github.com/riemann/riemann-go-client v0.5.0 diff --git a/go.sum b/go.sum index d3e5f8c26b5a6..c20d3527f1b88 100644 --- a/go.sum +++ b/go.sum @@ -960,8 +960,9 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.13.0 h1:vJlpe9wPgDRM1Z+7Wj3zUUjY1nr6/1jNKyl7llliccg= github.com/prometheus/common v0.13.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go index 87c7f8f798ce0..fb3cea4edd352 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go @@ -274,6 +274,11 @@ func (s *Serializer) createLabels(metric telegraf.Metric) []prompb.Label { continue } + // remove tags with empty values + if tag.Value == "" { + continue + } + labels = append(labels, prompb.Label{Name: name, Value: tag.Value}) } diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go index 03879e21915d1..f9e47eac54db5 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go @@ -613,6 +613,28 @@ cpu_time_user{cpu="cpu3"} 94148 expected: []byte(` rpc_duration_seconds_count 2693 rpc_duration_seconds_sum 17560473 +`), + }, + { + name: "empty label string value", + config: FormatConfig{ + MetricSortOrder: SortMetrics, + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "cpu": "", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` + time_idle 42 `), }, } From 56a2df9ec8b51f2970aaaa7cd9155259a4ad8206 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 27 Apr 2021 11:32:46 -0400 Subject: [PATCH 395/761] upgrade gogo protobuf to v1.3.2 (#9190) --- go.mod | 2 +- go.sum | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 799181e7d7416..361dce3f40ee4 100644 --- a/go.mod +++ b/go.mod @@ -59,7 +59,7 @@ require ( github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v2.1.0+incompatible - github.com/gogo/protobuf v1.3.1 + github.com/gogo/protobuf v1.3.2 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/protobuf v1.5.1 github.com/golang/snappy v0.0.1 diff --git a/go.sum b/go.sum index c20d3527f1b88..8f701025e38ab 100644 --- a/go.sum +++ b/go.sum @@ -488,10 +488,10 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -740,6 +740,7 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= @@ -1439,8 +1440,10 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 16894c6c7539d356799de3cab339e7094d05afba Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 27 Apr 2021 12:03:50 -0500 Subject: [PATCH 396/761] Resolve dependabot error by ignoring package (#9202) Ignoring the package `gopkg.in/djherbis/times.v1` because it is causing dependabot to throw the following error: ``` Dependabot wasn't able to update gopkg.in/djherbis/times.v1 The module path gopkg.in/djherbis/times.v1 found in your go.mod doesn't match the actual path github.com/djherbis/times found in the dependency's go.mod. Updating the module path in your go.mod to github.com/djherbis/times should resolve this issue. ``` --- .github/dependabot.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index f1b219b47ce50..a2e4551e32844 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,3 +4,6 @@ updates: directory: "/" schedule: interval: "weekly" + ignore: + # Dependabot isn't able to update this package do the name not matching the source + - dependency-name: "gopkg.in/djherbis/times.v1" From 06d85ae457f51d17018c5473c4a55433d4262651 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 27 Apr 2021 15:04:30 -0500 Subject: [PATCH 397/761] Ignore soniah/gosnmp depedency (#9205) --- .github/dependabot.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index a2e4551e32844..4b48b7cecc2fa 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -7,3 +7,5 @@ updates: ignore: # Dependabot isn't able to update this package do the name not matching the source - dependency-name: "gopkg.in/djherbis/times.v1" + # Updating this package is blocked by: https://github.com/gosnmp/gosnmp/issues/284 + - dependency-name: "github.com/soniah/gosnmp" From c83577b23d8f0b4971bc3041ff7b7eeff03d6d5f Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 27 Apr 2021 15:46:21 -0500 Subject: [PATCH 398/761] Update Dependabot config: ignore all packages with *.v* (#9206) --- .github/dependabot.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4b48b7cecc2fa..053ba133f0b87 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,7 +5,7 @@ updates: schedule: interval: "weekly" ignore: - # Dependabot isn't able to update this package do the name not matching the source - - dependency-name: "gopkg.in/djherbis/times.v1" + # Dependabot isn't able to update this packages that do not match the source, so anything with a version + - dependency-name: "*.v*" # Updating this package is blocked by: https://github.com/gosnmp/gosnmp/issues/284 - dependency-name: "github.com/soniah/gosnmp" From 5256f916eb1dbe1657a8dc039033ab12901cafb8 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 27 Apr 2021 17:01:45 -0500 Subject: [PATCH 399/761] Make microsoft lowercase (#9209) --- docs/LICENSE_OF_DEPENDENCIES.md | 2 +- etc/telegraf.conf | 2 +- go.mod | 4 ++-- go.sum | 13 ++++++++----- plugins/outputs/application_insights/README.md | 2 +- .../application_insights/application_insights.go | 4 ++-- .../application_insights_test.go | 2 +- .../diagnostic_message_subscriber.go | 2 +- .../mocks/diagnostics_message_subscriber.go | 2 +- .../application_insights/mocks/transmitter.go | 2 +- plugins/outputs/application_insights/transmitter.go | 2 +- 11 files changed, 20 insertions(+), 17 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 0e2d31cb99ec6..d065a2014fd5f 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -14,7 +14,6 @@ following works: - github.com/Azure/go-amqp [MIT License](https://github.com/Azure/go-amqp/blob/master/LICENSE) - github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) - github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE) -- github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE) - github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE) - github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE) - github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE) @@ -129,6 +128,7 @@ following works: - github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md) - github.com/mdlayher/genetlink [MIT License](https://github.com/mdlayher/genetlink/blob/master/LICENSE.md) - github.com/mdlayher/netlink [MIT License](https://github.com/mdlayher/netlink/blob/master/LICENSE.md) +- github.com/microsoft/ApplicationInsights-Go [MIT License](https://github.com/microsoft/ApplicationInsights-Go/blob/master/LICENSE) - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) - github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE) - github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 9e02cc4c38ca0..c870d5bc4d4ba 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -310,7 +310,7 @@ # ## Context Tag Sources add Application Insights context tags to a tag value. # ## # ## For list of allowed context tag keys see: -# ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go +# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go # # [outputs.application_insights.context_tag_sources] # # "ai.cloud.role" = "kubernetes_container_name" # # "ai.cloud.roleInstance" = "kubernetes_pod_name" diff --git a/go.mod b/go.mod index 361dce3f40ee4..8be560c9fa41c 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,6 @@ require ( github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 github.com/BurntSushi/toml v0.3.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee - github.com/Microsoft/ApplicationInsights-Go v0.4.2 github.com/Shopify/sarama v1.27.2 github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect github.com/aerospike/aerospike-client-go v1.27.0 @@ -58,7 +57,7 @@ require ( github.com/goburrow/modbus v0.1.0 github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 - github.com/gofrs/uuid v2.1.0+incompatible + github.com/gofrs/uuid v3.3.0+incompatible github.com/gogo/protobuf v1.3.2 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/protobuf v1.5.1 @@ -87,6 +86,7 @@ require ( github.com/lib/pq v1.3.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b + github.com/microsoft/ApplicationInsights-Go v0.4.4 github.com/miekg/dns v1.1.31 github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect diff --git a/go.sum b/go.sum index 8f701025e38ab..d7260c3190e85 100644 --- a/go.sum +++ b/go.sum @@ -29,6 +29,7 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= @@ -104,8 +105,6 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= -github.com/Microsoft/ApplicationInsights-Go v0.4.2 h1:HIZoGXMiKNwAtMAgCSSX35j9mP+DjGF9ezfBvxMDLLg= -github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg= github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -475,10 +474,9 @@ github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQIas1E9pzA= -github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.3.1 h1:CzMaKrvF6Qa7XtRii064vKBQiyvmY8H8vG1xa1/W1JA= github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= @@ -813,6 +811,8 @@ github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg= github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= +github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo= @@ -880,10 +880,12 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= @@ -1084,6 +1086,7 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= +github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= diff --git a/plugins/outputs/application_insights/README.md b/plugins/outputs/application_insights/README.md index 34017a89f0bab..b23f1affef06f 100644 --- a/plugins/outputs/application_insights/README.md +++ b/plugins/outputs/application_insights/README.md @@ -20,7 +20,7 @@ This plugin writes telegraf metrics to [Azure Application Insights](https://azur ## Context Tag Sources add Application Insights context tags to a tag value. ## ## For list of allowed context tag keys see: - ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go + ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go # [outputs.application_insights.context_tag_sources] # "ai.cloud.role" = "kubernetes_container_name" # "ai.cloud.roleInstance" = "kubernetes_pod_name" diff --git a/plugins/outputs/application_insights/application_insights.go b/plugins/outputs/application_insights/application_insights.go index 950a9fcf7e3ff..54635ee7df6b1 100644 --- a/plugins/outputs/application_insights/application_insights.go +++ b/plugins/outputs/application_insights/application_insights.go @@ -6,10 +6,10 @@ import ( "time" "unsafe" - "github.com/Microsoft/ApplicationInsights-Go/appinsights" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" + "github.com/microsoft/ApplicationInsights-Go/appinsights" ) type TelemetryTransmitter interface { @@ -51,7 +51,7 @@ var ( ## Context Tag Sources add Application Insights context tags to a tag value. ## ## For list of allowed context tag keys see: - ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go + ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go # [outputs.application_insights.context_tag_sources] # "ai.cloud.role" = "kubernetes_container_name" # "ai.cloud.roleInstance" = "kubernetes_pod_name" diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index c5a5c0a3eebed..b685f6c318d05 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -7,7 +7,7 @@ import ( "github.com/influxdata/telegraf/testutil" - "github.com/Microsoft/ApplicationInsights-Go/appinsights" + "github.com/microsoft/ApplicationInsights-Go/appinsights" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" diff --git a/plugins/outputs/application_insights/diagnostic_message_subscriber.go b/plugins/outputs/application_insights/diagnostic_message_subscriber.go index 78993191096dc..a5b11671a1bf1 100644 --- a/plugins/outputs/application_insights/diagnostic_message_subscriber.go +++ b/plugins/outputs/application_insights/diagnostic_message_subscriber.go @@ -1,7 +1,7 @@ package application_insights import ( - "github.com/Microsoft/ApplicationInsights-Go/appinsights" + "github.com/microsoft/ApplicationInsights-Go/appinsights" ) type diagnosticsMessageSubscriber struct { diff --git a/plugins/outputs/application_insights/mocks/diagnostics_message_subscriber.go b/plugins/outputs/application_insights/mocks/diagnostics_message_subscriber.go index 841de1ac87728..d360a29e5618b 100644 --- a/plugins/outputs/application_insights/mocks/diagnostics_message_subscriber.go +++ b/plugins/outputs/application_insights/mocks/diagnostics_message_subscriber.go @@ -1,7 +1,7 @@ // Code generated by mockery v1.0.0. DO NOT EDIT. package mocks -import appinsights "github.com/Microsoft/ApplicationInsights-Go/appinsights" +import appinsights "github.com/microsoft/ApplicationInsights-Go/appinsights" import mock "github.com/stretchr/testify/mock" diff --git a/plugins/outputs/application_insights/mocks/transmitter.go b/plugins/outputs/application_insights/mocks/transmitter.go index 4faa715f78836..6b26f84da2fc2 100644 --- a/plugins/outputs/application_insights/mocks/transmitter.go +++ b/plugins/outputs/application_insights/mocks/transmitter.go @@ -1,7 +1,7 @@ // Code generated by mockery v1.0.0. DO NOT EDIT. package mocks -import appinsights "github.com/Microsoft/ApplicationInsights-Go/appinsights" +import appinsights "github.com/microsoft/ApplicationInsights-Go/appinsights" import mock "github.com/stretchr/testify/mock" diff --git a/plugins/outputs/application_insights/transmitter.go b/plugins/outputs/application_insights/transmitter.go index d66f069783048..a16039ad1a24f 100644 --- a/plugins/outputs/application_insights/transmitter.go +++ b/plugins/outputs/application_insights/transmitter.go @@ -1,7 +1,7 @@ package application_insights import ( - "github.com/Microsoft/ApplicationInsights-Go/appinsights" + "github.com/microsoft/ApplicationInsights-Go/appinsights" ) type Transmitter struct { From 1fabc5f1fe0e49bb8555a27bc8e319e0ecb11bca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 28 Apr 2021 03:41:52 +0200 Subject: [PATCH 400/761] Linter fixes for plugins/inputs/[c]* (#9194) * Linter fixes for plugins/inputs/[c]* * Linter fixes for plugins/inputs/[c]* Co-authored-by: Pawel Zak --- plugins/inputs/cassandra/README.md | 20 +- plugins/inputs/cassandra/cassandra.go | 35 ++- plugins/inputs/ceph/README.md | 2 +- plugins/inputs/ceph/ceph.go | 73 +++--- plugins/inputs/ceph/ceph_test.go | 15 +- plugins/inputs/cgroup/README.md | 13 +- plugins/inputs/cgroup/cgroup_linux.go | 8 +- plugins/inputs/chrony/chrony_test.go | 4 +- .../cisco_telemetry_mdt.go | 45 ++-- .../cisco_telemetry_mdt_test.go | 243 +++++++++--------- .../cisco_telemetry_util.go | 1 + plugins/inputs/clickhouse/clickhouse.go | 10 +- plugins/inputs/clickhouse/clickhouse_test.go | 96 +++---- plugins/inputs/cloudwatch/README.md | 3 +- plugins/inputs/cloudwatch/cloudwatch.go | 99 +++---- plugins/inputs/cloudwatch/cloudwatch_test.go | 95 ++++--- plugins/inputs/couchbase/couchbase.go | 15 +- plugins/inputs/couchbase/couchbase_test.go | 2 +- plugins/inputs/cpu/README.md | 5 +- plugins/inputs/cpu/cpu.go | 11 +- plugins/inputs/cpu/cpu_test.go | 35 ++- plugins/inputs/csgo/README.md | 1 + plugins/inputs/csgo/csgo.go | 11 +- plugins/inputs/csgo/csgo_test.go | 2 +- 24 files changed, 446 insertions(+), 398 deletions(-) diff --git a/plugins/inputs/cassandra/README.md b/plugins/inputs/cassandra/README.md index d89459533f55e..56c36bfe93d21 100644 --- a/plugins/inputs/cassandra/README.md +++ b/plugins/inputs/cassandra/README.md @@ -19,10 +19,26 @@ Cassandra plugin produces one or more measurements for each metric configured, a Given a configuration like: ```toml +# Read Cassandra metrics through Jolokia [[inputs.cassandra]] + ## DEPRECATED: The cassandra plugin has been deprecated. Please use the + ## jolokia2 plugin instead. + ## + ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 + context = "/jolokia/read" - servers = [":8778"] - metrics = ["/java.lang:type=Memory/HeapMemoryUsage"] + ## List of cassandra servers exposing jolokia read service + servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] + ## List of metrics collected on above servers + ## Each metric consists of a jmx path. + ## This will collect all heap memory usage metrics from the jvm and + ## ReadLatency metrics for all keyspaces and tables. + ## "type=Table" in the query works with Cassandra3.0. Older versions might + ## need to use "type=ColumnFamily" + metrics = [ + "/java.lang:type=Memory/HeapMemoryUsage", + "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" + ] ``` The collected metrics will be: diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index 7f9fe98b2a49e..4a52ef2979b7d 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -49,13 +49,11 @@ type jmxMetric interface { addTagsFields(out map[string]interface{}) } -func newJavaMetric(host string, metric string, - acc telegraf.Accumulator) *javaMetric { +func newJavaMetric(acc telegraf.Accumulator, host string, metric string) *javaMetric { return &javaMetric{host: host, metric: metric, acc: acc} } -func newCassandraMetric(host string, metric string, - acc telegraf.Accumulator) *cassandraMetric { +func newCassandraMetric(acc telegraf.Accumulator, host string, metric string) *cassandraMetric { return &cassandraMetric{host: host, metric: metric, acc: acc} } @@ -72,13 +70,15 @@ func addValuesAsFields(values map[string]interface{}, fields map[string]interfac func parseJmxMetricRequest(mbean string) map[string]string { tokens := make(map[string]string) classAndPairs := strings.Split(mbean, ":") - if classAndPairs[0] == "org.apache.cassandra.metrics" { + switch classAndPairs[0] { + case "org.apache.cassandra.metrics": tokens["class"] = "cassandra" - } else if classAndPairs[0] == "java.lang" { + case "java.lang": tokens["class"] = "java" - } else { + default: return tokens } + pairs := strings.Split(classAndPairs[1], ",") for _, pair := range pairs { p := strings.Split(pair, "=") @@ -147,22 +147,21 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) { // maps in the json response if (tokens["type"] == "Table" || tokens["type"] == "ColumnFamily") && (tokens["keyspace"] == "*" || tokens["scope"] == "*") { - if valuesMap, ok := out["value"]; ok { - for k, v := range valuesMap.(map[string]interface{}) { - addCassandraMetric(k, c, v.(map[string]interface{})) - } - } else { + valuesMap, ok := out["value"] + if !ok { c.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", c.metric, out)) return } + for k, v := range valuesMap.(map[string]interface{}) { + addCassandraMetric(k, c, v.(map[string]interface{})) + } } else { - if values, ok := out["value"]; ok { - addCassandraMetric(r.(map[string]interface{})["mbean"].(string), - c, values.(map[string]interface{})) - } else { + values, ok := out["value"] + if !ok { c.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", c.metric, out)) return } + addCassandraMetric(r.(map[string]interface{})["mbean"].(string), c, values.(map[string]interface{})) } } @@ -277,10 +276,10 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error { var m jmxMetric if strings.HasPrefix(metric, "/java.lang:") { - m = newJavaMetric(serverTokens["host"], metric, acc) + m = newJavaMetric(acc, serverTokens["host"], metric) } else if strings.HasPrefix(metric, "/org.apache.cassandra.metrics:") { - m = newCassandraMetric(serverTokens["host"], metric, acc) + m = newCassandraMetric(acc, serverTokens["host"], metric) } else { // unsupported metric type acc.AddError(fmt.Errorf("unsupported Cassandra metric [%s], skipping", metric)) diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index 171b64760654f..dc58adb0ffe6b 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -45,7 +45,7 @@ the cluster. The currently supported commands are: ### Configuration: ```toml -# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. +# Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. [[inputs.ceph]] ## This is the recommended interval to poll. Too frequent and you will lose ## data points due to timeouts during rebalancing and recovery diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index 3445b2d12ed42..7baa28213ac7f 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "log" "os/exec" "path/filepath" "strings" @@ -28,17 +27,19 @@ const ( ) type Ceph struct { - CephBinary string - OsdPrefix string - MonPrefix string - MdsPrefix string - RgwPrefix string - SocketDir string - SocketSuffix string - CephUser string - CephConfig string - GatherAdminSocketStats bool - GatherClusterStats bool + CephBinary string `toml:"ceph_binary"` + OsdPrefix string `toml:"osd_prefix"` + MonPrefix string `toml:"mon_prefix"` + MdsPrefix string `toml:"mds_prefix"` + RgwPrefix string `toml:"rgw_prefix"` + SocketDir string `toml:"socket_dir"` + SocketSuffix string `toml:"socket_suffix"` + CephUser string `toml:"ceph_user"` + CephConfig string `toml:"ceph_config"` + GatherAdminSocketStats bool `toml:"gather_admin_socket_stats"` + GatherClusterStats bool `toml:"gather_cluster_stats"` + + Log telegraf.Logger `toml:"-"` } func (c *Ceph) Description() string { @@ -67,7 +68,14 @@ var sampleConfig = ` ## suffix used to identify socket files socket_suffix = "asok" - ## Ceph user to authenticate as + ## Ceph user to authenticate as, ceph will search for the corresponding keyring + ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the + ## client section of ceph.conf for example: + ## + ## [client.telegraf] + ## keyring = /etc/ceph/client.telegraf.keyring + ## + ## Consult the ceph documentation for more detail on keyring generation. ceph_user = "client.admin" ## Ceph configuration to use to locate the cluster @@ -76,7 +84,8 @@ var sampleConfig = ` ## Whether to gather statistics via the admin socket gather_admin_socket_stats = true - ## Whether to gather statistics via ceph commands + ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config + ## to be specified gather_cluster_stats = false ` @@ -112,14 +121,14 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error { acc.AddError(fmt.Errorf("error reading from socket '%s': %v", s.socket, err)) continue } - data, err := parseDump(dump) + data, err := c.parseDump(dump) if err != nil { acc.AddError(fmt.Errorf("error parsing dump from socket '%s': %v", s.socket, err)) continue } for tag, metrics := range data { acc.AddFields(measurement, - map[string]interface{}(metrics), + metrics, map[string]string{"type": s.sockType, "id": s.sockID, "collection": tag}) } } @@ -138,7 +147,7 @@ func (c *Ceph) gatherClusterStats(acc telegraf.Accumulator) error { // For each job, execute against the cluster, parse and accumulate the data points for _, job := range jobs { - output, err := c.exec(job.command) + output, err := c.execute(job.command) if err != nil { return fmt.Errorf("error executing command: %v", err) } @@ -171,15 +180,17 @@ func init() { var perfDump = func(binary string, socket *socket) (string, error) { cmdArgs := []string{"--admin-daemon", socket.socket} - if socket.sockType == typeOsd { + + switch socket.sockType { + case typeOsd: cmdArgs = append(cmdArgs, "perf", "dump") - } else if socket.sockType == typeMon { + case typeMon: cmdArgs = append(cmdArgs, "perfcounters_dump") - } else if socket.sockType == typeMds { + case typeMds: cmdArgs = append(cmdArgs, "perf", "dump") - } else if socket.sockType == typeRgw { + case typeRgw: cmdArgs = append(cmdArgs, "perf", "dump") - } else { + default: return "", fmt.Errorf("ignoring unknown socket type: %s", socket.sockType) } @@ -268,23 +279,23 @@ type taggedMetricMap map[string]metricMap // Parses a raw JSON string into a taggedMetricMap // Delegates the actual parsing to newTaggedMetricMap(..) -func parseDump(dump string) (taggedMetricMap, error) { +func (c *Ceph) parseDump(dump string) (taggedMetricMap, error) { data := make(map[string]interface{}) err := json.Unmarshal([]byte(dump), &data) if err != nil { return nil, fmt.Errorf("failed to parse json: '%s': %v", dump, err) } - return newTaggedMetricMap(data), nil + return c.newTaggedMetricMap(data), nil } // Builds a TaggedMetricMap out of a generic string map. // The top-level key is used as a tag and all sub-keys are flattened into metrics -func newTaggedMetricMap(data map[string]interface{}) taggedMetricMap { +func (c *Ceph) newTaggedMetricMap(data map[string]interface{}) taggedMetricMap { tmm := make(taggedMetricMap) for tag, datapoints := range data { mm := make(metricMap) - for _, m := range flatten(datapoints) { + for _, m := range c.flatten(datapoints) { mm[m.name()] = m.value } tmm[tag] = mm @@ -296,7 +307,7 @@ func newTaggedMetricMap(data map[string]interface{}) taggedMetricMap { // Nested keys are flattened into ordered slices associated with a metric value. // The key slices are treated as stacks, and are expected to be reversed and concatenated // when passed as metrics to the accumulator. (see (*metric).name()) -func flatten(data interface{}) []*metric { +func (c *Ceph) flatten(data interface{}) []*metric { var metrics []*metric switch val := data.(type) { @@ -305,20 +316,20 @@ func flatten(data interface{}) []*metric { case map[string]interface{}: metrics = make([]*metric, 0, len(val)) for k, v := range val { - for _, m := range flatten(v) { + for _, m := range c.flatten(v) { m.pathStack = append(m.pathStack, k) metrics = append(metrics, m) } } default: - log.Printf("I! [inputs.ceph] ignoring unexpected type '%T' for value %v", val, val) + c.Log.Infof("ignoring unexpected type '%T' for value %v", val, val) } return metrics } -// exec executes the 'ceph' command with the supplied arguments, returning JSON formatted output -func (c *Ceph) exec(command string) (string, error) { +// execute executes the 'ceph' command with the supplied arguments, returning JSON formatted output +func (c *Ceph) execute(command string) (string, error) { cmdArgs := []string{"--conf", c.CephConfig, "--name", c.CephUser, "--format", "json"} cmdArgs = append(cmdArgs, strings.Split(command, " ")...) diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index 5cb120e578b18..a61838bc6a4e0 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -9,8 +9,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const ( @@ -29,28 +30,32 @@ func TestParseSockId(t *testing.T) { } func TestParseMonDump(t *testing.T) { - dump, err := parseDump(monPerfDump) + c := &Ceph{Log: testutil.Logger{}} + dump, err := c.parseDump(monPerfDump) require.NoError(t, err) require.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon) require.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon) } func TestParseOsdDump(t *testing.T) { - dump, err := parseDump(osdPerfDump) + c := &Ceph{Log: testutil.Logger{}} + dump, err := c.parseDump(osdPerfDump) require.NoError(t, err) require.InEpsilon(t, 552132.109360000, dump["filestore"]["commitcycle_interval.sum"], epsilon) require.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"]) } func TestParseMdsDump(t *testing.T) { - dump, err := parseDump(mdsPerfDump) + c := &Ceph{Log: testutil.Logger{}} + dump, err := c.parseDump(mdsPerfDump) require.NoError(t, err) require.InEpsilon(t, 2408386.600934982, dump["mds"]["reply_latency.sum"], epsilon) require.Equal(t, float64(0), dump["throttle-write_buf_throttle"]["wait.avgcount"]) } func TestParseRgwDump(t *testing.T) { - dump, err := parseDump(rgwPerfDump) + c := &Ceph{Log: testutil.Logger{}} + dump, err := c.parseDump(rgwPerfDump) require.NoError(t, err) require.InEpsilon(t, 0.002219876, dump["rgw"]["get_initial_lat.sum"], epsilon) require.Equal(t, float64(0), dump["rgw"]["put_initial_lat.avgcount"]) diff --git a/plugins/inputs/cgroup/README.md b/plugins/inputs/cgroup/README.md index 6982517bc5879..3b755bbd8790d 100644 --- a/plugins/inputs/cgroup/README.md +++ b/plugins/inputs/cgroup/README.md @@ -44,12 +44,19 @@ All measurements have the following tags: ### Configuration: ```toml +# Read specific statistics per cgroup # [[inputs.cgroup]] + ## Directories in which to look for files, globs are supported. + ## Consider restricting paths to the set of cgroups you really + ## want to monitor if you have a large number of cgroups, to avoid + ## any cardinality issues. # paths = [ - # "/sys/fs/cgroup/memory", # root cgroup - # "/sys/fs/cgroup/memory/child1", # container cgroup - # "/sys/fs/cgroup/memory/child2/*", # all children cgroups under child2, but not child2 itself + # "/sys/fs/cgroup/memory", + # "/sys/fs/cgroup/memory/child1", + # "/sys/fs/cgroup/memory/child2/*", # ] + ## cgroup stat fields, as file names, globs are supported. + ## these file names are appended to each path from above. # files = ["memory.*usage*", "memory.limit_in_bytes"] ``` diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index bb38525b7a8f5..6ecfd255a06b7 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -25,7 +25,7 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error { acc.AddError(dir.err) continue } - if err := g.gatherDir(dir.path, acc); err != nil { + if err := g.gatherDir(acc, dir.path); err != nil { acc.AddError(err) } } @@ -33,7 +33,7 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error { return nil } -func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error { +func (g *CGroup) gatherDir(acc telegraf.Accumulator, dir string) error { fields := make(map[string]interface{}) list := make(chan pathInfo) @@ -72,8 +72,8 @@ type pathInfo struct { err error } -func isDir(path string) (bool, error) { - result, err := os.Stat(path) +func isDir(pathToCheck string) (bool, error) { + result, err := os.Stat(pathToCheck) if err != nil { return false, err } diff --git a/plugins/inputs/chrony/chrony_test.go b/plugins/inputs/chrony/chrony_test.go index 60cb69da79933..01f5f458dd738 100644 --- a/plugins/inputs/chrony/chrony_test.go +++ b/plugins/inputs/chrony/chrony_test.go @@ -51,7 +51,7 @@ func TestGather(t *testing.T) { acc.AssertContainsTaggedFields(t, "chrony", fields, tags) } -// fackeExecCommand is a helper function that mock +// fakeExecCommand is a helper function that mock // the exec.Command call (and call the test binary) func fakeExecCommand(command string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcess", "--", command} @@ -103,7 +103,9 @@ Leap status : Not synchronized } else { //nolint:errcheck,revive // test will fail anyway fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" os.Exit(1) } + //nolint:revive // error code is important for this "test" os.Exit(0) } diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index 20c5362b3e692..10f1f764c0515 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -15,15 +15,16 @@ import ( dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" - "github.com/golang/protobuf/proto" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" - internaltls "github.com/influxdata/telegraf/plugins/common/tls" - "github.com/influxdata/telegraf/plugins/inputs" + "github.com/golang/protobuf/proto" //nolint:staticcheck // Cannot switch to "google.golang.org/protobuf/proto", "github.com/golang/protobuf/proto" is used by "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "google.golang.org/grpc" "google.golang.org/grpc/credentials" _ "google.golang.org/grpc/encoding/gzip" // Register GRPC gzip decoder to support compressed telemetry "google.golang.org/grpc/peer" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + internaltls "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" ) const ( @@ -51,15 +52,15 @@ type CiscoTelemetryMDT struct { listener net.Listener // Internal state - aliases map[string]string - dmesFuncs map[string]string - warned map[string]struct{} - extraTags map[string]map[string]struct{} - nxpathMap map[string]map[string]string //per path map - propMap map[string]func(field *telemetry.TelemetryField, value interface{}) interface{} - mutex sync.Mutex - acc telegraf.Accumulator - wg sync.WaitGroup + internalAliases map[string]string + dmesFuncs map[string]string + warned map[string]struct{} + extraTags map[string]map[string]struct{} + nxpathMap map[string]map[string]string //per path map + propMap map[string]func(field *telemetry.TelemetryField, value interface{}) interface{} + mutex sync.Mutex + acc telegraf.Accumulator + wg sync.WaitGroup } type NxPayloadXfromStructure struct { @@ -87,9 +88,9 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { // Invert aliases list c.warned = make(map[string]struct{}) - c.aliases = make(map[string]string, len(c.Aliases)) + c.internalAliases = make(map[string]string, len(c.Aliases)) for alias, encodingPath := range c.Aliases { - c.aliases[encodingPath] = alias + c.internalAliases[encodingPath] = alias } c.initDb() @@ -276,9 +277,9 @@ func (c *CiscoTelemetryMDT) handleTCPClient(conn net.Conn) error { // MdtDialout RPC server method for grpc-dialout transport func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutServer) error { - peer, peerOK := peer.FromContext(stream.Context()) + peerInCtx, peerOK := peer.FromContext(stream.Context()) if peerOK { - c.Log.Debugf("Accepted Cisco MDT GRPC dialout connection from %s", peer.Addr) + c.Log.Debugf("Accepted Cisco MDT GRPC dialout connection from %s", peerInCtx.Addr) } var chunkBuffer bytes.Buffer @@ -314,7 +315,7 @@ func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutS } if peerOK { - c.Log.Debugf("Closed Cisco MDT GRPC dialout connection from %s", peer.Addr) + c.Log.Debugf("Closed Cisco MDT GRPC dialout connection from %s", peerInCtx.Addr) } return nil @@ -375,8 +376,8 @@ func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) { } } - for _, metric := range grouper.Metrics() { - c.acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + c.acc.AddMetric(groupedMetric) } } @@ -540,7 +541,7 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie if value := decodeValue(field); value != nil { // Do alias lookup, to shorten measurement names measurement := encodingPath - if alias, ok := c.aliases[encodingPath]; ok { + if alias, ok := c.internalAliases[encodingPath]; ok { measurement = alias } else { c.mutex.Lock() diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index 69b2fd1159637..745b26dea4b20 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -9,11 +9,12 @@ import ( "testing" dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" - telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" - "github.com/golang/protobuf/proto" - "github.com/influxdata/telegraf/testutil" + telemetryBis "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" + "github.com/golang/protobuf/proto" //nolint:staticcheck // Cannot switch to "google.golang.org/protobuf/proto", "github.com/golang/protobuf/proto" is used by "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "github.com/stretchr/testify/require" "google.golang.org/grpc" + + "github.com/influxdata/telegraf/testutil" ) func TestHandleTelemetryTwoSimple(t *testing.T) { @@ -23,55 +24,55 @@ func TestHandleTelemetryTwoSimple(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/some/path", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str"}, }, { Name: "uint64", - ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 1234}, + ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 1234}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "bool", - ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: true}, + ValueByType: &telemetryBis.TelemetryField_BoolValue{BoolValue: true}, }, }, }, }, }, { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str2"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str2"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "bool", - ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: false}, + ValueByType: &telemetryBis.TelemetryField_BoolValue{BoolValue: false}, }, }, }, @@ -101,26 +102,26 @@ func TestHandleTelemetrySingleNested(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/nested/path", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "nested", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "key", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "level", - ValueByType: &telemetry.TelemetryField_DoubleValue{DoubleValue: 3}, + ValueByType: &telemetryBis.TelemetryField_DoubleValue{DoubleValue: 3}, }, }, }, @@ -130,16 +131,16 @@ func TestHandleTelemetrySingleNested(t *testing.T) { }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "nested", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "value", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, @@ -169,49 +170,49 @@ func TestHandleEmbeddedTags(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/extra", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "list", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry1"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "entry1"}, }, { Name: "test", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, { Name: "list", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry2"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "entry2"}, }, { Name: "test", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, @@ -242,57 +243,57 @@ func TestHandleNXAPI(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "show nxapi", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "TABLE_nxapi", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "ROW_nxapi", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "index", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i1"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i1"}, }, { Name: "value", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "index", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i2"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i2"}, }, { Name: "value", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, @@ -331,45 +332,45 @@ func TestHandleNXAPIXformNXAPI(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "show processes cpu", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "TABLE_process_cpu", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "ROW_process_cpu", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "index", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i1"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i1"}, }, { Name: "value", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, @@ -405,57 +406,57 @@ func TestHandleNXXformMulti(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "sys/lldp", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "fooEntity", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "attributes", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "rn", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "some-rn"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "some-rn"}, }, { Name: "portIdV", - ValueByType: &telemetry.TelemetryField_Uint32Value{Uint32Value: 12}, + ValueByType: &telemetryBis.TelemetryField_Uint32Value{Uint32Value: 12}, }, { Name: "portDesc", - ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 100}, + ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 100}, }, { Name: "test", - ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 281474976710655}, + ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 281474976710655}, }, { Name: "subscriptionId", - ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 2814749767106551}, + ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 2814749767106551}, }, }, }, @@ -490,45 +491,45 @@ func TestHandleNXDME(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "sys/dme", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "fooEntity", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "attributes", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "rn", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "some-rn"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "some-rn"}, }, { Name: "value", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, @@ -584,30 +585,30 @@ func TestTCPDialoutOverflow(t *testing.T) { require.Contains(t, acc.Errors, errors.New("dialout packet too long: 1000000000")) } -func mockTelemetryMessage() *telemetry.Telemetry { - return &telemetry.Telemetry{ +func mockTelemetryMessage() *telemetryBis.Telemetry { + return &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/some/path", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "value", - ValueByType: &telemetry.TelemetryField_Sint64Value{Sint64Value: -1}, + ValueByType: &telemetryBis.TelemetryField_Sint64Value{Sint64Value: -1}, }, }, }, diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go index e9fb4efe04103..8f6ea93eab4b3 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go @@ -139,6 +139,7 @@ func (c *CiscoTelemetryMDT) nxosValueXform(field *telemetry.TelemetryField, valu return nil //Xformation supported is only from String case "float": + //nolint:revive // switch needed for `.(type)` switch val := field.ValueByType.(type) { case *telemetry.TelemetryField_StringValue: if valf, err := strconv.ParseFloat(val.StringValue, 64); err == nil { diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index 4e87431c0b032..3a46390b4f7dc 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -24,7 +24,7 @@ var defaultTimeout = 5 * time.Second var sampleConfig = ` ## Username for authorization on ClickHouse server - ## example: username = "default"" + ## example: username = "default" username = "default" ## Password for authorization on ClickHouse server @@ -560,11 +560,11 @@ func (e *clickhouseError) Error() string { return fmt.Sprintf("received error code %d: %s", e.StatusCode, e.body) } -func (ch *ClickHouse) execQuery(url *url.URL, query string, i interface{}) error { - q := url.Query() +func (ch *ClickHouse) execQuery(address *url.URL, query string, i interface{}) error { + q := address.Query() q.Set("query", query+" FORMAT JSON") - url.RawQuery = q.Encode() - req, _ := http.NewRequest("GET", url.String(), nil) + address.RawQuery = q.Encode() + req, _ := http.NewRequest("GET", address.String(), nil) if ch.Username != "" { req.Header.Add("X-ClickHouse-User", ch.Username) } diff --git a/plugins/inputs/clickhouse/clickhouse_test.go b/plugins/inputs/clickhouse/clickhouse_test.go index d6dcf44221252..6e308b509a9f0 100644 --- a/plugins/inputs/clickhouse/clickhouse_test.go +++ b/plugins/inputs/clickhouse/clickhouse_test.go @@ -8,28 +8,28 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func TestClusterIncludeExcludeFilter(t *testing.T) { ch := ClickHouse{} - if assert.Equal(t, "", ch.clusterIncludeExcludeFilter()) { - ch.ClusterExclude = []string{"test_cluster"} - assert.Equal(t, "WHERE cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) + require.Equal(t, "", ch.clusterIncludeExcludeFilter()) + ch.ClusterExclude = []string{"test_cluster"} + require.Equal(t, "WHERE cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) - ch.ClusterExclude = []string{"test_cluster"} - ch.ClusterInclude = []string{"cluster"} - assert.Equal(t, "WHERE cluster IN ('cluster') OR cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) + ch.ClusterExclude = []string{"test_cluster"} + ch.ClusterInclude = []string{"cluster"} + require.Equal(t, "WHERE cluster IN ('cluster') OR cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) - ch.ClusterExclude = []string{} - ch.ClusterInclude = []string{"cluster1", "cluster2"} - assert.Equal(t, "WHERE cluster IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) + ch.ClusterExclude = []string{} + ch.ClusterInclude = []string{"cluster1", "cluster2"} + require.Equal(t, "WHERE cluster IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) - ch.ClusterExclude = []string{"cluster1", "cluster2"} - ch.ClusterInclude = []string{} - assert.Equal(t, "WHERE cluster NOT IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) - } + ch.ClusterExclude = []string{"cluster1", "cluster2"} + ch.ClusterInclude = []string{} + require.Equal(t, "WHERE cluster NOT IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) } func TestChInt64(t *testing.T) { @@ -42,9 +42,9 @@ func TestChInt64(t *testing.T) { } for src, expected := range assets { var v chUInt64 - if err := v.UnmarshalJSON([]byte(src)); assert.NoError(t, err) { - assert.Equal(t, expected, uint64(v)) - } + err := v.UnmarshalJSON([]byte(src)) + require.NoError(t, err) + require.Equal(t, expected, uint64(v)) } } @@ -74,7 +74,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "system.events"): err := enc.Encode(result{ Data: []struct { @@ -91,7 +91,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "system.metrics"): err := enc.Encode(result{ Data: []struct { @@ -108,7 +108,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "system.asynchronous_metrics"): err := enc.Encode(result{ Data: []struct { @@ -125,7 +125,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "zk_exists"): err := enc.Encode(result{ Data: []struct { @@ -136,7 +136,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "zk_root_nodes"): err := enc.Encode(result{ Data: []struct { @@ -147,7 +147,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "replication_queue_exists"): err := enc.Encode(result{ Data: []struct { @@ -158,7 +158,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "replication_too_many_tries_replicas"): err := enc.Encode(result{ Data: []struct { @@ -171,7 +171,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "system.detached_parts"): err := enc.Encode(result{ Data: []struct { @@ -182,7 +182,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "system.dictionaries"): err := enc.Encode(result{ Data: []struct { @@ -197,7 +197,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "system.mutations"): err := enc.Encode(result{ Data: []struct { @@ -212,7 +212,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "system.disks"): err := enc.Encode(result{ Data: []struct { @@ -229,7 +229,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "system.processes"): err := enc.Encode(result{ Data: []struct { @@ -258,7 +258,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "text_log_exists"): err := enc.Encode(result{ Data: []struct { @@ -269,7 +269,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "system.text_log"): err := enc.Encode(result{ Data: []struct { @@ -298,7 +298,7 @@ func TestGather(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) } })) ch = &ClickHouse{ @@ -309,7 +309,7 @@ func TestGather(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - assert.NoError(t, ch.Gather(acc)) + require.NoError(t, ch.Gather(acc)) acc.AssertContainsTaggedFields(t, "clickhouse_tables", map[string]interface{}{ @@ -451,7 +451,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "replication_queue_exists"): err := enc.Encode(result{ Data: []struct { @@ -462,7 +462,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) case strings.Contains(query, "text_log_exists"): err := enc.Encode(result{ Data: []struct { @@ -473,7 +473,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) } })) ch = &ClickHouse{ @@ -485,7 +485,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - assert.NoError(t, ch.Gather(acc)) + require.NoError(t, ch.Gather(acc)) acc.AssertDoesNotContainMeasurement(t, "clickhouse_zookeeper") acc.AssertDoesNotContainMeasurement(t, "clickhouse_replication_queue") @@ -503,7 +503,7 @@ func TestWrongJSONMarshalling(t *testing.T) { err := enc.Encode(result{ Data: []struct{}{}, }) - assert.NoError(t, err) + require.NoError(t, err) })) ch = &ClickHouse{ Servers: []string{ @@ -514,9 +514,9 @@ func TestWrongJSONMarshalling(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - assert.NoError(t, ch.Gather(acc)) + require.NoError(t, ch.Gather(acc)) - assert.Equal(t, 0, len(acc.Metrics)) + require.Equal(t, 0, len(acc.Metrics)) allMeasurements := []string{ "clickhouse_events", "clickhouse_metrics", @@ -531,7 +531,7 @@ func TestWrongJSONMarshalling(t *testing.T) { "clickhouse_processes", "clickhouse_text_log", } - assert.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) + require.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) } func TestOfflineServer(t *testing.T) { @@ -547,9 +547,9 @@ func TestOfflineServer(t *testing.T) { }, } ) - assert.NoError(t, ch.Gather(acc)) + require.NoError(t, ch.Gather(acc)) - assert.Equal(t, 0, len(acc.Metrics)) + require.Equal(t, 0, len(acc.Metrics)) allMeasurements := []string{ "clickhouse_events", "clickhouse_metrics", @@ -564,7 +564,7 @@ func TestOfflineServer(t *testing.T) { "clickhouse_processes", "clickhouse_text_log", } - assert.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) + require.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) } func TestAutoDiscovery(t *testing.T) { @@ -574,8 +574,8 @@ func TestAutoDiscovery(t *testing.T) { Data interface{} `json:"data"` } enc := json.NewEncoder(w) - switch query := r.URL.Query().Get("query"); { - case strings.Contains(query, "system.clusters"): + query := r.URL.Query().Get("query") + if strings.Contains(query, "system.clusters") { err := enc.Encode(result{ Data: []struct { Cluster string `json:"test"` @@ -589,7 +589,7 @@ func TestAutoDiscovery(t *testing.T) { }, }, }) - assert.NoError(t, err) + require.NoError(t, err) } })) ch = &ClickHouse{ @@ -602,5 +602,5 @@ func TestAutoDiscovery(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - assert.NoError(t, ch.Gather(acc)) + require.NoError(t, ch.Gather(acc)) } diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index d7c803c8c83b9..a0c175e1ed565 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -16,6 +16,7 @@ API endpoint. In the following order the plugin will attempt to authenticate. ### Configuration: ```toml +# Pull Metric Statistics from Amazon CloudWatch [[inputs.cloudwatch]] ## Amazon Region region = "us-east-1" @@ -101,7 +102,7 @@ API endpoint. In the following order the plugin will attempt to authenticate. # # ## Dimension filters for Metric. All dimensions defined for the metric names # ## must be specified in order to retrieve the metric statistics. - # ## 'value' has wildcard / 'glob' matching support such as `p-*`. + # ## 'value' has wildcard / 'glob' matching support such as 'p-*'. # [[inputs.cloudwatch.metrics.dimensions]] # name = "LoadBalancerName" # value = "p-example" diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 34088110ea398..c4df8f9a77961 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -10,15 +10,16 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" + cwClient "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/limiter" - "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/plugins/common/proxy" + internalMetric "github.com/influxdata/telegraf/metric" + internalProxy "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -36,7 +37,7 @@ type CloudWatch struct { StatisticInclude []string `toml:"statistic_include"` Timeout config.Duration `toml:"timeout"` - proxy.HTTPProxy + internalProxy.HTTPProxy Period config.Duration `toml:"period"` Delay config.Duration `toml:"delay"` @@ -76,12 +77,12 @@ type metricCache struct { ttl time.Duration built time.Time metrics []filteredMetric - queries []*cloudwatch.MetricDataQuery + queries []*cwClient.MetricDataQuery } type cloudwatchClient interface { - ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) - GetMetricData(*cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) + ListMetrics(*cwClient.ListMetricsInput) (*cwClient.ListMetricsOutput, error) + GetMetricData(*cwClient.GetMetricDataInput) (*cwClient.GetMetricDataOutput, error) } // SampleConfig returns the default configuration of the Cloudwatch input plugin. @@ -171,7 +172,7 @@ func (c *CloudWatch) SampleConfig() string { # # ## Dimension filters for Metric. All dimensions defined for the metric names # ## must be specified in order to retrieve the metric statistics. - # ## 'value' has wildcard / 'glob' matching support. such as 'p-*'. + # ## 'value' has wildcard / 'glob' matching support such as 'p-*'. # [[inputs.cloudwatch.metrics.dimensions]] # name = "LoadBalancerName" # value = "p-example" @@ -223,11 +224,11 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { wg := sync.WaitGroup{} rLock := sync.Mutex{} - results := []*cloudwatch.MetricDataResult{} + results := []*cwClient.MetricDataResult{} // 500 is the maximum number of metric data queries a `GetMetricData` request can contain. batchSize := 500 - var batches [][]*cloudwatch.MetricDataQuery + var batches [][]*cwClient.MetricDataQuery for batchSize < len(queries) { queries, batches = queries[batchSize:], append(batches, queries[0:batchSize:batchSize]) @@ -237,7 +238,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { for i := range batches { wg.Add(1) <-lmtr.C - go func(inm []*cloudwatch.MetricDataQuery) { + go func(inm []*cwClient.MetricDataQuery) { defer wg.Done() result, err := c.gatherMetrics(c.getDataInputs(inm)) if err != nil { @@ -294,7 +295,7 @@ func (c *CloudWatch) initializeCloudWatch() error { } loglevel := aws.LogOff - c.client = cloudwatch.New(configProvider, cfg.WithLogLevel(loglevel)) + c.client = cwClient.New(configProvider, cfg.WithLogLevel(loglevel)) // Initialize regex matchers for each Dimension value. for _, m := range c.Metrics { @@ -312,7 +313,7 @@ func (c *CloudWatch) initializeCloudWatch() error { } type filteredMetric struct { - metrics []*cloudwatch.Metric + metrics []*cwClient.Metric statFilter filter.Filter } @@ -327,17 +328,17 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { // check for provided metric filter if c.Metrics != nil { for _, m := range c.Metrics { - metrics := []*cloudwatch.Metric{} + metrics := []*cwClient.Metric{} if !hasWildcard(m.Dimensions) { - dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions)) + dimensions := make([]*cwClient.Dimension, len(m.Dimensions)) for k, d := range m.Dimensions { - dimensions[k] = &cloudwatch.Dimension{ + dimensions[k] = &cwClient.Dimension{ Name: aws.String(d.Name), Value: aws.String(d.Value), } } for _, name := range m.MetricNames { - metrics = append(metrics, &cloudwatch.Metric{ + metrics = append(metrics, &cwClient.Metric{ Namespace: aws.String(c.Namespace), MetricName: aws.String(name), Dimensions: dimensions, @@ -351,7 +352,7 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { for _, name := range m.MetricNames { for _, metric := range allMetrics { if isSelected(name, metric, m.Dimensions) { - metrics = append(metrics, &cloudwatch.Metric{ + metrics = append(metrics, &cwClient.Metric{ Namespace: aws.String(c.Namespace), MetricName: aws.String(name), Dimensions: metric.Dimensions, @@ -399,11 +400,11 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { } // fetchNamespaceMetrics retrieves available metrics for a given CloudWatch namespace. -func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) { - metrics := []*cloudwatch.Metric{} +func (c *CloudWatch) fetchNamespaceMetrics() ([]*cwClient.Metric, error) { + metrics := []*cwClient.Metric{} var token *string - var params *cloudwatch.ListMetricsInput + var params *cwClient.ListMetricsInput var recentlyActive *string switch c.RecentlyActive { @@ -412,9 +413,9 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) { default: recentlyActive = nil } - params = &cloudwatch.ListMetricsInput{ + params = &cwClient.ListMetricsInput{ Namespace: aws.String(c.Namespace), - Dimensions: []*cloudwatch.DimensionFilter{}, + Dimensions: []*cwClient.DimensionFilter{}, NextToken: token, MetricName: nil, RecentlyActive: recentlyActive, @@ -451,75 +452,75 @@ func (c *CloudWatch) updateWindow(relativeTo time.Time) { } // getDataQueries gets all of the possible queries so we can maximize the request payload. -func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) []*cloudwatch.MetricDataQuery { +func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) []*cwClient.MetricDataQuery { if c.metricCache != nil && c.metricCache.queries != nil && c.metricCache.isValid() { return c.metricCache.queries } c.queryDimensions = map[string]*map[string]string{} - dataQueries := []*cloudwatch.MetricDataQuery{} + dataQueries := []*cwClient.MetricDataQuery{} for i, filtered := range filteredMetrics { for j, metric := range filtered.metrics { id := strconv.Itoa(j) + "_" + strconv.Itoa(i) dimension := ctod(metric.Dimensions) if filtered.statFilter.Match("average") { c.queryDimensions["average_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries = append(dataQueries, &cwClient.MetricDataQuery{ Id: aws.String("average_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_average")), - MetricStat: &cloudwatch.MetricStat{ + MetricStat: &cwClient.MetricStat{ Metric: metric, Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticAverage), + Stat: aws.String(cwClient.StatisticAverage), }, }) } if filtered.statFilter.Match("maximum") { c.queryDimensions["maximum_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries = append(dataQueries, &cwClient.MetricDataQuery{ Id: aws.String("maximum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_maximum")), - MetricStat: &cloudwatch.MetricStat{ + MetricStat: &cwClient.MetricStat{ Metric: metric, Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticMaximum), + Stat: aws.String(cwClient.StatisticMaximum), }, }) } if filtered.statFilter.Match("minimum") { c.queryDimensions["minimum_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries = append(dataQueries, &cwClient.MetricDataQuery{ Id: aws.String("minimum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_minimum")), - MetricStat: &cloudwatch.MetricStat{ + MetricStat: &cwClient.MetricStat{ Metric: metric, Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticMinimum), + Stat: aws.String(cwClient.StatisticMinimum), }, }) } if filtered.statFilter.Match("sum") { c.queryDimensions["sum_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries = append(dataQueries, &cwClient.MetricDataQuery{ Id: aws.String("sum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_sum")), - MetricStat: &cloudwatch.MetricStat{ + MetricStat: &cwClient.MetricStat{ Metric: metric, Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticSum), + Stat: aws.String(cwClient.StatisticSum), }, }) } if filtered.statFilter.Match("sample_count") { c.queryDimensions["sample_count_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries = append(dataQueries, &cwClient.MetricDataQuery{ Id: aws.String("sample_count_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_sample_count")), - MetricStat: &cloudwatch.MetricStat{ + MetricStat: &cwClient.MetricStat{ Metric: metric, Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticSampleCount), + Stat: aws.String(cwClient.StatisticSampleCount), }, }) } @@ -546,9 +547,9 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) []*cloudwa // gatherMetrics gets metric data from Cloudwatch. func (c *CloudWatch) gatherMetrics( - params *cloudwatch.GetMetricDataInput, -) ([]*cloudwatch.MetricDataResult, error) { - results := []*cloudwatch.MetricDataResult{} + params *cwClient.GetMetricDataInput, +) ([]*cwClient.MetricDataResult, error) { + results := []*cwClient.MetricDataResult{} for { resp, err := c.client.GetMetricData(params) @@ -568,10 +569,10 @@ func (c *CloudWatch) gatherMetrics( func (c *CloudWatch) aggregateMetrics( acc telegraf.Accumulator, - metricDataResults []*cloudwatch.MetricDataResult, + metricDataResults []*cwClient.MetricDataResult, ) error { var ( - grouper = metric.NewSeriesGrouper() + grouper = internalMetric.NewSeriesGrouper() namespace = sanitizeMeasurement(c.Namespace) ) @@ -626,7 +627,7 @@ func snakeCase(s string) string { } // ctod converts cloudwatch dimensions to regular dimensions. -func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string { +func ctod(cDimensions []*cwClient.Dimension) *map[string]string { dimensions := map[string]string{} for i := range cDimensions { dimensions[snakeCase(*cDimensions[i].Name)] = *cDimensions[i].Value @@ -634,8 +635,8 @@ func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string { return &dimensions } -func (c *CloudWatch) getDataInputs(dataQueries []*cloudwatch.MetricDataQuery) *cloudwatch.GetMetricDataInput { - return &cloudwatch.GetMetricDataInput{ +func (c *CloudWatch) getDataInputs(dataQueries []*cwClient.MetricDataQuery) *cwClient.GetMetricDataInput { + return &cwClient.GetMetricDataInput{ StartTime: aws.Time(c.windowStart), EndTime: aws.Time(c.windowEnd), MetricDataQueries: dataQueries, @@ -656,7 +657,7 @@ func hasWildcard(dimensions []*Dimension) bool { return false } -func isSelected(name string, metric *cloudwatch.Metric, dimensions []*Dimension) bool { +func isSelected(name string, metric *cwClient.Metric, dimensions []*Dimension) bool { if name != *metric.MetricName { return false } diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 158f29a1bc26a..ccd27ec22fd1d 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -6,8 +6,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/stretchr/testify/assert" + cwClient "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" @@ -18,13 +17,13 @@ import ( type mockGatherCloudWatchClient struct{} -func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { - return &cloudwatch.ListMetricsOutput{ - Metrics: []*cloudwatch.Metric{ +func (m *mockGatherCloudWatchClient) ListMetrics(params *cwClient.ListMetricsInput) (*cwClient.ListMetricsOutput, error) { + return &cwClient.ListMetricsOutput{ + Metrics: []*cwClient.Metric{ { Namespace: params.Namespace, MetricName: aws.String("Latency"), - Dimensions: []*cloudwatch.Dimension{ + Dimensions: []*cwClient.Dimension{ { Name: aws.String("LoadBalancerName"), Value: aws.String("p-example"), @@ -35,9 +34,9 @@ func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsI }, nil } -func (m *mockGatherCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { - return &cloudwatch.GetMetricDataOutput{ - MetricDataResults: []*cloudwatch.MetricDataResult{ +func (m *mockGatherCloudWatchClient) GetMetricData(params *cwClient.GetMetricDataInput) (*cwClient.GetMetricDataOutput, error) { + return &cwClient.GetMetricDataOutput{ + MetricDataResults: []*cwClient.MetricDataResult{ { Id: aws.String("minimum_0_0"), Label: aws.String("latency_minimum"), @@ -98,8 +97,8 @@ func (m *mockGatherCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricD } func TestSnakeCase(t *testing.T) { - assert.Equal(t, "cluster_name", snakeCase("Cluster Name")) - assert.Equal(t, "broker_id", snakeCase("Broker ID")) + require.Equal(t, "cluster_name", snakeCase("Cluster Name")) + require.Equal(t, "broker_id", snakeCase("Broker ID")) } func TestGather(t *testing.T) { @@ -116,7 +115,7 @@ func TestGather(t *testing.T) { var acc testutil.Accumulator c.client = &mockGatherCloudWatchClient{} - assert.NoError(t, acc.GatherError(c.Gather)) + require.NoError(t, acc.GatherError(c.Gather)) fields := map[string]interface{}{} fields["latency_minimum"] = 0.1 @@ -129,14 +128,14 @@ func TestGather(t *testing.T) { tags["region"] = "us-east-1" tags["load_balancer_name"] = "p-example" - assert.True(t, acc.HasMeasurement("cloudwatch_aws_elb")) + require.True(t, acc.HasMeasurement("cloudwatch_aws_elb")) acc.AssertContainsTaggedFields(t, "cloudwatch_aws_elb", fields, tags) } type mockSelectMetricsCloudWatchClient struct{} -func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { - metrics := []*cloudwatch.Metric{} +func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ *cwClient.ListMetricsInput) (*cwClient.ListMetricsOutput, error) { + metrics := []*cwClient.Metric{} // 4 metrics are available metricNames := []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"} // for 3 ELBs @@ -146,10 +145,10 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ *cloudwatch.ListMetric for _, m := range metricNames { for _, lb := range loadBalancers { // For each metric/ELB pair, we get an aggregate value across all AZs. - metrics = append(metrics, &cloudwatch.Metric{ + metrics = append(metrics, &cwClient.Metric{ Namespace: aws.String("AWS/ELB"), MetricName: aws.String(m), - Dimensions: []*cloudwatch.Dimension{ + Dimensions: []*cwClient.Dimension{ { Name: aws.String("LoadBalancerName"), Value: aws.String(lb), @@ -158,10 +157,10 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ *cloudwatch.ListMetric }) for _, az := range availabilityZones { // We get a metric for each metric/ELB/AZ triplet. - metrics = append(metrics, &cloudwatch.Metric{ + metrics = append(metrics, &cwClient.Metric{ Namespace: aws.String("AWS/ELB"), MetricName: aws.String(m), - Dimensions: []*cloudwatch.Dimension{ + Dimensions: []*cwClient.Dimension{ { Name: aws.String("LoadBalancerName"), Value: aws.String(lb), @@ -176,13 +175,13 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ *cloudwatch.ListMetric } } - result := &cloudwatch.ListMetricsOutput{ + result := &cwClient.ListMetricsOutput{ Metrics: metrics, } return result, nil } -func (m *mockSelectMetricsCloudWatchClient) GetMetricData(_ *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { +func (m *mockSelectMetricsCloudWatchClient) GetMetricData(_ *cwClient.GetMetricDataInput) (*cwClient.GetMetricDataOutput, error) { return nil, nil } @@ -212,24 +211,24 @@ func TestSelectMetrics(t *testing.T) { }, } err := c.initializeCloudWatch() - assert.NoError(t, err) + require.NoError(t, err) c.client = &mockSelectMetricsCloudWatchClient{} filtered, err := getFilteredMetrics(c) // We've asked for 2 (out of 4) metrics, over all 3 load balancers in all 2 // AZs. We should get 12 metrics. - assert.Equal(t, 12, len(filtered[0].metrics)) - assert.NoError(t, err) + require.Equal(t, 12, len(filtered[0].metrics)) + require.NoError(t, err) } func TestGenerateStatisticsInputParams(t *testing.T) { - d := &cloudwatch.Dimension{ + d := &cwClient.Dimension{ Name: aws.String("LoadBalancerName"), Value: aws.String("p-example"), } - m := &cloudwatch.Metric{ + m := &cwClient.Metric{ MetricName: aws.String("Latency"), - Dimensions: []*cloudwatch.Dimension{d}, + Dimensions: []*cwClient.Dimension{d}, } duration, _ := time.ParseDuration("1m") @@ -248,25 +247,25 @@ func TestGenerateStatisticsInputParams(t *testing.T) { c.updateWindow(now) statFilter, _ := filter.NewIncludeExcludeFilter(nil, nil) - queries := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) + queries := c.getDataQueries([]filteredMetric{{metrics: []*cwClient.Metric{m}, statFilter: statFilter}}) params := c.getDataInputs(queries) - assert.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) - assert.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) + require.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) + require.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) require.Len(t, params.MetricDataQueries, 5) - assert.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) - assert.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) + require.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) + require.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) } func TestGenerateStatisticsInputParamsFiltered(t *testing.T) { - d := &cloudwatch.Dimension{ + d := &cwClient.Dimension{ Name: aws.String("LoadBalancerName"), Value: aws.String("p-example"), } - m := &cloudwatch.Metric{ + m := &cwClient.Metric{ MetricName: aws.String("Latency"), - Dimensions: []*cloudwatch.Dimension{d}, + Dimensions: []*cwClient.Dimension{d}, } duration, _ := time.ParseDuration("1m") @@ -285,14 +284,14 @@ func TestGenerateStatisticsInputParamsFiltered(t *testing.T) { c.updateWindow(now) statFilter, _ := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil) - queries := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) + queries := c.getDataQueries([]filteredMetric{{metrics: []*cwClient.Metric{m}, statFilter: statFilter}}) params := c.getDataInputs(queries) - assert.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) - assert.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) + require.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) + require.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) require.Len(t, params.MetricDataQueries, 2) - assert.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) - assert.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) + require.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) + require.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) } func TestMetricsCacheTimeout(t *testing.T) { @@ -302,9 +301,9 @@ func TestMetricsCacheTimeout(t *testing.T) { ttl: time.Minute, } - assert.True(t, cache.isValid()) + require.True(t, cache.isValid()) cache.built = time.Now().Add(-time.Minute) - assert.False(t, cache.isValid()) + require.False(t, cache.isValid()) } func TestUpdateWindow(t *testing.T) { @@ -319,23 +318,23 @@ func TestUpdateWindow(t *testing.T) { now := time.Now() - assert.True(t, c.windowEnd.IsZero()) - assert.True(t, c.windowStart.IsZero()) + require.True(t, c.windowEnd.IsZero()) + require.True(t, c.windowStart.IsZero()) c.updateWindow(now) newStartTime := c.windowEnd // initial window just has a single period - assert.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) - assert.EqualValues(t, c.windowStart, now.Add(-time.Duration(c.Delay)).Add(-time.Duration(c.Period))) + require.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) + require.EqualValues(t, c.windowStart, now.Add(-time.Duration(c.Delay)).Add(-time.Duration(c.Period))) now = time.Now() c.updateWindow(now) // subsequent window uses previous end time as start time - assert.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) - assert.EqualValues(t, c.windowStart, newStartTime) + require.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) + require.EqualValues(t, c.windowStart, newStartTime) } func TestProxyFunction(t *testing.T) { diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index ef66cb8d1d053..e89393ee82316 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -7,7 +7,8 @@ import ( "sync" "time" - couchbase "github.com/couchbase/go-couchbase" + couchbaseClient "github.com/couchbase/go-couchbase" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" @@ -33,7 +34,7 @@ var sampleConfig = ` ## If no port is specified, 8091 is used. servers = ["http://localhost:8091"] - ## Filter fields to include only here. + ## Filter bucket fields to include only here. # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] ` @@ -45,14 +46,14 @@ func (cb *Couchbase) SampleConfig() string { } func (cb *Couchbase) Description() string { - return "Read metrics from one or many couchbase clusters" + return "Read per-node and per-bucket metrics from Couchbase" } // Reads stats from all configured clusters. Accumulates stats. // Returns one of the errors encountered while gathering stats (if any). func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { if len(cb.Servers) == 0 { - return cb.gatherServer("http://localhost:8091/", acc, nil) + return cb.gatherServer(acc, "http://localhost:8091/", nil) } var wg sync.WaitGroup @@ -60,7 +61,7 @@ func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(serv string) { defer wg.Done() - acc.AddError(cb.gatherServer(serv, acc, nil)) + acc.AddError(cb.gatherServer(acc, serv, nil)) }(serv) } @@ -69,9 +70,9 @@ func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { return nil } -func (cb *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error { +func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string, pool *couchbaseClient.Pool) error { if pool == nil { - client, err := couchbase.Connect(addr) + client, err := couchbaseClient.Connect(addr) if err != nil { return err } diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index 25728544c6a97..3b927e8c4f8e9 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -43,7 +43,7 @@ func TestGatherServer(t *testing.T) { require.NoError(t, err) var acc testutil.Accumulator - err = cb.gatherServer(fakeServer.URL, &acc, &pool) + err = cb.gatherServer(&acc, fakeServer.URL, &pool) require.NoError(t, err) acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23181365248.0, "memory_total": 64424656896.0}, diff --git a/plugins/inputs/cpu/README.md b/plugins/inputs/cpu/README.md index bc86ae898021c..8e2ef66f92451 100644 --- a/plugins/inputs/cpu/README.md +++ b/plugins/inputs/cpu/README.md @@ -4,14 +4,15 @@ The `cpu` plugin gather metrics on the system CPUs. #### Configuration ```toml +# Read metrics about cpu usage [[inputs.cpu]] ## Whether to report per-cpu stats or not percpu = true ## Whether to report total system cpu stats or not totalcpu = true - ## If true, collect raw CPU time metrics. + ## If true, collect raw CPU time metrics collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. + ## If true, compute and report the sum of all non-idle CPU states report_active = false ``` diff --git a/plugins/inputs/cpu/cpu.go b/plugins/inputs/cpu/cpu.go index 3fcdb3db4136e..9e795c82a589d 100644 --- a/plugins/inputs/cpu/cpu.go +++ b/plugins/inputs/cpu/cpu.go @@ -4,15 +4,16 @@ import ( "fmt" "time" + cpuUtil "github.com/shirou/gopsutil/cpu" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/system" - "github.com/shirou/gopsutil/cpu" ) type CPUStats struct { ps system.PS - lastStats map[string]cpu.TimesStat + lastStats map[string]cpuUtil.TimesStat PerCPU bool `toml:"percpu"` TotalCPU bool `toml:"totalcpu"` @@ -123,7 +124,7 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error { acc.AddGauge("cpu", fieldsG, tags, now) } - c.lastStats = make(map[string]cpu.TimesStat) + c.lastStats = make(map[string]cpuUtil.TimesStat) for _, cts := range times { c.lastStats[cts.CPU] = cts } @@ -131,12 +132,12 @@ func (c *CPUStats) Gather(acc telegraf.Accumulator) error { return err } -func totalCPUTime(t cpu.TimesStat) float64 { +func totalCPUTime(t cpuUtil.TimesStat) float64 { total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + t.Idle return total } -func activeCPUTime(t cpu.TimesStat) float64 { +func activeCPUTime(t cpuUtil.TimesStat) float64 { active := totalCPUTime(t) - t.Idle return active } diff --git a/plugins/inputs/cpu/cpu_test.go b/plugins/inputs/cpu/cpu_test.go index d3849a5198038..e51660a0adee6 100644 --- a/plugins/inputs/cpu/cpu_test.go +++ b/plugins/inputs/cpu/cpu_test.go @@ -4,11 +4,11 @@ import ( "fmt" "testing" + cpuUtil "github.com/shirou/gopsutil/cpu" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/cpu" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestCPUStats(t *testing.T) { @@ -16,7 +16,7 @@ func TestCPUStats(t *testing.T) { defer mps.AssertExpectations(t) var acc testutil.Accumulator - cts := cpu.TimesStat{ + cts := cpuUtil.TimesStat{ CPU: "cpu0", User: 8.8, System: 8.2, @@ -30,7 +30,7 @@ func TestCPUStats(t *testing.T) { GuestNice: 0.324, } - cts2 := cpu.TimesStat{ + cts2 := cpuUtil.TimesStat{ CPU: "cpu0", User: 24.9, // increased by 16.1 System: 10.9, // increased by 2.7 @@ -44,7 +44,7 @@ func TestCPUStats(t *testing.T) { GuestNice: 2.524, // increased by 2.2 } - mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil) + mps.On("CPUTimes").Return([]cpuUtil.TimesStat{cts}, nil) cs := NewCPUStats(&mps) @@ -66,7 +66,7 @@ func TestCPUStats(t *testing.T) { assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0) mps2 := system.MockPS{} - mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) + mps2.On("CPUTimes").Return([]cpuUtil.TimesStat{cts2}, nil) cs.ps = &mps2 // Should have added cpu percentages too @@ -131,8 +131,7 @@ func assertContainsTaggedFloat( return } } else { - assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", - measurement)) + require.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", measurement)) } } } @@ -141,7 +140,7 @@ func assertContainsTaggedFloat( msg := fmt.Sprintf( "Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f", measurement, delta, expectedValue, actualValue) - assert.Fail(t, msg) + require.Fail(t, msg) } // TestCPUCountChange tests that no errors are encountered if the number of @@ -155,7 +154,7 @@ func TestCPUCountIncrease(t *testing.T) { cs := NewCPUStats(&mps) mps.On("CPUTimes").Return( - []cpu.TimesStat{ + []cpuUtil.TimesStat{ { CPU: "cpu0", }, @@ -165,7 +164,7 @@ func TestCPUCountIncrease(t *testing.T) { require.NoError(t, err) mps2.On("CPUTimes").Return( - []cpu.TimesStat{ + []cpuUtil.TimesStat{ { CPU: "cpu0", }, @@ -186,28 +185,28 @@ func TestCPUTimesDecrease(t *testing.T) { defer mps.AssertExpectations(t) var acc testutil.Accumulator - cts := cpu.TimesStat{ + cts := cpuUtil.TimesStat{ CPU: "cpu0", User: 18, Idle: 80, Iowait: 2, } - cts2 := cpu.TimesStat{ + cts2 := cpuUtil.TimesStat{ CPU: "cpu0", User: 38, // increased by 20 Idle: 40, // decreased by 40 Iowait: 1, // decreased by 1 } - cts3 := cpu.TimesStat{ + cts3 := cpuUtil.TimesStat{ CPU: "cpu0", User: 56, // increased by 18 Idle: 120, // increased by 80 Iowait: 3, // increased by 2 } - mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil) + mps.On("CPUTimes").Return([]cpuUtil.TimesStat{cts}, nil) cs := NewCPUStats(&mps) @@ -221,7 +220,7 @@ func TestCPUTimesDecrease(t *testing.T) { assertContainsTaggedFloat(t, &acc, "time_iowait", 2, 0) mps2 := system.MockPS{} - mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) + mps2.On("CPUTimes").Return([]cpuUtil.TimesStat{cts2}, nil) cs.ps = &mps2 // CPU times decreased. An error should be raised @@ -229,7 +228,7 @@ func TestCPUTimesDecrease(t *testing.T) { require.Error(t, err) mps3 := system.MockPS{} - mps3.On("CPUTimes").Return([]cpu.TimesStat{cts3}, nil) + mps3.On("CPUTimes").Return([]cpuUtil.TimesStat{cts3}, nil) cs.ps = &mps3 err = cs.Gather(&acc) diff --git a/plugins/inputs/csgo/README.md b/plugins/inputs/csgo/README.md index dbf3f3fdf54e9..b335509400426 100644 --- a/plugins/inputs/csgo/README.md +++ b/plugins/inputs/csgo/README.md @@ -4,6 +4,7 @@ The `csgo` plugin gather metrics from Counter-Strike: Global Offensive servers. #### Configuration ```toml +# Fetch metrics from a CSGO SRCDS [[inputs.csgo]] ## Specify servers using the following format: ## servers = [ diff --git a/plugins/inputs/csgo/csgo.go b/plugins/inputs/csgo/csgo.go index 75cf8a9240099..59d1110ad08a5 100644 --- a/plugins/inputs/csgo/csgo.go +++ b/plugins/inputs/csgo/csgo.go @@ -8,9 +8,10 @@ import ( "sync" "time" + "github.com/james4k/rcon" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/james4k/rcon" ) type statsData struct { @@ -30,7 +31,7 @@ type CSGO struct { Servers [][]string `toml:"servers"` } -func (_ *CSGO) Description() string { +func (*CSGO) Description() string { return "Fetch metrics from a CSGO SRCDS" } @@ -45,7 +46,7 @@ var sampleConfig = ` servers = [] ` -func (_ *CSGO) SampleConfig() string { +func (*CSGO) SampleConfig() string { return sampleConfig } @@ -57,7 +58,7 @@ func (s *CSGO) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(ss []string) { defer wg.Done() - acc.AddError(s.gatherServer(ss, requestServer, acc)) + acc.AddError(s.gatherServer(acc, ss, requestServer)) }(server) } @@ -72,9 +73,9 @@ func init() { } func (s *CSGO) gatherServer( + acc telegraf.Accumulator, server []string, request func(string, string) (string, error), - acc telegraf.Accumulator, ) error { if len(server) != 2 { return errors.New("incorrect server config") diff --git a/plugins/inputs/csgo/csgo_test.go b/plugins/inputs/csgo/csgo_test.go index 311e4b2b69bf0..b1d1c9b693814 100644 --- a/plugins/inputs/csgo/csgo_test.go +++ b/plugins/inputs/csgo/csgo_test.go @@ -19,7 +19,7 @@ var ( func TestCPUStats(t *testing.T) { c := NewCSGOStats() var acc testutil.Accumulator - err := c.gatherServer(c.Servers[0], requestMock, &acc) + err := c.gatherServer(&acc, c.Servers[0], requestMock) if err != nil { t.Error(err) } From d181b4338f4182833c02f9aac6f51b7b401355b1 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Wed, 28 Apr 2021 09:43:19 -0400 Subject: [PATCH 401/761] Fix apcupsd 'ALARMDEL' bug via forked repo (#9195) * add oauth2 to http input * switch APC UPSD dependency to forked version with bug fixed. * fix branch discrepancies * fix branch discrepancies * fix branch discrepancies * fix branch discrepancies * fork to influxdata repo --- go.mod | 3 +++ go.sum | 4 ++-- plugins/inputs/apcupsd/apcupsd_test.go | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 8be560c9fa41c..188758ece1805 100644 --- a/go.mod +++ b/go.mod @@ -153,3 +153,6 @@ require ( // replaced due to https://github.com/satori/go.uuid/issues/73 replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible + +// replaced due to https//github.com/mdlayher/apcupsd/issues/10 +replace github.com/mdlayher/apcupsd => github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e diff --git a/go.sum b/go.sum index d7260c3190e85..95651ec1d48b3 100644 --- a/go.sum +++ b/go.sum @@ -661,6 +661,8 @@ github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmK github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e h1:3J1OB4RDKwXs5l8uEV6BP/tucOJOPDQysiT7/9cuXzA= +github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s= github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo= @@ -803,8 +805,6 @@ github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b h1:Kcr+kPbkWZHFHXwl87quXUAmavS4/IMgu2zck3aiE7k= -github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= diff --git a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go index d2baca29646a9..f21c5a4c4ce94 100644 --- a/plugins/inputs/apcupsd/apcupsd_test.go +++ b/plugins/inputs/apcupsd/apcupsd_test.go @@ -208,6 +208,7 @@ func genOutput() [][]byte { "NOMBATTV : 12.0 Volts", "NOMPOWER : 865 Watts", "FIRMWARE : 857.L3 .I USB FW:L3", + "ALARMDEL : Low Battery", } var out [][]byte From ff2992ed21ef4ce7fdda748fd6c2f573613401b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 28 Apr 2021 17:54:22 +0200 Subject: [PATCH 402/761] New DPDK input plugin (#8883) --- README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/dpdk/README.md | 200 +++++++++++ plugins/inputs/dpdk/dpdk.go | 263 ++++++++++++++ plugins/inputs/dpdk/dpdk_connector.go | 162 +++++++++ plugins/inputs/dpdk/dpdk_connector_test.go | 182 ++++++++++ plugins/inputs/dpdk/dpdk_notlinux.go | 3 + plugins/inputs/dpdk/dpdk_test.go | 398 +++++++++++++++++++++ plugins/inputs/dpdk/dpdk_utils.go | 116 ++++++ plugins/inputs/dpdk/dpdk_utils_test.go | 137 +++++++ plugins/inputs/dpdk/mocks/conn.go | 146 ++++++++ 11 files changed, 1609 insertions(+) create mode 100644 plugins/inputs/dpdk/README.md create mode 100644 plugins/inputs/dpdk/dpdk.go create mode 100644 plugins/inputs/dpdk/dpdk_connector.go create mode 100644 plugins/inputs/dpdk/dpdk_connector_test.go create mode 100644 plugins/inputs/dpdk/dpdk_notlinux.go create mode 100644 plugins/inputs/dpdk/dpdk_test.go create mode 100644 plugins/inputs/dpdk/dpdk_utils.go create mode 100644 plugins/inputs/dpdk/dpdk_utils_test.go create mode 100644 plugins/inputs/dpdk/mocks/conn.go diff --git a/README.md b/README.md index 45bdc43baadc2..b2d8e6a548d3c 100644 --- a/README.md +++ b/README.md @@ -186,6 +186,7 @@ For documentation on the latest development code see the [documentation index][d * [docker](./plugins/inputs/docker) * [docker_log](./plugins/inputs/docker_log) * [dovecot](./plugins/inputs/dovecot) +* [dpdk](./plugins/inputs/dpdk) * [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate) * [elasticsearch](./plugins/inputs/elasticsearch) * [ethtool](./plugins/inputs/ethtool) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 5f7e816487f62..3beb30cb412ca 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -41,6 +41,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/docker" _ "github.com/influxdata/telegraf/plugins/inputs/docker_log" _ "github.com/influxdata/telegraf/plugins/inputs/dovecot" + _ "github.com/influxdata/telegraf/plugins/inputs/dpdk" _ "github.com/influxdata/telegraf/plugins/inputs/ecs" _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" _ "github.com/influxdata/telegraf/plugins/inputs/ethtool" diff --git a/plugins/inputs/dpdk/README.md b/plugins/inputs/dpdk/README.md new file mode 100644 index 0000000000000..bd98af050427d --- /dev/null +++ b/plugins/inputs/dpdk/README.md @@ -0,0 +1,200 @@ +# DPDK Input Plugin +The `dpdk` plugin collects metrics exposed by applications built with [Data Plane Development Kit](https://www.dpdk.org/) +which is an extensive set of open source libraries designed for accelerating packet processing workloads. + +DPDK provides APIs that enable exposing various statistics from the devices used by DPDK applications and enable exposing +KPI metrics directly from applications. Device statistics include e.g. common statistics available across NICs, like: +received and sent packets, received and sent bytes etc. In addition to this generic statistics, an extended statistics API +is available that allows providing more detailed, driver-specific metrics that are not available as generic statistics. + +[DPDK Release 20.05](https://doc.dpdk.org/guides/rel_notes/release_20_05.html) introduced updated telemetry interface +that enables DPDK libraries and applications to provide their telemetry. This is referred to as `v2` version of this +socket-based telemetry interface. This release enabled e.g. reading driver-specific extended stats (`/ethdev/xstats`) +via this new interface. + +[DPDK Release 20.11](https://doc.dpdk.org/guides/rel_notes/release_20_11.html) introduced reading via `v2` interface +common statistics (`/ethdev/stats`) in addition to existing (`/ethdev/xstats`). + +The example usage of `v2` telemetry interface can be found in [Telemetry User Guide](https://doc.dpdk.org/guides/howto/telemetry.html). +A variety of [DPDK Sample Applications](https://doc.dpdk.org/guides/sample_app_ug/index.html) is also available for users +to discover and test the capabilities of DPDK libraries and to explore the exposed metrics. + +> **DPDK Version Info:** This plugin uses this `v2` interface to read telemetry data from applications build with +> `DPDK version >= 20.05`. The default configuration include reading common statistics from `/ethdev/stats` that is +> available from `DPDK version >= 20.11`. When using `DPDK 20.05 <= version < DPDK 20.11` it is recommended to disable +> querying `/ethdev/stats` by setting corresponding `exclude_commands` configuration option. + +> **NOTE:** Since DPDK will most likely run with root privileges, the socket telemetry interface exposed by DPDK +> will also require root access. This means that either access permissions have to be adjusted for socket telemetry +> interface to allow Telegraf to access it, or Telegraf should run with root privileges. + +## Configuration +This plugin offers multiple configuration options, please review examples below for additional usage information. +```toml +# Reads metrics from DPDK applications using v2 telemetry interface. +[[inputs.dpdk]] + ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK telemetry interface. + # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" + + ## Duration that defines how long the connected socket client will wait for a response before terminating connection. + ## This includes both writing to and reading from socket. Since it's local socket access + ## to a fast packet processing application, the timeout should be sufficient for most users. + ## Setting the value to 0 disables the timeout (not recommended) + # socket_access_timeout = "200ms" + + ## Enables telemetry data collection for selected device types. + ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status). + ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats). + # device_types = ["ethdev"] + + ## List of custom, application-specific telemetry commands to query + ## The list of available commands depend on the application deployed. Applications can register their own commands + ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands + ## For e.g. L3 Forwarding with Power Management Sample Application this could be: + ## additional_commands = ["/l3fwd-power/stats"] + # additional_commands = [] + + ## Allows turning off collecting data for individual "ethdev" commands. + ## Remove "/ethdev/link_status" from list to start getting link status metrics. + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] + + ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify + ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host. + ## [inputs.dpdk.tags] + ## dpdk_instance = "my-fwd-app" +``` + +### Example: Minimal Configuration for NIC metrics +This configuration allows getting metrics for all devices reported via `/ethdev/list` command: +* `/ethdev/stats` - basic device statistics (since `DPDK 20.11`) +* `/ethdev/xstats` - extended device statistics +* `/ethdev/link_status` - up/down link status +```toml +[[inputs.dpdk]] + device_types = ["ethdev"] +``` +Since this configuration will query `/ethdev/link_status` it's recommended to increase timeout to `socket_access_timeout = "10s"`. + +The [plugin collecting interval](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#input-plugins) +should be adjusted accordingly (e.g. `interval = "30s"`). + +### Example: Excluding NIC link status from being collected +Checking link status depending on underlying implementation may take more time to complete. +This configuration can be used to exclude this telemetry command to allow faster response for metrics. +```toml +[[inputs.dpdk]] + device_types = ["ethdev"] + + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] +``` +A separate plugin instance with higher timeout settings can be used to get `/ethdev/link_status` independently. +Consult [Independent NIC link status configuration](#example-independent-nic-link-status-configuration) +and [Getting metrics from multiple DPDK instances running on same host](#example-getting-metrics-from-multiple-dpdk-instances-running-on-same-host) +examples for further details. + +### Example: Independent NIC link status configuration +This configuration allows getting `/ethdev/link_status` using separate configuration, with higher timeout. +```toml +[[inputs.dpdk]] + interval = "30s" + socket_access_timeout = "10s" + device_types = ["ethdev"] + + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/stats", "/ethdev/xstats"] +``` + +### Example: Getting application-specific metrics +This configuration allows reading custom metrics exposed by applications. Example telemetry command obtained from +[L3 Forwarding with Power Management Sample Application](https://doc.dpdk.org/guides/sample_app_ug/l3_forward_power_man.html). +```toml +[[inputs.dpdk]] + device_types = ["ethdev"] + additional_commands = ["/l3fwd-power/stats"] + + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] +``` +Command entries specified in `additional_commands` should match DPDK command format: +* Command entry format: either `command` or `command,params` for commands that expect parameters, where comma (`,`) separates command from params. +* Command entry length (command with params) should be `< 1024` characters. +* Command length (without params) should be `< 56` characters. +* Commands have to start with `/`. + +Providing invalid commands will prevent the plugin from starting. Additional commands allow duplicates, but they +will be removed during execution so each command will be executed only once during each metric gathering interval. + +### Example: Getting metrics from multiple DPDK instances running on same host +This configuration allows getting metrics from two separate applications exposing their telemetry interfaces +via separate sockets. For each plugin instance a unique tag `[inputs.dpdk.tags]` allows distinguishing between them. +```toml +# Instance #1 - L3 Forwarding with Power Management Application +[[inputs.dpdk]] + socket_path = "/var/run/dpdk/rte/l3fwd-power_telemetry.v2" + device_types = ["ethdev"] + additional_commands = ["/l3fwd-power/stats"] + + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] + + [inputs.dpdk.tags] + dpdk_instance = "l3fwd-power" + +# Instance #2 - L2 Forwarding with Intel Cache Allocation Technology (CAT) Application +[[inputs.dpdk]] + socket_path = "/var/run/dpdk/rte/l2fwd-cat_telemetry.v2" + device_types = ["ethdev"] + +[inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] + + [inputs.dpdk.tags] + dpdk_instance = "l2fwd-cat" +``` +This utilizes Telegraf's standard capability of [adding custom tags](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#input-plugins) +to input plugin's measurements. + +## Metrics +The DPDK socket accepts `command,params` requests and returns metric data in JSON format. All metrics from DPDK socket +become flattened using [Telegraf's JSON Flattener](../../parsers/json/README.md) and exposed as fields. +If DPDK response contains no information (is empty or is null) then such response will be discarded. + +> **NOTE:** Since DPDK allows registering custom metrics in its telemetry framework the JSON response from DPDK +> may contain various sets of metrics. While metrics from `/ethdev/stats` should be most stable, the `/ethdev/xstats` +> may contain driver-specific metrics (depending on DPDK application configuration). The application-specific commands +> like `/l3fwd-power/stats` can return their own specific set of metrics. + +## Example output +The output consists of plugin name (`dpdk`), and a set of tags that identify querying hierarchy: +``` +dpdk,host=dpdk-host,dpdk_instance=l3fwd-power,command=/ethdev/stats,params=0 [fields] [timestamp] +``` + +| Tag | Description | +|-----|-------------| +| `host` | hostname of the machine (consult [Telegraf Agent configuration](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#agent) for additional details) | +| `dpdk_instance` | custom tag from `[inputs.dpdk.tags]` (optional) | +| `command` | executed command (without params) | +| `params` | command parameter, e.g. for `/ethdev/stats` it is the id of NIC as exposed by `/ethdev/list`
For DPDK app that uses 2 NICs the metrics will output e.g. `params=0`, `params=1`. | + +When running plugin configuration below... +```toml +[[inputs.dpdk]] + device_types = ["ethdev"] + additional_commands = ["/l3fwd-power/stats"] + [inputs.dpdk.tags] + dpdk_instance = "l3fwd-power" +``` + +...expected output for `dpdk` plugin instance running on host named `host=dpdk-host`: +``` +dpdk,command=/ethdev/stats,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 q_opackets_0=0,q_ipackets_5=0,q_errors_11=0,ierrors=0,q_obytes_5=0,q_obytes_10=0,q_opackets_10=0,q_ipackets_4=0,q_ipackets_7=0,q_ipackets_15=0,q_ibytes_5=0,q_ibytes_6=0,q_ibytes_9=0,obytes=0,q_opackets_1=0,q_opackets_11=0,q_obytes_7=0,q_errors_5=0,q_errors_10=0,q_ibytes_4=0,q_obytes_6=0,q_errors_1=0,q_opackets_5=0,q_errors_3=0,q_errors_12=0,q_ipackets_11=0,q_ipackets_12=0,q_obytes_14=0,q_opackets_15=0,q_obytes_2=0,q_errors_8=0,q_opackets_12=0,q_errors_0=0,q_errors_9=0,q_opackets_14=0,q_ibytes_3=0,q_ibytes_15=0,q_ipackets_13=0,q_ipackets_14=0,q_obytes_3=0,q_errors_13=0,q_opackets_3=0,q_ibytes_0=7092,q_ibytes_2=0,q_ibytes_8=0,q_ipackets_8=0,q_ipackets_10=0,q_obytes_4=0,q_ibytes_10=0,q_ibytes_13=0,q_ibytes_1=0,q_ibytes_12=0,opackets=0,q_obytes_1=0,q_errors_15=0,q_opackets_2=0,oerrors=0,rx_nombuf=0,q_opackets_8=0,q_ibytes_11=0,q_ipackets_3=0,q_obytes_0=0,q_obytes_12=0,q_obytes_11=0,q_obytes_13=0,q_errors_6=0,q_ipackets_1=0,q_ipackets_6=0,q_ipackets_9=0,q_obytes_15=0,q_opackets_7=0,q_ibytes_14=0,ipackets=98,q_ipackets_2=0,q_opackets_6=0,q_ibytes_7=0,imissed=0,q_opackets_4=0,q_opackets_9=0,q_obytes_8=0,q_obytes_9=0,q_errors_4=0,q_errors_14=0,q_opackets_13=0,ibytes=7092,q_ipackets_0=98,q_errors_2=0,q_errors_7=0 1606310780000000000 +dpdk,command=/ethdev/stats,dpdk_instance=l3fwd-power,host=dpdk-host,params=1 q_opackets_0=0,q_ipackets_5=0,q_errors_11=0,ierrors=0,q_obytes_5=0,q_obytes_10=0,q_opackets_10=0,q_ipackets_4=0,q_ipackets_7=0,q_ipackets_15=0,q_ibytes_5=0,q_ibytes_6=0,q_ibytes_9=0,obytes=0,q_opackets_1=0,q_opackets_11=0,q_obytes_7=0,q_errors_5=0,q_errors_10=0,q_ibytes_4=0,q_obytes_6=0,q_errors_1=0,q_opackets_5=0,q_errors_3=0,q_errors_12=0,q_ipackets_11=0,q_ipackets_12=0,q_obytes_14=0,q_opackets_15=0,q_obytes_2=0,q_errors_8=0,q_opackets_12=0,q_errors_0=0,q_errors_9=0,q_opackets_14=0,q_ibytes_3=0,q_ibytes_15=0,q_ipackets_13=0,q_ipackets_14=0,q_obytes_3=0,q_errors_13=0,q_opackets_3=0,q_ibytes_0=7092,q_ibytes_2=0,q_ibytes_8=0,q_ipackets_8=0,q_ipackets_10=0,q_obytes_4=0,q_ibytes_10=0,q_ibytes_13=0,q_ibytes_1=0,q_ibytes_12=0,opackets=0,q_obytes_1=0,q_errors_15=0,q_opackets_2=0,oerrors=0,rx_nombuf=0,q_opackets_8=0,q_ibytes_11=0,q_ipackets_3=0,q_obytes_0=0,q_obytes_12=0,q_obytes_11=0,q_obytes_13=0,q_errors_6=0,q_ipackets_1=0,q_ipackets_6=0,q_ipackets_9=0,q_obytes_15=0,q_opackets_7=0,q_ibytes_14=0,ipackets=98,q_ipackets_2=0,q_opackets_6=0,q_ibytes_7=0,imissed=0,q_opackets_4=0,q_opackets_9=0,q_obytes_8=0,q_obytes_9=0,q_errors_4=0,q_errors_14=0,q_opackets_13=0,ibytes=7092,q_ipackets_0=98,q_errors_2=0,q_errors_7=0 1606310780000000000 +dpdk,command=/ethdev/xstats,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 out_octets_encrypted=0,rx_fcoe_mbuf_allocation_errors=0,tx_q1packets=0,rx_priority0_xoff_packets=0,rx_priority7_xoff_packets=0,rx_errors=0,mac_remote_errors=0,in_pkts_invalid=0,tx_priority3_xoff_packets=0,tx_errors=0,rx_fcoe_bytes=0,rx_flow_control_xon_packets=0,rx_priority4_xoff_packets=0,tx_priority2_xoff_packets=0,rx_illegal_byte_errors=0,rx_xoff_packets=0,rx_management_packets=0,rx_priority7_dropped=0,rx_priority4_dropped=0,in_pkts_unchecked=0,rx_error_bytes=0,rx_size_256_to_511_packets=0,tx_priority4_xoff_packets=0,rx_priority6_xon_packets=0,tx_priority4_xon_to_xoff_packets=0,in_pkts_delayed=0,rx_priority0_mbuf_allocation_errors=0,out_octets_protected=0,tx_priority7_xon_to_xoff_packets=0,tx_priority1_xon_to_xoff_packets=0,rx_fcoe_no_direct_data_placement_ext_buff=0,tx_priority6_xon_to_xoff_packets=0,flow_director_filter_add_errors=0,rx_total_packets=99,rx_crc_errors=0,flow_director_filter_remove_errors=0,rx_missed_errors=0,tx_size_64_packets=0,rx_priority3_dropped=0,flow_director_matched_filters=0,tx_priority2_xon_to_xoff_packets=0,rx_priority1_xon_packets=0,rx_size_65_to_127_packets=99,rx_fragment_errors=0,in_pkts_notusingsa=0,rx_q0bytes=7162,rx_fcoe_dropped=0,rx_priority1_dropped=0,rx_fcoe_packets=0,rx_priority5_xoff_packets=0,out_pkts_protected=0,tx_total_packets=0,rx_priority2_dropped=0,in_pkts_late=0,tx_q1bytes=0,in_pkts_badtag=0,rx_multicast_packets=99,rx_priority6_xoff_packets=0,tx_flow_control_xoff_packets=0,rx_flow_control_xoff_packets=0,rx_priority0_xon_packets=0,in_pkts_untagged=0,tx_fcoe_packets=0,rx_priority7_mbuf_allocation_errors=0,tx_priority0_xon_to_xoff_packets=0,tx_priority5_xon_to_xoff_packets=0,tx_flow_control_xon_packets=0,tx_q0packets=0,tx_xoff_packets=0,rx_size_512_to_1023_packets=0,rx_priority3_xon_packets=0,rx_q0errors=0,rx_oversize_errors=0,tx_priority4_xon_packets=0,tx_priority5_xoff_packets=0,rx_priority5_xon_packets=0,rx_total_missed_packets=0,rx_priority4_mbuf_allocation_errors=0,tx_priority1_xon_packets=0,tx_management_packets=0,rx_priority5_mbuf_allocation_errors=0,rx_fcoe_no_direct_data_placement=0,rx_undersize_errors=0,tx_priority1_xoff_packets=0,rx_q0packets=99,tx_q2packets=0,tx_priority6_xon_packets=0,rx_good_packets=99,tx_priority5_xon_packets=0,tx_size_256_to_511_packets=0,rx_priority6_dropped=0,rx_broadcast_packets=0,tx_size_512_to_1023_packets=0,tx_priority3_xon_to_xoff_packets=0,in_pkts_unknownsci=0,in_octets_validated=0,tx_priority6_xoff_packets=0,tx_priority7_xoff_packets=0,rx_jabber_errors=0,tx_priority7_xon_packets=0,tx_priority0_xon_packets=0,in_pkts_unusedsa=0,tx_priority0_xoff_packets=0,mac_local_errors=33,rx_total_bytes=7162,in_pkts_notvalid=0,rx_length_errors=0,in_octets_decrypted=0,rx_size_128_to_255_packets=0,rx_good_bytes=7162,tx_size_65_to_127_packets=0,rx_mac_short_packet_dropped=0,tx_size_1024_to_max_packets=0,rx_priority2_mbuf_allocation_errors=0,flow_director_added_filters=0,tx_multicast_packets=0,rx_fcoe_crc_errors=0,rx_priority1_xoff_packets=0,flow_director_missed_filters=0,rx_xon_packets=0,tx_size_128_to_255_packets=0,out_pkts_encrypted=0,rx_priority4_xon_packets=0,rx_priority0_dropped=0,rx_size_1024_to_max_packets=0,tx_good_bytes=0,rx_management_dropped=0,rx_mbuf_allocation_errors=0,tx_xon_packets=0,rx_priority3_xoff_packets=0,tx_good_packets=0,tx_fcoe_bytes=0,rx_priority6_mbuf_allocation_errors=0,rx_priority2_xon_packets=0,tx_broadcast_packets=0,tx_q2bytes=0,rx_priority7_xon_packets=0,out_pkts_untagged=0,rx_priority2_xoff_packets=0,rx_priority1_mbuf_allocation_errors=0,tx_q0bytes=0,rx_size_64_packets=0,rx_priority5_dropped=0,tx_priority2_xon_packets=0,in_pkts_nosci=0,flow_director_removed_filters=0,in_pkts_ok=0,rx_l3_l4_xsum_error=0,rx_priority3_mbuf_allocation_errors=0,tx_priority3_xon_packets=0 1606310780000000000 +dpdk,command=/ethdev/xstats,dpdk_instance=l3fwd-power,host=dpdk-host,params=1 tx_priority5_xoff_packets=0,in_pkts_unknownsci=0,tx_q0packets=0,tx_total_packets=0,rx_crc_errors=0,rx_priority4_xoff_packets=0,rx_priority5_dropped=0,tx_size_65_to_127_packets=0,rx_good_packets=98,tx_priority6_xoff_packets=0,tx_fcoe_bytes=0,out_octets_protected=0,out_pkts_encrypted=0,rx_priority1_xon_packets=0,tx_size_128_to_255_packets=0,rx_flow_control_xoff_packets=0,rx_priority7_xoff_packets=0,tx_priority0_xon_to_xoff_packets=0,rx_broadcast_packets=0,tx_priority1_xon_packets=0,rx_xon_packets=0,rx_fragment_errors=0,tx_flow_control_xoff_packets=0,tx_q0bytes=0,out_pkts_untagged=0,rx_priority4_xon_packets=0,tx_priority5_xon_packets=0,rx_priority1_xoff_packets=0,rx_good_bytes=7092,rx_priority4_mbuf_allocation_errors=0,in_octets_decrypted=0,tx_priority2_xon_to_xoff_packets=0,rx_priority3_dropped=0,tx_multicast_packets=0,mac_local_errors=33,in_pkts_ok=0,rx_illegal_byte_errors=0,rx_xoff_packets=0,rx_q0errors=0,flow_director_added_filters=0,rx_size_256_to_511_packets=0,rx_priority3_xon_packets=0,rx_l3_l4_xsum_error=0,rx_priority6_dropped=0,in_pkts_notvalid=0,rx_size_64_packets=0,tx_management_packets=0,rx_length_errors=0,tx_priority7_xon_to_xoff_packets=0,rx_mbuf_allocation_errors=0,rx_missed_errors=0,rx_priority1_mbuf_allocation_errors=0,rx_fcoe_no_direct_data_placement=0,tx_priority3_xoff_packets=0,in_pkts_delayed=0,tx_errors=0,rx_size_512_to_1023_packets=0,tx_priority4_xon_packets=0,rx_q0bytes=7092,in_pkts_unchecked=0,tx_size_512_to_1023_packets=0,rx_fcoe_packets=0,in_pkts_nosci=0,rx_priority6_mbuf_allocation_errors=0,rx_priority1_dropped=0,tx_q2packets=0,rx_priority7_dropped=0,tx_size_1024_to_max_packets=0,rx_management_packets=0,rx_multicast_packets=98,rx_total_bytes=7092,mac_remote_errors=0,tx_priority3_xon_packets=0,rx_priority2_mbuf_allocation_errors=0,rx_priority5_mbuf_allocation_errors=0,tx_q2bytes=0,rx_size_128_to_255_packets=0,in_pkts_badtag=0,out_pkts_protected=0,rx_management_dropped=0,rx_fcoe_bytes=0,flow_director_removed_filters=0,tx_priority2_xoff_packets=0,rx_fcoe_crc_errors=0,rx_priority0_mbuf_allocation_errors=0,rx_priority0_xon_packets=0,rx_fcoe_dropped=0,tx_priority1_xon_to_xoff_packets=0,rx_size_65_to_127_packets=98,rx_q0packets=98,tx_priority0_xoff_packets=0,rx_priority6_xon_packets=0,rx_total_packets=98,rx_undersize_errors=0,flow_director_missed_filters=0,rx_jabber_errors=0,in_pkts_invalid=0,in_pkts_late=0,rx_priority5_xon_packets=0,tx_priority4_xoff_packets=0,out_octets_encrypted=0,tx_q1packets=0,rx_priority5_xoff_packets=0,rx_priority6_xoff_packets=0,rx_errors=0,in_octets_validated=0,rx_priority3_xoff_packets=0,tx_priority4_xon_to_xoff_packets=0,tx_priority5_xon_to_xoff_packets=0,tx_flow_control_xon_packets=0,rx_priority0_dropped=0,flow_director_filter_add_errors=0,tx_q1bytes=0,tx_priority6_xon_to_xoff_packets=0,flow_director_matched_filters=0,tx_priority2_xon_packets=0,rx_fcoe_mbuf_allocation_errors=0,rx_priority2_xoff_packets=0,tx_priority7_xoff_packets=0,rx_priority0_xoff_packets=0,rx_oversize_errors=0,in_pkts_notusingsa=0,tx_size_64_packets=0,rx_size_1024_to_max_packets=0,tx_priority6_xon_packets=0,rx_priority2_dropped=0,rx_priority4_dropped=0,rx_priority7_mbuf_allocation_errors=0,rx_flow_control_xon_packets=0,tx_good_bytes=0,tx_priority3_xon_to_xoff_packets=0,rx_total_missed_packets=0,rx_error_bytes=0,tx_priority7_xon_packets=0,rx_mac_short_packet_dropped=0,tx_priority1_xoff_packets=0,tx_good_packets=0,tx_broadcast_packets=0,tx_xon_packets=0,in_pkts_unusedsa=0,rx_priority2_xon_packets=0,in_pkts_untagged=0,tx_fcoe_packets=0,flow_director_filter_remove_errors=0,rx_priority3_mbuf_allocation_errors=0,tx_priority0_xon_packets=0,rx_priority7_xon_packets=0,rx_fcoe_no_direct_data_placement_ext_buff=0,tx_xoff_packets=0,tx_size_256_to_511_packets=0 1606310780000000000 +dpdk,command=/ethdev/link_status,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 status="UP",speed=10000,duplex="full-duplex" 1606310780000000000 +dpdk,command=/ethdev/link_status,dpdk_instance=l3fwd-power,host=dpdk-host,params=1 status="UP",speed=10000,duplex="full-duplex" 1606310780000000000 +dpdk,command=/l3fwd-power/stats,dpdk_instance=l3fwd-power,host=dpdk-host empty_poll=49506395979901,full_poll=0,busy_percent=0 1606310780000000000 +``` diff --git a/plugins/inputs/dpdk/dpdk.go b/plugins/inputs/dpdk/dpdk.go new file mode 100644 index 0000000000000..293dbee90adf3 --- /dev/null +++ b/plugins/inputs/dpdk/dpdk.go @@ -0,0 +1,263 @@ +// +build linux + +package dpdk + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/inputs" + jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" +) + +const ( + description = "Reads metrics from DPDK applications using v2 telemetry interface." + sampleConfig = ` + ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK telemetry interface. + # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" + + ## Duration that defines how long the connected socket client will wait for a response before terminating connection. + ## This includes both writing to and reading from socket. Since it's local socket access + ## to a fast packet processing application, the timeout should be sufficient for most users. + ## Setting the value to 0 disables the timeout (not recommended) + # socket_access_timeout = "200ms" + + ## Enables telemetry data collection for selected device types. + ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status). + ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats). + # device_types = ["ethdev"] + + ## List of custom, application-specific telemetry commands to query + ## The list of available commands depend on the application deployed. Applications can register their own commands + ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands + ## For e.g. L3 Forwarding with Power Management Sample Application this could be: + ## additional_commands = ["/l3fwd-power/stats"] + # additional_commands = [] + + ## Allows turning off collecting data for individual "ethdev" commands. + ## Remove "/ethdev/link_status" from list to start getting link status metrics. + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] + + ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify + ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host. + ## [inputs.dpdk.tags] + ## dpdk_instance = "my-fwd-app" +` + defaultPathToSocket = "/var/run/dpdk/rte/dpdk_telemetry.v2" + defaultAccessTimeout = config.Duration(200 * time.Millisecond) + maxCommandLength = 56 + maxCommandLengthWithParams = 1024 + pluginName = "dpdk" + ethdevListCommand = "/ethdev/list" + rawdevListCommand = "/rawdev/list" +) + +type dpdk struct { + SocketPath string `toml:"socket_path"` + AccessTimeout config.Duration `toml:"socket_access_timeout"` + DeviceTypes []string `toml:"device_types"` + EthdevConfig ethdevConfig `toml:"ethdev"` + AdditionalCommands []string `toml:"additional_commands"` + Log telegraf.Logger `toml:"-"` + + connector *dpdkConnector + rawdevCommands []string + ethdevCommands []string + ethdevExcludedCommandsFilter filter.Filter +} + +type ethdevConfig struct { + EthdevExcludeCommands []string `toml:"exclude_commands"` +} + +func init() { + inputs.Add(pluginName, func() telegraf.Input { + dpdk := &dpdk{ + // Setting it here (rather than in `Init()`) to distinguish between "zero" value, + // default value and don't having value in config at all. + AccessTimeout: defaultAccessTimeout, + } + return dpdk + }) +} + +func (dpdk *dpdk) SampleConfig() string { + return sampleConfig +} + +func (dpdk *dpdk) Description() string { + return description +} + +// Performs validation of all parameters from configuration +func (dpdk *dpdk) Init() error { + if dpdk.SocketPath == "" { + dpdk.SocketPath = defaultPathToSocket + dpdk.Log.Debugf("using default '%v' path for socket_path", defaultPathToSocket) + } + + if dpdk.DeviceTypes == nil { + dpdk.DeviceTypes = []string{"ethdev"} + } + + var err error + if err = isSocket(dpdk.SocketPath); err != nil { + return err + } + + dpdk.rawdevCommands = []string{"/rawdev/xstats"} + dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats", "/ethdev/link_status"} + + if err = dpdk.validateCommands(); err != nil { + return err + } + + if dpdk.AccessTimeout < 0 { + return fmt.Errorf("socket_access_timeout should be positive number or equal to 0 (to disable timeouts)") + } + + if len(dpdk.AdditionalCommands) == 0 && len(dpdk.DeviceTypes) == 0 { + return fmt.Errorf("plugin was configured with nothing to read") + } + + dpdk.ethdevExcludedCommandsFilter, err = filter.Compile(dpdk.EthdevConfig.EthdevExcludeCommands) + if err != nil { + return fmt.Errorf("error occurred during filter prepation for ethdev excluded commands - %v", err) + } + + dpdk.connector = newDpdkConnector(dpdk.SocketPath, dpdk.AccessTimeout) + initMessage, err := dpdk.connector.connect() + if initMessage != nil { + dpdk.Log.Debugf("Successfully connected to %v running as process with PID %v with len %v", + initMessage.Version, initMessage.Pid, initMessage.MaxOutputLen) + } + return err +} + +// Checks that user-supplied commands are unique and match DPDK commands format +func (dpdk *dpdk) validateCommands() error { + dpdk.AdditionalCommands = uniqueValues(dpdk.AdditionalCommands) + + for _, commandWithParams := range dpdk.AdditionalCommands { + if len(commandWithParams) == 0 { + return fmt.Errorf("got empty command") + } + + if commandWithParams[0] != '/' { + return fmt.Errorf("'%v' command should start with '/'", commandWithParams) + } + + if commandWithoutParams := stripParams(commandWithParams); len(commandWithoutParams) >= maxCommandLength { + return fmt.Errorf("'%v' command is too long. It shall be less than %v characters", commandWithoutParams, maxCommandLength) + } + + if len(commandWithParams) >= maxCommandLengthWithParams { + return fmt.Errorf("command with parameters '%v' shall be less than %v characters", commandWithParams, maxCommandLengthWithParams) + } + } + + return nil +} + +// Gathers all unique commands and processes each command sequentially +// Parallel processing could be achieved by running several instances of this plugin with different settings +func (dpdk *dpdk) Gather(acc telegraf.Accumulator) error { + // This needs to be done during every `Gather(...)`, because DPDK can be restarted between consecutive + // `Gather(...)` cycles which can cause that it will be exposing different set of metrics. + commands := dpdk.gatherCommands(acc) + + for _, command := range commands { + dpdk.processCommand(acc, command) + } + + return nil +} + +// Gathers all unique commands +func (dpdk *dpdk) gatherCommands(acc telegraf.Accumulator) []string { + var commands []string + if choice.Contains("ethdev", dpdk.DeviceTypes) { + ethdevCommands := removeSubset(dpdk.ethdevCommands, dpdk.ethdevExcludedCommandsFilter) + ethdevCommands, err := dpdk.appendCommandsWithParamsFromList(ethdevListCommand, ethdevCommands) + if err != nil { + acc.AddError(fmt.Errorf("error occurred during fetching of %v params - %v", ethdevListCommand, err)) + } + + commands = append(commands, ethdevCommands...) + } + + if choice.Contains("rawdev", dpdk.DeviceTypes) { + rawdevCommands, err := dpdk.appendCommandsWithParamsFromList(rawdevListCommand, dpdk.rawdevCommands) + if err != nil { + acc.AddError(fmt.Errorf("error occurred during fetching of %v params - %v", rawdevListCommand, err)) + } + + commands = append(commands, rawdevCommands...) + } + + commands = append(commands, dpdk.AdditionalCommands...) + return uniqueValues(commands) +} + +// Fetches all identifiers of devices and then creates all possible combinations of commands for each device +func (dpdk *dpdk) appendCommandsWithParamsFromList(listCommand string, commands []string) ([]string, error) { + response, err := dpdk.connector.getCommandResponse(listCommand) + if err != nil { + return nil, err + } + + params, err := jsonToArray(response, listCommand) + if err != nil { + return nil, err + } + + result := make([]string, 0, len(commands)*len(params)) + for _, command := range commands { + for _, param := range params { + result = append(result, commandWithParams(command, param)) + } + } + + return result, nil +} + +// Executes command, parses response and creates/writes metric from response +func (dpdk *dpdk) processCommand(acc telegraf.Accumulator, commandWithParams string) { + buf, err := dpdk.connector.getCommandResponse(commandWithParams) + if err != nil { + acc.AddError(err) + return + } + + var parsedResponse map[string]interface{} + err = json.Unmarshal(buf, &parsedResponse) + if err != nil { + acc.AddError(fmt.Errorf("failed to unmarshall json response from %v command - %v", commandWithParams, err)) + return + } + + command := stripParams(commandWithParams) + value := parsedResponse[command] + if isEmpty(value) { + acc.AddError(fmt.Errorf("got empty json on '%v' command", commandWithParams)) + return + } + + jf := jsonparser.JSONFlattener{} + err = jf.FullFlattenJSON("", value, true, true) + if err != nil { + acc.AddError(fmt.Errorf("failed to flatten response - %v", err)) + return + } + + acc.AddFields(pluginName, jf.Fields, map[string]string{ + "command": command, + "params": getParams(commandWithParams), + }) +} diff --git a/plugins/inputs/dpdk/dpdk_connector.go b/plugins/inputs/dpdk/dpdk_connector.go new file mode 100644 index 0000000000000..1129d16d31604 --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_connector.go @@ -0,0 +1,162 @@ +// +build linux + +package dpdk + +import ( + "encoding/json" + "fmt" + "net" + "time" + + "github.com/influxdata/telegraf/config" +) + +const maxInitMessageLength = 1024 + +type initMessage struct { + Version string `json:"version"` + Pid int `json:"pid"` + MaxOutputLen uint32 `json:"max_output_len"` +} + +type dpdkConnector struct { + pathToSocket string + maxOutputLen uint32 + messageShowed bool + accessTimeout time.Duration + connection net.Conn +} + +func newDpdkConnector(pathToSocket string, accessTimeout config.Duration) *dpdkConnector { + return &dpdkConnector{ + pathToSocket: pathToSocket, + messageShowed: false, + accessTimeout: time.Duration(accessTimeout), + } +} + +// Connects to the socket +// Since DPDK is a local unix socket, it is instantly returns error or connection, so there's no need to set timeout for it +func (conn *dpdkConnector) connect() (*initMessage, error) { + connection, err := net.Dial("unixpacket", conn.pathToSocket) + if err != nil { + return nil, fmt.Errorf("failed to connect to the socket - %v", err) + } + + conn.connection = connection + result, err := conn.readMaxOutputLen() + if err != nil { + if closeErr := conn.tryClose(); closeErr != nil { + return nil, fmt.Errorf("%v and failed to close connection - %v", err, closeErr) + } + return nil, err + } + + return result, nil +} + +// Executes command using provided connection and returns response +// If error (such as timeout) occurred, then connection is discarded and recreated +// because otherwise behaviour of connection is undefined (e.g. it could return result of timed out command instead of latest) +func (conn *dpdkConnector) getCommandResponse(fullCommand string) ([]byte, error) { + connection, err := conn.getConnection() + if err != nil { + return nil, fmt.Errorf("failed to get connection to execute %v command - %v", fullCommand, err) + } + + err = conn.setTimeout() + if err != nil { + return nil, fmt.Errorf("failed to set timeout for %v command - %v", fullCommand, err) + } + + _, err = connection.Write([]byte(fullCommand)) + if err != nil { + if closeErr := conn.tryClose(); closeErr != nil { + return nil, fmt.Errorf("failed to send '%v' command - %v and failed to close connection - %v", + fullCommand, err, closeErr) + } + return nil, fmt.Errorf("failed to send '%v' command - %v", fullCommand, err) + } + + buf := make([]byte, conn.maxOutputLen) + messageLength, err := connection.Read(buf) + if err != nil { + if closeErr := conn.tryClose(); closeErr != nil { + return nil, fmt.Errorf("failed read response of '%v' command - %v and failed to close connection - %v", + fullCommand, err, closeErr) + } + return nil, fmt.Errorf("failed to read response of '%v' command - %v", fullCommand, err) + } + + if messageLength == 0 { + return nil, fmt.Errorf("got empty response during execution of '%v' command", fullCommand) + } + return buf[:messageLength], nil +} + +func (conn *dpdkConnector) tryClose() error { + if conn.connection == nil { + return nil + } + + err := conn.connection.Close() + conn.connection = nil + if err != nil { + return err + } + return nil +} + +func (conn *dpdkConnector) setTimeout() error { + if conn.connection == nil { + return fmt.Errorf("connection had not been established before") + } + + if conn.accessTimeout == 0 { + return conn.connection.SetDeadline(time.Time{}) + } + return conn.connection.SetDeadline(time.Now().Add(conn.accessTimeout)) +} + +// Returns connections, if connection is not created then function tries to recreate it +func (conn *dpdkConnector) getConnection() (net.Conn, error) { + if conn.connection == nil { + _, err := conn.connect() + if err != nil { + return nil, err + } + } + return conn.connection, nil +} + +// Reads InitMessage for connection. Should be read for each connection, otherwise InitMessage is returned as response for first command. +func (conn *dpdkConnector) readMaxOutputLen() (*initMessage, error) { + buf := make([]byte, maxInitMessageLength) + err := conn.setTimeout() + if err != nil { + return nil, fmt.Errorf("failed to set timeout - %v", err) + } + + messageLength, err := conn.connection.Read(buf) + if err != nil { + return nil, fmt.Errorf("failed to read InitMessage - %v", err) + } + + var initMessage initMessage + err = json.Unmarshal(buf[:messageLength], &initMessage) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal response - %v", err) + } + + if initMessage.MaxOutputLen == 0 { + return nil, fmt.Errorf("failed to read maxOutputLen information") + } + + if !conn.messageShowed { + conn.maxOutputLen = initMessage.MaxOutputLen + conn.messageShowed = true + return &initMessage, nil + } + + return nil, nil +} diff --git a/plugins/inputs/dpdk/dpdk_connector_test.go b/plugins/inputs/dpdk/dpdk_connector_test.go new file mode 100644 index 0000000000000..a322964979fe8 --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_connector_test.go @@ -0,0 +1,182 @@ +// +build linux + +package dpdk + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/inputs/dpdk/mocks" +) + +func Test_readMaxOutputLen(t *testing.T) { + t.Run("should return error if timeout occurred", func(t *testing.T) { + conn := &mocks.Conn{} + conn.On("Read", mock.Anything).Return(0, fmt.Errorf("timeout")) + conn.On("SetDeadline", mock.Anything).Return(nil) + connector := dpdkConnector{connection: conn} + + _, err := connector.readMaxOutputLen() + + require.Error(t, err) + require.Contains(t, err.Error(), "timeout") + }) + + t.Run("should pass and set maxOutputLen if provided with valid InitMessage", func(t *testing.T) { + maxOutputLen := uint32(4567) + initMessage := initMessage{ + Version: "DPDK test version", + Pid: 1234, + MaxOutputLen: maxOutputLen, + } + message, err := json.Marshal(initMessage) + require.NoError(t, err) + conn := &mocks.Conn{} + conn.On("Read", mock.Anything).Run(func(arg mock.Arguments) { + elem := arg.Get(0).([]byte) + copy(elem, message) + }).Return(len(message), nil) + conn.On("SetDeadline", mock.Anything).Return(nil) + connector := dpdkConnector{connection: conn} + + _, err = connector.readMaxOutputLen() + + require.NoError(t, err) + require.Equal(t, maxOutputLen, connector.maxOutputLen) + }) + + t.Run("should fail if received invalid json", func(t *testing.T) { + message := `{notAJson}` + conn := &mocks.Conn{} + conn.On("Read", mock.Anything).Run(func(arg mock.Arguments) { + elem := arg.Get(0).([]byte) + copy(elem, message) + }).Return(len(message), nil) + conn.On("SetDeadline", mock.Anything).Return(nil) + connector := dpdkConnector{connection: conn} + + _, err := connector.readMaxOutputLen() + + require.Error(t, err) + require.Contains(t, err.Error(), "looking for beginning of object key string") + }) + + t.Run("should fail if received maxOutputLen equals to 0", func(t *testing.T) { + message, err := json.Marshal(initMessage{ + Version: "test", + Pid: 1, + MaxOutputLen: 0, + }) + require.NoError(t, err) + conn := &mocks.Conn{} + conn.On("Read", mock.Anything).Run(func(arg mock.Arguments) { + elem := arg.Get(0).([]byte) + copy(elem, message) + }).Return(len(message), nil) + conn.On("SetDeadline", mock.Anything).Return(nil) + connector := dpdkConnector{connection: conn} + + _, err = connector.readMaxOutputLen() + + require.Error(t, err) + require.Contains(t, err.Error(), "failed to read maxOutputLen information") + }) +} + +func Test_connect(t *testing.T) { + t.Run("should pass if PathToSocket points to socket", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + dpdk := dpdk{ + SocketPath: pathToSocket, + connector: newDpdkConnector(pathToSocket, 0), + } + go simulateSocketResponse(socket, t) + + _, err := dpdk.connector.connect() + + require.NoError(t, err) + }) +} + +func Test_getCommandResponse(t *testing.T) { + command := "/" + response := "myResponseString" + + t.Run("should return proper buffer size and value if no error occurred", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + simulateResponse(mockConn, response, nil) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.NoError(t, err) + require.Equal(t, len(response), len(buf)) + require.Equal(t, response, string(buf)) + }) + + t.Run("should return error if failed to get connection handler", func(t *testing.T) { + _, dpdk, _ := prepareEnvironment() + dpdk.connector.connection = nil + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Contains(t, err.Error(), "failed to get connection to execute / command") + require.Equal(t, 0, len(buf)) + }) + + t.Run("should return error if failed to set timeout duration", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + mockConn.On("SetDeadline", mock.Anything).Return(fmt.Errorf("deadline error")) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Contains(t, err.Error(), "deadline error") + require.Equal(t, 0, len(buf)) + }) + + t.Run("should return error if timeout occurred during Write operation", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + mockConn.On("Write", mock.Anything).Return(0, fmt.Errorf("write timeout")) + mockConn.On("SetDeadline", mock.Anything).Return(nil) + mockConn.On("Close").Return(nil) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Contains(t, err.Error(), "write timeout") + require.Equal(t, 0, len(buf)) + }) + + t.Run("should return error if timeout occurred during Read operation", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + simulateResponse(mockConn, "", fmt.Errorf("read timeout")) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Contains(t, err.Error(), "read timeout") + require.Equal(t, 0, len(buf)) + }) + + t.Run("should return error if got empty response", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + simulateResponse(mockConn, "", nil) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Equal(t, 0, len(buf)) + require.Contains(t, err.Error(), "got empty response during execution of") + }) +} diff --git a/plugins/inputs/dpdk/dpdk_notlinux.go b/plugins/inputs/dpdk/dpdk_notlinux.go new file mode 100644 index 0000000000000..a86625ff5c93f --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_notlinux.go @@ -0,0 +1,3 @@ +// +build !linux + +package dpdk diff --git a/plugins/inputs/dpdk/dpdk_test.go b/plugins/inputs/dpdk/dpdk_test.go new file mode 100644 index 0000000000000..cfee021e9e6bb --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_test.go @@ -0,0 +1,398 @@ +// +build linux + +package dpdk + +import ( + "encoding/json" + "fmt" + "net" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/plugins/inputs/dpdk/mocks" + "github.com/influxdata/telegraf/testutil" +) + +func Test_Init(t *testing.T) { + t.Run("when SocketPath field isn't set then it should be set to default value", func(t *testing.T) { + _, dpdk, _ := prepareEnvironment() + dpdk.SocketPath = "" + require.Equal(t, "", dpdk.SocketPath) + + _ = dpdk.Init() + + require.Equal(t, defaultPathToSocket, dpdk.SocketPath) + }) + + t.Run("when commands are in invalid format (doesn't start with '/') then error should be returned", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + dpdk := dpdk{ + SocketPath: pathToSocket, + AdditionalCommands: []string{"invalid"}, + } + + err := dpdk.Init() + + require.Error(t, err) + require.Contains(t, err.Error(), "command should start with '/'") + }) + + t.Run("when all values are valid, then no error should be returned", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + dpdk := dpdk{ + SocketPath: pathToSocket, + DeviceTypes: []string{"ethdev"}, + Log: testutil.Logger{}, + } + go simulateSocketResponse(socket, t) + + err := dpdk.Init() + + require.NoError(t, err) + }) + + t.Run("when device_types and additional_commands are empty, then error should be returned", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + dpdk := dpdk{ + SocketPath: pathToSocket, + DeviceTypes: []string{}, + AdditionalCommands: []string{}, + Log: testutil.Logger{}, + } + + err := dpdk.Init() + + require.Error(t, err) + require.Contains(t, err.Error(), "plugin was configured with nothing to read") + }) +} + +func Test_validateCommands(t *testing.T) { + t.Run("when validating commands in correct format then no error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{"/test", "/help"}, + } + + err := dpdk.validateCommands() + + require.NoError(t, err) + }) + + t.Run("when validating command that doesn't begin with slash then error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "commandWithoutSlash", + }, + } + + err := dpdk.validateCommands() + + require.Error(t, err) + require.Contains(t, err.Error(), "command should start with '/'") + }) + + t.Run("when validating long command (without parameters) then error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "/" + strings.Repeat("a", maxCommandLength), + }, + } + + err := dpdk.validateCommands() + + require.Error(t, err) + require.Contains(t, err.Error(), "command is too long") + }) + + t.Run("when validating long command (with params) then error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "/," + strings.Repeat("a", maxCommandLengthWithParams), + }, + } + + err := dpdk.validateCommands() + + require.Error(t, err) + require.Contains(t, err.Error(), "shall be less than 1024 characters") + }) + + t.Run("when validating empty command then error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "", + }, + } + + err := dpdk.validateCommands() + + require.Error(t, err) + require.Contains(t, err.Error(), "got empty command") + }) + + t.Run("when validating commands with duplicates then duplicates should be removed and no error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "/test", + }, + } + require.Equal(t, 2, len(dpdk.AdditionalCommands)) + + err := dpdk.validateCommands() + + require.Equal(t, 1, len(dpdk.AdditionalCommands)) + require.NoError(t, err) + }) +} + +func Test_dpdkPluginDescriber(t *testing.T) { + dpdk := dpdk{} + t.Run("sampleConfig function should return value from constant", func(t *testing.T) { + require.Equal(t, sampleConfig, dpdk.SampleConfig()) + }) + + t.Run("description function should return value from constant", func(t *testing.T) { + require.Equal(t, description, dpdk.Description()) + }) +} + +func prepareEnvironment() (*mocks.Conn, dpdk, *testutil.Accumulator) { + mockConnection := &mocks.Conn{} + dpdk := dpdk{ + connector: &dpdkConnector{ + connection: mockConnection, + maxOutputLen: 1024, + accessTimeout: 2 * time.Second, + }, + Log: testutil.Logger{}, + } + mockAcc := &testutil.Accumulator{} + return mockConnection, dpdk, mockAcc +} + +func Test_processCommand(t *testing.T) { + t.Run("should pass if received valid response", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := `{"/": ["/", "/eal/app_params", "/eal/params", "/ethdev/link_status"]}` + simulateResponse(mockConn, response, nil) + + dpdk.processCommand(mockAcc, "/") + + require.Equal(t, 0, len(mockAcc.Errors)) + }) + + t.Run("if received a non-JSON object then should return error", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := `notAJson` + simulateResponse(mockConn, response, nil) + + dpdk.processCommand(mockAcc, "/") + + require.Equal(t, 1, len(mockAcc.Errors)) + require.Contains(t, mockAcc.Errors[0].Error(), "invalid character") + }) + + t.Run("if failed to get command response then accumulator should contain error", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + mockConn.On("Write", mock.Anything).Return(0, fmt.Errorf("deadline exceeded")) + mockConn.On("SetDeadline", mock.Anything).Return(nil) + mockConn.On("Close").Return(nil) + + dpdk.processCommand(mockAcc, "/") + + require.Equal(t, 1, len(mockAcc.Errors)) + require.Contains(t, mockAcc.Errors[0].Error(), "deadline exceeded") + }) + + t.Run("if response contains nil or empty value then error should be returned in accumulator", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := `{"/test": null}` + simulateResponse(mockConn, response, nil) + + dpdk.processCommand(mockAcc, "/test,param") + + require.Equal(t, 1, len(mockAcc.Errors)) + require.Contains(t, mockAcc.Errors[0].Error(), "got empty json on") + }) +} + +func Test_appendCommandsWithParams(t *testing.T) { + t.Run("when got valid data, then valid commands with params should be created", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := `{"/testendpoint": [1,123]}` + simulateResponse(mockConn, response, nil) + expectedCommands := []string{"/action1,1", "/action1,123", "/action2,1", "/action2,123"} + + result, err := dpdk.appendCommandsWithParamsFromList("/testendpoint", []string{"/action1", "/action2"}) + + require.NoError(t, err) + require.Equal(t, 4, len(result)) + require.ElementsMatch(t, result, expectedCommands) + }) +} + +func Test_getCommandsAndParamsCombinations(t *testing.T) { + t.Run("when 2 ethdev commands are enabled, then 2*numberOfIds new commands should be appended", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := fmt.Sprintf(`{"%s": [1, 123]}`, ethdevListCommand) + simulateResponse(mockConn, response, nil) + expectedCommands := []string{"/ethdev/stats,1", "/ethdev/stats,123", "/ethdev/xstats,1", "/ethdev/xstats,123"} + + dpdk.DeviceTypes = []string{"ethdev"} + dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"} + dpdk.ethdevExcludedCommandsFilter, _ = filter.Compile([]string{}) + dpdk.AdditionalCommands = []string{} + commands := dpdk.gatherCommands(mockAcc) + + require.ElementsMatch(t, commands, expectedCommands) + require.Equal(t, 0, len(mockAcc.Errors)) + }) + + t.Run("when 1 rawdev command is enabled, then 2*numberOfIds new commands should be appended", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := fmt.Sprintf(`{"%s": [1, 123]}`, rawdevListCommand) + simulateResponse(mockConn, response, nil) + expectedCommands := []string{"/rawdev/xstats,1", "/rawdev/xstats,123"} + + dpdk.DeviceTypes = []string{"rawdev"} + dpdk.rawdevCommands = []string{"/rawdev/xstats"} + dpdk.AdditionalCommands = []string{} + commands := dpdk.gatherCommands(mockAcc) + + require.ElementsMatch(t, commands, expectedCommands) + require.Equal(t, 0, len(mockAcc.Errors)) + }) + + t.Run("when 2 ethdev commands are enabled but one command is disabled, then numberOfIds new commands should be appended", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := fmt.Sprintf(`{"%s": [1, 123]}`, ethdevListCommand) + simulateResponse(mockConn, response, nil) + expectedCommands := []string{"/ethdev/stats,1", "/ethdev/stats,123"} + + dpdk.DeviceTypes = []string{"ethdev"} + dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"} + dpdk.ethdevExcludedCommandsFilter, _ = filter.Compile([]string{"/ethdev/xstats"}) + dpdk.AdditionalCommands = []string{} + commands := dpdk.gatherCommands(mockAcc) + + require.ElementsMatch(t, commands, expectedCommands) + require.Equal(t, 0, len(mockAcc.Errors)) + }) + + t.Run("when ethdev commands are enabled but params fetching command returns error then error should be logged in accumulator", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + simulateResponse(mockConn, `{notAJson}`, fmt.Errorf("some error")) + + dpdk.DeviceTypes = []string{"ethdev"} + dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"} + dpdk.ethdevExcludedCommandsFilter, _ = filter.Compile([]string{}) + dpdk.AdditionalCommands = []string{} + commands := dpdk.gatherCommands(mockAcc) + + require.Equal(t, 0, len(commands)) + require.Equal(t, 1, len(mockAcc.Errors)) + }) +} + +func Test_Gather(t *testing.T) { + t.Run("When parsing a plain json without nested object, then its key should be equal to \"\"", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + dpdk.AdditionalCommands = []string{"/endpoint1"} + simulateResponse(mockConn, `{"/endpoint1":"myvalue"}`, nil) + + err := dpdk.Gather(mockAcc) + + require.NoError(t, err) + require.Equal(t, 0, len(mockAcc.Errors)) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "dpdk", + map[string]string{ + "command": "/endpoint1", + "params": "", + }, + map[string]interface{}{ + "": "myvalue", + }, + time.Unix(0, 0), + ), + } + + actual := mockAcc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) + }) + + t.Run("When parsing a list of value in nested object then list should be flattened", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + dpdk.AdditionalCommands = []string{"/endpoint1"} + simulateResponse(mockConn, `{"/endpoint1":{"myvalue":[0,1,123]}}`, nil) + + err := dpdk.Gather(mockAcc) + require.NoError(t, err) + require.Equal(t, 0, len(mockAcc.Errors)) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "dpdk", + map[string]string{ + "command": "/endpoint1", + "params": "", + }, + map[string]interface{}{ + "myvalue_0": float64(0), + "myvalue_1": float64(1), + "myvalue_2": float64(123), + }, + time.Unix(0, 0), + ), + } + + actual := mockAcc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) + }) +} + +func simulateResponse(mockConn *mocks.Conn, response string, readErr error) { + mockConn.On("Write", mock.Anything).Return(0, nil) + mockConn.On("Read", mock.Anything).Run(func(arg mock.Arguments) { + elem := arg.Get(0).([]byte) + copy(elem, response) + }).Return(len(response), readErr) + mockConn.On("SetDeadline", mock.Anything).Return(nil) + + if readErr != nil { + mockConn.On("Close").Return(nil) + } +} + +func simulateSocketResponse(socket net.Listener, t *testing.T) { + conn, err := socket.Accept() + require.NoError(t, err) + + initMessage, err := json.Marshal(initMessage{MaxOutputLen: 1}) + require.NoError(t, err) + + _, err = conn.Write(initMessage) + require.NoError(t, err) +} diff --git a/plugins/inputs/dpdk/dpdk_utils.go b/plugins/inputs/dpdk/dpdk_utils.go new file mode 100644 index 0000000000000..962186a424893 --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_utils.go @@ -0,0 +1,116 @@ +// +build linux + +package dpdk + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "strconv" + "strings" + + "github.com/influxdata/telegraf/filter" +) + +func commandWithParams(command string, params string) string { + if params != "" { + return command + "," + params + } + return command +} + +func stripParams(command string) string { + index := strings.IndexRune(command, ',') + if index == -1 { + return command + } + return command[:index] +} + +// Since DPDK is an open-source project, developers can use their own format of params +// so it could "/command,1,3,5,123" or "/command,userId=1, count=1234". +// To avoid issues with different formats of params, all params are returned as single string +func getParams(command string) string { + index := strings.IndexRune(command, ',') + if index == -1 { + return "" + } + return command[index+1:] +} + +// Checks if provided path points to socket +func isSocket(path string) error { + pathInfo, err := os.Lstat(path) + if os.IsNotExist(err) { + return fmt.Errorf("provided path does not exist: '%v'", path) + } + + if err != nil { + return fmt.Errorf("cannot get system information of '%v' file: %v", path, err) + } + + if pathInfo.Mode()&os.ModeSocket != os.ModeSocket { + return fmt.Errorf("provided path does not point to a socket file: '%v'", path) + } + + return nil +} + +// Converts JSON array containing devices identifiers from DPDK response to string slice +func jsonToArray(input []byte, command string) ([]string, error) { + if len(input) == 0 { + return nil, fmt.Errorf("got empty object instead of json") + } + + var rawMessage map[string]json.RawMessage + err := json.Unmarshal(input, &rawMessage) + if err != nil { + return nil, err + } + + var intArray []int64 + var stringArray []string + err = json.Unmarshal(rawMessage[command], &intArray) + if err != nil { + return nil, fmt.Errorf("failed to unmarshall json response - %v", err) + } + + for _, value := range intArray { + stringArray = append(stringArray, strconv.FormatInt(value, 10)) + } + + return stringArray, nil +} + +func removeSubset(elements []string, excludedFilter filter.Filter) []string { + if excludedFilter == nil { + return elements + } + + var result []string + for _, element := range elements { + if !excludedFilter.Match(element) { + result = append(result, element) + } + } + + return result +} + +func uniqueValues(values []string) []string { + in := make(map[string]bool) + result := make([]string, 0, len(values)) + + for _, value := range values { + if !in[value] { + in[value] = true + result = append(result, value) + } + } + return result +} + +func isEmpty(value interface{}) bool { + return value == nil || (reflect.ValueOf(value).Kind() == reflect.Ptr && reflect.ValueOf(value).IsNil()) +} diff --git a/plugins/inputs/dpdk/dpdk_utils_test.go b/plugins/inputs/dpdk/dpdk_utils_test.go new file mode 100644 index 0000000000000..6697e9ab38113 --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_utils_test.go @@ -0,0 +1,137 @@ +// +build linux + +package dpdk + +import ( + "fmt" + "net" + "os" + "strconv" + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_isSocket(t *testing.T) { + t.Run("when path points to non-existing file then error should be returned", func(t *testing.T) { + err := isSocket("/tmp/file-that-doesnt-exists") + + require.Error(t, err) + require.Contains(t, err.Error(), "provided path does not exist") + }) + + t.Run("should pass if path points to socket", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + + err := isSocket(pathToSocket) + + require.NoError(t, err) + }) + + t.Run("if path points to regular file instead of socket then error should be returned", func(t *testing.T) { + pathToFile := "/tmp/dpdk-text-file.txt" + _, err := os.Create(pathToFile) + require.NoError(t, err) + defer os.Remove(pathToFile) + + err = isSocket(pathToFile) + + require.Error(t, err) + require.Contains(t, err.Error(), "provided path does not point to a socket file") + }) +} + +func Test_stripParams(t *testing.T) { + command := "/mycommand" + params := "myParams" + t.Run("when passed string without params then passed string should be returned", func(t *testing.T) { + strippedCommand := stripParams(command) + + require.Equal(t, command, strippedCommand) + }) + + t.Run("when passed string with params then string without params should be returned", func(t *testing.T) { + strippedCommand := stripParams(commandWithParams(command, params)) + + require.Equal(t, command, strippedCommand) + }) +} + +func Test_commandWithParams(t *testing.T) { + command := "/mycommand" + params := "myParams" + t.Run("when passed string with params then command with comma should be returned", func(t *testing.T) { + commandWithParams := commandWithParams(command, params) + + require.Equal(t, command+","+params, commandWithParams) + }) + + t.Run("when passed command with no params then command should be returned", func(t *testing.T) { + commandWithParams := commandWithParams(command, "") + + require.Equal(t, command, commandWithParams) + }) +} + +func Test_getParams(t *testing.T) { + command := "/mycommand" + params := "myParams" + t.Run("when passed string with params then command with comma should be returned", func(t *testing.T) { + commandParams := getParams(commandWithParams(command, params)) + + require.Equal(t, params, commandParams) + }) + + t.Run("when passed command with no params then empty string (representing empty params) should be returned", func(t *testing.T) { + commandParams := getParams(commandWithParams(command, "")) + + require.Equal(t, "", commandParams) + }) +} + +func Test_jsonToArray(t *testing.T) { + key := "/ethdev/list" + t.Run("when got numeric array then string array should be returned", func(t *testing.T) { + firstValue := int64(0) + secondValue := int64(1) + jsonString := fmt.Sprintf(`{"%s": [%d, %d]}`, key, firstValue, secondValue) + + arr, err := jsonToArray([]byte(jsonString), key) + + require.NoError(t, err) + require.Equal(t, strconv.FormatInt(firstValue, 10), arr[0]) + require.Equal(t, strconv.FormatInt(secondValue, 10), arr[1]) + }) + + t.Run("if non-json string is supplied as input then error should be returned", func(t *testing.T) { + _, err := jsonToArray([]byte("{notAJson}"), key) + + require.Error(t, err) + }) + + t.Run("when empty string is supplied as input then error should be returned", func(t *testing.T) { + jsonString := "" + + _, err := jsonToArray([]byte(jsonString), key) + + require.Error(t, err) + require.Contains(t, err.Error(), "got empty object instead of json") + }) + + t.Run("when valid json with json-object is supplied as input then error should be returned", func(t *testing.T) { + jsonString := fmt.Sprintf(`{"%s": {"testKey": "testValue"}}`, key) + + _, err := jsonToArray([]byte(jsonString), key) + + require.Error(t, err) + require.Contains(t, err.Error(), "failed to unmarshall json response") + }) +} + +func createSocketForTest(t *testing.T) (string, net.Listener) { + pathToSocket := "/tmp/dpdk-test-socket" + socket, err := net.Listen("unixpacket", pathToSocket) + require.NoError(t, err) + return pathToSocket, socket +} diff --git a/plugins/inputs/dpdk/mocks/conn.go b/plugins/inputs/dpdk/mocks/conn.go new file mode 100644 index 0000000000000..58961039dce86 --- /dev/null +++ b/plugins/inputs/dpdk/mocks/conn.go @@ -0,0 +1,146 @@ +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. + +package mocks + +import ( + net "net" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// Conn is an autogenerated mock type for the Conn type +type Conn struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *Conn) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// LocalAddr provides a mock function with given fields: +func (_m *Conn) LocalAddr() net.Addr { + ret := _m.Called() + + var r0 net.Addr + if rf, ok := ret.Get(0).(func() net.Addr); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.Addr) + } + } + + return r0 +} + +// Read provides a mock function with given fields: b +func (_m *Conn) Read(b []byte) (int, error) { + ret := _m.Called(b) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(b) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(b) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoteAddr provides a mock function with given fields: +func (_m *Conn) RemoteAddr() net.Addr { + ret := _m.Called() + + var r0 net.Addr + if rf, ok := ret.Get(0).(func() net.Addr); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.Addr) + } + } + + return r0 +} + +// SetDeadline provides a mock function with given fields: t +func (_m *Conn) SetDeadline(t time.Time) error { + ret := _m.Called(t) + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(t) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetReadDeadline provides a mock function with given fields: t +func (_m *Conn) SetReadDeadline(t time.Time) error { + ret := _m.Called(t) + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(t) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetWriteDeadline provides a mock function with given fields: t +func (_m *Conn) SetWriteDeadline(t time.Time) error { + ret := _m.Called(t) + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(t) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Write provides a mock function with given fields: b +func (_m *Conn) Write(b []byte) (int, error) { + ret := _m.Called(b) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(b) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(b) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} From ba05724918096f4d8f323952ecb1f88993fcf339 Mon Sep 17 00:00:00 2001 From: Thomas Casteleyn Date: Wed, 28 Apr 2021 17:57:01 +0200 Subject: [PATCH 403/761] Remove outdated milestones info from FAQ (#9213) --- docs/FAQ.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/FAQ.md b/docs/FAQ.md index 3667c10ebe8c4..40a101fdf6fe1 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -60,8 +60,3 @@ You can use the following techniques to avoid cardinality issues: [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ [show cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx docs]: https://docs.influxdata.com/influxdb/latest/ - -### Q: When will the next version be released? - -The latest release date estimate can be viewed on the -[milestones](https://github.com/influxdata/telegraf/milestones) page. From 79b1ac1f06c2e4c9de3fbf24b84ee62a79b1e276 Mon Sep 17 00:00:00 2001 From: David Bennett <71459415+Jagularr@users.noreply.github.com> Date: Wed, 28 Apr 2021 12:31:48 -0400 Subject: [PATCH 404/761] Converter processor: add support for large hexadecimal strings (#9160) * add oauth2 to http input * reset not included changes * reset not included changes * reset not included changes * add hexadecimal parser changes * add linter changes * add documentation note --- plugins/processors/converter/README.md | 2 + plugins/processors/converter/converter.go | 48 ++++++++++++++++++- .../processors/converter/converter_test.go | 32 +++++++++++++ 3 files changed, 80 insertions(+), 2 deletions(-) diff --git a/plugins/processors/converter/README.md b/plugins/processors/converter/README.md index d916c87643bee..46a2e2ec6390a 100644 --- a/plugins/processors/converter/README.md +++ b/plugins/processors/converter/README.md @@ -9,6 +9,8 @@ Values that cannot be converted are dropped. uniquely identifiable. Fields with the same series key (measurement + tags) will overwrite one another. +**Note on large strings being converted to numeric types:** When converting a string value to a numeric type, precision may be lost if the number is too large. The largest numeric type this plugin supports is `float64`, and if a string 'number' exceeds its size limit, accuracy may be lost. + ### Configuration ```toml # Convert values to another metric value type diff --git a/plugins/processors/converter/converter.go b/plugins/processors/converter/converter.go index 6f69d2eb6714e..fd56cc4d9a6a8 100644 --- a/plugins/processors/converter/converter.go +++ b/plugins/processors/converter/converter.go @@ -1,9 +1,12 @@ package converter import ( + "errors" "fmt" "math" + "math/big" "strconv" + "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" @@ -368,10 +371,19 @@ func toInteger(v interface{}) (int64, bool) { result, err := strconv.ParseInt(value, 0, 64) if err != nil { - result, err := strconv.ParseFloat(value, 64) + var result float64 + var err error + + if isHexadecimal(value) { + result, err = parseHexadecimal(value) + } else { + result, err = strconv.ParseFloat(value, 64) + } + if err != nil { return 0, false } + return toInteger(result) } return result, true @@ -405,10 +417,19 @@ func toUnsigned(v interface{}) (uint64, bool) { result, err := strconv.ParseUint(value, 0, 64) if err != nil { - result, err := strconv.ParseFloat(value, 64) + var result float64 + var err error + + if isHexadecimal(value) { + result, err = parseHexadecimal(value) + } else { + result, err = strconv.ParseFloat(value, 64) + } + if err != nil { return 0, false } + return toUnsigned(result) } return result, true @@ -430,6 +451,11 @@ func toFloat(v interface{}) (float64, bool) { } return 0.0, true case string: + if isHexadecimal(value) { + result, err := parseHexadecimal(value) + return result, err == nil + } + result, err := strconv.ParseFloat(value, 64) return result, err == nil } @@ -452,6 +478,24 @@ func toString(v interface{}) (string, bool) { return "", false } +func parseHexadecimal(value string) (float64, error) { + i := new(big.Int) + + _, success := i.SetString(value, 0) + if !success { + return 0, errors.New("unable to parse string to big int") + } + + f := new(big.Float).SetInt(i) + result, _ := f.Float64() + + return result, nil +} + +func isHexadecimal(value string) bool { + return len(value) >= 3 && strings.ToLower(value)[1] == 'x' +} + func init() { processors.Add("converter", func() telegraf.Processor { return &Converter{} diff --git a/plugins/processors/converter/converter_test.go b/plugins/processors/converter/converter_test.go index 0a8200dbef449..b9e30c589a88d 100644 --- a/plugins/processors/converter/converter_test.go +++ b/plugins/processors/converter/converter_test.go @@ -432,6 +432,38 @@ func TestConverter(t *testing.T) { ), }, }, + { + name: "from string field hexidecimal", + converter: &Converter{ + Fields: &Conversion{ + Integer: []string{"a"}, + Unsigned: []string{"b"}, + Float: []string{"c"}, + }, + }, + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": "0x11826c", + "b": "0x11826c", + "c": "0x2139d19bb1c580ebe0", + }, + time.Unix(0, 0), + ), + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": int64(1147500), + "b": uint64(1147500), + "c": float64(612908836750534700000), + }, + time.Unix(0, 0), + ), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From c0d5af16021cf55da60413903decafe2a358e62b Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 28 Apr 2021 17:48:59 -0600 Subject: [PATCH 405/761] Update changelog (cherry picked from commit d900a35839170e30ddf28b855611e2c5aec99343) --- CHANGELOG.md | 14 ++++++++++++++ etc/telegraf.conf | 10 ++++++++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 43e4e3fdd5e11..51af7faf9a490 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,17 @@ +## v1.18.2 [2021-04-28] + +#### Bugfixes + + - [#9160](https://github.com/influxdata/telegraf/pull/9160) `processors.converter` Add support for large hexadecimal strings + - [#9195](https://github.com/influxdata/telegraf/pull/9195) `inputs.apcupsd` Fix apcupsd 'ALARMDEL' bug via forked repo + - [#9110](https://github.com/influxdata/telegraf/pull/9110) `parsers.json` Make JSON format compatible with nulls + - [#9128](https://github.com/influxdata/telegraf/pull/9128) `inputs.nfsclient` Fix nfsclient ops map to allow collection of metrics other than read and write + - [#8917](https://github.com/influxdata/telegraf/pull/8917) `inputs.snmp` Log snmpv3 auth failures + - [#8892](https://github.com/influxdata/telegraf/pull/8892) `common.shim` Accept larger inputs from scanner + - [#9045](https://github.com/influxdata/telegraf/pull/9045) `inputs.vsphere` Add MetricLookback setting to handle reporting delays in vCenter 6.7 and later + - [#9026](https://github.com/influxdata/telegraf/pull/9026) `outputs.sumologic` Carbon2 serializer: sanitize metric name + - [#9086](https://github.com/influxdata/telegraf/pull/9086) `inputs.opcua` Fix error handling + ## v1.18.1 [2021-04-07] #### Bugfixes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index c870d5bc4d4ba..5160db820730f 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1263,7 +1263,7 @@ # ## Prometheus format. When using the prometheus input, use the same value in # ## both plugins to ensure metrics are round-tripped without modification. # ## -# ## example: metric_version = 1; +# ## example: metric_version = 1; # ## metric_version = 2; recommended version # # metric_version = 1 # @@ -7581,7 +7581,7 @@ # ## value in both plugins to ensure metrics are round-tripped without # ## modification. # ## -# ## example: metric_version = 1; +# ## example: metric_version = 1; # ## metric_version = 2; recommended version # # metric_version = 1 # @@ -8164,6 +8164,12 @@ # # custom_attribute_include = [] # # custom_attribute_exclude = ["*"] # +# ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In +# ## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported +# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing +# ## it too much may cause performance issues. +# # metric_lookback = 3 +# # ## Optional SSL Config # # ssl_ca = "/path/to/cafile" # # ssl_cert = "/path/to/certfile" From e3ae7caaf00f6c8d67c83563cce6298d841c67f2 Mon Sep 17 00:00:00 2001 From: i-prudnikov Date: Thu, 29 Apr 2021 19:06:36 +0300 Subject: [PATCH 406/761] Fix for #9135 (#9156) --- plugins/inputs/aliyuncms/README.md | 37 ++- plugins/inputs/aliyuncms/aliyuncms.go | 368 ++++++++++++--------- plugins/inputs/aliyuncms/aliyuncms_test.go | 46 ++- plugins/inputs/aliyuncms/discovery.go | 115 +++---- 4 files changed, 307 insertions(+), 259 deletions(-) diff --git a/plugins/inputs/aliyuncms/README.md b/plugins/inputs/aliyuncms/README.md index 4304de593bbc2..c239baa63e05c 100644 --- a/plugins/inputs/aliyuncms/README.md +++ b/plugins/inputs/aliyuncms/README.md @@ -36,7 +36,26 @@ In the following order the plugin will attempt to authenticate. # private_key = "" # public_key_id = "" # role_name = "" - + + ## Specify the ali cloud region list to be queried for metrics and objects discovery + ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here + ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm + ## Default supported regions are: + ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, + ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, + ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 + ## + ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich + ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then + ## it will be reported on the start - for example for 'acs_cdn' project: + ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) + ## Currently, discovery supported for the following projects: + ## - acs_ecs_dashboard + ## - acs_rds_dashboard + ## - acs_slb_dashboard + ## - acs_vpc_eip + regions = ["cn-hongkong"] + # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. Some are collected at # 3 minute, 5 minute, or larger intervals. @@ -61,21 +80,7 @@ In the following order the plugin will attempt to authenticate. ## Maximum requests per second, default value is 200 ratelimit = 200 - ## Discovery regions set the scope for object discovery, the discovered info can be used to enrich - ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then - ## it will be reported on the start - foo example for 'acs_cdn' project: - ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) - ## Currently, discovery supported for the following projects: - ## - acs_ecs_dashboard - ## - acs_rds_dashboard - ## - acs_slb_dashboard - ## - acs_vpc_eip - ## - ## If not set, all regions would be covered, it can provide a significant load on API, so the recommendation here - ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm - discovery_regions = ["cn-hongkong"] - - ## how often the discovery API call executed (default 1m) + ## How often the discovery API call executed (default 1m) #discovery_interval = "1m" ## Metrics to Pull (Required) diff --git a/plugins/inputs/aliyuncms/aliyuncms.go b/plugins/inputs/aliyuncms/aliyuncms.go index ac70b9a441597..1dc20d7187853 100644 --- a/plugins/inputs/aliyuncms/aliyuncms.go +++ b/plugins/inputs/aliyuncms/aliyuncms.go @@ -11,112 +11,116 @@ import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers" "github.com/aliyun/alibaba-cloud-sdk-go/services/cms" - "github.com/jmespath/go-jmespath" - "github.com/pkg/errors" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/limiter" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/jmespath/go-jmespath" + "github.com/pkg/errors" ) const ( description = "Pull Metric Statistics from Aliyun CMS" sampleConfig = ` - ## Aliyun Credentials - ## Credentials are loaded in the following order - ## 1) Ram RoleArn credential - ## 2) AccessKey STS token credential - ## 3) AccessKey credential - ## 4) Ecs Ram Role credential - ## 5) RSA keypair credential - ## 6) Environment variables credential - ## 7) Instance metadata credential - - # access_key_id = "" - # access_key_secret = "" - # access_key_sts_token = "" - # role_arn = "" - # role_session_name = "" - # private_key = "" - # public_key_id = "" - # role_name = "" - - # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all - # metrics are made available to the 1 minute period. Some are collected at - # 3 minute, 5 minute, or larger intervals. - # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv - # Note that if a period is configured that is smaller than the minimum for a - # particular metric, that metric will not be returned by the Aliyun OpenAPI - # and will not be collected by Telegraf. - # - ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) - period = "5m" - - ## Collection Delay (required - must account for metrics availability via AliyunCMS API) - delay = "1m" - - ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid - ## gaps or overlap in pulled data - interval = "5m" - - ## Metric Statistic Project (required) - project = "acs_slb_dashboard" - - ## Maximum requests per second, default value is 200 - ratelimit = 200 - - ## Discovery regions set the scope for object discovery, the discovered info can be used to enrich - ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then - ## it will be reported on the start - foo example for 'acs_cdn' project: - ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) - ## Currently, discovery supported for the following projects: - ## - acs_ecs_dashboard - ## - acs_rds_dashboard - ## - acs_slb_dashboard - ## - acs_vpc_eip - ## - ## If not set, all regions would be covered, it can provide a significant load on API, so the recommendation here - ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm - discovery_regions = ["cn-hongkong"] - - ## how often the discovery API call executed (default 1m) - #discovery_interval = "1m" - - ## Metrics to Pull (Required) - [[inputs.aliyuncms.metrics]] - ## Metrics names to be requested, - ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq - names = ["InstanceActiveConnection", "InstanceNewConnection"] - - ## Dimension filters for Metric (these are optional). - ## This allows to get additional metric dimension. If dimension is not specified it can be returned or - ## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq - ## - ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled) - ## Values specified here would be added into the list of discovered objects. - ## You can specify either single dimension: - #dimensions = '{"instanceId": "p-example"}' - - ## Or you can specify several dimensions at once: - #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' - - ## Enrichment tags, can be added from discovery (if supported) - ## Notation is : - ## To figure out which fields are available, consult the Describe API per project. - ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers¶ms={}&tab=MOCK&lang=GO - #tag_query_path = [ - # "address:Address", - # "name:LoadBalancerName", - # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" - # ] - ## The following tags added by default: regionId (if discovery enabled), userId, instanceId. - - ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery - ## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage - ## of discovery scope vs monitoring scope - #allow_dps_without_discovery = false + ## Aliyun Credentials + ## Credentials are loaded in the following order + ## 1) Ram RoleArn credential + ## 2) AccessKey STS token credential + ## 3) AccessKey credential + ## 4) Ecs Ram Role credential + ## 5) RSA keypair credential + ## 6) Environment variables credential + ## 7) Instance metadata credential + + # access_key_id = "" + # access_key_secret = "" + # access_key_sts_token = "" + # role_arn = "" + # role_session_name = "" + # private_key = "" + # public_key_id = "" + # role_name = "" + + ## Specify the ali cloud region list to be queried for metrics and objects discovery + ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here + ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm + ## Default supported regions are: + ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, + ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, + ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 + ## + ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich + ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then + ## it will be reported on the start - for example for 'acs_cdn' project: + ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) + ## Currently, discovery supported for the following projects: + ## - acs_ecs_dashboard + ## - acs_rds_dashboard + ## - acs_slb_dashboard + ## - acs_vpc_eip + regions = ["cn-hongkong"] + + # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all + # metrics are made available to the 1 minute period. Some are collected at + # 3 minute, 5 minute, or larger intervals. + # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv + # Note that if a period is configured that is smaller than the minimum for a + # particular metric, that metric will not be returned by the Aliyun OpenAPI + # and will not be collected by Telegraf. + # + ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) + period = "5m" + + ## Collection Delay (required - must account for metrics availability via AliyunCMS API) + delay = "1m" + + ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid + ## gaps or overlap in pulled data + interval = "5m" + + ## Metric Statistic Project (required) + project = "acs_slb_dashboard" + + ## Maximum requests per second, default value is 200 + ratelimit = 200 + + ## How often the discovery API call executed (default 1m) + #discovery_interval = "1m" + + ## Metrics to Pull (Required) + [[inputs.aliyuncms.metrics]] + ## Metrics names to be requested, + ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + names = ["InstanceActiveConnection", "InstanceNewConnection"] + + ## Dimension filters for Metric (these are optional). + ## This allows to get additional metric dimension. If dimension is not specified it can be returned or + ## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq + ## + ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled) + ## Values specified here would be added into the list of discovered objects. + ## You can specify either single dimension: + #dimensions = '{"instanceId": "p-example"}' + + ## Or you can specify several dimensions at once: + #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' + + ## Enrichment tags, can be added from discovery (if supported) + ## Notation is : + ## To figure out which fields are available, consult the Describe API per project. + ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers¶ms={}&tab=MOCK&lang=GO + #tag_query_path = [ + # "address:Address", + # "name:LoadBalancerName", + # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" + # ] + ## The following tags added by default: regionId (if discovery enabled), userId, instanceId. + + ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery + ## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage + ## of discovery scope vs monitoring scope + #allow_dps_without_discovery = false ` ) @@ -132,7 +136,7 @@ type ( PublicKeyID string `toml:"public_key_id"` RoleName string `toml:"role_name"` - DiscoveryRegions []string `toml:"discovery_regions"` + Regions []string `toml:"regions"` DiscoveryInterval config.Duration `toml:"discovery_interval"` Period config.Duration `toml:"period"` Delay config.Duration `toml:"delay"` @@ -162,7 +166,7 @@ type ( dtLock sync.Mutex //Guard for discoveryTags & dimensions discoveryTags map[string]map[string]string //Internal data structure that can enrich metrics with tags dimensionsUdObj map[string]string - dimensionsUdArr []map[string]string //Parsed Dimensions JSON string (unmarshalled) + dimensionsUdArr []map[string]string //Parsed Dimesnsions JSON string (unmarshalled) requestDimensions []map[string]string //this is the actual dimensions list that would be used in API request requestDimensionsStr string //String representation of the above @@ -178,6 +182,31 @@ type ( } ) +// https://www.alibabacloud.com/help/doc-detail/40654.htm?gclid=Cj0KCQjw4dr0BRCxARIsAKUNjWTAMfyVUn_Y3OevFBV3CMaazrhq0URHsgE7c0m0SeMQRKlhlsJGgIEaAviyEALw_wcB +var aliyunRegionList = []string{ + "cn-qingdao", + "cn-beijing", + "cn-zhangjiakou", + "cn-huhehaote", + "cn-hangzhou", + "cn-shanghai", + "cn-shenzhen", + "cn-heyuan", + "cn-chengdu", + "cn-hongkong", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-5", + "ap-south-1", + "ap-northeast-1", + "us-west-1", + "us-east-1", + "eu-central-1", + "eu-west-1", + "me-east-1", +} + // SampleConfig implements telegraf.Inputs interface func (s *AliyunCMS) SampleConfig() string { return sampleConfig @@ -188,6 +217,7 @@ func (s *AliyunCMS) Description() string { return description } +// Init perform checks of plugin inputs and initialize internals func (s *AliyunCMS) Init() error { if s.Project == "" { return errors.New("project is not set") @@ -238,9 +268,16 @@ func (s *AliyunCMS) Init() error { s.measurement = formatMeasurement(s.Project) + //Check regions + if len(s.Regions) == 0 { + s.Regions = aliyunRegionList + s.Log.Infof("'regions' is not set. Metrics will be queried across %d regions:\n%s", + len(s.Regions), strings.Join(s.Regions, ",")) + } + //Init discovery... if s.dt == nil { //Support for tests - s.dt, err = newDiscoveryTool(s.DiscoveryRegions, s.Project, s.Log, credential, int(float32(s.RateLimit)*0.2), time.Duration(s.DiscoveryInterval)) + s.dt, err = newDiscoveryTool(s.Regions, s.Project, s.Log, credential, int(float32(s.RateLimit)*0.2), time.Duration(s.DiscoveryInterval)) if err != nil { s.Log.Errorf("Discovery tool is not activated: %v", err) s.dt = nil @@ -248,7 +285,7 @@ func (s *AliyunCMS) Init() error { } } - s.discoveryData, err = s.dt.getDiscoveryDataAllRegions(nil) + s.discoveryData, err = s.dt.getDiscoveryDataAcrossRegions(nil) if err != nil { s.Log.Errorf("Discovery tool is not activated: %v", err) s.dt = nil @@ -265,10 +302,11 @@ func (s *AliyunCMS) Init() error { return nil } +// Start plugin discovery loop, metrics are gathered through Gather func (s *AliyunCMS) Start(telegraf.Accumulator) error { //Start periodic discovery process if s.dt != nil { - s.dt.Start() + s.dt.start() } return nil @@ -300,9 +338,10 @@ func (s *AliyunCMS) Gather(acc telegraf.Accumulator) error { return nil } +// Stop - stops the plugin discovery loop func (s *AliyunCMS) Stop() { if s.dt != nil { - s.dt.Stop() + s.dt.stop() } } @@ -327,78 +366,85 @@ func (s *AliyunCMS) updateWindow(relativeTo time.Time) { // Gather given metric and emit error func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, metric *Metric) error { - req := cms.CreateDescribeMetricListRequest() - req.Period = strconv.FormatInt(int64(time.Duration(s.Period).Seconds()), 10) - req.MetricName = metricName - req.Length = "10000" - req.Namespace = s.Project - req.EndTime = strconv.FormatInt(s.windowEnd.Unix()*1000, 10) - req.StartTime = strconv.FormatInt(s.windowStart.Unix()*1000, 10) - req.Dimensions = metric.requestDimensionsStr - - for more := true; more; { - resp, err := s.client.DescribeMetricList(req) - if err != nil { - return errors.Errorf("failed to query metricName list: %v", err) - } else if resp.Code != "200" { - s.Log.Errorf("failed to query metricName list: %v", resp.Message) - break - } - - var datapoints []map[string]interface{} - if err = json.Unmarshal([]byte(resp.Datapoints), &datapoints); err != nil { - return errors.Errorf("failed to decode response datapoints: %v", err) - } + for _, region := range s.Regions { + req := cms.CreateDescribeMetricListRequest() + req.Period = strconv.FormatInt(int64(time.Duration(s.Period).Seconds()), 10) + req.MetricName = metricName + req.Length = "10000" + req.Namespace = s.Project + req.EndTime = strconv.FormatInt(s.windowEnd.Unix()*1000, 10) + req.StartTime = strconv.FormatInt(s.windowStart.Unix()*1000, 10) + req.Dimensions = metric.requestDimensionsStr + req.RegionId = region + + for more := true; more; { + resp, err := s.client.DescribeMetricList(req) + if err != nil { + return errors.Errorf("failed to query metricName list: %v", err) + } + if resp.Code != "200" { + s.Log.Errorf("failed to query metricName list: %v", resp.Message) + break + } - if len(datapoints) == 0 { - s.Log.Debugf("No metrics returned from CMS, response msg: %s", resp.Message) - break - } + var datapoints []map[string]interface{} + if err := json.Unmarshal([]byte(resp.Datapoints), &datapoints); err != nil { + return errors.Errorf("failed to decode response datapoints: %v", err) + } - NextDataPoint: - for _, datapoint := range datapoints { - fields := map[string]interface{}{} - datapointTime := int64(0) - tags := map[string]string{} - for key, value := range datapoint { - switch key { - case "instanceId", "BucketName": - tags[key] = value.(string) - if metric.discoveryTags != nil { //discovery can be not activated - //Skipping data point if discovery data not exist - if _, ok := metric.discoveryTags[value.(string)]; !ok && - !metric.AllowDataPointWODiscoveryData { - s.Log.Warnf("Instance %q is not found in discovery, skipping monitoring datapoint...", value.(string)) - continue NextDataPoint - } + if len(datapoints) == 0 { + s.Log.Debugf("No metrics returned from CMS, response msg: %s", resp.Message) + break + } - for k, v := range metric.discoveryTags[value.(string)] { - tags[k] = v + NextDataPoint: + for _, datapoint := range datapoints { + fields := map[string]interface{}{} + datapointTime := int64(0) + tags := map[string]string{} + for key, value := range datapoint { + switch key { + case "instanceId", "BucketName": + tags[key] = value.(string) + if metric.discoveryTags != nil { //discovery can be not activated + //Skipping data point if discovery data not exist + _, ok := metric.discoveryTags[value.(string)] + if !ok && + !metric.AllowDataPointWODiscoveryData { + s.Log.Warnf("Instance %q is not found in discovery, skipping monitoring datapoint...", value.(string)) + continue NextDataPoint + } + + for k, v := range metric.discoveryTags[value.(string)] { + tags[k] = v + } } + case "userId": + tags[key] = value.(string) + case "timestamp": + datapointTime = int64(value.(float64)) / 1000 + default: + fields[formatField(metricName, key)] = value } - case "userId": - tags[key] = value.(string) - case "timestamp": - datapointTime = int64(value.(float64)) / 1000 - default: - fields[formatField(metricName, key)] = value } + //Log.logW("Datapoint time: %s, now: %s", time.Unix(datapointTime, 0).Format(time.RFC3339), time.Now().Format(time.RFC3339)) + acc.AddFields(s.measurement, fields, tags, time.Unix(datapointTime, 0)) } - //Log.logW("Datapoint time: %s, now: %s", time.Unix(datapointTime, 0).Format(time.RFC3339), time.Now().Format(time.RFC3339)) - acc.AddFields(s.measurement, fields, tags, time.Unix(datapointTime, 0)) - } - req.NextToken = resp.NextToken - more = req.NextToken != "" + req.NextToken = resp.NextToken + more = req.NextToken != "" + } } - return nil } -//Tag helper +//tag helper func parseTag(tagSpec string, data interface{}) (tagKey string, tagValue string, err error) { + var ( + ok bool + queryPath = tagSpec + ) tagKey = tagSpec - queryPath := tagSpec //Split query path to tagKey and query path if splitted := strings.Split(tagSpec, ":"); len(splitted) == 2 { @@ -416,7 +462,7 @@ func parseTag(tagSpec string, data interface{}) (tagKey string, tagValue string, return "", "", nil } - tagValue, ok := tagRawValue.(string) + tagValue, ok = tagRawValue.(string) if !ok { return "", "", errors.Errorf("Tag value %v parsed by query %q is not a string value", tagRawValue, queryPath) diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go index a2bae5d0d58a2..22e0acbc52ebe 100644 --- a/plugins/inputs/aliyuncms/aliyuncms_test.go +++ b/plugins/inputs/aliyuncms/aliyuncms_test.go @@ -123,14 +123,13 @@ func TestPluginInitialize(t *testing.T) { var err error plugin := new(AliyunCMS) - plugin.DiscoveryRegions = []string{"cn-shanghai"} - plugin.dt, err = getDiscoveryTool("acs_slb_dashboard", plugin.DiscoveryRegions) + plugin.Log = testutil.Logger{Name: inputTitle} + plugin.Regions = []string{"cn-shanghai"} + plugin.dt, err = getDiscoveryTool("acs_slb_dashboard", plugin.Regions) if err != nil { t.Fatalf("Can't create discovery tool object: %v", err) } - plugin.Log = testutil.Logger{Name: inputTitle} - httpResp := &http.Response{ StatusCode: 200, Body: ioutil.NopCloser(bytes.NewBufferString( @@ -150,7 +149,7 @@ func TestPluginInitialize(t *testing.T) { if err != nil { t.Fatalf("Can't create mock sdk cli: %v", err) } - plugin.dt.cli = map[string]aliyunSdkClient{plugin.DiscoveryRegions[0]: &mockCli} + plugin.dt.cli = map[string]aliyunSdkClient{plugin.Regions[0]: &mockCli} tests := []struct { name string @@ -158,14 +157,24 @@ func TestPluginInitialize(t *testing.T) { accessKeyID string accessKeySecret string expectedErrorString string + regions []string + discoveryRegions []string }{ { name: "Empty project", expectedErrorString: "project is not set", + regions: []string{"cn-shanghai"}, }, { name: "Valid project", project: "acs_slb_dashboard", + regions: []string{"cn-shanghai"}, + accessKeyID: "dummy", + accessKeySecret: "dummy", + }, + { + name: "'regions' is not set", + project: "acs_slb_dashboard", accessKeyID: "dummy", accessKeySecret: "dummy", }, @@ -176,12 +185,16 @@ func TestPluginInitialize(t *testing.T) { plugin.Project = tt.project plugin.AccessKeyID = tt.accessKeyID plugin.AccessKeySecret = tt.accessKeySecret + plugin.Regions = tt.regions if tt.expectedErrorString != "" { require.EqualError(t, plugin.Init(), tt.expectedErrorString) } else { require.Equal(t, nil, plugin.Init()) } + if len(tt.regions) == 0 { //Check if set to default + require.Equal(t, plugin.Regions, aliyunRegionList) + } }) } } @@ -224,6 +237,7 @@ func TestGatherMetric(t *testing.T) { client: new(mockGatherAliyunCMSClient), measurement: formatMeasurement("acs_slb_dashboard"), Log: testutil.Logger{Name: inputTitle}, + Regions: []string{"cn-shanghai"}, } metric := &Metric{ @@ -262,15 +276,15 @@ func TestGather(t *testing.T) { Dimensions: `{"instanceId": "i-abcdefgh123456"}`, } plugin := &AliyunCMS{ - AccessKeyID: "my_access_key_id", - AccessKeySecret: "my_access_key_secret", - Project: "acs_slb_dashboard", - Metrics: []*Metric{metric}, - RateLimit: 200, - measurement: formatMeasurement("acs_slb_dashboard"), - DiscoveryRegions: []string{"cn-shanghai"}, - client: new(mockGatherAliyunCMSClient), - Log: testutil.Logger{Name: inputTitle}, + AccessKeyID: "my_access_key_id", + AccessKeySecret: "my_access_key_secret", + Project: "acs_slb_dashboard", + Metrics: []*Metric{metric}, + RateLimit: 200, + measurement: formatMeasurement("acs_slb_dashboard"), + Regions: []string{"cn-shanghai"}, + client: new(mockGatherAliyunCMSClient), + Log: testutil.Logger{Name: inputTitle}, } //test table: @@ -326,7 +340,7 @@ func TestGather(t *testing.T) { } } -func TestGetDiscoveryDataAllRegions(t *testing.T) { +func TestGetDiscoveryDataAcrossRegions(t *testing.T) { //test table: tests := []struct { name string @@ -391,7 +405,7 @@ func TestGetDiscoveryDataAllRegions(t *testing.T) { t.Fatalf("Can't create mock sdk cli: %v", err) } dt.cli = map[string]aliyunSdkClient{tt.region: &mockCli} - data, err := dt.getDiscoveryDataAllRegions(nil) + data, err := dt.getDiscoveryDataAcrossRegions(nil) require.Equal(t, tt.discData, data) if err != nil { diff --git a/plugins/inputs/aliyuncms/discovery.go b/plugins/inputs/aliyuncms/discovery.go index c3f35c78aa3ec..a6fe5471beecf 100644 --- a/plugins/inputs/aliyuncms/discovery.go +++ b/plugins/inputs/aliyuncms/discovery.go @@ -5,6 +5,7 @@ import ( "reflect" "regexp" "strconv" + "strings" "sync" "time" @@ -16,37 +17,11 @@ import ( "github.com/aliyun/alibaba-cloud-sdk-go/services/rds" "github.com/aliyun/alibaba-cloud-sdk-go/services/slb" "github.com/aliyun/alibaba-cloud-sdk-go/services/vpc" - "github.com/pkg/errors" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/limiter" + "github.com/pkg/errors" ) -// https://www.alibabacloud.com/help/doc-detail/40654.htm?gclid=Cj0KCQjw4dr0BRCxARIsAKUNjWTAMfyVUn_Y3OevFBV3CMaazrhq0URHsgE7c0m0SeMQRKlhlsJGgIEaAviyEALw_wcB -var aliyunRegionList = []string{ - "cn-qingdao", - "cn-beijing", - "cn-zhangjiakou", - "cn-huhehaote", - "cn-hangzhou", - "cn-shanghai", - "cn-shenzhen", - "cn-heyuan", - "cn-chengdu", - "cn-hongkong", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-5", - "ap-south-1", - "ap-northeast-1", - "us-west-1", - "us-east-1", - "eu-central-1", - "eu-west-1", - "me-east-1", -} - type discoveryRequest interface { } @@ -54,6 +29,7 @@ type aliyunSdkClient interface { ProcessCommonRequest(req *requests.CommonRequest) (response *responses.CommonResponse, err error) } +// discoveryTool is a object that provides discovery feature type discoveryTool struct { req map[string]discoveryRequest //Discovery request (specific per object type) rateLimit int //Rate limit for API query, as it is limited by API backend @@ -70,8 +46,8 @@ type discoveryTool struct { lg telegraf.Logger //Telegraf logger (should be provided) } -type response struct { - discData []interface{} +type parsedDResp struct { + data []interface{} totalCount int pageSize int pageNumber int @@ -124,7 +100,8 @@ func newDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred if len(regions) == 0 { regions = aliyunRegionList - lg.Warnf("Discovery regions are not provided! Data will be queried across %d regions!", len(aliyunRegionList)) + lg.Infof("'regions' is not provided! Discovery data will be queried across %d regions:\n%s", + len(aliyunRegionList), strings.Join(aliyunRegionList, ",")) } if rateLimit == 0 { //Can be a rounding case @@ -300,21 +277,21 @@ func newDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred }, nil } -func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) (discoveryResponse *response, err error) { +func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) (*parsedDResp, error) { var ( - fullOutput = map[string]interface{}{} - foundDataItem, foundRootKey bool - discData []interface{} - totalCount, pageSize, pageNumber int + fullOutput = map[string]interface{}{} + data []byte + foundDataItem bool + foundRootKey bool + pdResp = &parsedDResp{} ) - data := resp.GetHttpContentBytes() + data = resp.GetHttpContentBytes() if data == nil { //No data return nil, errors.Errorf("No data in response to be parsed") } - err = json.Unmarshal(data, &fullOutput) - if err != nil { + if err := json.Unmarshal(data, &fullOutput); err != nil { return nil, errors.Errorf("Can't parse JSON from discovery response: %v", err) } @@ -329,7 +306,7 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) //It should contain the array with discovered data for _, item := range rootKeyVal { - if discData, foundDataItem = item.([]interface{}); foundDataItem { + if pdResp.data, foundDataItem = item.([]interface{}); foundDataItem { break } } @@ -337,70 +314,72 @@ func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) return nil, errors.Errorf("Didn't find array item in root key %q", key) } case "TotalCount": - totalCount = int(val.(float64)) + pdResp.totalCount = int(val.(float64)) case "PageSize": - pageSize = int(val.(float64)) + pdResp.pageSize = int(val.(float64)) case "PageNumber": - pageNumber = int(val.(float64)) + pdResp.pageNumber = int(val.(float64)) } } if !foundRootKey { return nil, errors.Errorf("Didn't find root key %q in discovery response", dt.respRootKey) } - return &response{ - discData: discData, - totalCount: totalCount, - pageSize: pageSize, - pageNumber: pageNumber, - }, nil + return pdResp, nil } -func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.CommonRequest, limiterChan chan bool) (map[string]interface{}, error) { - var discoveryData []interface{} - +func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.CommonRequest, lmtr chan bool) (map[string]interface{}, error) { + var ( + err error + resp *responses.CommonResponse + pDResp *parsedDResp + discoveryData []interface{} + totalCount int + pageNumber int + ) defer delete(req.QueryParams, "PageNumber") for { - if limiterChan != nil { - <-limiterChan //Rate limiting + if lmtr != nil { + <-lmtr //Rate limiting } - resp, err := cli.ProcessCommonRequest(req) + resp, err = cli.ProcessCommonRequest(req) if err != nil { return nil, err } - discoveryResponse, err := dt.parseDiscoveryResponse(resp) + pDResp, err = dt.parseDiscoveryResponse(resp) if err != nil { return nil, err } - discoveryData = append(discoveryData, discoveryResponse.discData...) + discoveryData = append(discoveryData, pDResp.data...) + pageNumber = pDResp.pageNumber + totalCount = pDResp.totalCount //Pagination - discoveryResponse.pageNumber++ - req.QueryParams["PageNumber"] = strconv.Itoa(discoveryResponse.pageNumber) + pageNumber++ + req.QueryParams["PageNumber"] = strconv.Itoa(pageNumber) - if len(discoveryData) == discoveryResponse.totalCount { //All data received + if len(discoveryData) == totalCount { //All data received //Map data to appropriate shape before return preparedData := map[string]interface{}{} for _, raw := range discoveryData { elem, ok := raw.(map[string]interface{}) if !ok { - return nil, errors.Errorf("Can't parse input data element, not a map[string]interface{} type") + return nil, errors.Errorf("can't parse input data element, not a map[string]interface{} type") } if objectID, ok := elem[dt.respObjectIDKey].(string); ok { preparedData[objectID] = elem } } - return preparedData, nil } } } -func (dt *discoveryTool) getDiscoveryDataAllRegions(limiterChan chan bool) (map[string]interface{}, error) { +func (dt *discoveryTool) getDiscoveryDataAcrossRegions(lmtr chan bool) (map[string]interface{}, error) { var ( data map[string]interface{} resultData = map[string]interface{}{} @@ -431,7 +410,7 @@ func (dt *discoveryTool) getDiscoveryDataAllRegions(limiterChan chan bool) (map[ commonRequest.TransToAcsRequest() //Get discovery data using common request - data, err = dt.getDiscoveryData(cli, commonRequest, limiterChan) + data, err = dt.getDiscoveryData(cli, commonRequest, lmtr) if err != nil { return nil, err } @@ -443,7 +422,9 @@ func (dt *discoveryTool) getDiscoveryDataAllRegions(limiterChan chan bool) (map[ return resultData, nil } -func (dt *discoveryTool) Start() { +// start the discovery pooling +// In case smth. new found it will be reported back through `DataChan` +func (dt *discoveryTool) start() { var ( err error data map[string]interface{} @@ -468,7 +449,7 @@ func (dt *discoveryTool) Start() { case <-dt.done: return case <-ticker.C: - data, err = dt.getDiscoveryDataAllRegions(lmtr.C) + data, err = dt.getDiscoveryDataAcrossRegions(lmtr.C) if err != nil { dt.lg.Errorf("Can't get discovery data: %v", err) continue @@ -489,7 +470,9 @@ func (dt *discoveryTool) Start() { }() } -func (dt *discoveryTool) Stop() { +// stop the discovery loop, making sure +// all data is read from 'dataChan' +func (dt *discoveryTool) stop() { close(dt.done) //Shutdown timer From 4fc849d73fb0140b1c96a0fd68ae8f1f7a4aa6ce Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 29 Apr 2021 15:58:35 -0500 Subject: [PATCH 407/761] Upgrade gopsutil to v3.21.3 (#9224) Co-authored-by: John Blesener --- docs/LICENSE_OF_DEPENDENCIES.md | 2 ++ go.mod | 5 +++-- go.sum | 11 ++++++++--- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index d065a2014fd5f..8fcefe55f7c7f 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -175,6 +175,8 @@ following works: - github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE) - github.com/tidwall/pretty [MIT License](https://github.com/tidwall/pretty/blob/master/LICENSE) - github.com/tinylib/msgp [MIT License](https://github.com/tinylib/msgp/blob/master/LICENSE) +- github.com/tklauser/go-sysconf [BSD 3-Clause "New" or "Revised" License](https://github.com/tklauser/go-sysconf/blob/master/LICENSE) +- github.com/tklauser/numcpus [Apache License 2.0](https://github.com/tklauser/numcpus/blob/master/LICENSE) - github.com/vapourismo/knx-go [MIT License](https://github.com/vapourismo/knx-go/blob/master/LICENSE) - github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE) - github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 188758ece1805..95d64c570e0bc 100644 --- a/go.mod +++ b/go.mod @@ -105,7 +105,7 @@ require ( github.com/riemann/riemann-go-client v0.5.0 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/sensu/sensu-go/api/core/v2 v2.6.0 - github.com/shirou/gopsutil v3.20.11+incompatible + github.com/shirou/gopsutil v3.21.3+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/signalfx/golib/v3 v3.3.0 github.com/sirupsen/logrus v1.6.0 @@ -115,6 +115,7 @@ require ( github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/tidwall/gjson v1.6.0 github.com/tinylib/msgp v1.1.5 + github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect @@ -130,7 +131,7 @@ require ( golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 - golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 + golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa golang.org/x/text v0.3.4 golang.org/x/tools v0.1.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 diff --git a/go.sum b/go.sum index 95651ec1d48b3..6470784f9f396 100644 --- a/go.sum +++ b/go.sum @@ -1014,8 +1014,8 @@ github.com/sensu/sensu-go/api/core/v2 v2.6.0 h1:hEKPHFZZNDuWTlKr7Kgm2yog65ZdkBUq github.com/sensu/sensu-go/api/core/v2 v2.6.0/go.mod h1:97IK4ZQuvVjWvvoLkp+NgrD6ot30WDRz3LEbFUc/N34= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v3.20.11+incompatible h1:LJr4ZQK4mPpIV5gOa4jCOKOGb4ty4DZO54I4FGqIpto= -github.com/shirou/gopsutil v3.20.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.3+incompatible h1:uenXGGa8ESCQq+dbgtl916dmg6PSAz2cXov0uORQ9v8= +github.com/shirou/gopsutil v3.21.3+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= @@ -1096,6 +1096,10 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= +github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= +github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= @@ -1366,8 +1370,9 @@ golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa h1:ZYxPR6aca/uhfRJyaOAtflSHjJYiktO7QnJC5ut7iY4= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= From 370836d436e083a0552af912d40f718ff0becb4f Mon Sep 17 00:00:00 2001 From: Logan Date: Thu, 29 Apr 2021 18:46:36 -0600 Subject: [PATCH 408/761] Add UTF-8 sanitizer to Strings plugin (#9118) Adds a new option for the Strings processer to sanitize strings so that they conform to utf-8 --- plugins/processors/strings/README.md | 7 ++ plugins/processors/strings/strings.go | 13 +++ plugins/processors/strings/strings_test.go | 110 +++++++++++++++++++++ 3 files changed, 130 insertions(+) diff --git a/plugins/processors/strings/README.md b/plugins/processors/strings/README.md index a7aa0e2a585bd..e0fcec9103151 100644 --- a/plugins/processors/strings/README.md +++ b/plugins/processors/strings/README.md @@ -14,6 +14,7 @@ Implemented functions are: - replace - left - base64decode +- valid_utf8 Please note that in this implementation these are processed in the order that they appear above. @@ -78,6 +79,12 @@ If you'd like to apply multiple processings to the same `tag_key` or `field_key` ## Decode a base64 encoded utf-8 string # [[processors.strings.base64decode]] # field = "message" + + ## Sanitize a string to ensure it is a valid utf-8 string + ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty + # [[processors.strings.valid_utf8]] + # field = "message" + # replacement = "" ``` #### Trim, TrimLeft, TrimRight diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go index 92ce560988384..7b2d3251ea381 100644 --- a/plugins/processors/strings/strings.go +++ b/plugins/processors/strings/strings.go @@ -22,6 +22,7 @@ type Strings struct { Replace []converter `toml:"replace"` Left []converter `toml:"left"` Base64Decode []converter `toml:"base64decode"` + ValidUTF8 []converter `toml:"valid_utf8"` converters []converter init bool @@ -42,6 +43,7 @@ type converter struct { Old string New string Width int + Replacement string fn ConvertFunc } @@ -98,6 +100,12 @@ const sampleConfig = ` ## Decode a base64 encoded utf-8 string # [[processors.strings.base64decode]] # field = "message" + + ## Sanitize a string to ensure it is a valid utf-8 string + ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty + # [[processors.strings.valid_utf8]] + # field = "message" + # replacement = "" ` func (s *Strings) SampleConfig() string { @@ -318,6 +326,11 @@ func (s *Strings) initOnce() { } s.converters = append(s.converters, c) } + for _, c := range s.ValidUTF8 { + c := c + c.fn = func(s string) string { return strings.ToValidUTF8(s, c.Replacement) } + s.converters = append(s.converters, c) + } s.init = true } diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go index 40d798a6db0e3..c4201188436e6 100644 --- a/plugins/processors/strings/strings_test.go +++ b/plugins/processors/strings/strings_test.go @@ -1047,3 +1047,113 @@ func TestBase64Decode(t *testing.T) { }) } } + +func TestValidUTF8(t *testing.T) { + tests := []struct { + name string + plugin *Strings + metric []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "valid utf-8 keeps original string", + plugin: &Strings{ + ValidUTF8: []converter{ + { + Field: "message", + Replacement: "r", + }, + }, + }, + metric: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "howdy", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "howdy", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "non-valid utf-8 modifies original string", + plugin: &Strings{ + ValidUTF8: []converter{ + { + Field: "message", + Replacement: "r", + }, + }, + }, + metric: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "ho" + string([]byte{0xff}) + "wdy", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "horwdy", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "non-valid utf-8 and empty replacement removes invalid characters", + plugin: &Strings{ + ValidUTF8: []converter{ + { + Field: "message", + Replacement: "", + }, + }, + }, + metric: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "ho" + string([]byte{0xff}) + "wdy", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "howdy", + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := tt.plugin.Apply(tt.metric...) + testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} From 6977121aeda6b6d73eecb32dd64056bb0d576dfa Mon Sep 17 00:00:00 2001 From: Jacob Marble Date: Fri, 30 Apr 2021 07:36:48 -0700 Subject: [PATCH 409/761] OpenTelemetry Input Plugin (#9077) --- docs/LICENSE_OF_DEPENDENCIES.md | 3 + go.mod | 5 +- go.sum | 17 ++- plugins/inputs/all/all.go | 1 + plugins/inputs/opentelemetry/README.md | 78 ++++++++++++++ plugins/inputs/opentelemetry/grpc_services.go | 94 ++++++++++++++++ plugins/inputs/opentelemetry/logger.go | 16 +++ plugins/inputs/opentelemetry/opentelemetry.go | 101 ++++++++++++++++++ plugins/inputs/opentelemetry/writer.go | 32 ++++++ 9 files changed, 345 insertions(+), 2 deletions(-) create mode 100644 plugins/inputs/opentelemetry/README.md create mode 100644 plugins/inputs/opentelemetry/grpc_services.go create mode 100644 plugins/inputs/opentelemetry/logger.go create mode 100644 plugins/inputs/opentelemetry/opentelemetry.go create mode 100644 plugins/inputs/opentelemetry/writer.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 8fcefe55f7c7f..ca8c0611cf2bb 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -105,6 +105,9 @@ following works: - github.com/hashicorp/golang-lru [Mozilla Public License 2.0](https://github.com/hashicorp/golang-lru/blob/master/LICENSE) - github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE) - github.com/influxdata/go-syslog [MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE) +- github.com/influxdata/influxdb-observability/common [MIT License](https://github.com/influxdata/influxdb-observability/blob/main/LICENSE) +- github.com/influxdata/influxdb-observability/otel2influx [MIT License](https://github.com/influxdata/influxdb-observability/blob/main/LICENSE) +- github.com/influxdata/influxdb-observability/otlp [MIT License](https://github.com/influxdata/influxdb-observability/blob/main/LICENSE) - github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt) - github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE) - github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 95d64c570e0bc..fc0ec4dea0a7d 100644 --- a/go.mod +++ b/go.mod @@ -73,6 +73,9 @@ require ( github.com/hashicorp/consul/api v1.6.0 github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/influxdata/go-syslog/v2 v2.0.1 + github.com/influxdata/influxdb-observability/common v0.0.0-20210429174543-86ae73cafd31 + github.com/influxdata/influxdb-observability/otel2influx v0.0.0-20210429174543-86ae73cafd31 + github.com/influxdata/influxdb-observability/otlp v0.0.0-20210429174543-86ae73cafd31 github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 @@ -137,7 +140,7 @@ require ( golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.29.0 google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70 - google.golang.org/grpc v1.33.1 + google.golang.org/grpc v1.37.0 gopkg.in/djherbis/times.v1 v1.2.0 gopkg.in/fatih/pool.v2 v2.0.0 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 diff --git a/go.sum b/go.sum index 6470784f9f396..6ce13c192d965 100644 --- a/go.sum +++ b/go.sum @@ -235,6 +235,7 @@ github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -315,6 +316,7 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -526,6 +528,7 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= @@ -667,6 +670,14 @@ github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmc github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s= github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo= github.com/influxdata/influxdb v1.8.2/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= +github.com/influxdata/influxdb-observability/common v0.0.0-20210428231528-a010f53e3e02/go.mod h1:PMngVYsW4uwtzIVmj0ZfLL9UIOwo7Vs+09QHkoYMZv8= +github.com/influxdata/influxdb-observability/common v0.0.0-20210429174543-86ae73cafd31 h1:pfWcpiOrWLJvicIpCiFR8vqrkVbAuKUttWvQDmSlfUM= +github.com/influxdata/influxdb-observability/common v0.0.0-20210429174543-86ae73cafd31/go.mod h1:PMngVYsW4uwtzIVmj0ZfLL9UIOwo7Vs+09QHkoYMZv8= +github.com/influxdata/influxdb-observability/otel2influx v0.0.0-20210429174543-86ae73cafd31 h1:uiRNaaczvfx837c6OSH9Q6H4td1cWnR9X0pveHTHeYs= +github.com/influxdata/influxdb-observability/otel2influx v0.0.0-20210429174543-86ae73cafd31/go.mod h1:43guzIbK1oO/UMBuMCqG++LHZqLhMbWxqU4H1Lgpf28= +github.com/influxdata/influxdb-observability/otlp v0.0.0-20210428231528-a010f53e3e02/go.mod h1:J2N8KOAXSXgDhLjYWvjbxPhrgq3nVQ/npzW8l8T77Qo= +github.com/influxdata/influxdb-observability/otlp v0.0.0-20210429174543-86ae73cafd31 h1:Cf6WCNdgyxWv3x3uMehlexHAkWO3AZTAv5Q2yo0WQ0s= +github.com/influxdata/influxdb-observability/otlp v0.0.0-20210429174543-86ae73cafd31/go.mod h1:23SLY21Ag84PC0TbvVhdKoOVvrQF6nq5j5sFOW09ZBU= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= @@ -1452,6 +1463,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1518,6 +1530,7 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70 h1:wboULUXGF3c5qdUnKp+6gLAccE6PRpa/czkYvQ4UXv8= google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1535,8 +1548,9 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.33.1 h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1547,6 +1561,7 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 3beb30cb412ca..aa273a4aa7fb5 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -132,6 +132,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/openldap" _ "github.com/influxdata/telegraf/plugins/inputs/openntpd" _ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd" + _ "github.com/influxdata/telegraf/plugins/inputs/opentelemetry" _ "github.com/influxdata/telegraf/plugins/inputs/openweathermap" _ "github.com/influxdata/telegraf/plugins/inputs/passenger" _ "github.com/influxdata/telegraf/plugins/inputs/pf" diff --git a/plugins/inputs/opentelemetry/README.md b/plugins/inputs/opentelemetry/README.md new file mode 100644 index 0000000000000..9cb8f96eb5635 --- /dev/null +++ b/plugins/inputs/opentelemetry/README.md @@ -0,0 +1,78 @@ +# OpenTelemetry Input Plugin + +This plugin receives traces, metrics and logs from [OpenTelemetry](https://opentelemetry.io) clients and agents via gRPC. + +### Configuration + +```toml +[[inputs.opentelemetry]] + ## Override the OpenTelemetry gRPC service address:port + # service_address = "0.0.0.0:4317" + + ## Override the default request timeout + # timeout = "5s" + + ## Select a schema for metrics: "prometheus-v1" or "prometheus-v2" + ## For more information about the alternatives, read the Prometheus input + ## plugin notes. + # metrics_schema = "prometheus-v1" +``` + +#### Schema + +The OpenTelemetry->InfluxDB conversion [schema](https://github.com/influxdata/influxdb-observability/blob/main/docs/index.md) +and [implementation](https://github.com/influxdata/influxdb-observability/tree/main/otel2influx) +are hosted at https://github.com/influxdata/influxdb-observability . + +Spans are stored in measurement `spans`. +Logs are stored in measurement `logs`. + +For metrics, two output schemata exist. +Metrics received with `metrics_schema=prometheus-v1` are assigned measurement from the OTel field `Metric.name`. +Metrics received with `metrics_schema=prometheus-v2` are stored in measurement `prometheus`. + +### Example Output + +#### Tracing Spans +``` +spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="d5270e78d85f570f",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="4c28227be6a010e1",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689169000 +spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="lets-go",net.peer.ip="1.2.3.4",peer.service="tracegen-server",service.name="tracegen",span.kind="client",span_id="d5270e78d85f570f",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689135000 +spans end_time_unix_nano="2021-02-19 20:50:25.6895667 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="b57e98af78c3399b",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="a0643a156d7f9f7f",status_code="STATUS_CODE_OK",trace_id="fd6b8bb5965e726c94978c644962cdc8" 1613767825689388000 +spans end_time_unix_nano="2021-02-19 20:50:25.6895667 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="lets-go",net.peer.ip="1.2.3.4",peer.service="tracegen-server",service.name="tracegen",span.kind="client",span_id="b57e98af78c3399b",status_code="STATUS_CODE_OK",trace_id="fd6b8bb5965e726c94978c644962cdc8" 1613767825689303300 +spans end_time_unix_nano="2021-02-19 20:50:25.6896741 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="6a8e6a0edcc1c966",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="d68f7f3b41eb8075",status_code="STATUS_CODE_OK",trace_id="651dadde186b7834c52b13a28fc27bea" 1613767825689480300 +``` + +### Metrics - `prometheus-v1` +``` +cpu_temp,foo=bar gauge=87.332 +http_requests_total,method=post,code=200 counter=1027 +http_requests_total,method=post,code=400 counter=3 +http_request_duration_seconds 0.05=24054,0.1=33444,0.2=100392,0.5=129389,1=133988,sum=53423,count=144320 +rpc_duration_seconds 0.01=3102,0.05=3272,0.5=4773,0.9=9001,0.99=76656,sum=1.7560473e+07,count=2693 +``` + +### Metrics - `prometheus-v2` +``` +prometheus,foo=bar cpu_temp=87.332 +prometheus,method=post,code=200 http_requests_total=1027 +prometheus,method=post,code=400 http_requests_total=3 +prometheus,le=0.05 http_request_duration_seconds_bucket=24054 +prometheus,le=0.1 http_request_duration_seconds_bucket=33444 +prometheus,le=0.2 http_request_duration_seconds_bucket=100392 +prometheus,le=0.5 http_request_duration_seconds_bucket=129389 +prometheus,le=1 http_request_duration_seconds_bucket=133988 +prometheus http_request_duration_seconds_count=144320,http_request_duration_seconds_sum=53423 +prometheus,quantile=0.01 rpc_duration_seconds=3102 +prometheus,quantile=0.05 rpc_duration_seconds=3272 +prometheus,quantile=0.5 rpc_duration_seconds=4773 +prometheus,quantile=0.9 rpc_duration_seconds=9001 +prometheus,quantile=0.99 rpc_duration_seconds=76656 +prometheus rpc_duration_seconds_count=1.7560473e+07,rpc_duration_seconds_sum=2693 +``` + +### Logs +``` +logs fluent.tag="fluent.info",pid=18i,ppid=9i,worker=0i 1613769568895331700 +logs fluent.tag="fluent.debug",instance=1720i,queue_size=0i,stage_size=0i 1613769568895697200 +logs fluent.tag="fluent.info",worker=0i 1613769568896515100 +``` diff --git a/plugins/inputs/opentelemetry/grpc_services.go b/plugins/inputs/opentelemetry/grpc_services.go new file mode 100644 index 0000000000000..4045f8b60d504 --- /dev/null +++ b/plugins/inputs/opentelemetry/grpc_services.go @@ -0,0 +1,94 @@ +package opentelemetry + +import ( + "context" + "fmt" + + "github.com/influxdata/influxdb-observability/common" + "github.com/influxdata/influxdb-observability/otel2influx" + otlpcollectorlogs "github.com/influxdata/influxdb-observability/otlp/collector/logs/v1" + otlpcollectormetrics "github.com/influxdata/influxdb-observability/otlp/collector/metrics/v1" + otlpcollectortrace "github.com/influxdata/influxdb-observability/otlp/collector/trace/v1" +) + +type traceService struct { + otlpcollectortrace.UnimplementedTraceServiceServer + + converter *otel2influx.OtelTracesToLineProtocol + writer *writeToAccumulator +} + +func newTraceService(logger common.Logger, writer *writeToAccumulator) *traceService { + converter := otel2influx.NewOtelTracesToLineProtocol(logger) + return &traceService{ + converter: converter, + writer: writer, + } +} + +func (s *traceService) Export(ctx context.Context, req *otlpcollectortrace.ExportTraceServiceRequest) (*otlpcollectortrace.ExportTraceServiceResponse, error) { + err := s.converter.WriteTraces(ctx, req.ResourceSpans, s.writer) + if err != nil { + return nil, err + } + return &otlpcollectortrace.ExportTraceServiceResponse{}, nil +} + +type metricsService struct { + otlpcollectormetrics.UnimplementedMetricsServiceServer + + converter *otel2influx.OtelMetricsToLineProtocol + writer *writeToAccumulator +} + +var metricsSchemata = map[string]otel2influx.MetricsSchema{ + "prometheus-v1": otel2influx.MetricsSchemaTelegrafPrometheusV1, + "prometheus-v2": otel2influx.MetricsSchemaTelegrafPrometheusV2, +} + +func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema string) (*metricsService, error) { + ms, found := metricsSchemata[schema] + if !found { + return nil, fmt.Errorf("schema '%s' not recognized", schema) + } + + converter, err := otel2influx.NewOtelMetricsToLineProtocol(logger, ms) + if err != nil { + return nil, err + } + return &metricsService{ + converter: converter, + writer: writer, + }, nil +} + +func (s *metricsService) Export(ctx context.Context, req *otlpcollectormetrics.ExportMetricsServiceRequest) (*otlpcollectormetrics.ExportMetricsServiceResponse, error) { + err := s.converter.WriteMetrics(ctx, req.ResourceMetrics, s.writer) + if err != nil { + return nil, err + } + return &otlpcollectormetrics.ExportMetricsServiceResponse{}, nil +} + +type logsService struct { + otlpcollectorlogs.UnimplementedLogsServiceServer + + converter *otel2influx.OtelLogsToLineProtocol + writer *writeToAccumulator +} + +func newLogsService(logger common.Logger, writer *writeToAccumulator) *logsService { + converter := otel2influx.NewOtelLogsToLineProtocol(logger) + return &logsService{ + converter: converter, + writer: writer, + } +} + +func (s *logsService) Export(ctx context.Context, req *otlpcollectorlogs.ExportLogsServiceRequest) (*otlpcollectorlogs.ExportLogsServiceResponse, error) { + err := s.converter.WriteLogs(ctx, req.ResourceLogs, s.writer) + if err != nil { + return nil, err + } + return &otlpcollectorlogs.ExportLogsServiceResponse{}, nil +} diff --git a/plugins/inputs/opentelemetry/logger.go b/plugins/inputs/opentelemetry/logger.go new file mode 100644 index 0000000000000..3db3621bcc672 --- /dev/null +++ b/plugins/inputs/opentelemetry/logger.go @@ -0,0 +1,16 @@ +package opentelemetry + +import ( + "strings" + + "github.com/influxdata/telegraf" +) + +type otelLogger struct { + telegraf.Logger +} + +func (l otelLogger) Debug(msg string, kv ...interface{}) { + format := msg + strings.Repeat(" %s=%q", len(kv)/2) + l.Logger.Debugf(format, kv...) +} diff --git a/plugins/inputs/opentelemetry/opentelemetry.go b/plugins/inputs/opentelemetry/opentelemetry.go new file mode 100644 index 0000000000000..cf2f6de08f33c --- /dev/null +++ b/plugins/inputs/opentelemetry/opentelemetry.go @@ -0,0 +1,101 @@ +package opentelemetry + +import ( + "fmt" + "net" + "sync" + "time" + + otlpcollectorlogs "github.com/influxdata/influxdb-observability/otlp/collector/logs/v1" + otlpcollectormetrics "github.com/influxdata/influxdb-observability/otlp/collector/metrics/v1" + otlpcollectortrace "github.com/influxdata/influxdb-observability/otlp/collector/trace/v1" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/inputs" + "google.golang.org/grpc" +) + +type OpenTelemetry struct { + ServiceAddress string `toml:"service_address"` + Timeout config.Duration `toml:"timeout"` + + MetricsSchema string `toml:"metrics_schema"` + + Log telegraf.Logger `toml:"-"` + + grpcServer *grpc.Server + + wg sync.WaitGroup +} + +const sampleConfig = ` + ## Override the OpenTelemetry gRPC service address:port + # service_address = "0.0.0.0:4317" + + ## Override the default request timeout + # timeout = "5s" + + ## Select a schema for metrics: prometheus-v1 or prometheus-v2 + ## For more information about the alternatives, read the Prometheus input + ## plugin notes. + # metrics_schema = "prometheus-v1" +` + +func (o *OpenTelemetry) SampleConfig() string { + return sampleConfig +} + +func (o *OpenTelemetry) Description() string { + return "Receive OpenTelemetry traces, metrics, and logs over gRPC" +} + +func (o *OpenTelemetry) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (o *OpenTelemetry) Start(accumulator telegraf.Accumulator) error { + listener, err := net.Listen("tcp", o.ServiceAddress) + if err != nil { + return err + } + + logger := &otelLogger{o.Log} + influxWriter := &writeToAccumulator{accumulator} + o.grpcServer = grpc.NewServer() + + otlpcollectortrace.RegisterTraceServiceServer(o.grpcServer, newTraceService(logger, influxWriter)) + ms, err := newMetricsService(logger, influxWriter, o.MetricsSchema) + if err != nil { + return err + } + otlpcollectormetrics.RegisterMetricsServiceServer(o.grpcServer, ms) + otlpcollectorlogs.RegisterLogsServiceServer(o.grpcServer, newLogsService(logger, influxWriter)) + + o.wg.Add(1) + go func() { + if err := o.grpcServer.Serve(listener); err != nil { + accumulator.AddError(fmt.Errorf("failed to stop OpenTelemetry gRPC service: %w", err)) + } + o.wg.Done() + }() + + return nil +} + +func (o *OpenTelemetry) Stop() { + if o.grpcServer != nil { + o.grpcServer.Stop() + } + + o.wg.Wait() +} + +func init() { + inputs.Add("opentelemetry", func() telegraf.Input { + return &OpenTelemetry{ + ServiceAddress: "0.0.0.0:4317", + Timeout: config.Duration(5 * time.Second), + MetricsSchema: "prometheus-v1", + } + }) +} diff --git a/plugins/inputs/opentelemetry/writer.go b/plugins/inputs/opentelemetry/writer.go new file mode 100644 index 0000000000000..69b627e38256e --- /dev/null +++ b/plugins/inputs/opentelemetry/writer.go @@ -0,0 +1,32 @@ +package opentelemetry + +import ( + "context" + "fmt" + "time" + + "github.com/influxdata/influxdb-observability/otel2influx" + "github.com/influxdata/telegraf" +) + +type writeToAccumulator struct { + accumulator telegraf.Accumulator +} + +func (w *writeToAccumulator) WritePoint(_ context.Context, measurement string, tags map[string]string, fields map[string]interface{}, ts time.Time, vType otel2influx.InfluxWriterValueType) error { + switch vType { + case otel2influx.InfluxWriterValueTypeUntyped: + w.accumulator.AddFields(measurement, fields, tags, ts) + case otel2influx.InfluxWriterValueTypeGauge: + w.accumulator.AddGauge(measurement, fields, tags, ts) + case otel2influx.InfluxWriterValueTypeSum: + w.accumulator.AddCounter(measurement, fields, tags, ts) + case otel2influx.InfluxWriterValueTypeHistogram: + w.accumulator.AddHistogram(measurement, fields, tags, ts) + case otel2influx.InfluxWriterValueTypeSummary: + w.accumulator.AddSummary(measurement, fields, tags, ts) + default: + return fmt.Errorf("unrecognized InfluxWriterValueType %q", vType) + } + return nil +} From 279dda21ef6163fdce1b9365fc8931cb8213d474 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 3 May 2021 11:00:49 -0500 Subject: [PATCH 410/761] Add FreeBSD armv7 package (#9200) --- Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Makefile b/Makefile index f0b92fc35958f..ee9c1b71d9ac9 100644 --- a/Makefile +++ b/Makefile @@ -263,6 +263,7 @@ endif ifdef armhf tars += telegraf-$(tar_version)_linux_armhf.tar.gz +tars += telegraf-$(tar_version)_freebsd_armv7.tar.gz debs += telegraf_$(deb_version)_armhf.deb rpms += telegraf-$(rpm_version).armv6hl.rpm endif @@ -435,6 +436,10 @@ upload-nightly: %freebsd_i386.tar.gz: export GOOS := freebsd %freebsd_i386.tar.gz: export GOARCH := 386 +%freebsd_armv7.tar.gz: export GOOS := freebsd +%freebsd_armv7.tar.gz: export GOARCH := arm +%freebsd_armv7.tar.gz: export GOARM := 7 + %windows_amd64.zip: export GOOS := windows %windows_amd64.zip: export GOARCH := amd64 From 09faa32043c98ddb88459262fe1270c05b02f48d Mon Sep 17 00:00:00 2001 From: SomKen Date: Mon, 3 May 2021 12:35:04 -0700 Subject: [PATCH 411/761] Document using group membership to allow access to /dev/pf for pf input plugin (#9232) --- plugins/inputs/pf/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/pf/README.md b/plugins/inputs/pf/README.md index 83215d8f62f3a..9d4e2ad47c1b8 100644 --- a/plugins/inputs/pf/README.md +++ b/plugins/inputs/pf/README.md @@ -7,6 +7,7 @@ The pf plugin retrieves this information by invoking the `pfstat` command. The ` * Run telegraf as root. This is strongly discouraged. * Change the ownership and permissions for /dev/pf such that the user telegraf runs at can read the /dev/pf device file. This is probably not that good of an idea either. * Configure sudo to grant telegraf to run `pfctl` as root. This is the most restrictive option, but require sudo setup. +* Add "telegraf" to the "proxy" group as /dev/pf is owned by root:proxy. ### Using sudo From 26e791f1c81b10f583e7cccf985d7d6707194699 Mon Sep 17 00:00:00 2001 From: Adam Dobrawy Date: Mon, 3 May 2021 22:02:21 +0200 Subject: [PATCH 412/761] Add docs for docker-compose labels in docker input (#9173) --- plugins/inputs/docker/README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 1a8aca6ae924f..8d75e641a1fb4 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -132,6 +132,22 @@ may prefer to exclude them: docker_label_exclude = ["annotation.kubernetes*"] ``` + +#### Docker-compose Labels + +Docker-compose will add labels to your containers. You can limit restrict labels to selected ones, e.g. + +``` + docker_label_include = [ + "com.docker.compose.config-hash", + "com.docker.compose.container-number", + "com.docker.compose.oneoff", + "com.docker.compose.project", + "com.docker.compose.service", + ] +``` + + ### Metrics: - docker From 175cd16f1968ff292a93378931e4fc08975c620a Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 4 May 2021 11:15:47 -0500 Subject: [PATCH 413/761] Migrate ipvs library from docker/libnetwork/ipvs to moby/ipvs (#9235) --- docs/LICENSE_OF_DEPENDENCIES.md | 2 +- go.mod | 4 +--- go.sum | 16 ++++++++++------ plugins/inputs/ipvs/ipvs.go | 2 +- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index ca8c0611cf2bb..57f4402d9488a 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -57,7 +57,6 @@ following works: - github.com/docker/docker [Apache License 2.0](https://github.com/docker/docker/blob/master/LICENSE) - github.com/docker/go-connections [Apache License 2.0](https://github.com/docker/go-connections/blob/master/LICENSE) - github.com/docker/go-units [Apache License 2.0](https://github.com/docker/go-units/blob/master/LICENSE) -- github.com/docker/libnetwork [Apache License 2.0](https://github.com/docker/libnetwork/blob/master/LICENSE) - github.com/eapache/go-resiliency [MIT License](https://github.com/eapache/go-resiliency/blob/master/LICENSE) - github.com/eapache/go-xerial-snappy [MIT License](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE) - github.com/eapache/queue [MIT License](https://github.com/eapache/queue/blob/master/LICENSE) @@ -135,6 +134,7 @@ following works: - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) - github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE) - github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) +- github.com/moby/ipvs [Apache License 2.0](https://github.com/moby/ipvs/blob/master/LICENSE) - github.com/modern-go/concurrent [Apache License 2.0](https://github.com/modern-go/concurrent/blob/master/LICENSE) - github.com/modern-go/reflect2 [Apache License 2.0](https://github.com/modern-go/reflect2/blob/master/LICENSE) - github.com/multiplay/go-ts3 [BSD 2-Clause "Simplified" License](https://github.com/multiplay/go-ts3/blob/master/LICENSE) diff --git a/go.mod b/go.mod index fc0ec4dea0a7d..e4bb9710b7dd5 100644 --- a/go.mod +++ b/go.mod @@ -47,7 +47,6 @@ require ( github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible - github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 github.com/eclipse/paho.mqtt.golang v1.3.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.5.0 @@ -91,6 +90,7 @@ require ( github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b github.com/microsoft/ApplicationInsights-Go v0.4.4 github.com/miekg/dns v1.1.31 + github.com/moby/ipvs v1.0.1 github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect github.com/nats-io/nats-server/v2 v2.1.4 @@ -120,8 +120,6 @@ require ( github.com/tinylib/msgp v1.1.5 github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 - github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect - github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect github.com/vjeantet/grok v1.0.1 github.com/vmware/govmomi v0.19.0 github.com/wavefronthq/wavefront-sdk-go v0.9.7 diff --git a/go.sum b/go.sum index 6ce13c192d965..0b4946b557c49 100644 --- a/go.sum +++ b/go.sum @@ -288,8 +288,6 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 h1:KgEcrKF0NWi9GT/OvDp9ioXZIrHRbP8S5o+sot9gznQ= -github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU= @@ -844,6 +842,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= +github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1125,10 +1125,10 @@ github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 h1:iBlTJosRsR70a github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330/go.mod h1:7+aWBsUJCo9OQRCgTypRmIQW9KKKcPMjtrdnYIBsS70= github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0= -github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY= @@ -1340,6 +1340,7 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1427,6 +1428,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1631,6 +1633,8 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclp gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/netdb v0.0.0-20150201073656-a416d700ae39/go.mod h1:rbNo0ST5hSazCG4rGfpHrwnwvzP1QX62WbhzD+ghGzs= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/plugins/inputs/ipvs/ipvs.go b/plugins/inputs/ipvs/ipvs.go index 5e3ae0d5637b0..65db5efe3bf7f 100644 --- a/plugins/inputs/ipvs/ipvs.go +++ b/plugins/inputs/ipvs/ipvs.go @@ -8,10 +8,10 @@ import ( "strconv" "syscall" - "github.com/docker/libnetwork/ipvs" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/logrus" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/moby/ipvs" ) // IPVS holds the state for this input plugin From 537bc9d21db59d1febf38770e8aa14e4aed8620e Mon Sep 17 00:00:00 2001 From: Josef Johansson Date: Tue, 4 May 2021 23:09:55 +0200 Subject: [PATCH 414/761] plugins/inputs/dovecot: Add support for unix domain sockets (#9223) It's safer for dovecot to export metrics via a UDS instead of tcp port, this will add support for that option. ### Required for all PRs: - [x] Updated associated README.md. - [x] Wrote appropriate unit tests. resolves #9215 dovecot: Add support for unix domain sockets as well --- plugins/inputs/dovecot/README.md | 3 ++ plugins/inputs/dovecot/dovecot.go | 23 ++++++++++--- plugins/inputs/dovecot/dovecot_test.go | 47 ++++++++++++++++++++++++-- 3 files changed, 66 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/dovecot/README.md b/plugins/inputs/dovecot/README.md index 3b6129488dae3..9e44d99edbc07 100644 --- a/plugins/inputs/dovecot/README.md +++ b/plugins/inputs/dovecot/README.md @@ -14,6 +14,9 @@ the [upgrading steps][upgrading]. ## specify dovecot servers via an address:port list ## e.g. ## localhost:24242 + ## or as an UDS socket + ## e.g. + ## /var/run/dovecot/old-stats ## ## If no servers are specified, then localhost is used as the host. servers = ["localhost:24242"] diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index 94c941655ccc8..ab5067534dea0 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -78,12 +78,20 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error { } func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error { - _, _, err := net.SplitHostPort(addr) - if err != nil { - return fmt.Errorf("%q on url %s", err.Error(), addr) + var proto string + + if strings.HasPrefix(addr, "/") { + proto = "unix" + } else { + proto = "tcp" + + _, _, err := net.SplitHostPort(addr) + if err != nil { + return fmt.Errorf("%q on url %s", err.Error(), addr) + } } - c, err := net.DialTimeout("tcp", addr, defaultTimeout) + c, err := net.DialTimeout(proto, addr, defaultTimeout) if err != nil { return fmt.Errorf("enable to connect to dovecot server '%s': %s", addr, err) } @@ -108,7 +116,12 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype stri return fmt.Errorf("copying message failed for dovecot server '%s': %s", addr, err) } - host, _, _ := net.SplitHostPort(addr) + var host string + if strings.HasPrefix(addr, "/") { + host = addr + } else { + host, _, _ = net.SplitHostPort(addr) + } return gatherStats(&buf, acc, host, qtype) } diff --git a/plugins/inputs/dovecot/dovecot_test.go b/plugins/inputs/dovecot/dovecot_test.go index 97c1d2f88d964..f9ce76de947d6 100644 --- a/plugins/inputs/dovecot/dovecot_test.go +++ b/plugins/inputs/dovecot/dovecot_test.go @@ -1,7 +1,12 @@ package dovecot import ( + "bufio" "bytes" + "io" + "net" + "net/textproto" + "os" "testing" "time" @@ -42,11 +47,49 @@ func TestDovecotIntegration(t *testing.T) { var acc testutil.Accumulator + // Test type=global server=unix + addr := "/tmp/socket" + wait := make(chan int) + go func() { + defer close(wait) + + la, err := net.ResolveUnixAddr("unix", addr) + require.NoError(t, err) + + l, err := net.ListenUnix("unix", la) + require.NoError(t, err) + defer l.Close() + defer os.Remove(addr) + + wait <- 0 + conn, err := l.Accept() + require.NoError(t, err) + defer conn.Close() + + readertp := textproto.NewReader(bufio.NewReader(conn)) + _, err = readertp.ReadLine() + require.NoError(t, err) + + buf := bytes.NewBufferString(sampleGlobal) + _, err = io.Copy(conn, buf) + require.NoError(t, err) + }() + + // Wait for server to start + <-wait + + d := &Dovecot{Servers: []string{addr}, Type: "global"} + err := d.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{"server": addr, "type": "global"} + acc.AssertContainsTaggedFields(t, "dovecot", fields, tags) + // Test type=global - tags := map[string]string{"server": "dovecot.test", "type": "global"} + tags = map[string]string{"server": "dovecot.test", "type": "global"} buf := bytes.NewBufferString(sampleGlobal) - err := gatherStats(buf, &acc, "dovecot.test", "global") + err = gatherStats(buf, &acc, "dovecot.test", "global") require.NoError(t, err) acc.AssertContainsTaggedFields(t, "dovecot", fields, tags) From 06c35862588db340a8aa22bc1d388b5965b49569 Mon Sep 17 00:00:00 2001 From: Jean-Christophe GONNARD Date: Tue, 4 May 2021 23:15:25 +0200 Subject: [PATCH 415/761] Add x509_crl external plugin (#9229) --- EXTERNAL_PLUGINS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index aa6fa8a40b4e5..f08ef5f91d537 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -17,6 +17,7 @@ Pull requests welcome. - [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels - [Big Blue Button](https://github.com/SLedunois/bigbluebutton-telegraf-plugin) - Gather meetings information from [Big Blue Button](https://bigbluebutton.org/) server - [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - Gather dnsmasq statistics from dnsmasq +- [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - Gather information from your X509 CRL files ## Outputs - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. From 9a7ef2d8d86a37f53b9a3d342f93bf83796da5c4 Mon Sep 17 00:00:00 2001 From: Marco Favero Date: Wed, 5 May 2021 18:01:19 +0200 Subject: [PATCH 416/761] Update EXTERNAL_PLUGINS.md (#9217) --- EXTERNAL_PLUGINS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index f08ef5f91d537..66e9143da9aee 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -17,6 +17,7 @@ Pull requests welcome. - [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels - [Big Blue Button](https://github.com/SLedunois/bigbluebutton-telegraf-plugin) - Gather meetings information from [Big Blue Button](https://bigbluebutton.org/) server - [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - Gather dnsmasq statistics from dnsmasq +- [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - Gather statistics from 389ds and from LDAP trees. - [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - Gather information from your X509 CRL files ## Outputs From 14822469020dfa1b117a53be6af70dd565af6a46 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 5 May 2021 15:56:31 -0400 Subject: [PATCH 417/761] Upgrade hashicorp/consul/api to v1.8.1 (#9238) --- go.mod | 2 +- go.sum | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index e4bb9710b7dd5..4c73b2657b113 100644 --- a/go.mod +++ b/go.mod @@ -69,7 +69,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 - github.com/hashicorp/consul/api v1.6.0 + github.com/hashicorp/consul/api v1.8.1 github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/influxdata/go-syslog/v2 v2.0.1 github.com/influxdata/influxdb-observability/common v0.0.0-20210429174543-86ae73cafd31 diff --git a/go.sum b/go.sum index 0b4946b557c49..0debba3d9d718 100644 --- a/go.sum +++ b/go.sum @@ -606,11 +606,13 @@ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMW github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ= github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.6.0 h1:SZB2hQW8AcTOpfDmiVblQbijxzsRuiyy0JpHfabvHio= github.com/hashicorp/consul/api v1.6.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= +github.com/hashicorp/consul/api v1.8.1 h1:BOEQaMWoGMhmQ29fC26bi0qb7/rId9JzZP2V0Xmx7m8= +github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.6.0 h1:FfhMEkwvQl57CildXJyGHnwGGM4HMODGyfjGwNM1Vdw= github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.7.0 h1:H6R9d008jDcHPQPAqPNuydAshJ4v5/8URdFnUvK/+sc= +github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -653,8 +655,9 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.9.3 h1:AVF6JDQQens6nMHT9OGERBvK0f8rPrAGILnsKLr6lzM= github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hetznercloud/hcloud-go v1.21.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= From 3e9f191142e8b1799fef52f3fe8cfee95b0b136b Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Fri, 7 May 2021 12:26:11 -0700 Subject: [PATCH 418/761] Redirect former wiki links (#9183) * fix docs links * redirect links * redirect links * redirect links * redirect links --- docs/AGGREGATORS.md | 8 ++++---- docs/INPUTS.md | 8 ++++---- docs/OUTPUTS.md | 8 ++++---- docs/PROCESSORS.md | 8 ++++---- docs/maintainers/PULL_REQUESTS.md | 4 ++-- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/AGGREGATORS.md b/docs/AGGREGATORS.md index a5930a3e0df6d..0edf467837457 100644 --- a/docs/AGGREGATORS.md +++ b/docs/AGGREGATORS.md @@ -11,13 +11,13 @@ This section is for developers who want to create a new aggregator plugin. `github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file. - The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf config`. Please - consult the [SampleConfig][] page for the latest style guidelines. + consult the [Sample Config][] page for the latest style guidelines. * The `Description` function should say in one line what this aggregator does. * The Aggregator plugin will need to keep caches of metrics that have passed through it. This should be done using the builtin `HashID()` function of each metric. * When the `Reset()` function is called, all caches should be cleared. -- Follow the recommended [CodeStyle][]. +- Follow the recommended [Code Style][]. ### Aggregator Plugin Example @@ -128,5 +128,5 @@ func init() { ``` [telegraf.Aggregator]: https://godoc.org/github.com/influxdata/telegraf#Aggregator -[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig -[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle +[Sample Config]: https://github.com/influxdata/telegraf/blob/master/docs/developers/SAMPLE_CONFIG.md +[Code Style]: https://github.com/influxdata/telegraf/blob/master/docs/developers/CODE_STYLE.md diff --git a/docs/INPUTS.md b/docs/INPUTS.md index 6a10cf949829c..679c24e287604 100644 --- a/docs/INPUTS.md +++ b/docs/INPUTS.md @@ -17,10 +17,10 @@ and submit new inputs. `github.com/influxdata/telegraf/plugins/inputs/all/all.go` file. - The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf config`. Please - consult the [SampleConfig][] page for the latest style + consult the [Sample Config][] page for the latest style guidelines. - The `Description` function should say in one line what this plugin does. -- Follow the recommended [CodeStyle][]. +- Follow the recommended [Code Style][]. Let's say you've written a plugin that emits metrics about processes on the current host. @@ -143,8 +143,8 @@ Check the [amqp_consumer][] for an example implementation. [amqp_consumer]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/amqp_consumer [prom metric types]: https://prometheus.io/docs/concepts/metric_types/ [input data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig -[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle +[Sample Config]: https://github.com/influxdata/telegraf/blob/master/docs/developers/SAMPLE_CONFIG.md +[Code Style]: https://github.com/influxdata/telegraf/blob/master/docs/developers/CODE_STYLE.md [telegraf.Input]: https://godoc.org/github.com/influxdata/telegraf#Input [telegraf.ServiceInput]: https://godoc.org/github.com/influxdata/telegraf#ServiceInput [telegraf.Accumulator]: https://godoc.org/github.com/influxdata/telegraf#Accumulator diff --git a/docs/OUTPUTS.md b/docs/OUTPUTS.md index 1a27ca515f118..db8383126ad68 100644 --- a/docs/OUTPUTS.md +++ b/docs/OUTPUTS.md @@ -13,9 +13,9 @@ similar constructs. `github.com/influxdata/telegraf/plugins/outputs/all/all.go` file. - The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf config`. Please - consult the [SampleConfig][] page for the latest style guidelines. + consult the [Sample Config][] page for the latest style guidelines. - The `Description` function should say in one line what this output does. -- Follow the recommended [CodeStyle][]. +- Follow the recommended [Code Style][]. ### Output Plugin Example @@ -115,6 +115,6 @@ or investigate other reasons why the writes might be taking longer than expected [file]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/file [output data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig -[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle +[Sample Config]: https://github.com/influxdata/telegraf/blob/master/docs/developers/SAMPLE_CONFIG.md +[Code Style]: https://github.com/influxdata/telegraf/blob/master/docs/developers/CODE_STYLE.md [telegraf.Output]: https://godoc.org/github.com/influxdata/telegraf#Output diff --git a/docs/PROCESSORS.md b/docs/PROCESSORS.md index 25566fe323fd2..30b2c643de8f6 100644 --- a/docs/PROCESSORS.md +++ b/docs/PROCESSORS.md @@ -14,9 +14,9 @@ This section is for developers who want to create a new processor plugin. config`. - The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf config`. Please - consult the [SampleConfig][] page for the latest style guidelines. + consult the [Sample Config][] page for the latest style guidelines. * The `Description` function should say in one line what this processor does. -- Follow the recommended [CodeStyle][]. +- Follow the recommended [Code Style][]. ### Processor Plugin Example @@ -160,7 +160,7 @@ func init() { } ``` -[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig -[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle +[Sample Config]: https://github.com/influxdata/telegraf/blob/master/docs/developers/SAMPLE_CONFIG.md +[Code Style]: https://github.com/influxdata/telegraf/blob/master/docs/developers/CODE_STYLE.md [telegraf.Processor]: https://godoc.org/github.com/influxdata/telegraf#Processor [telegraf.StreamingProcessor]: https://godoc.org/github.com/influxdata/telegraf#StreamingProcessor diff --git a/docs/maintainers/PULL_REQUESTS.md b/docs/maintainers/PULL_REQUESTS.md index c41e4dd138788..e7b26c10fca69 100644 --- a/docs/maintainers/PULL_REQUESTS.md +++ b/docs/maintainers/PULL_REQUESTS.md @@ -19,7 +19,7 @@ contributor to merge or rebase. ## Review -[Review the pull request](Review). +[Review the pull request](docs/developers/REVIEWS.md). ## Merge @@ -53,7 +53,7 @@ message. If applicable mention the plugin in the message. ## After Merge -[Update the Changelog](Changelog). +[Update the Changelog](https://github.com/influxdata/telegraf/blob/master/docs/maintainers/CHANGELOG.md). If required, backport the patch and the changelog update to the current release branch. Usually this can be done by cherry picking the commits: From b56ffdc49871b25867364cecac630fc2b803186b Mon Sep 17 00:00:00 2001 From: Tuan Nguyen Huy Date: Sun, 9 May 2021 02:17:44 +0700 Subject: [PATCH 419/761] Add ability to enable gzip compression in elasticsearch output (#8913) --- .../outputs/elasticsearch/elasticsearch.go | 4 + .../elasticsearch/elasticsearch_test.go | 73 +++++++++++++++++++ 2 files changed, 77 insertions(+) diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go index 6d85b7e97742f..8f57f4e12ebf5 100644 --- a/plugins/outputs/elasticsearch/elasticsearch.go +++ b/plugins/outputs/elasticsearch/elasticsearch.go @@ -30,6 +30,7 @@ type Elasticsearch struct { EnableSniffer bool Timeout config.Duration HealthCheckInterval config.Duration + EnableGzip bool ManageTemplate bool TemplateName string OverwriteTemplate bool @@ -50,6 +51,8 @@ var sampleConfig = ` ## Set to true to ask Elasticsearch a list of all cluster nodes, ## thus it is not necessary to list all nodes in the urls config option. enable_sniffer = false + ## Set to true to enable gzip compression + enable_gzip = false ## Set the interval to check if the Elasticsearch nodes are available ## Setting to "0s" will disable the health check (not recommended in production) health_check_interval = "10s" @@ -197,6 +200,7 @@ func (a *Elasticsearch) Connect() error { elastic.SetSniff(a.EnableSniffer), elastic.SetURL(a.URLs...), elastic.SetHealthcheckInterval(time.Duration(a.HealthCheckInterval)), + elastic.SetGzip(a.EnableGzip), ) if a.Username != "" && a.Password != "" { diff --git a/plugins/outputs/elasticsearch/elasticsearch_test.go b/plugins/outputs/elasticsearch/elasticsearch_test.go index baf6e3162555c..7ad1e632c6d20 100644 --- a/plugins/outputs/elasticsearch/elasticsearch_test.go +++ b/plugins/outputs/elasticsearch/elasticsearch_test.go @@ -2,6 +2,8 @@ package elasticsearch import ( "context" + "net/http" + "net/http/httptest" "reflect" "testing" "time" @@ -22,6 +24,7 @@ func TestConnectAndWriteIntegration(t *testing.T) { URLs: urls, IndexName: "test-%Y.%m.%d", Timeout: config.Duration(time.Second * 5), + EnableGzip: true, ManageTemplate: true, TemplateName: "telegraf", OverwriteTemplate: false, @@ -50,6 +53,7 @@ func TestTemplateManagementEmptyTemplateIntegration(t *testing.T) { URLs: urls, IndexName: "test-%Y.%m.%d", Timeout: config.Duration(time.Second * 5), + EnableGzip: true, ManageTemplate: true, TemplateName: "", OverwriteTemplate: true, @@ -70,6 +74,7 @@ func TestTemplateManagementIntegration(t *testing.T) { URLs: urls, IndexName: "test-%Y.%m.%d", Timeout: config.Duration(time.Second * 5), + EnableGzip: true, ManageTemplate: true, TemplateName: "telegraf", OverwriteTemplate: true, @@ -96,6 +101,7 @@ func TestTemplateInvalidIndexPatternIntegration(t *testing.T) { URLs: urls, IndexName: "{{host}}-%Y.%m.%d", Timeout: config.Duration(time.Second * 5), + EnableGzip: true, ManageTemplate: true, TemplateName: "telegraf", OverwriteTemplate: true, @@ -254,3 +260,70 @@ func TestGetIndexName(t *testing.T) { } } } + +func TestRequestHeaderWhenGzipIsEnabled(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_bulk": + require.Equal(t, "gzip", r.Header.Get("Content-Encoding")) + require.Equal(t, "gzip", r.Header.Get("Accept-Encoding")) + _, err := w.Write([]byte("{}")) + require.NoError(t, err) + return + default: + _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)) + require.NoError(t, err) + return + } + })) + defer ts.Close() + + urls := []string{"http://" + ts.Listener.Addr().String()} + + e := &Elasticsearch{ + URLs: urls, + IndexName: "{{host}}-%Y.%m.%d", + Timeout: config.Duration(time.Second * 5), + EnableGzip: true, + ManageTemplate: false, + } + + err := e.Connect() + require.NoError(t, err) + + err = e.Write(testutil.MockMetrics()) + require.NoError(t, err) +} + +func TestRequestHeaderWhenGzipIsDisabled(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_bulk": + require.NotEqual(t, "gzip", r.Header.Get("Content-Encoding")) + _, err := w.Write([]byte("{}")) + require.NoError(t, err) + return + default: + _, err := w.Write([]byte(`{"version": {"number": "7.8"}}`)) + require.NoError(t, err) + return + } + })) + defer ts.Close() + + urls := []string{"http://" + ts.Listener.Addr().String()} + + e := &Elasticsearch{ + URLs: urls, + IndexName: "{{host}}-%Y.%m.%d", + Timeout: config.Duration(time.Second * 5), + EnableGzip: false, + ManageTemplate: false, + } + + err := e.Connect() + require.NoError(t, err) + + err = e.Write(testutil.MockMetrics()) + require.NoError(t, err) +} From 8b9883e2eca63f9e4f51e9af1efc7925f7012e3e Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Tue, 11 May 2021 23:08:25 +0200 Subject: [PATCH 420/761] SQL Server - sqlServerRingBufferCPU - removed whitespaces (#9130) ### Required for all PRs: - [ ] Updated associated README.md. - [ ] Wrote appropriate unit tests. Removed a pair of whitespace chars from the **sqlServerRingBufferCPU** SQL statement and added some formatting. This query exists only for the on-prem version of SQL Server (`database_type = "SQLServer"`) If you were unlucky enough to have some SQL 2008 the query wouldn't work as the whitespace char is not allowed inside the statement. --- plugins/inputs/sqlserver/sqlserverqueries.go | 25 ++++++++++---------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index 76a7712522189..756d8a8beabc6 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -1141,7 +1141,7 @@ END; WITH utilization_cte AS ( SELECT - [SQLProcessUtilization] AS [sqlserver_process_cpu] + [SQLProcessUtilization] AS [sqlserver_process_cpu] ,[SystemIdle] AS [system_idle_cpu] ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] FROM ( @@ -1170,8 +1170,8 @@ WITH utilization_cte AS ), processor_Info_cte AS ( - SELECT (cpu_count / hyperthread_ratio) as number_of_physical_cpus -  FROM sys.dm_os_sys_info + SELECT ([cpu_count] / [hyperthread_ratio]) as [number_of_physical_cpus] + FROM sys.dm_os_sys_info ) SELECT 'sqlserver_cpu' AS [measurement] @@ -1179,16 +1179,15 @@ SELECT ,[sqlserver_process_cpu] ,[system_idle_cpu] ,100 - [system_idle_cpu] - [sqlserver_process_cpu] AS [other_process_cpu] -FROM - ( - SELECT - (case - when [other_process_cpu] < 0 then [sqlserver_process_cpu] / a.number_of_physical_cpus - else [sqlserver_process_cpu] -  end) as [sqlserver_process_cpu] - ,[system_idle_cpu] - FROM utilization_cte - CROSS APPLY processor_Info_cte a +FROM ( + SELECT + (CASE + WHEN u.[other_process_cpu] < 0 THEN u.[sqlserver_process_cpu] / p.[number_of_physical_cpus] + ELSE u.[sqlserver_process_cpu] + END) AS [sqlserver_process_cpu] + ,u.[system_idle_cpu] + FROM utilization_cte AS u + CROSS APPLY processor_Info_cte AS p ) AS b ` From 741e3884e62d1f8183203649e928e233df98c6c3 Mon Sep 17 00:00:00 2001 From: maxuntr <33737845+maxuntr@users.noreply.github.com> Date: Tue, 11 May 2021 17:16:58 -0400 Subject: [PATCH 421/761] Kinesis_consumer input plugin - fix repeating parser error (#9169) --- plugins/inputs/kinesis_consumer/kinesis_consumer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 64822c2d75453..57b1998311ec7 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -230,8 +230,8 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { } err := k.onMessage(k.acc, r) if err != nil { - k.sem <- struct{}{} - return consumer.ScanStatus{Error: err} + <-k.sem + k.Log.Errorf("Scan parser error: %s", err.Error()) } return consumer.ScanStatus{} From 1ea5c20aadc55863ce1acd49051e6d6db03b4d60 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 11 May 2021 17:24:05 -0400 Subject: [PATCH 422/761] Update packaging.md (#9258) --- docs/developers/PACKAGING.md | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/docs/developers/PACKAGING.md b/docs/developers/PACKAGING.md index 836fd01973d3f..f9708fb7164d0 100644 --- a/docs/developers/PACKAGING.md +++ b/docs/developers/PACKAGING.md @@ -27,15 +27,10 @@ git checkout release-1.10 git reset --hard 1.10.2 make deps -# This builds _all_ platforms and architectures; will take a long time -./scripts/build.py --release --package -``` - -If you would like to only build a subset of the packages run this: +# To build packages run: ``` -# Use the platform and arch arguments to skip unwanted packages: -./scripts/build.py --release --package --platform=linux --arch=amd64 +make package amd64=1 ``` From the host system, copy the build artifacts out of the container: From b3f5a15410b2a43f5a7744972f309558dc268ed2 Mon Sep 17 00:00:00 2001 From: SomKen Date: Wed, 12 May 2021 18:20:04 -0700 Subject: [PATCH 423/761] Add Freebsd armv7 URL for nightly builds / organize (#9268) --- README.md | 41 +++++++++++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index b2d8e6a548d3c..d6c3e7fd4c7b0 100644 --- a/README.md +++ b/README.md @@ -74,33 +74,46 @@ version. ### Nightly Builds These builds are generated from the master branch: -- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) + +FreeBSD - .tar.gz +- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) +- [telegraf-nightly_freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) +- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) + +Linux - .rpm +- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) +- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) +- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) +- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) +- [telegraf-nightly.ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) +- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) +- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) + +Linux - .deb - [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) - [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) -- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) - [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) -- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) - [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) -- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) -- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) -- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) - [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) -- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) +- [telegraf_nightly_ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) +- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) + +Linux - .tar.gz - [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) - [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) - [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) - [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) - [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) - [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) -- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) -- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) -- [telegraf_nightly_ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) -- [telegraf-nightly.ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) -- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) -- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) -- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) - [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz) +OSX - .tar.gz +- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) + +Windows - .zip +- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) +- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) + ## How to use it: See usage with: From 940fea9135b8612b0a826ab6cfef349a9ba19018 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Thu, 13 May 2021 15:28:14 -0400 Subject: [PATCH 424/761] fix spelling of receive (#9269) --- plugins/parsers/prometheusremotewrite/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/parsers/prometheusremotewrite/README.md b/plugins/parsers/prometheusremotewrite/README.md index 213bb601529de..b409e9e6d5c8f 100644 --- a/plugins/parsers/prometheusremotewrite/README.md +++ b/plugins/parsers/prometheusremotewrite/README.md @@ -10,7 +10,7 @@ Converts prometheus remote write samples directly into Telegraf metrics. It can service_address = ":1234" ## Path to listen to. - path = "/recieve" + path = "/receive" ## Data format to consume. data_format = "prometheusremotewrite" From 5330a74feaf21a5074b774531b2a1d77f4fedd82 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 13 May 2021 15:00:57 -0500 Subject: [PATCH 425/761] Migrate soniah/gosnmp import to gosnmp/gosnmp (#9203) --- .github/dependabot.yml | 2 - docs/LICENSE_OF_DEPENDENCIES.md | 1 - go.mod | 3 +- go.sum | 86 +--------------------- plugins/inputs/snmp_legacy/snmp_legacy.go | 6 +- plugins/inputs/snmp_trap/snmp_trap.go | 7 +- plugins/inputs/snmp_trap/snmp_trap_test.go | 4 +- 7 files changed, 11 insertions(+), 98 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 053ba133f0b87..c1de7d8fd2824 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -7,5 +7,3 @@ updates: ignore: # Dependabot isn't able to update this packages that do not match the source, so anything with a version - dependency-name: "*.v*" - # Updating this package is blocked by: https://github.com/gosnmp/gosnmp/issues/284 - - dependency-name: "github.com/soniah/gosnmp" diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 57f4402d9488a..a712ac0979e57 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -170,7 +170,6 @@ following works: - github.com/signalfx/golib [Apache License 2.0](https://github.com/signalfx/golib/blob/master/LICENSE) - github.com/signalfx/sapm-proto [Apache License 2.0](https://github.com/signalfx/sapm-proto/blob/master/LICENSE) - github.com/sirupsen/logrus [MIT License](https://github.com/sirupsen/logrus/blob/master/LICENSE) -- github.com/soniah/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/soniah/gosnmp/blob/master/LICENSE) - github.com/streadway/amqp [BSD 2-Clause "Simplified" License](https://github.com/streadway/amqp/blob/master/LICENSE) - github.com/stretchr/objx [MIT License](https://github.com/stretchr/objx/blob/master/LICENSE) - github.com/stretchr/testify [custom -- permissive](https://github.com/stretchr/testify/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 4c73b2657b113..f64f609968daa 100644 --- a/go.mod +++ b/go.mod @@ -65,7 +65,7 @@ require ( github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.13 github.com/gorilla/mux v1.7.3 - github.com/gosnmp/gosnmp v1.31.0 + github.com/gosnmp/gosnmp v1.32.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 @@ -112,7 +112,6 @@ require ( github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/signalfx/golib/v3 v3.3.0 github.com/sirupsen/logrus v1.6.0 - github.com/soniah/gosnmp v1.25.0 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 diff --git a/go.sum b/go.sum index 0debba3d9d718..7e7835870e08a 100644 --- a/go.sum +++ b/go.sum @@ -65,10 +65,8 @@ github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEg github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.10 h1:r6fZHMaHD8B6LDCn0o5vyBFHIHrM6Ywwx7mb49lPItI= github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= @@ -108,15 +106,10 @@ github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0T github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -142,8 +135,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 h1:YtaYjXmemIMyySUbs0VGFPqsLpsNHf4TW/L6yqpJQ9s= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= -github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= -github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antchfx/xmlquery v1.3.5 h1:I7TuBRqsnfFuL11ruavGm911Awx9IqSdiU6W/ztSmVw= @@ -169,14 +160,11 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYKI= -github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYKI= -github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.1.0 h1:sKP6QWxdN1oRYjl+k6S3bpgBI+XUx/0mqVOLIw4lR/Q= @@ -212,7 +200,6 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds= github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= @@ -275,7 +262,6 @@ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMa github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= @@ -351,7 +337,6 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -376,22 +361,14 @@ github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -405,13 +382,10 @@ github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= @@ -425,8 +399,6 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= @@ -481,13 +453,10 @@ github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFG github.com/gogo/googleapis v1.3.1 h1:CzMaKrvF6Qa7XtRii064vKBQiyvmY8H8vG1xa1/W1JA= github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= @@ -517,7 +486,6 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -565,7 +533,6 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= @@ -591,9 +558,8 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gosnmp/gosnmp v1.31.0 h1:l18tqymKfReKBPr3kMK4mMM+n3DHlIpsZbBBSy8nuko= -github.com/gosnmp/gosnmp v1.31.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gosnmp/gosnmp v1.32.0 h1:gctewmZx5qFI0oHMzRnjETqIZ093d9NgZy9TQr3V0iA= +github.com/gosnmp/gosnmp v1.32.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= @@ -704,7 +670,6 @@ github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -721,7 +686,6 @@ github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrY github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -733,7 +697,6 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea h1:g2k+8WR7cHch4g0tBDhfiEvAp7fXxTNBiD1oC1Oxj3E= github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= @@ -762,7 +725,6 @@ github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM52 github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -792,8 +754,6 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8= @@ -914,10 +874,8 @@ github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= @@ -936,7 +894,6 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= @@ -1003,8 +960,6 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1019,8 +974,6 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0 github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= @@ -1032,8 +985,6 @@ github.com/shirou/gopsutil v3.21.3+incompatible h1:uenXGGa8ESCQq+dbgtl916dmg6PSA github.com/shirou/gopsutil v3.21.3+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= -github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= -github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -1049,15 +1000,11 @@ github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8 github.com/signalfx/sapm-proto v0.4.0 h1:5lQX++6FeIjUZEIcnSgBqhOpmSjMkRBW3y/4ZiKMo5E= github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbPU959ZNhM0Fk= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -1065,18 +1012,12 @@ github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= -github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= -github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= -github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -1116,9 +1057,6 @@ github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefld github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= -github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= @@ -1156,8 +1094,6 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= -github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= -github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= @@ -1213,7 +1149,6 @@ golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1289,7 +1224,6 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1312,8 +1246,6 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1346,7 +1278,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1357,8 +1288,6 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1374,8 +1303,6 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1389,7 +1316,6 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa h1:ZYxPR6aca/uhfRJyaOAtflSHjJYiktO7QnJC5ut7iY4= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1433,8 +1359,6 @@ golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190906203814-12febf440ab1/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1532,8 +1456,6 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70 h1:wboULUXGF3c5qdUnKp+6gLAccE6PRpa/czkYvQ4UXv8= @@ -1663,7 +1585,6 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= @@ -1693,9 +1614,6 @@ sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnM sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index 99ad5d170cb0e..d85afca8e4e7f 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -11,7 +11,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/soniah/gosnmp" + "github.com/gosnmp/gosnmp" ) // Snmp is a snmp plugin @@ -102,7 +102,7 @@ type Data struct { // Unit Unit string // SNMP getbulk max repetition - MaxRepetition uint8 `toml:"max_repetition"` + MaxRepetition uint32 `toml:"max_repetition"` // SNMP Instance (default 0) // (only used with GET request and if // OID is a name from snmptranslate file) @@ -476,7 +476,7 @@ func (h *Host) SNMPMap( oidNext := oidAsked needMoreRequests := true // Set max repetition - maxRepetition := uint8(32) + maxRepetition := uint32(32) // Launch requests for needMoreRequests { // Launch request diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index acc97790855e4..32107eb5ffe71 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -16,12 +16,11 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/soniah/gosnmp" + "github.com/gosnmp/gosnmp" ) var defaultTimeout = config.Duration(time.Second * 5) -type handler func(*gosnmp.SnmpPacket, *net.UDPAddr) type execer func(config.Duration, string, ...string) ([]byte, error) type mibEntry struct { @@ -50,7 +49,7 @@ type SnmpTrap struct { timeFunc func() time.Time errCh chan error - makeHandlerWrapper func(handler) handler + makeHandlerWrapper func(gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc Log telegraf.Logger `toml:"-"` @@ -261,7 +260,7 @@ func setTrapOid(tags map[string]string, oid string, e mibEntry) { tags["mib"] = e.mibName } -func makeTrapHandler(s *SnmpTrap) handler { +func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { return func(packet *gosnmp.SnmpPacket, addr *net.UDPAddr) { tm := s.timeFunc() fields := map[string]interface{}{} diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index 062c2cf1fe153..98e3d7f09b2e5 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/soniah/gosnmp" + "github.com/gosnmp/gosnmp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ -1270,7 +1270,7 @@ func TestReceiveTrap(t *testing.T) { // Hook into the trap handler so the test knows when the // trap has been received received := make(chan int) - wrap := func(f handler) handler { + wrap := func(f gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc { return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { f(p, a) received <- 0 From 760ad3e366977b2ff84a811ed840e5af3d8b3255 Mon Sep 17 00:00:00 2001 From: Erikas Date: Thu, 13 May 2021 23:36:46 +0300 Subject: [PATCH 426/761] Update kafka.conf (#9263) --- plugins/inputs/jolokia2/examples/kafka.conf | 60 +++++++++++++++++++-- 1 file changed, 57 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/jolokia2/examples/kafka.conf b/plugins/inputs/jolokia2/examples/kafka.conf index ae34831fc55c9..24053b5ad6fa7 100644 --- a/plugins/inputs/jolokia2/examples/kafka.conf +++ b/plugins/inputs/jolokia2/examples/kafka.conf @@ -1,6 +1,30 @@ [[inputs.jolokia2_agent]] name_prefix = "kafka_" + + ## If you intend to use "non_negative_derivative(1s)" with "*.count" fields, you don't need precalculated fields. + # fielddrop = [ + # "*.EventType", + # "*.FifteenMinuteRate", + # "*.FiveMinuteRate", + # "*.MeanRate", + # "*.OneMinuteRate", + # "*.RateUnit", + # "*.LatencyUnit", + # "*.50thPercentile", + # "*.75thPercentile", + # "*.95thPercentile", + # "*.98thPercentile", + # "*.99thPercentile", + # "*.999thPercentile", + # "*.Min", + # "*.Mean", + # "*.Max", + # "*.StdDev" + # ] + + ## jolokia_agent_url tag is not needed if you have only one instance of Kafka on the server. + # tagexclude = ["jolokia_agent_url"] urls = ["http://localhost:8080/jolokia"] @@ -21,9 +45,15 @@ field_name = "$2" [[inputs.jolokia2_agent.metric]] - name = "client" - mbean = "kafka.server:client-id=*,type=*" - tag_keys = ["client-id", "type"] + name = "zookeeper" + mbean = "kafka.server:name=*,type=SessionExpireListener" + field_prefix = "$1." + + [[inputs.jolokia2_agent.metric]] + name = "user" + mbean = "kafka.server:user=*,type=Request" + field_prefix = "" + tag_keys = ["user"] [[inputs.jolokia2_agent.metric]] name = "request" @@ -53,3 +83,27 @@ mbean = "kafka.cluster:name=UnderReplicated,partition=*,topic=*,type=Partition" field_name = "UnderReplicatedPartitions" tag_keys = ["topic", "partition"] + +## If you have multiple instances of Kafka on the server, use 'jolokia_agent_url' as identity of each instance +# [[processors.rename]] +# namepass = ["kafka_*"] +# order = 1 +# [[processors.rename.replace]] +# tag = "jolokia_agent_url" +# dest = "instance" +# +# [[processors.regex]] +# namepass = ["kafka_*"] +# order = 2 +# [[processors.regex.tags]] +# key = "instance" +# pattern = "^.+:8080/.+$" +# replacement = "0" +# [[processors.regex.tags]] +# key = "instance" +# pattern = "^.+:8081/.+$" +# replacement = "1" +# [[processors.regex.tags]] +# key = "instance" +# pattern = "^.+:8082/.+$" +# replacement = "2" From ae7d31996bc1603e1ca6be999db956d02f97f06d Mon Sep 17 00:00:00 2001 From: Giovanni Luisotto Date: Fri, 14 May 2021 06:59:02 +0200 Subject: [PATCH 427/761] SQL Server - SQL Requests - added s.[login_name] (#8351) --- plugins/inputs/sqlserver/azuresqlqueries.go | 2 ++ plugins/inputs/sqlserver/sqlserverqueries.go | 1 + 2 files changed, 3 insertions(+) diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go index 41c0d384ba557..318509ac28ee5 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqlqueries.go @@ -606,6 +606,7 @@ SELECT ,s.[program_name] ,s.[host_name] ,s.[nt_user_name] + ,s.[login_name] ,COALESCE(r.[open_transaction_count], s.[open_transaction_count]) AS [open_transaction] ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) WHEN 0 THEN '0-Read Committed' @@ -1143,6 +1144,7 @@ SELECT ,s.[program_name] ,s.[host_name] ,s.[nt_user_name] + ,s.[login_name] ,COALESCE(r.[open_transaction_count], s.[open_transaction_count]) AS [open_transaction] ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) WHEN 0 THEN '0-Read Committed' diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index 756d8a8beabc6..1d46e5cd91277 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -1063,6 +1063,7 @@ SELECT ,s.[program_name] ,s.[host_name] ,s.[nt_user_name] + ,s.[login_name] ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) WHEN 0 THEN ''0-Read Committed'' WHEN 1 THEN ''1-Read Uncommitted (NOLOCK)'' From df47b41668f530cf2bc40593faaf6746ae46fee9 Mon Sep 17 00:00:00 2001 From: Greg Linton Date: Tue, 18 May 2021 12:29:30 -0400 Subject: [PATCH 428/761] Allow more characters in graphite tags (#9249) --- config/config.go | 12 +- plugins/outputs/graphite/graphite.go | 12 +- plugins/outputs/instrumental/instrumental.go | 2 +- plugins/serializers/graphite/README.md | 11 ++ plugins/serializers/graphite/graphite.go | 42 ++++-- plugins/serializers/graphite/graphite_test.go | 121 ++++++++++++++++++ plugins/serializers/registry.go | 22 +++- 7 files changed, 193 insertions(+), 29 deletions(-) diff --git a/config/config.go b/config/config.go index c1bf9235f1583..02586ff9b8f4e 100644 --- a/config/config.go +++ b/config/config.go @@ -1400,6 +1400,8 @@ func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) c.getFieldBool(tbl, "influx_sort_fields", &sc.InfluxSortFields) c.getFieldBool(tbl, "influx_uint_support", &sc.InfluxUintSupport) c.getFieldBool(tbl, "graphite_tag_support", &sc.GraphiteTagSupport) + c.getFieldString(tbl, "graphite_tag_sanitize_mode", &sc.GraphiteTagSanitizeMode) + c.getFieldString(tbl, "graphite_separator", &sc.GraphiteSeparator) c.getFieldDuration(tbl, "json_timestamp_units", &sc.TimestampUnits) @@ -1464,11 +1466,11 @@ func (c *Config) missingTomlField(_ reflect.Type, key string) error { "data_format", "data_type", "delay", "drop", "drop_original", "dropwizard_metric_registry_path", "dropwizard_tag_paths", "dropwizard_tags_path", "dropwizard_time_format", "dropwizard_time_path", "fielddrop", "fieldpass", "flush_interval", "flush_jitter", "form_urlencoded_tag_keys", - "grace", "graphite_separator", "graphite_tag_support", "grok_custom_pattern_files", - "grok_custom_patterns", "grok_named_patterns", "grok_patterns", "grok_timezone", - "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields", "influx_uint_support", - "interval", "json_name_key", "json_query", "json_strict", "json_string_fields", - "json_time_format", "json_time_key", "json_timestamp_units", "json_timezone", + "grace", "graphite_separator", "graphite_tag_sanitize_mode", "graphite_tag_support", + "grok_custom_pattern_files", "grok_custom_patterns", "grok_named_patterns", "grok_patterns", + "grok_timezone", "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields", + "influx_uint_support", "interval", "json_name_key", "json_query", "json_strict", + "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_units", "json_timezone", "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index 455c7c785e7d2..bd35a4203385a 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -15,8 +15,9 @@ import ( ) type Graphite struct { - GraphiteTagSupport bool `toml:"graphite_tag_support"` - GraphiteSeparator string `toml:"graphite_separator"` + GraphiteTagSupport bool `toml:"graphite_tag_support"` + GraphiteTagSanitizeMode string `toml:"graphite_tag_sanitize_mode"` + GraphiteSeparator string `toml:"graphite_separator"` // URL is only for backwards compatibility Servers []string `toml:"servers"` Prefix string `toml:"prefix"` @@ -43,6 +44,11 @@ var sampleConfig = ` ## Enable Graphite tags support # graphite_tag_support = false + ## Define how metric names and tags are sanitized; options are "strict", or "compatible" + ## strict - Default method, and backwards compatible with previous versionf of Telegraf + ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec + # graphite_tag_sanitize_mode = "strict" + ## Character for separating metric name and field for Graphite tags # graphite_separator = "." @@ -150,7 +156,7 @@ func (g *Graphite) checkEOF(conn net.Conn) { func (g *Graphite) Write(metrics []telegraf.Metric) error { // Prepare data var batch []byte - s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template, g.GraphiteTagSupport, g.GraphiteSeparator, g.Templates) + s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template, g.GraphiteTagSupport, g.GraphiteTagSanitizeMode, g.GraphiteSeparator, g.Templates) if err != nil { return err } diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index 87148ed08d9d9..f7158f16fc4c3 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -88,7 +88,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { } } - s, err := serializers.NewGraphiteSerializer(i.Prefix, i.Template, false, ".", i.Templates) + s, err := serializers.NewGraphiteSerializer(i.Prefix, i.Template, false, "strict", ".", i.Templates) if err != nil { return err } diff --git a/plugins/serializers/graphite/README.md b/plugins/serializers/graphite/README.md index f6fd0c2ccd9bd..f68765c54ae31 100644 --- a/plugins/serializers/graphite/README.md +++ b/plugins/serializers/graphite/README.md @@ -35,6 +35,8 @@ method is used, otherwise the [Template Pattern](templates) is used. ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later. # graphite_tag_support = false + ## Enable Graphite tags to support the full list of allowed characters + # graphite_tag_new_sanitize = false ## Character for separating metric name and field for Graphite tags # graphite_separator = "." ``` @@ -64,4 +66,13 @@ cpu_usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690 cpu_usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690 ``` +The `graphite_tag_sanitize_mode` option defines how we should sanitize the tag names and values. Possible values are `strict`, or `compatible`, with the default being `strict`. + +When in `strict` mode Telegraf uses the same rules as metrics when not using tags. +When in `compatible` mode Telegraf allows more characters through, and is based on the Graphite specification: +>Tag names must have a length >= 1 and may contain any ascii characters except `;!^=`. Tag values must also have a length >= 1, they may contain any ascii characters except `;` and the first character must not be `~`. UTF-8 characters may work for names and values, but they are not well tested and it is not recommended to use non-ascii characters in metric names or tags. Metric names get indexed under the special tag name, if a metric name starts with one or multiple ~ they simply get removed from the derived tag value because the ~ character is not allowed to be in the first position of the tag value. If a metric name consists of no other characters than ~, then it is considered invalid and may get dropped. + + + + [templates]: /docs/TEMPLATE_PATTERN.md diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index f71e97fa419c4..c6130c7b7c4b4 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -16,8 +16,11 @@ import ( const DefaultTemplate = "host.tags.measurement.field" var ( - allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-:._=\p{L}]`) - hyphenChars = strings.NewReplacer( + strictAllowedChars = regexp.MustCompile(`[^a-zA-Z0-9-:._=\p{L}]`) + compatibleAllowedCharsName = regexp.MustCompile(`[^ "-:\<>-\]_a-~\p{L}]`) + compatibleAllowedCharsValue = regexp.MustCompile(`[^ -:<-~\p{L}]`) + compatibleLeadingTildeDrop = regexp.MustCompile(`^[~]*(.*)`) + hyphenChars = strings.NewReplacer( "/", "-", "@", "-", "*", "-", @@ -36,11 +39,12 @@ type GraphiteTemplate struct { } type GraphiteSerializer struct { - Prefix string - Template string - TagSupport bool - Separator string - Templates []*GraphiteTemplate + Prefix string + Template string + TagSupport bool + TagSanitizeMode string + Separator string + Templates []*GraphiteTemplate } func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { @@ -56,7 +60,7 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { if fieldValue == "" { continue } - bucket := SerializeBucketNameWithTags(metric.Name(), metric.Tags(), s.Prefix, s.Separator, fieldName) + bucket := SerializeBucketNameWithTags(metric.Name(), metric.Tags(), s.Prefix, s.Separator, fieldName, s.TagSanitizeMode) metricString := fmt.Sprintf("%s %s %d\n", // insert "field" section of template bucket, @@ -87,7 +91,7 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { } metricString := fmt.Sprintf("%s %s %d\n", // insert "field" section of template - sanitize(InsertField(bucket, fieldName)), + strictSanitize(InsertField(bucket, fieldName)), fieldValue, timestamp) point := []byte(metricString) @@ -248,6 +252,7 @@ func SerializeBucketNameWithTags( prefix string, separator string, field string, + tagSanitizeMode string, ) string { var out string var tagsCopy []string @@ -255,7 +260,11 @@ func SerializeBucketNameWithTags( if k == "name" { k = "_name" } - tagsCopy = append(tagsCopy, sanitize(k+"="+v)) + if tagSanitizeMode == "compatible" { + tagsCopy = append(tagsCopy, compatibleSanitize(k, v)) + } else { + tagsCopy = append(tagsCopy, strictSanitize(k+"="+v)) + } } sort.Strings(tagsCopy) @@ -269,7 +278,7 @@ func SerializeBucketNameWithTags( out += separator + field } - out = sanitize(out) + out = strictSanitize(out) if len(tagsCopy) > 0 { out += ";" + strings.Join(tagsCopy, ";") @@ -308,11 +317,18 @@ func buildTags(tags map[string]string) string { return tagStr } -func sanitize(value string) string { +func strictSanitize(value string) string { // Apply special hyphenation rules to preserve backwards compatibility value = hyphenChars.Replace(value) // Apply rule to drop some chars to preserve backwards compatibility value = dropChars.Replace(value) // Replace any remaining illegal chars - return allowedChars.ReplaceAllLiteralString(value, "_") + return strictAllowedChars.ReplaceAllLiteralString(value, "_") +} + +func compatibleSanitize(name string, value string) string { + name = compatibleAllowedCharsName.ReplaceAllLiteralString(name, "_") + value = compatibleAllowedCharsValue.ReplaceAllLiteralString(value, "_") + value = compatibleLeadingTildeDrop.FindStringSubmatch(value)[1] + return name + "=" + value } diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go index 0a2e0bd7beaa1..f2fd3b7f150a9 100644 --- a/plugins/serializers/graphite/graphite_test.go +++ b/plugins/serializers/graphite/graphite_test.go @@ -543,6 +543,32 @@ func TestSerializeTagWithSpacesWithTagSupport(t *testing.T) { assert.Equal(t, expS, mS) } +func TestSerializeTagWithSpacesWithTagSupportCompatibleSanitize(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": `cpu\ 0`, + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + `field_with_spaces`: float64(91.5), + } + m := metric.New("cpu", tags, fields, now) + + s := GraphiteSerializer{ + TagSupport: true, + TagSanitizeMode: "compatible", + Separator: ".", + } + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + + expS := []string{ + fmt.Sprintf("cpu.field_with_spaces;cpu=cpu\\ 0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), + } + assert.Equal(t, expS, mS) +} + // test that a field named "value" gets ignored at beginning of template. func TestSerializeValueField3(t *testing.T) { now := time.Now() @@ -942,6 +968,101 @@ func TestCleanWithTagsSupport(t *testing.T) { } } +func TestCleanWithTagsSupportCompatibleSanitize(t *testing.T) { + now := time.Unix(1234567890, 0) + tests := []struct { + name string + metricName string + tags map[string]string + fields map[string]interface{} + expected string + }{ + { + "Base metric", + "cpu", + map[string]string{"host": "localhost"}, + map[string]interface{}{"usage_busy": float64(8.5)}, + "cpu.usage_busy;host=localhost 8.5 1234567890\n", + }, + { + "Dot and whitespace in tags", + "cpu", + map[string]string{"host": "localhost", "label.dot and space": "value with.dot"}, + map[string]interface{}{"usage_busy": float64(8.5)}, + "cpu.usage_busy;host=localhost;label.dot and space=value with.dot 8.5 1234567890\n", + }, + { + "Field with space", + "system", + map[string]string{"host": "localhost"}, + map[string]interface{}{"uptime_format": "20 days, 23:26"}, + "", // yes nothing. graphite don't serialize string fields + }, + { + "Allowed punct", + "cpu", + map[string]string{"host": "localhost", "tag": "-_:=!^~"}, + map[string]interface{}{"usage_busy": float64(10)}, + "cpu.usage_busy;host=localhost;tag=-_:=!^~ 10 1234567890\n", + }, + { + "Special characters preserved", + "cpu", + map[string]string{"host": "localhost", "tag": "/@*"}, + map[string]interface{}{"usage_busy": float64(10)}, + "cpu.usage_busy;host=localhost;tag=/@* 10 1234567890\n", + }, + { + "Special characters preserved 2", + "cpu", + map[string]string{"host": "localhost", "tag": `\no change to slash`}, + map[string]interface{}{"usage_busy": float64(10)}, + "cpu.usage_busy;host=localhost;tag=\\no change to slash 10 1234567890\n", + }, + { + "Empty tag & value field", + "cpu", + map[string]string{"host": "localhost"}, + map[string]interface{}{"value": float64(10)}, + "cpu;host=localhost 10 1234567890\n", + }, + { + "Unicode Letters allowed", + "cpu", + map[string]string{"host": "localhost", "tag": "μnicodε_letters"}, + map[string]interface{}{"value": float64(10)}, + "cpu;host=localhost;tag=μnicodε_letters 10 1234567890\n", + }, + { + "Other Unicode not allowed", + "cpu", + map[string]string{"host": "localhost", "tag": "“☢”"}, + map[string]interface{}{"value": float64(10)}, + "cpu;host=localhost;tag=___ 10 1234567890\n", + }, + { + "Newline in tags", + "cpu", + map[string]string{"host": "localhost", "label": "some\nthing\nwith\nnewline"}, + map[string]interface{}{"usage_busy": float64(8.5)}, + "cpu.usage_busy;host=localhost;label=some_thing_with_newline 8.5 1234567890\n", + }, + } + + s := GraphiteSerializer{ + TagSupport: true, + TagSanitizeMode: "compatible", + Separator: ".", + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := metric.New(tt.metricName, tt.tags, tt.fields, now) + actual, _ := s.Serialize(m) + require.Equal(t, tt.expected, string(actual)) + }) + } +} + func TestSerializeBatch(t *testing.T) { now := time.Unix(1234567890, 0) tests := []struct { diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index 247324d4ab4f5..e67a9594dda73 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -59,6 +59,9 @@ type Config struct { // Support tags in graphite protocol GraphiteTagSupport bool `toml:"graphite_tag_support"` + // Support tags which follow the spec + GraphiteTagSanitizeMode string `toml:"graphite_tag_sanitize_mode"` + // Character for separating metric name and field for Graphite tags GraphiteSeparator string `toml:"graphite_separator"` @@ -118,7 +121,7 @@ func NewSerializer(config *Config) (Serializer, error) { case "influx": serializer, err = NewInfluxSerializerConfig(config) case "graphite": - serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.GraphiteSeparator, config.Templates) + serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.GraphiteTagSanitizeMode, config.GraphiteSeparator, config.Templates) case "json": serializer, err = NewJSONSerializer(config.TimestampUnits) case "splunkmetric": @@ -223,7 +226,7 @@ func NewInfluxSerializer() (Serializer, error) { return influx.NewSerializer(), nil } -func NewGraphiteSerializer(prefix, template string, tagSupport bool, separator string, templates []string) (Serializer, error) { +func NewGraphiteSerializer(prefix, template string, tagSupport bool, tagSanitizeMode string, separator string, templates []string) (Serializer, error) { graphiteTemplates, defaultTemplate, err := graphite.InitGraphiteTemplates(templates) if err != nil { @@ -234,16 +237,21 @@ func NewGraphiteSerializer(prefix, template string, tagSupport bool, separator s template = defaultTemplate } + if tagSanitizeMode == "" { + tagSanitizeMode = "strict" + } + if separator == "" { separator = "." } return &graphite.GraphiteSerializer{ - Prefix: prefix, - Template: template, - TagSupport: tagSupport, - Separator: separator, - Templates: graphiteTemplates, + Prefix: prefix, + Template: template, + TagSupport: tagSupport, + TagSanitizeMode: tagSanitizeMode, + Separator: separator, + Templates: graphiteTemplates, }, nil } From 3a1a44d67ecd775fd45921a8abf65f1e1f830eb3 Mon Sep 17 00:00:00 2001 From: Gareth Dunstone Date: Wed, 19 May 2021 07:20:13 +1000 Subject: [PATCH 429/761] Feature: merge multiple "--config" and "--config-directory" flags (#9007) --- cmd/telegraf/telegraf.go | 43 ++++++++++++++++++++++++++------ cmd/telegraf/telegraf_windows.go | 13 +++++++--- 2 files changed, 44 insertions(+), 12 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 60001fb60e064..02acdbbdebeb4 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -29,6 +29,18 @@ import ( _ "github.com/influxdata/telegraf/plugins/processors/all" ) +type sliceFlags []string + +func (i *sliceFlags) String() string { + s := strings.Join(*i, " ") + return "[" + s + "]" +} + +func (i *sliceFlags) Set(value string) error { + *i = append(*i, value) + return nil +} + // If you update these, update usage.go and usage_windows.go var fDebug = flag.Bool("debug", false, "turn on debug logging") @@ -38,9 +50,10 @@ var fQuiet = flag.Bool("quiet", false, "run in quiet mode") var fTest = flag.Bool("test", false, "enable test mode: gather metrics, print them out, and exit. Note: Test mode only runs inputs, not processors, aggregators, or outputs") var fTestWait = flag.Int("test-wait", 0, "wait up to this many seconds for service inputs to complete in test mode") -var fConfig = flag.String("config", "", "configuration file to load") -var fConfigDirectory = flag.String("config-directory", "", - "directory containing additional *.conf files") + +var fConfigs sliceFlags +var fConfigDirs sliceFlags + var fVersion = flag.Bool("version", false, "display the version and exit") var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration") @@ -133,17 +146,28 @@ func runAgent(ctx context.Context, c := config.NewConfig() c.OutputFilters = outputFilters c.InputFilters = inputFilters - err := c.LoadConfig(*fConfig) - if err != nil { - return err + var err error + // providing no "config" flag should load default config + if len(fConfigs) == 0 { + err = c.LoadConfig("") + if err != nil { + return err + } + } + for _, fConfig := range fConfigs { + err = c.LoadConfig(fConfig) + if err != nil { + return err + } } - if *fConfigDirectory != "" { - err = c.LoadDirectory(*fConfigDirectory) + for _, fConfigDirectory := range fConfigDirs { + err = c.LoadDirectory(fConfigDirectory) if err != nil { return err } } + if !*fTest && len(c.Outputs) == 0 { return errors.New("Error: no outputs found, did you provide a valid config file?") } @@ -245,6 +269,9 @@ func formatFullVersion() string { } func main() { + flag.Var(&fConfigs, "config", "configuration file to load") + flag.Var(&fConfigDirs, "config-directory", "directory containing additional *.conf files") + flag.Usage = func() { usageExit(0) } flag.Parse() args := flag.Args() diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go index d04bfc34c7555..8bd14d64eaa19 100644 --- a/cmd/telegraf/telegraf_windows.go +++ b/cmd/telegraf/telegraf_windows.go @@ -74,12 +74,17 @@ func runAsWindowsService(inputFilters, outputFilters []string) { // Handle the --service flag here to prevent any issues with tooling that // may not have an interactive session, e.g. installing from Ansible. if *fService != "" { - if *fConfig != "" { - svcConfig.Arguments = []string{"--config", *fConfig} + if len(fConfigs) > 0 { + svcConfig.Arguments = []string{} } - if *fConfigDirectory != "" { - svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", *fConfigDirectory) + for _, fConfig := range fConfigs { + svcConfig.Arguments = append(svcConfig.Arguments, "--config", fConfig) } + + for _, fConfigDirectory := range fConfigDirs { + svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", fConfigDirectory) + } + //set servicename to service cmd line, to have a custom name after relaunch as a service svcConfig.Arguments = append(svcConfig.Arguments, "--service-name", *fServiceName) From e8ae01921b2aa93c07be4f651af5def6e210e6ca Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Thu, 20 May 2021 07:36:36 -0700 Subject: [PATCH 430/761] Set user agent when scraping prom metrics (#9271) --- plugins/inputs/prometheus/prometheus.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 01ebfb61b2a24..2aec8ed8b392d 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -15,6 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" parser_v2 "github.com/influxdata/telegraf/plugins/parsers/prometheus" @@ -58,7 +59,8 @@ type Prometheus struct { Log telegraf.Logger - client *http.Client + client *http.Client + headers map[string]string // Should we scrape Kubernetes services for prometheus annotations MonitorPods bool `toml:"monitor_kubernetes_pods"` @@ -273,6 +275,10 @@ func (p *Prometheus) Gather(acc telegraf.Accumulator) error { return err } p.client = client + p.headers = map[string]string{ + "User-Agent": internal.ProductToken(), + "Accept": acceptHeader, + } } var wg sync.WaitGroup @@ -350,7 +356,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error } } - req.Header.Add("Accept", acceptHeader) + p.addHeaders(req) if p.BearerToken != "" { token, err := ioutil.ReadFile(p.BearerToken) @@ -427,6 +433,12 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return nil } +func (p *Prometheus) addHeaders(req *http.Request) { + for header, value := range p.headers { + req.Header.Add(header, value) + } +} + /* Check if the field selector specified is valid. * See ToSelectableFields() for list of fields that are selectable: * https://github.com/kubernetes/kubernetes/release-1.20/pkg/registry/core/pod/strategy.go From 8c73370e5019fbbe84a8862b7cfc188846ef72e3 Mon Sep 17 00:00:00 2001 From: Rajiv Kushwaha Date: Fri, 21 May 2021 00:51:20 +0530 Subject: [PATCH 431/761] inputs.ping: Add an option to specify packet size (#9274) --- plugins/inputs/ping/README.md | 4 ++++ plugins/inputs/ping/ping.go | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 82c0d58480b2a..5829d6bd07283 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -70,6 +70,10 @@ native Go by the Telegraf process, eliminating the need to execute the system ## Use only IPv6 addresses when resolving a hostname. # ipv6 = false + + ## Number of data bytes to be sent. Corresponds to the "-s" + ## option of the ping command. This only works with the native method. + # size = 56 ``` #### File Limit diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index c8d768c64a385..7d3b05178ad0b 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -18,6 +18,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +const ( + defaultPingDataBytesSize = 56 +) + // HostPinger is a function that runs the "ping" function using a list of // passed arguments. This can be easily switched with a mocked ping function // for unit test purposes (see ping_test.go) @@ -73,6 +77,9 @@ type Ping struct { // Calculate the given percentiles when using native method Percentiles []int + + // Packet size + Size *int } func (*Ping) Description() string { @@ -125,6 +132,10 @@ const sampleConfig = ` ## Use only IPv6 addresses when resolving a hostname. # ipv6 = false + + ## Number of data bytes to be sent. Corresponds to the "-s" + ## option of the ping command. This only works with the native method. + # size = 56 ` func (*Ping) SampleConfig() string { @@ -172,6 +183,13 @@ func (p *Ping) nativePing(destination string) (*pingStats, error) { pinger.SetNetwork("ip6") } + if p.Method == "native" { + pinger.Size = defaultPingDataBytesSize + if p.Size != nil { + pinger.Size = *p.Size + } + } + pinger.Source = p.sourceAddress pinger.Interval = p.calcInterval From 468e7e1650049ea20914167a607f625821be6d3e Mon Sep 17 00:00:00 2001 From: Thomas Casteleyn Date: Thu, 20 May 2021 22:34:07 +0200 Subject: [PATCH 432/761] Cleanup metric type docs (#9244) --- metric.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/metric.go b/metric.go index 32bc69499aedd..23098bb8bc71e 100644 --- a/metric.go +++ b/metric.go @@ -57,9 +57,7 @@ type Metric interface { Time() time.Time // Type returns a general type for the entire metric that describes how you - // might interpret, aggregate the values. - // - // This method may be removed in the future and its use is discouraged. + // might interpret, aggregate the values. Used by prometheus and statsd. Type() ValueType // SetName sets the metric name. From 0e55eedd7eb29b407c8dbaef2afd725307794d75 Mon Sep 17 00:00:00 2001 From: Victor Marinsky Date: Thu, 20 May 2021 16:37:01 -0400 Subject: [PATCH 433/761] Improve eventhub_consumer input documentation (#8731) --- plugins/inputs/eventhub_consumer/README.md | 11 +++++++++-- plugins/inputs/eventhub_consumer/eventhub_consumer.go | 11 +++++++++-- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/eventhub_consumer/README.md b/plugins/inputs/eventhub_consumer/README.md index 06c43cf318d39..c0533b513b8bf 100644 --- a/plugins/inputs/eventhub_consumer/README.md +++ b/plugins/inputs/eventhub_consumer/README.md @@ -18,8 +18,6 @@ The main focus for development of this plugin is Azure IoT hub: ## This requires one of the following sets of environment variables to be set: ## ## 1) Expected Environment Variables: - ## - "EVENTHUB_NAMESPACE" - ## - "EVENTHUB_NAME" ## - "EVENTHUB_CONNECTION_STRING" ## ## 2) Expected Environment Variables: @@ -28,8 +26,17 @@ The main focus for development of this plugin is Azure IoT hub: ## - "EVENTHUB_KEY_NAME" ## - "EVENTHUB_KEY_VALUE" + ## 3) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "AZURE_TENANT_ID" + ## - "AZURE_CLIENT_ID" + ## - "AZURE_CLIENT_SECRET" + ## Uncommenting the option below will create an Event Hub client based solely on the connection string. ## This can either be the associated environment variable or hard coded directly. + ## If this option is uncommented, environment variables will be ignored. + ## Connection string should contain EventHubName (EntityPath) # connection_string = "" ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister diff --git a/plugins/inputs/eventhub_consumer/eventhub_consumer.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go index da66872da6284..114a6335060ca 100644 --- a/plugins/inputs/eventhub_consumer/eventhub_consumer.go +++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go @@ -69,8 +69,6 @@ func (*EventHub) SampleConfig() string { ## This requires one of the following sets of environment variables to be set: ## ## 1) Expected Environment Variables: - ## - "EVENTHUB_NAMESPACE" - ## - "EVENTHUB_NAME" ## - "EVENTHUB_CONNECTION_STRING" ## ## 2) Expected Environment Variables: @@ -79,8 +77,17 @@ func (*EventHub) SampleConfig() string { ## - "EVENTHUB_KEY_NAME" ## - "EVENTHUB_KEY_VALUE" + ## 3) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "AZURE_TENANT_ID" + ## - "AZURE_CLIENT_ID" + ## - "AZURE_CLIENT_SECRET" + ## Uncommenting the option below will create an Event Hub client based solely on the connection string. ## This can either be the associated environment variable or hard coded directly. + ## If this option is uncommented, environment variables will be ignored. + ## Connection string should contain EventHubName (EntityPath) # connection_string = "" ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister From 60bb676730165beb75b9919a3ec750e194d23cf8 Mon Sep 17 00:00:00 2001 From: Chris Dagenais Date: Thu, 20 May 2021 14:50:40 -0600 Subject: [PATCH 434/761] vSphere input: need to be able to configure the historical interval duration (#9276) --- plugins/inputs/vsphere/README.md | 9 +++++++-- plugins/inputs/vsphere/endpoint.go | 6 +++--- plugins/inputs/vsphere/vsphere.go | 6 ++++++ plugins/inputs/vsphere/vsphere_test.go | 7 +++++++ 4 files changed, 23 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 9bb33211d29e4..d43f559b16eb8 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -282,7 +282,7 @@ We can extend this to looking at a cluster level: ```/DC0/host/Cluster1/*/hadoop vCenter keeps two different kinds of metrics, known as realtime and historical metrics. * Realtime metrics: Available at a 20 second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi hosts** and **virtual machine** resources. Realtime metrics are only stored for 1 hour in vCenter. -* Historical metrics: Available at a 5 minute, 30 minutes, 2 hours and 24 hours rollup levels. The vSphere Telegraf plugin only uses the 5 minute rollup. These metrics are stored in the vCenter database and can be expensive and slow to query. Historical metrics are the only type of metrics available for **clusters**, **datastores** and **datacenters**. +* Historical metrics: Available at a (default) 5 minute, 30 minutes, 2 hours and 24 hours rollup levels. The vSphere Telegraf plugin only uses the most granular rollup which defaults to 5 minutes but can be changed in vCenter to other interval durations. These metrics are stored in the vCenter database and can be expensive and slow to query. Historical metrics are the only type of metrics available for **clusters**, **datastores** and **datacenters**. For more information, refer to the vSphere documentation here: https://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch16_Performance.18.2.html @@ -315,7 +315,7 @@ This will disrupt the metric collection and can result in missed samples. The be [[inputs.vsphere]] interval = "300s" - + vcenters = [ "https://someaddress/sdk" ] username = "someuser@vsphere.local" password = "secret" @@ -355,6 +355,11 @@ The vSphere plugin allows you to specify two concurrency settings: While a higher level of concurrency typically has a positive impact on performance, increasing these numbers too much can cause performance issues at the vCenter server. A rule of thumb is to set these parameters to the number of virtual machines divided by 1500 and rounded up to the nearest integer. +### Configuring historical_interval setting + +When the vSphere plugin queries vCenter for historical statistics it queries for statistics that exist at a specific interval. The default historical interval duration is 5 minutes but if this interval has been changed then you must override the default query interval in the vSphere plugin. +* ```historical_interval```: The interval of the most granular statistics configured in vSphere represented in seconds. + ## Measurements & Fields - Cluster Stats diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index a9c226edf80bb..9903647f8d4ee 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -136,7 +136,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL, log telegra parentTag: "", enabled: anythingEnabled(parent.DatacenterMetricExclude), realTime: false, - sampling: 300, + sampling: int32(time.Duration(parent.HistoricalInterval).Seconds()), objects: make(objectMap), filters: newFilterOrPanic(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude), paths: parent.DatacenterInclude, @@ -154,7 +154,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL, log telegra parentTag: "dcname", enabled: anythingEnabled(parent.ClusterMetricExclude), realTime: false, - sampling: 300, + sampling: int32(time.Duration(parent.HistoricalInterval).Seconds()), objects: make(objectMap), filters: newFilterOrPanic(parent.ClusterMetricInclude, parent.ClusterMetricExclude), paths: parent.ClusterInclude, @@ -207,7 +207,7 @@ func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL, log telegra pKey: "dsname", enabled: anythingEnabled(parent.DatastoreMetricExclude), realTime: false, - sampling: 300, + sampling: int32(time.Duration(parent.HistoricalInterval).Seconds()), objects: make(objectMap), filters: newFilterOrPanic(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude), paths: parent.DatastoreInclude, diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index b014f2f764c79..f587ab6aaba95 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -57,6 +57,7 @@ type VSphere struct { ForceDiscoverOnInit bool ObjectDiscoveryInterval config.Duration Timeout config.Duration + HistoricalInterval config.Duration endpoints []*Endpoint cancel context.CancelFunc @@ -250,6 +251,10 @@ var sampleConfig = ` # ssl_key = "/path/to/keyfile" ## Use SSL but skip chain & host verification # insecure_skip_verify = false + + ## The Historical Interval value must match EXACTLY the interval in the daily + # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals + # historical_interval = "5m" ` // SampleConfig returns a set of default configuration to be used as a boilerplate when setting up @@ -374,6 +379,7 @@ func init() { ForceDiscoverOnInit: true, ObjectDiscoveryInterval: config.Duration(time.Second * 300), Timeout: config.Duration(time.Second * 60), + HistoricalInterval: config.Duration(time.Second * 300), } }) } diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index ae2ce57b9a88e..31bb0fdf08844 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -153,6 +153,7 @@ func defaultVSphere() *VSphere { DiscoverConcurrency: 1, CollectConcurrency: 1, Separator: ".", + HistoricalInterval: config.Duration(time.Second * 300), } } @@ -228,6 +229,12 @@ func TestParseConfig(t *testing.T) { tab, err := toml.Parse([]byte(c)) require.NoError(t, err) require.NotNil(t, tab) + +} + +func TestConfigDurationParsing(t *testing.T) { + v := defaultVSphere() + require.Equal(t, int32(300), int32(time.Duration(v.HistoricalInterval).Seconds()), "HistoricalInterval.Seconds() with default duration should resolve 300") } func TestMaxQuery(t *testing.T) { From 1929e48e6159ad9bb8fce01bca11fd50be55676a Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 20 May 2021 16:40:15 -0600 Subject: [PATCH 435/761] Update changelog (cherry picked from commit 3fe5e3ff3d84ce0c6a758064065b591cdaddd526) --- CHANGELOG.md | 27 +++++++++++++++++++++++++++ etc/telegraf.conf | 2 ++ 2 files changed, 29 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 51af7faf9a490..a85038587f10b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,30 @@ +## v1.18.3 [2021-05-20] + +#### Release Notes + + - Added FreeBSD armv7 build + +#### Bugfixes + + - [#9271](https://github.com/influxdata/telegraf/pull/9271) `inputs.prometheus` Set user agent when scraping prom metrics + - [#9203](https://github.com/influxdata/telegraf/pull/9203) Migrate from soniah/gosnmp to gosnmp/gosnmp and update to 1.32.0 + - [#9169](https://github.com/influxdata/telegraf/pull/9169) `inputs.kinesis_consumer` Fix repeating parser error + - [#9130](https://github.com/influxdata/telegraf/pull/9130) `inputs.sqlserver` Remove disallowed whitespace from sqlServerRingBufferCPU query + - [#9238](https://github.com/influxdata/telegraf/pull/9238) Update hashicorp/consul/api module to v1.8.1 + - [#9235](https://github.com/influxdata/telegraf/pull/9235) Migrate from docker/libnetwork/ipvs to moby/ipvs + - [#9224](https://github.com/influxdata/telegraf/pull/9224) Update shirou/gopsutil to 3.21.3 + - [#9209](https://github.com/influxdata/telegraf/pull/9209) Update microsoft/ApplicationInsights-Go to 0.4.4 + - [#9190](https://github.com/influxdata/telegraf/pull/9190) Update gogo/protobuf to 1.3.2 + - [#8746](https://github.com/influxdata/telegraf/pull/8746) Update Azure/go-autorest/autorest/azure/auth to 0.5.6 and Azure/go-autorest/autorest to 0.11.17 + - [#8745](https://github.com/influxdata/telegraf/pull/8745) Update collectd.org to 0.5.0 + - [#8716](https://github.com/influxdata/telegraf/pull/8716) Update nats-io/nats.go 1.10.0 + - [#9039](https://github.com/influxdata/telegraf/pull/9039) Update golang/protobuf to v1.5.1 + - [#8937](https://github.com/influxdata/telegraf/pull/8937) Migrate from ericchiang/k8s to kubernetes/client-go + +#### Features + + - [#8913](https://github.com/influxdata/telegraf/pull/8913) `outputs.elasticsearch` Add ability to enable gzip compression + ## v1.18.2 [2021-04-28] #### Bugfixes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 5160db820730f..1c20bc28e0284 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -530,6 +530,8 @@ # ## Set to true to ask Elasticsearch a list of all cluster nodes, # ## thus it is not necessary to list all nodes in the urls config option. # enable_sniffer = false +# ## Set to true to enable gzip compression +# enable_gzip = false # ## Set the interval to check if the Elasticsearch nodes are available # ## Setting to "0s" will disable the health check (not recommended in production) # health_check_interval = "10s" From 8eec1598719c8b975bc7119446ec0e3faece10c8 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 20 May 2021 13:34:24 -0700 Subject: [PATCH 436/761] update HTTP v2 listener readme (#9250) --- plugins/inputs/http_listener_v2/README.md | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md index 05e48058667ef..108a8d50a9a2a 100644 --- a/plugins/inputs/http_listener_v2/README.md +++ b/plugins/inputs/http_listener_v2/README.md @@ -1,11 +1,13 @@ # HTTP Listener v2 Input Plugin HTTP Listener v2 is a service input plugin that listens for metrics sent via -HTTP. Metrics may be sent in any supported [data format][data_format]. +HTTP. Metrics may be sent in any supported [data format][data_format]. For metrics in +[InfluxDB Line Protocol][line_protocol] it's recommended to use the [`influxdb_listener`][influxdb_listener] +or [`influxdb_v2_listener`][influxdb_v2_listener] instead. **Note:** The plugin previously known as `http_listener` has been renamed `influxdb_listener`. If you would like Telegraf to act as a proxy/relay for -InfluxDB it is recommended to use [`influxdb_listener`][influxdb_listener]. +InfluxDB it is recommended to use [`influxdb_listener`][influxdb_listener] or [`influxdb_v2_listener`][influxdb_v2_listener]. ### Configuration: @@ -57,7 +59,7 @@ This is a sample configuration for the plugin. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" + data_format = "json" ``` ### Metrics: @@ -83,3 +85,5 @@ curl -i -XGET 'http://localhost:8080/telegraf?host=server01&value=0.42' [data_format]: /docs/DATA_FORMATS_INPUT.md [influxdb_listener]: /plugins/inputs/influxdb_listener/README.md +[line_protocol]: https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/ +[influxdb_v2_listener]: /plugins/inputs/influxdb_v2_listener/README.md From 467ab8791255ab44b1304ed3f4d2aa2989b46140 Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Tue, 25 May 2021 14:11:14 -0700 Subject: [PATCH 437/761] chore: update prometheus input readme to match config options (#9270) --- plugins/inputs/prometheus/README.md | 30 ++++++++++++++++--------- plugins/inputs/prometheus/prometheus.go | 2 +- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index ee49e047436e4..88aa5be4941f2 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -10,7 +10,7 @@ in Prometheus format. [[inputs.prometheus]] ## An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] - + ## Metric version controls the mapping from Prometheus metrics into ## Telegraf metrics. When using the prometheus_client output, use the same ## value in both plugins to ensure metrics are round-tripped without @@ -19,30 +19,37 @@ in Prometheus format. ## example: metric_version = 1; ## metric_version = 2; recommended version # metric_version = 1 - + + ## Url tag name (tag containing scrapped url. optional, default is "url") + # url_tag = "url" + ## An array of Kubernetes services to scrape metrics from. # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - + ## Kubernetes config file to create client from. # kube_config = "/path/to/kubernetes.config" - + ## Scrape Kubernetes pods for the following prometheus annotations: ## - prometheus.io/scrape: Enable scraping for this pod ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to - ## set this to `https` & most likely set the tls config. + ## set this to 'https' & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation # monitor_kubernetes_pods = true + ## Get the list of pods to scrape with either the scope of - ## - cluster: the kubernetes watch api (default), no need to specify + ## - cluster: the kubernetes watch api (default, no need to specify) ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. # pod_scrape_scope = "cluster" + ## Only for node scrape scope: node IP of the node that telegraf is running on. ## Either this config or the environment variable NODE_IP must be set. # node_ip = "10.180.1.1" - ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping + + ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. ## Default is 60 seconds. # pod_scrape_interval = 60 + ## Restricts Kubernetes monitoring to a single namespace ## ex: monitor_kubernetes_pods_namespace = "default" # monitor_kubernetes_pods_namespace = "" @@ -51,24 +58,25 @@ in Prometheus format. # field selector to target pods # eg. To scrape pods on a specific node # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" - + ## Use bearer token for authorization. ('bearer_token' takes priority) # bearer_token = "/path/to/bearer/token" ## OR # bearer_token_string = "abc_123" - + ## HTTP Basic Authentication username and password. ('bearer_token' and ## 'bearer_token_string' take priority) # username = "" # password = "" - + ## Specify timeout duration for slower prometheus clients (default is 3s) # response_timeout = "3s" - + ## Optional TLS Config # tls_ca = /path/to/cafile # tls_cert = /path/to/certfile # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification # insecure_skip_verify = false ``` diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 2aec8ed8b392d..4a3b71408c552 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -93,7 +93,7 @@ var sampleConfig = ` # metric_version = 1 ## Url tag name (tag containing scrapped url. optional, default is "url") - # url_tag = "scrapeUrl" + # url_tag = "url" ## An array of Kubernetes services to scrape metrics from. # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] From 9ab2ea5ee2c0e2cea0c68716d183b26a829b7994 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 25 May 2021 23:20:30 +0200 Subject: [PATCH 438/761] Document inactivity procedure. (#9259) --- docs/developers/REVIEWS.md | 36 +++++++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/docs/developers/REVIEWS.md b/docs/developers/REVIEWS.md index d97fe16cdc772..d7e016530e4d4 100644 --- a/docs/developers/REVIEWS.md +++ b/docs/developers/REVIEWS.md @@ -1,27 +1,45 @@ # Reviews -Expect several rounds of back and forth on reviews, non-trivial changes are -rarely accepted on the first pass. - -While review cannot be exhaustively documented, there are several things that -should always be double checked. +Pull-requests require two approvals before being merged. Expect several rounds of back and forth on +reviews, non-trivial changes are rarely accepted on the first pass. It might take some time +until you see a first review so please be patient. All pull requests should follow the style and best practices in the [CONTRIBUTING.md](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md) document. +## Process +The review process is roughly structured as follows: +1. Submit a pull request. +Please check that you signed the [CLA](https://www.influxdata.com/legal/cla/) (and [Corporate CLA](https://www.influxdata.com/legal/ccla/) if you are contributing code on as an employee of your company). Provide a short description of your submission and reference issues that you potentially close. Make sure the CI tests are all green and there are no linter-issues. +1. Get feedback from a first reviewer and a `ready for final review` tag. +Please constructively work with the reviewer to get your code into a mergable state (see also [below](#reviewing-plugin-code)). +1. Get a final review by one of the InfluxData maintainers. +Please fix any issue raised. +1. Wait for the pull-request to be merged. +It might take some time until your PR gets merged, depending on the release cycle and the type of +your pull-request (bugfix, enhancement of existing code, new plugin, etc). Remember, it might be necessary to rebase your code before merge to resolve conflicts. + +Please read the review comments carefully, fix the related part of the code and/or respond in case there is anything unclear. If there is no activity in a pull-request or the contributor does not respond, we apply the following scheme: +1. We send a first reminder after at least 2 weeks of inactivity. +1. After at least another two weeks of inactivity we send a second reminder and are setting the `waiting for response` tag. +1. Another two weeks later we will ask the community for help setting the `help wanted` reminder. +1. In case nobody volunteers to take over the PR within the next 30 days, InfluxData will triage the PR and might close it due to inactivity. + +So in case you expect a longer period of inactivity or you want to abandon a pull-request, please let us know. + ## Reviewing Plugin Code - Avoid variables scoped to the package. Everything should be scoped to the plugin struct, since multiple instances of the same plugin are allowed and package-level variables will cause race conditions. - SampleConfig must match the readme, but not include the plugin name. - structs should include toml tags for fields that are expected to be editable from the config. eg `toml:"command"` (snake_case) -- plugins that want to log should declare the Telegraf logger, not use the log package. eg: +- plugins that want to log should declare the Telegraf logger, not use the log package. eg: ```Go Log telegraf.Logger `toml:"-"` ``` (in tests, you can do `myPlugin.Log = testutil.Logger{}`) - Initialization and config checking should be done on the `Init() error` function, not in the Connect, Gather, or Start functions. -- plugins should avoid synchronization code if they are not starting goroutines. Plugin functions are never called in parallel. +- plugins should avoid synchronization code if they are not starting goroutines. Plugin functions are never called in parallel. - avoid goroutines when you don't need them and removing them would simplify the code - errors should almost always be checked. - avoid boolean fields when a string or enumerated type would be better for future extension. Lots of boolean fields also make the code difficult to maintain. @@ -39,8 +57,8 @@ document. - they seem unnecessary, superfluous, or gratuitous - consider adding build tags if plugins have OS-specific considerations - use the right logger log levels so that Telegraf is normally quiet eg `plugin.Log.Debugf()` only shows up when running Telegraf with `--debug` -- consistent field types: dynamically setting the type of a field should be strongly avoided as it causes problems that are difficult to solve later, made worse by having to worry about backwards compatibility in future changes. For example, if an numeric value comes from a string field and it is not clear if the field can sometimes be a float, the author should pick either a float or an int, and parse that field consistently every time. Better to sometimes truncate a float, or to always store ints as floats, rather than changing the field type, which causes downstream problems with output databases. -- backwards compatibility: We work hard not to break existing configurations during new changes. Upgrading Telegraf should be a seamless transition. Possible tools to make this transition smooth are: +- consistent field types: dynamically setting the type of a field should be strongly avoided as it causes problems that are difficult to solve later, made worse by having to worry about backwards compatibility in future changes. For example, if an numeric value comes from a string field and it is not clear if the field can sometimes be a float, the author should pick either a float or an int, and parse that field consistently every time. Better to sometimes truncate a float, or to always store ints as floats, rather than changing the field type, which causes downstream problems with output databases. +- backwards compatibility: We work hard not to break existing configurations during new changes. Upgrading Telegraf should be a seamless transition. Possible tools to make this transition smooth are: - enumerable type fields that allow you to customize behavior (avoid boolean feature flags) - version fields that can be used to opt in to newer changed behavior without breaking old (see inputs.mysql for example) - a new version of the plugin if it has changed significantly (eg outputs.influxdb and outputs.influxdb_v2) From 58479fdb05655bff895215fc66a99159567ac044 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Wed, 26 May 2021 09:13:50 -0700 Subject: [PATCH 439/761] Fix reading config files starting with http: (#9275) --- config/config.go | 26 +++++++++++++++++--------- config/config_test.go | 14 ++++++++++++++ 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/config/config.go b/config/config.go index 02586ff9b8f4e..0391a3c1ad11d 100644 --- a/config/config.go +++ b/config/config.go @@ -50,6 +50,10 @@ var ( `\`, `\\`, ) httpLoadConfigRetryInterval = 10 * time.Second + + // fetchURLRe is a regex to determine whether the requested file should + // be fetched from a remote or read from the filesystem. + fetchURLRe = regexp.MustCompile(`^\w+://`) ) // Config specifies the URL/user/password for the database that telegraf @@ -902,17 +906,21 @@ func escapeEnv(value string) string { } func loadConfig(config string) ([]byte, error) { - u, err := url.Parse(config) - if err != nil { - return nil, err - } + if fetchURLRe.MatchString(config) { + u, err := url.Parse(config) + if err != nil { + return nil, err + } - switch u.Scheme { - case "https", "http": - return fetchConfig(u) - default: - // If it isn't a https scheme, try it as a file. + switch u.Scheme { + case "https", "http": + return fetchConfig(u) + default: + return nil, fmt.Errorf("scheme %q not supported", u.Scheme) + } } + + // If it isn't a https scheme, try it as a file return ioutil.ReadFile(config) } diff --git a/config/config_test.go b/config/config_test.go index 3095ffdf12b08..91d0a81e8dc4a 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -5,6 +5,7 @@ import ( "net/http" "net/http/httptest" "os" + "runtime" "strings" "testing" "time" @@ -323,6 +324,19 @@ func TestConfig_URLRetries3FailsThenPasses(t *testing.T) { require.Equal(t, 4, responseCounter) } +func TestConfig_URLLikeFileName(t *testing.T) { + c := NewConfig() + err := c.LoadConfig("http:##www.example.com.conf") + require.Error(t, err) + + if runtime.GOOS == "windows" { + // The error file not found error message is different on windows + require.Equal(t, "Error loading config file http:##www.example.com.conf: open http:##www.example.com.conf: The system cannot find the file specified.", err.Error()) + } else { + require.Equal(t, "Error loading config file http:##www.example.com.conf: open http:##www.example.com.conf: no such file or directory", err.Error()) + } +} + /*** Mockup INPUT plugin for testing to avoid cyclic dependencies ***/ type MockupInputPlugin struct { Servers []string `toml:"servers"` From 2e7b232073650883b016b7bf2ab585a75862f6b2 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 27 May 2021 22:58:46 +0200 Subject: [PATCH 440/761] Modbus refactor (#9141) --- docs/LICENSE_OF_DEPENDENCIES.md | 4 +- go.mod | 3 +- go.sum | 4 + plugins/inputs/modbus/README.md | 18 +- plugins/inputs/modbus/configuration.go | 61 ++ .../inputs/modbus/configuration_original.go | 246 ++++++ plugins/inputs/modbus/modbus.go | 716 ++++++------------ plugins/inputs/modbus/modbus_test.go | 549 ++++++++++---- plugins/inputs/modbus/request.go | 58 ++ plugins/inputs/modbus/type_conversions.go | 54 ++ plugins/inputs/modbus/type_conversions16.go | 138 ++++ plugins/inputs/modbus/type_conversions32.go | 200 +++++ plugins/inputs/modbus/type_conversions64.go | 182 +++++ 13 files changed, 1555 insertions(+), 678 deletions(-) create mode 100644 plugins/inputs/modbus/configuration.go create mode 100644 plugins/inputs/modbus/configuration_original.go create mode 100644 plugins/inputs/modbus/request.go create mode 100644 plugins/inputs/modbus/type_conversions.go create mode 100644 plugins/inputs/modbus/type_conversions16.go create mode 100644 plugins/inputs/modbus/type_conversions32.go create mode 100644 plugins/inputs/modbus/type_conversions64.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index a712ac0979e57..605ee4073b1e0 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -71,8 +71,6 @@ following works: - github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE) - github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) - github.com/go-stack/stack [MIT License](https://github.com/go-stack/stack/blob/master/LICENSE.md) -- github.com/goburrow/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/goburrow/modbus/blob/master/LICENSE) -- github.com/goburrow/serial [MIT License](https://github.com/goburrow/serial/LICENSE) - github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE) - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) - github.com/gogo/googleapis [Apache License 2.0](https://github.com/gogo/googleapis/blob/master/LICENSE) @@ -92,6 +90,8 @@ following works: - github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE) - github.com/gorilla/websocket [BSD 2-Clause "Simplified" License](https://github.com/gorilla/websocket/blob/master/LICENSE) - github.com/gosnmp/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/gosnmp/gosnmp/blob/master/LICENSE) +- github.com/grid-x/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/grid-x/modbus/blob/master/LICENSE) +- github.com/grid-x/serial [MIT License](https://github.com/grid-x/serial/blob/master/LICENSE) - github.com/grpc-ecosystem/grpc-gateway [BSD 3-Clause "New" or "Revised" License](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/LICENSE.txt) - github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) - github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE) diff --git a/go.mod b/go.mod index f64f609968daa..6405514eb9fce 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,7 @@ require ( github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible github.com/go-sql-driver/mysql v1.5.0 - github.com/goburrow/modbus v0.1.0 + github.com/goburrow/modbus v0.1.0 // indirect github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v3.3.0+incompatible @@ -66,6 +66,7 @@ require ( github.com/gopcua/opcua v0.1.13 github.com/gorilla/mux v1.7.3 github.com/gosnmp/gosnmp v1.32.0 + github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 diff --git a/go.sum b/go.sum index 7e7835870e08a..b21a2fe1a00e4 100644 --- a/go.sum +++ b/go.sum @@ -561,6 +561,10 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/gosnmp/gosnmp v1.32.0 h1:gctewmZx5qFI0oHMzRnjETqIZ093d9NgZy9TQr3V0iA= github.com/gosnmp/gosnmp v1.32.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b h1:Y4xqzO0CDNoehCr3ncgie3IgFTO9AzV8PMMEWESFM5c= +github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b/go.mod h1:YaK0rKJenZ74vZFcSSLlAQqtG74PMI68eDjpDCDDmTw= +github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 h1:syBxnRYnSPUDdkdo5U4sy2roxBPQDjNiw4od7xlsABQ= +github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md index 7fe8f8fda4205..6340672b6e13d 100644 --- a/plugins/inputs/modbus/README.md +++ b/plugins/inputs/modbus/README.md @@ -96,7 +96,7 @@ Metric are custom and configured using the `discrete_inputs`, `coils`, The field `data_type` defines the representation of the data value on input from the modbus registers. The input values are then converted from the given `data_type` to a type that is apropriate when -sending the value to the output plugin. These output types are usually one of string, +sending the value to the output plugin. These output types are usually one of string, integer or floating-point-number. The size of the output type is assumed to be large enough for all supported input types. The mapping from the input type to the output type is fixed and cannot be configured. @@ -114,7 +114,7 @@ always include the sign and therefore there exists no variant. These types are handled as an integer type on input, but are converted to floating point representation for further processing (e.g. scaling). Use one of these types when the input value is a decimal fixed point -representation of a non-integer value. +representation of a non-integer value. Select the type `UFIXED` when the input type is declared to hold unsigned integer values, which cannot be negative. The documentation of your modbus device should indicate this by a term like @@ -127,6 +127,20 @@ with N decimal places'. (FLOAT32 is deprecated and should not be used any more. UFIXED provides the same conversion from unsigned values). +### Trouble shooting +Modbus documentations are often a mess. People confuse memory-address (starts at one) and register address (starts at zero) or stay unclear about the used word-order. Furthermore, there are some non-standard implementations that also +swap the bytes within the register word (16-bit). + +If you get an error or don't get the expected values from your device, you can try the following steps (assuming a 32-bit value). + +In case are using a serial device and get an `permission denied` error, please check the permissions of your serial device and change accordingly. + +In case you get an `exception '2' (illegal data address)` error you might try to offset your `address` entries by minus one as it is very likely that there is a confusion between memory and register addresses. + +In case you see strange values, the `byte_order` might be off. You can either probe all combinations (`ABCD`, `CDBA`, `BADC` or `DCBA`) or you set `byte_order="ABCD" data_type="UINT32"` and use the resulting value(s) in an online converter like [this](https://www.scadacore.com/tools/programming-calculators/online-hex-converter/). This makes especially sense if you don't want to mess with the device, deal with 64-bit values and/or don't know the `data_type` of your register (e.g. fix-point floating values vs. IEEE floating point). + +If nothing helps, please post your configuration, error message and/or the output of `byte_order="ABCD" data_type="UINT32"` to one of the telegraf support channels (forum, slack or as issue). + ### Example Output ```sh diff --git a/plugins/inputs/modbus/configuration.go b/plugins/inputs/modbus/configuration.go new file mode 100644 index 0000000000000..cbf36cab15524 --- /dev/null +++ b/plugins/inputs/modbus/configuration.go @@ -0,0 +1,61 @@ +package modbus + +import "fmt" + +const ( + maxQuantityDiscreteInput = uint16(2000) + maxQuantityCoils = uint16(2000) + maxQuantityInputRegisters = uint16(125) + maxQuantityHoldingRegisters = uint16(125) +) + +type Configuration interface { + Check() error + Process() (map[byte]requestSet, error) +} + +func removeDuplicates(elements []uint16) []uint16 { + encountered := map[uint16]bool{} + result := []uint16{} + + for _, addr := range elements { + if !encountered[addr] { + encountered[addr] = true + result = append(result, addr) + } + } + + return result +} + +func normalizeInputDatatype(dataType string) (string, error) { + switch dataType { + case "INT16", "UINT16", "INT32", "UINT32", "INT64", "UINT64", "FLOAT32", "FLOAT64": + return dataType, nil + } + return "unknown", fmt.Errorf("unknown type %q", dataType) +} + +func normalizeOutputDatatype(dataType string) (string, error) { + switch dataType { + case "", "native": + return "native", nil + case "INT64", "UINT64", "FLOAT64": + return dataType, nil + } + return "unknown", fmt.Errorf("unknown type %q", dataType) +} + +func normalizeByteOrder(byteOrder string) (string, error) { + switch byteOrder { + case "ABCD", "MSW-BE", "MSW": // Big endian (Motorola) + return "ABCD", nil + case "BADC", "MSW-LE": // Big endian with bytes swapped + return "BADC", nil + case "CDAB", "LSW-BE": // Little endian with bytes swapped + return "CDAB", nil + case "DCBA", "LSW-LE", "LSW": // Little endian (Intel) + return "DCBA", nil + } + return "unknown", fmt.Errorf("unknown byte-order %q", byteOrder) +} diff --git a/plugins/inputs/modbus/configuration_original.go b/plugins/inputs/modbus/configuration_original.go new file mode 100644 index 0000000000000..cf4b2e1241b8e --- /dev/null +++ b/plugins/inputs/modbus/configuration_original.go @@ -0,0 +1,246 @@ +package modbus + +import ( + "fmt" +) + +type fieldDefinition struct { + Measurement string `toml:"measurement"` + Name string `toml:"name"` + ByteOrder string `toml:"byte_order"` + DataType string `toml:"data_type"` + Scale float64 `toml:"scale"` + Address []uint16 `toml:"address"` +} + +type ConfigurationOriginal struct { + SlaveID byte `toml:"slave_id"` + DiscreteInputs []fieldDefinition `toml:"discrete_inputs"` + Coils []fieldDefinition `toml:"coils"` + HoldingRegisters []fieldDefinition `toml:"holding_registers"` + InputRegisters []fieldDefinition `toml:"input_registers"` +} + +func (c *ConfigurationOriginal) Process() (map[byte]requestSet, error) { + coil, err := c.initRequests(c.Coils, cCoils, maxQuantityCoils) + if err != nil { + return nil, err + } + + discrete, err := c.initRequests(c.DiscreteInputs, cDiscreteInputs, maxQuantityDiscreteInput) + if err != nil { + return nil, err + } + + holding, err := c.initRequests(c.HoldingRegisters, cHoldingRegisters, maxQuantityHoldingRegisters) + if err != nil { + return nil, err + } + + input, err := c.initRequests(c.InputRegisters, cInputRegisters, maxQuantityInputRegisters) + if err != nil { + return nil, err + } + + return map[byte]requestSet{ + c.SlaveID: { + coil: coil, + discrete: discrete, + holding: holding, + input: input, + }, + }, nil +} + +func (c *ConfigurationOriginal) Check() error { + if err := c.validateFieldDefinitions(c.DiscreteInputs, cDiscreteInputs); err != nil { + return err + } + + if err := c.validateFieldDefinitions(c.Coils, cCoils); err != nil { + return err + } + + if err := c.validateFieldDefinitions(c.HoldingRegisters, cHoldingRegisters); err != nil { + return err + } + + return c.validateFieldDefinitions(c.InputRegisters, cInputRegisters) +} + +func (c *ConfigurationOriginal) initRequests(fieldDefs []fieldDefinition, registerType string, maxQuantity uint16) ([]request, error) { + fields, err := c.initFields(fieldDefs) + if err != nil { + return nil, err + } + return newRequestsFromFields(fields, c.SlaveID, registerType, maxQuantity), nil +} + +func (c *ConfigurationOriginal) initFields(fieldDefs []fieldDefinition) ([]field, error) { + // Construct the fields from the field definitions + fields := make([]field, 0, len(fieldDefs)) + for _, def := range fieldDefs { + f, err := c.newFieldFromDefinition(def) + if err != nil { + return nil, fmt.Errorf("initializing field %q failed: %v", def.Name, err) + } + fields = append(fields, f) + } + + return fields, nil +} + +func (c *ConfigurationOriginal) newFieldFromDefinition(def fieldDefinition) (field, error) { + // Check if the addresses are consecutive + expected := def.Address[0] + for _, current := range def.Address[1:] { + expected++ + if current != expected { + return field{}, fmt.Errorf("addresses of field %q are not consecutive", def.Name) + } + } + + // Initialize the field + f := field{ + measurement: def.Measurement, + name: def.Name, + scale: def.Scale, + address: def.Address[0], + length: uint16(len(def.Address)), + } + if def.DataType != "" { + inType, err := c.normalizeInputDatatype(def.DataType, len(def.Address)) + if err != nil { + return f, err + } + outType, err := c.normalizeOutputDatatype(def.DataType) + if err != nil { + return f, err + } + byteOrder, err := c.normalizeByteOrder(def.ByteOrder) + if err != nil { + return f, err + } + + f.converter, err = determineConverter(inType, byteOrder, outType, def.Scale) + if err != nil { + return f, err + } + } + + return f, nil +} + +func (c *ConfigurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefinition, registerType string) error { + nameEncountered := map[string]bool{} + for _, item := range fieldDefs { + //check empty name + if item.Name == "" { + return fmt.Errorf("empty name in '%s'", registerType) + } + + //search name duplicate + canonicalName := item.Measurement + "." + item.Name + if nameEncountered[canonicalName] { + return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, registerType, item.Name) + } + nameEncountered[canonicalName] = true + + if registerType == cInputRegisters || registerType == cHoldingRegisters { + // search byte order + switch item.ByteOrder { + case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA", "ABCDEFGH", "HGFEDCBA", "BADCFEHG", "GHEFCDAB": + default: + return fmt.Errorf("invalid byte order '%s' in '%s' - '%s'", item.ByteOrder, registerType, item.Name) + } + + // search data type + switch item.DataType { + case "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT32-IEEE", "FLOAT64-IEEE", "FLOAT32", "FIXED", "UFIXED": + default: + return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, registerType, item.Name) + } + + // check scale + if item.Scale == 0.0 { + return fmt.Errorf("invalid scale '%f' in '%s' - '%s'", item.Scale, registerType, item.Name) + } + } + + // check address + if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 { + return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), registerType, item.Name) + } + + if registerType == cInputRegisters || registerType == cHoldingRegisters { + if 2*len(item.Address) != len(item.ByteOrder) { + return fmt.Errorf("invalid byte order '%s' and address '%v' in '%s' - '%s'", item.ByteOrder, item.Address, registerType, item.Name) + } + + // search duplicated + if len(item.Address) > len(removeDuplicates(item.Address)) { + return fmt.Errorf("duplicate address '%v' in '%s' - '%s'", item.Address, registerType, item.Name) + } + } else if len(item.Address) != 1 { + return fmt.Errorf("invalid address'%v' length'%v' in '%s' - '%s'", item.Address, len(item.Address), registerType, item.Name) + } + } + return nil +} + +func (c *ConfigurationOriginal) normalizeInputDatatype(dataType string, words int) (string, error) { + // Handle our special types + switch dataType { + case "FIXED": + switch words { + case 1: + return "INT16", nil + case 2: + return "INT32", nil + case 4: + return "INT64", nil + default: + return "unknown", fmt.Errorf("invalid length %d for type %q", words, dataType) + } + case "FLOAT32", "UFIXED": + switch words { + case 1: + return "UINT16", nil + case 2: + return "UINT32", nil + case 4: + return "UINT64", nil + default: + return "unknown", fmt.Errorf("invalid length %d for type %q", words, dataType) + } + case "FLOAT32-IEEE": + return "FLOAT32", nil + case "FLOAT64-IEEE": + return "FLOAT64", nil + } + return normalizeInputDatatype(dataType) +} + +func (c *ConfigurationOriginal) normalizeOutputDatatype(dataType string) (string, error) { + // Handle our special types + switch dataType { + case "FIXED", "FLOAT32", "UFIXED": + return "FLOAT64", nil + } + return normalizeOutputDatatype("native") +} + +func (c *ConfigurationOriginal) normalizeByteOrder(byteOrder string) (string, error) { + // Handle our special types + switch byteOrder { + case "AB", "ABCDEFGH": + return "ABCD", nil + case "BADCFEHG": + return "BADC", nil + case "GHEFCDAB": + return "CDAB", nil + case "BA", "HGFEDCBA": + return "DCBA", nil + } + return normalizeByteOrder(byteOrder) +} diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index f4236c722f4cf..8e1dc90027cf0 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -1,15 +1,14 @@ package modbus import ( - "encoding/binary" "fmt" - "math" "net" "net/url" - "sort" + "strconv" "time" - mb "github.com/goburrow/modbus" + mb "github.com/grid-x/modbus" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" @@ -18,51 +17,46 @@ import ( // Modbus holds all data relevant to the plugin type Modbus struct { - Name string `toml:"name"` - Controller string `toml:"controller"` - TransmissionMode string `toml:"transmission_mode"` - BaudRate int `toml:"baud_rate"` - DataBits int `toml:"data_bits"` - Parity string `toml:"parity"` - StopBits int `toml:"stop_bits"` - SlaveID int `toml:"slave_id"` - Timeout config.Duration `toml:"timeout"` - Retries int `toml:"busy_retries"` - RetriesWaitTime config.Duration `toml:"busy_retries_wait"` - DiscreteInputs []fieldContainer `toml:"discrete_inputs"` - Coils []fieldContainer `toml:"coils"` - HoldingRegisters []fieldContainer `toml:"holding_registers"` - InputRegisters []fieldContainer `toml:"input_registers"` - Log telegraf.Logger `toml:"-"` - registers []register - isConnected bool - tcpHandler *mb.TCPClientHandler - rtuHandler *mb.RTUClientHandler - asciiHandler *mb.ASCIIClientHandler - client mb.Client -} - -type register struct { - Type string - RegistersRange []registerRange - Fields []fieldContainer -} - -type fieldContainer struct { - Measurement string `toml:"measurement"` - Name string `toml:"name"` - ByteOrder string `toml:"byte_order"` - DataType string `toml:"data_type"` - Scale float64 `toml:"scale"` - Address []uint16 `toml:"address"` + Name string `toml:"name"` + Controller string `toml:"controller"` + TransmissionMode string `toml:"transmission_mode"` + BaudRate int `toml:"baud_rate"` + DataBits int `toml:"data_bits"` + Parity string `toml:"parity"` + StopBits int `toml:"stop_bits"` + Timeout config.Duration `toml:"timeout"` + Retries int `toml:"busy_retries"` + RetriesWaitTime config.Duration `toml:"busy_retries_wait"` + Log telegraf.Logger `toml:"-"` + // Register configuration + ConfigurationOriginal + // Connection handling + client mb.Client + handler mb.ClientHandler + isConnected bool + // Request handling + requests map[byte]requestSet +} + +type fieldConverterFunc func(bytes []byte) interface{} + +type requestSet struct { + coil []request + discrete []request + holding []request + input []request +} + +type field struct { + measurement string + name string + scale float64 + address uint16 + length uint16 + converter fieldConverterFunc value interface{} } -type registerRange struct { - address uint16 - length uint16 -} - const ( cDiscreteInputs = "discrete_input" cCoils = "coil" @@ -173,84 +167,74 @@ func (m *Modbus) Init() error { return fmt.Errorf("retries cannot be negative") } - err := m.InitRegister(m.DiscreteInputs, cDiscreteInputs) - if err != nil { - return err + // Check and process the configuration + if err := m.ConfigurationOriginal.Check(); err != nil { + return fmt.Errorf("original configuraton invalid: %v", err) } - err = m.InitRegister(m.Coils, cCoils) + r, err := m.ConfigurationOriginal.Process() if err != nil { - return err + return fmt.Errorf("cannot process original configuraton: %v", err) } + m.requests = r - err = m.InitRegister(m.HoldingRegisters, cHoldingRegisters) - if err != nil { - return err - } - - err = m.InitRegister(m.InputRegisters, cInputRegisters) - if err != nil { - return err + // Setup client + if err := m.initClient(); err != nil { + return fmt.Errorf("initializing client failed: %v", err) } return nil } -func (m *Modbus) InitRegister(fields []fieldContainer, name string) error { - if len(fields) == 0 { - return nil - } - - err := validateFieldContainers(fields, name) - if err != nil { - return err +// Gather implements the telegraf plugin interface method for data accumulation +func (m *Modbus) Gather(acc telegraf.Accumulator) error { + if !m.isConnected { + if err := m.connect(); err != nil { + return err + } } - addrs := []uint16{} - for _, field := range fields { - addrs = append(addrs, field.Address...) + timestamp := time.Now() + for retry := 0; retry <= m.Retries; retry++ { + timestamp = time.Now() + if err := m.gatherFields(); err != nil { + if mberr, ok := err.(*mb.Error); ok && mberr.ExceptionCode == mb.ExceptionCodeServerDeviceBusy && retry < m.Retries { + m.Log.Infof("Device busy! Retrying %d more time(s)...", m.Retries-retry) + time.Sleep(time.Duration(m.RetriesWaitTime)) + continue + } + // Show the disconnect error this way to not shadow the initial error + if discerr := m.disconnect(); discerr != nil { + m.Log.Errorf("Disconnecting failed: %v", discerr) + } + return err + } + // Reading was successful, leave the retry loop + break } - addrs = removeDuplicates(addrs) - sort.Slice(addrs, func(i, j int) bool { return addrs[i] < addrs[j] }) + for slaveID, requests := range m.requests { + tags := map[string]string{ + "name": m.Name, + "type": cCoils, + "slave_id": strconv.Itoa(int(slaveID)), + } + m.collectFields(acc, timestamp, tags, requests.coil) - ii := 0 - maxQuantity := 1 - var registersRange []registerRange - if name == cDiscreteInputs || name == cCoils { - maxQuantity = 2000 - } else if name == cInputRegisters || name == cHoldingRegisters { - maxQuantity = 125 - } + tags["type"] = cDiscreteInputs + m.collectFields(acc, timestamp, tags, requests.discrete) - // Get range of consecutive integers - // [1, 2, 3, 5, 6, 10, 11, 12, 14] - // (1, 3) , (5, 2) , (10, 3), (14 , 1) - for range addrs { - if ii >= len(addrs) { - break - } - quantity := 1 - start := addrs[ii] - end := start - - for ii < len(addrs)-1 && addrs[ii+1]-addrs[ii] == 1 && quantity < maxQuantity { - end = addrs[ii+1] - ii++ - quantity++ - } - ii++ + tags["type"] = cHoldingRegisters + m.collectFields(acc, timestamp, tags, requests.holding) - registersRange = append(registersRange, registerRange{start, end - start + 1}) + tags["type"] = cInputRegisters + m.collectFields(acc, timestamp, tags, requests.input) } - m.registers = append(m.registers, register{name, registersRange, fields}) - return nil } -// Connect to a MODBUS Slave device via Modbus/[TCP|RTU|ASCII] -func connect(m *Modbus) error { +func (m *Modbus) initClient() error { u, err := url.Parse(m.Controller) if err != nil { return err @@ -258,467 +242,189 @@ func connect(m *Modbus) error { switch u.Scheme { case "tcp": - var host, port string - host, port, err = net.SplitHostPort(u.Host) - if err != nil { - return err - } - m.tcpHandler = mb.NewTCPClientHandler(host + ":" + port) - m.tcpHandler.Timeout = time.Duration(m.Timeout) - m.tcpHandler.SlaveId = byte(m.SlaveID) - m.client = mb.NewClient(m.tcpHandler) - err := m.tcpHandler.Connect() + host, port, err := net.SplitHostPort(u.Host) if err != nil { return err } - m.isConnected = true - return nil + handler := mb.NewTCPClientHandler(host + ":" + port) + handler.Timeout = time.Duration(m.Timeout) + m.handler = handler case "file": - if m.TransmissionMode == "RTU" { - m.rtuHandler = mb.NewRTUClientHandler(u.Path) - m.rtuHandler.Timeout = time.Duration(m.Timeout) - m.rtuHandler.SlaveId = byte(m.SlaveID) - m.rtuHandler.BaudRate = m.BaudRate - m.rtuHandler.DataBits = m.DataBits - m.rtuHandler.Parity = m.Parity - m.rtuHandler.StopBits = m.StopBits - m.client = mb.NewClient(m.rtuHandler) - err := m.rtuHandler.Connect() - if err != nil { - return err - } - m.isConnected = true - return nil - } else if m.TransmissionMode == "ASCII" { - m.asciiHandler = mb.NewASCIIClientHandler(u.Path) - m.asciiHandler.Timeout = time.Duration(m.Timeout) - m.asciiHandler.SlaveId = byte(m.SlaveID) - m.asciiHandler.BaudRate = m.BaudRate - m.asciiHandler.DataBits = m.DataBits - m.asciiHandler.Parity = m.Parity - m.asciiHandler.StopBits = m.StopBits - m.client = mb.NewClient(m.asciiHandler) - err := m.asciiHandler.Connect() - if err != nil { - return err - } - m.isConnected = true - return nil - } else { + switch m.TransmissionMode { + case "RTU": + handler := mb.NewRTUClientHandler(u.Path) + handler.Timeout = time.Duration(m.Timeout) + handler.BaudRate = m.BaudRate + handler.DataBits = m.DataBits + handler.Parity = m.Parity + handler.StopBits = m.StopBits + m.handler = handler + case "ASCII": + handler := mb.NewASCIIClientHandler(u.Path) + handler.Timeout = time.Duration(m.Timeout) + handler.BaudRate = m.BaudRate + handler.DataBits = m.DataBits + handler.Parity = m.Parity + handler.StopBits = m.StopBits + m.handler = handler + default: return fmt.Errorf("invalid protocol '%s' - '%s' ", u.Scheme, m.TransmissionMode) } default: - return fmt.Errorf("invalid controller") + return fmt.Errorf("invalid controller %q", m.Controller) } -} -func disconnect(m *Modbus) error { - u, err := url.Parse(m.Controller) - if err != nil { - return err - } + m.handler.SetSlave(m.SlaveID) + m.client = mb.NewClient(m.handler) + m.isConnected = false - switch u.Scheme { - case "tcp": - m.tcpHandler.Close() - return nil - case "file": - if m.TransmissionMode == "RTU" { - m.rtuHandler.Close() - return nil - } else if m.TransmissionMode == "ASCII" { - m.asciiHandler.Close() - return nil - } else { - return fmt.Errorf("invalid protocol '%s' - '%s' ", u.Scheme, m.TransmissionMode) - } - default: - return fmt.Errorf("invalid controller") - } + return nil } -func validateFieldContainers(t []fieldContainer, n string) error { - nameEncountered := map[string]bool{} - for _, item := range t { - //check empty name - if item.Name == "" { - return fmt.Errorf("empty name in '%s'", n) - } - - //search name duplicate - canonicalName := item.Measurement + "." + item.Name - if nameEncountered[canonicalName] { - return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, n, item.Name) - } - nameEncountered[canonicalName] = true - - if n == cInputRegisters || n == cHoldingRegisters { - // search byte order - switch item.ByteOrder { - case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA", "ABCDEFGH", "HGFEDCBA", "BADCFEHG", "GHEFCDAB": - break - default: - return fmt.Errorf("invalid byte order '%s' in '%s' - '%s'", item.ByteOrder, n, item.Name) - } +// Connect to a MODBUS Slave device via Modbus/[TCP|RTU|ASCII] +func (m *Modbus) connect() error { + err := m.handler.Connect() + m.isConnected = err == nil + return err +} - // search data type - switch item.DataType { - case "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT32-IEEE", "FLOAT64-IEEE", "FLOAT32", "FIXED", "UFIXED": - break - default: - return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, n, item.Name) - } +func (m *Modbus) disconnect() error { + err := m.handler.Close() + m.isConnected = false + return err +} - // check scale - if item.Scale == 0.0 { - return fmt.Errorf("invalid scale '%f' in '%s' - '%s'", item.Scale, n, item.Name) - } +func (m *Modbus) gatherFields() error { + for _, requests := range m.requests { + if err := m.gatherRequestsCoil(requests.coil); err != nil { + return err } - - // check address - if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 { - return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name) + if err := m.gatherRequestsDiscrete(requests.discrete); err != nil { + return err } - - if n == cInputRegisters || n == cHoldingRegisters { - if 2*len(item.Address) != len(item.ByteOrder) { - return fmt.Errorf("invalid byte order '%s' and address '%v' in '%s' - '%s'", item.ByteOrder, item.Address, n, item.Name) - } - - // search duplicated - if len(item.Address) > len(removeDuplicates(item.Address)) { - return fmt.Errorf("duplicate address '%v' in '%s' - '%s'", item.Address, n, item.Name) - } - } else if len(item.Address) != 1 { - return fmt.Errorf("invalid address'%v' length'%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name) + if err := m.gatherRequestsHolding(requests.holding); err != nil { + return err + } + if err := m.gatherRequestsInput(requests.input); err != nil { + return err } } + return nil } -func removeDuplicates(elements []uint16) []uint16 { - encountered := map[uint16]bool{} - result := []uint16{} - - for v := range elements { - if encountered[elements[v]] { - } else { - encountered[elements[v]] = true - result = append(result, elements[v]) +func (m *Modbus) gatherRequestsCoil(requests []request) error { + for _, request := range requests { + m.Log.Debugf("trying to read coil@%v[%v]...", request.address, request.length) + bytes, err := m.client.ReadCoils(request.address, request.length) + if err != nil { + return err } - } + m.Log.Debugf("got coil@%v[%v]: %v", request.address, request.length, bytes) - return result -} + // Bit value handling + for i, field := range request.fields { + offset := field.address - request.address + idx := offset / 8 + bit := offset % 8 -func readRegisterValues(m *Modbus, rt string, rr registerRange) ([]byte, error) { - if rt == cDiscreteInputs { - return m.client.ReadDiscreteInputs(rr.address, rr.length) - } else if rt == cCoils { - return m.client.ReadCoils(rr.address, rr.length) - } else if rt == cInputRegisters { - return m.client.ReadInputRegisters(rr.address, rr.length) - } else if rt == cHoldingRegisters { - return m.client.ReadHoldingRegisters(rr.address, rr.length) - } else { - return []byte{}, fmt.Errorf("not Valid function") + request.fields[i].value = uint16((bytes[idx] >> bit) & 0x01) + m.Log.Debugf(" field %s with bit %d @ byte %d: %v --> %v", field.name, bit, idx, (bytes[idx]>>bit)&0x01, request.fields[i].value) + } } + return nil } -func (m *Modbus) getFields() error { - for _, register := range m.registers { - rawValues := make(map[uint16][]byte) - bitRawValues := make(map[uint16]uint16) - for _, rr := range register.RegistersRange { - address := rr.address - readValues, err := readRegisterValues(m, register.Type, rr) - if err != nil { - return err - } - - // Raw Values - if register.Type == cDiscreteInputs || register.Type == cCoils { - for _, readValue := range readValues { - for bitPosition := 0; bitPosition < 8; bitPosition++ { - bitRawValues[address] = getBitValue(readValue, bitPosition) - address = address + 1 - if address > rr.address+rr.length { - break - } - } - } - } - - // Raw Values - if register.Type == cInputRegisters || register.Type == cHoldingRegisters { - batchSize := 2 - for batchSize < len(readValues) { - rawValues[address] = readValues[0:batchSize:batchSize] - address = address + 1 - readValues = readValues[batchSize:] - } - - rawValues[address] = readValues[0:batchSize:batchSize] - } - } - - if register.Type == cDiscreteInputs || register.Type == cCoils { - for i := 0; i < len(register.Fields); i++ { - register.Fields[i].value = bitRawValues[register.Fields[i].Address[0]] - } +func (m *Modbus) gatherRequestsDiscrete(requests []request) error { + for _, request := range requests { + m.Log.Debugf("trying to read discrete@%v[%v]...", request.address, request.length) + bytes, err := m.client.ReadDiscreteInputs(request.address, request.length) + if err != nil { + return err } + m.Log.Debugf("got discrete@%v[%v]: %v", request.address, request.length, bytes) - if register.Type == cInputRegisters || register.Type == cHoldingRegisters { - for i := 0; i < len(register.Fields); i++ { - var valuesT []byte - - for j := 0; j < len(register.Fields[i].Address); j++ { - tempArray := rawValues[register.Fields[i].Address[j]] - for x := 0; x < len(tempArray); x++ { - valuesT = append(valuesT, tempArray[x]) - } - } + // Bit value handling + for i, field := range request.fields { + offset := field.address - request.address + idx := offset / 8 + bit := offset % 8 - register.Fields[i].value = convertDataType(register.Fields[i], valuesT) - } + request.fields[i].value = uint16((bytes[idx] >> bit) & 0x01) + m.Log.Debugf(" field %s with bit %d @ byte %d: %v --> %v", field.name, bit, idx, (bytes[idx]>>bit)&0x01, request.fields[i].value) } } - return nil } -func getBitValue(n byte, pos int) uint16 { - return uint16(n >> uint(pos) & 0x01) -} - -func convertDataType(t fieldContainer, bytes []byte) interface{} { - switch t.DataType { - case "UINT16": - e16 := convertEndianness16(t.ByteOrder, bytes) - return scaleUint16(t.Scale, e16) - case "INT16": - e16 := convertEndianness16(t.ByteOrder, bytes) - f16 := int16(e16) - return scaleInt16(t.Scale, f16) - case "UINT32": - e32 := convertEndianness32(t.ByteOrder, bytes) - return scaleUint32(t.Scale, e32) - case "INT32": - e32 := convertEndianness32(t.ByteOrder, bytes) - f32 := int32(e32) - return scaleInt32(t.Scale, f32) - case "UINT64": - e64 := convertEndianness64(t.ByteOrder, bytes) - f64 := format64(t.DataType, e64).(uint64) - return scaleUint64(t.Scale, f64) - case "INT64": - e64 := convertEndianness64(t.ByteOrder, bytes) - f64 := format64(t.DataType, e64).(int64) - return scaleInt64(t.Scale, f64) - case "FLOAT32-IEEE": - e32 := convertEndianness32(t.ByteOrder, bytes) - f32 := math.Float32frombits(e32) - return scaleFloat32(t.Scale, f32) - case "FLOAT64-IEEE": - e64 := convertEndianness64(t.ByteOrder, bytes) - f64 := math.Float64frombits(e64) - return scaleFloat64(t.Scale, f64) - case "FIXED": - if len(bytes) == 2 { - e16 := convertEndianness16(t.ByteOrder, bytes) - f16 := int16(e16) - return scale16toFloat(t.Scale, f16) - } else if len(bytes) == 4 { - e32 := convertEndianness32(t.ByteOrder, bytes) - f32 := int32(e32) - return scale32toFloat(t.Scale, f32) - } else { - e64 := convertEndianness64(t.ByteOrder, bytes) - f64 := int64(e64) - return scale64toFloat(t.Scale, f64) - } - case "FLOAT32", "UFIXED": - if len(bytes) == 2 { - e16 := convertEndianness16(t.ByteOrder, bytes) - return scale16UtoFloat(t.Scale, e16) - } else if len(bytes) == 4 { - e32 := convertEndianness32(t.ByteOrder, bytes) - return scale32UtoFloat(t.Scale, e32) - } else { - e64 := convertEndianness64(t.ByteOrder, bytes) - return scale64UtoFloat(t.Scale, e64) +func (m *Modbus) gatherRequestsHolding(requests []request) error { + for _, request := range requests { + m.Log.Debugf("trying to read holding@%v[%v]...", request.address, request.length) + bytes, err := m.client.ReadHoldingRegisters(request.address, request.length) + if err != nil { + return err } - default: - return 0 - } -} + m.Log.Debugf("got holding@%v[%v]: %v", request.address, request.length, bytes) -func convertEndianness16(o string, b []byte) uint16 { - switch o { - case "AB": - return binary.BigEndian.Uint16(b) - case "BA": - return binary.LittleEndian.Uint16(b) - default: - return 0 - } -} - -func convertEndianness32(o string, b []byte) uint32 { - switch o { - case "ABCD": - return binary.BigEndian.Uint32(b) - case "DCBA": - return binary.LittleEndian.Uint32(b) - case "BADC": - return uint32(binary.LittleEndian.Uint16(b[0:]))<<16 | uint32(binary.LittleEndian.Uint16(b[2:])) - case "CDAB": - return uint32(binary.BigEndian.Uint16(b[2:]))<<16 | uint32(binary.BigEndian.Uint16(b[0:])) - default: - return 0 - } -} + // Non-bit value handling + for i, field := range request.fields { + // Determine the offset of the field values in the read array + offset := 2 * (field.address - request.address) // registers are 16bit = 2 byte + length := 2 * field.length // field length is in registers a 16bit -func convertEndianness64(o string, b []byte) uint64 { - switch o { - case "ABCDEFGH": - return binary.BigEndian.Uint64(b) - case "HGFEDCBA": - return binary.LittleEndian.Uint64(b) - case "BADCFEHG": - return uint64(binary.LittleEndian.Uint16(b[0:]))<<48 | uint64(binary.LittleEndian.Uint16(b[2:]))<<32 | uint64(binary.LittleEndian.Uint16(b[4:]))<<16 | uint64(binary.LittleEndian.Uint16(b[6:])) - case "GHEFCDAB": - return uint64(binary.BigEndian.Uint16(b[6:]))<<48 | uint64(binary.BigEndian.Uint16(b[4:]))<<32 | uint64(binary.BigEndian.Uint16(b[2:]))<<16 | uint64(binary.BigEndian.Uint16(b[0:])) - default: - return 0 - } -} - -func format64(f string, r uint64) interface{} { - switch f { - case "UINT64": - return r - case "INT64": - return int64(r) - default: - return r + // Convert the actual value + request.fields[i].value = field.converter(bytes[offset : offset+length]) + m.Log.Debugf(" field %s with offset %d with len %d: %v --> %v", field.name, offset, length, bytes[offset:offset+length], request.fields[i].value) + } } + return nil } -func scale16toFloat(s float64, v int16) float64 { - return float64(v) * s -} - -func scale32toFloat(s float64, v int32) float64 { - return float64(float64(v) * float64(s)) -} - -func scale64toFloat(s float64, v int64) float64 { - return float64(float64(v) * float64(s)) -} - -func scale16UtoFloat(s float64, v uint16) float64 { - return float64(v) * s -} - -func scale32UtoFloat(s float64, v uint32) float64 { - return float64(float64(v) * float64(s)) -} - -func scale64UtoFloat(s float64, v uint64) float64 { - return float64(float64(v) * float64(s)) -} - -func scaleInt16(s float64, v int16) int16 { - return int16(float64(v) * s) -} - -func scaleUint16(s float64, v uint16) uint16 { - return uint16(float64(v) * s) -} - -func scaleUint32(s float64, v uint32) uint32 { - return uint32(float64(v) * float64(s)) -} - -func scaleInt32(s float64, v int32) int32 { - return int32(float64(v) * float64(s)) -} - -func scaleFloat32(s float64, v float32) float32 { - return float32(float64(v) * s) -} - -func scaleFloat64(s float64, v float64) float64 { - return v * s -} - -func scaleUint64(s float64, v uint64) uint64 { - return uint64(float64(v) * float64(s)) -} - -func scaleInt64(s float64, v int64) int64 { - return int64(float64(v) * float64(s)) -} - -// Gather implements the telegraf plugin interface method for data accumulation -func (m *Modbus) Gather(acc telegraf.Accumulator) error { - if !m.isConnected { - err := connect(m) +func (m *Modbus) gatherRequestsInput(requests []request) error { + for _, request := range requests { + m.Log.Debugf("trying to read input@%v[%v]...", request.address, request.length) + bytes, err := m.client.ReadInputRegisters(request.address, request.length) if err != nil { - m.isConnected = false return err } - } + m.Log.Debugf("got input@%v[%v]: %v", request.address, request.length, bytes) - timestamp := time.Now() - for retry := 0; retry <= m.Retries; retry++ { - timestamp = time.Now() - err := m.getFields() - if err != nil { - mberr, ok := err.(*mb.ModbusError) - if ok && mberr.ExceptionCode == mb.ExceptionCodeServerDeviceBusy && retry < m.Retries { - m.Log.Infof("Device busy! Retrying %d more time(s)...", m.Retries-retry) - time.Sleep(time.Duration(m.RetriesWaitTime)) - continue - } - // Ignore return error to not shadow the initial error - //nolint:errcheck,revive - disconnect(m) - m.isConnected = false - return err + // Non-bit value handling + for i, field := range request.fields { + // Determine the offset of the field values in the read array + offset := 2 * (field.address - request.address) // registers are 16bit = 2 byte + length := 2 * field.length // field length is in registers a 16bit + + // Convert the actual value + request.fields[i].value = field.converter(bytes[offset : offset+length]) + m.Log.Debugf(" field %s with offset %d with len %d: %v --> %v", field.name, offset, length, bytes[offset:offset+length], request.fields[i].value) } - // Reading was successful, leave the retry loop - break } + return nil +} +func (m *Modbus) collectFields(acc telegraf.Accumulator, timestamp time.Time, tags map[string]string, requests []request) { grouper := metric.NewSeriesGrouper() - for _, reg := range m.registers { - tags := map[string]string{ - "name": m.Name, - "type": reg.Type, - } - - for _, field := range reg.Fields { + for _, request := range requests { + for _, field := range request.fields { // In case no measurement was specified we use "modbus" as default measurement := "modbus" - if field.Measurement != "" { - measurement = field.Measurement + if field.measurement != "" { + measurement = field.measurement } // Group the data by series - if err := grouper.Add(measurement, tags, timestamp, field.Name, field.value); err != nil { - return err + if err := grouper.Add(measurement, tags, timestamp, field.name, field.value); err != nil { + acc.AddError(fmt.Errorf("cannot add field %q for measurement %q: %v", field.name, measurement, err)) + continue } } - - // Add the metrics grouped by series to the accumulator - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) - } } - return nil + // Add the metrics grouped by series to the accumulator + for _, x := range grouper.Metrics() { + acc.AddMetric(x) + } } // Add this plugin to telegraf diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index d506562106da2..b0b49b5711075 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -2,12 +2,15 @@ package modbus import ( "fmt" + "strconv" "testing" + "time" - m "github.com/goburrow/modbus" - "github.com/stretchr/testify/assert" + mb "github.com/grid-x/modbus" + "github.com/stretchr/testify/require" "github.com/tbrandon/mbserver" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" ) @@ -78,44 +81,52 @@ func TestCoils(t *testing.T) { } serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() - assert.NoError(t, err) - handler := m.NewTCPClientHandler("localhost:1502") - err = handler.Connect() - assert.NoError(t, err) + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) defer handler.Close() - client := m.NewClient(handler) + client := mb.NewClient(handler) for _, ct := range coilTests { t.Run(ct.name, func(t *testing.T) { - _, err = client.WriteMultipleCoils(ct.address, ct.quantity, ct.write) - assert.NoError(t, err) + _, err := client.WriteMultipleCoils(ct.address, ct.quantity, ct.write) + require.NoError(t, err) modbus := Modbus{ Name: "TestCoils", Controller: "tcp://localhost:1502", - SlaveID: 1, - Coils: []fieldContainer{ - { - Name: ct.name, - Address: []uint16{ct.address}, - }, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: ct.name, + Address: []uint16{ct.address}, }, - Log: testutil.Logger{}, } - err = modbus.Init() - assert.NoError(t, err) + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cCoils, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + map[string]interface{}{ct.name: ct.read}, + time.Unix(0, 0), + ), + } + var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.NoError(t, err) - assert.NotEmpty(t, modbus.registers) + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) - for _, coil := range modbus.registers { - assert.Equal(t, ct.read, coil.Fields[0].value) - } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } @@ -614,115 +625,291 @@ func TestHoldingRegisters(t *testing.T) { } serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() - assert.NoError(t, err) - handler := m.NewTCPClientHandler("localhost:1502") - err = handler.Connect() - assert.NoError(t, err) + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) defer handler.Close() - client := m.NewClient(handler) + client := mb.NewClient(handler) for _, hrt := range holdingRegisterTests { t.Run(hrt.name, func(t *testing.T) { - _, err = client.WriteMultipleRegisters(hrt.address[0], hrt.quantity, hrt.write) - assert.NoError(t, err) + _, err := client.WriteMultipleRegisters(hrt.address[0], hrt.quantity, hrt.write) + require.NoError(t, err) modbus := Modbus{ Name: "TestHoldingRegisters", Controller: "tcp://localhost:1502", - SlaveID: 1, - HoldingRegisters: []fieldContainer{ - { - Name: hrt.name, - ByteOrder: hrt.byteOrder, - DataType: hrt.dataType, - Scale: hrt.scale, - Address: hrt.address, - }, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.HoldingRegisters = []fieldDefinition{ + { + Name: hrt.name, + ByteOrder: hrt.byteOrder, + DataType: hrt.dataType, + Scale: hrt.scale, + Address: hrt.address, }, - Log: testutil.Logger{}, } - err = modbus.Init() - assert.NoError(t, err) + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cHoldingRegisters, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + map[string]interface{}{hrt.name: hrt.read}, + time.Unix(0, 0), + ), + } + var acc testutil.Accumulator - assert.NoError(t, modbus.Gather(&acc)) - assert.NotEmpty(t, modbus.registers) + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) - for _, coil := range modbus.registers { - assert.Equal(t, hrt.read, coil.Fields[0].value) - } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } +func TestReadMultipleCoilWithHole(t *testing.T) { + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() + + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) + defer handler.Close() + client := mb.NewClient(handler) + + fcs := []fieldDefinition{} + expectedFields := make(map[string]interface{}) + writeValue := uint16(0) + readValue := uint16(0) + for i := 0; i < 14; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("coil-%v", i) + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleCoil(fc.Address[0], writeValue) + require.NoError(t, err) + + expectedFields[fc.Name] = readValue + writeValue = 65280 - writeValue + readValue = 1 - readValue + } + for i := 15; i < 18; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("coil-%v", i) + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleCoil(fc.Address[0], writeValue) + require.NoError(t, err) + + expectedFields[fc.Name] = readValue + writeValue = 65280 - writeValue + readValue = 1 - readValue + } + for i := 24; i < 33; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("coil-%v", i) + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleCoil(fc.Address[0], writeValue) + require.NoError(t, err) + + expectedFields[fc.Name] = readValue + writeValue = 65280 - writeValue + readValue = 1 - readValue + } + require.Len(t, expectedFields, len(fcs)) + + modbus := Modbus{ + Name: "TestReadMultipleCoilWithHole", + Controller: "tcp://localhost:1502", + Log: testutil.Logger{Name: "modbus:MultipleCoilWithHole"}, + } + modbus.SlaveID = 1 + modbus.Coils = fcs + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cCoils, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + expectedFields, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + func TestReadMultipleCoilLimit(t *testing.T) { serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") - assert.NoError(t, err) + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() - handler := m.NewTCPClientHandler("localhost:1502") - err = handler.Connect() - assert.NoError(t, err) + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) defer handler.Close() - client := m.NewClient(handler) + client := mb.NewClient(handler) - fcs := []fieldContainer{} + fcs := []fieldDefinition{} + expectedFields := make(map[string]interface{}) writeValue := uint16(0) - for i := 0; i <= 4000; i++ { - fc := fieldContainer{} + readValue := uint16(0) + for i := 0; i < 4000; i++ { + fc := fieldDefinition{} fc.Name = fmt.Sprintf("coil-%v", i) fc.Address = []uint16{uint16(i)} fcs = append(fcs, fc) - t.Run(fc.Name, func(t *testing.T) { - _, err = client.WriteSingleCoil(fc.Address[0], writeValue) - assert.NoError(t, err) - }) + _, err := client.WriteSingleCoil(fc.Address[0], writeValue) + require.NoError(t, err) + expectedFields[fc.Name] = readValue writeValue = 65280 - writeValue + readValue = 1 - readValue } + require.Len(t, expectedFields, len(fcs)) modbus := Modbus{ Name: "TestReadCoils", Controller: "tcp://localhost:1502", - SlaveID: 1, - Coils: fcs, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = fcs + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cCoils, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + expectedFields, + time.Unix(0, 0), + ), } - err = modbus.Init() - assert.NoError(t, err) var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.NoError(t, err) - - writeValue = 0 - for i := 0; i <= 4000; i++ { - t.Run(modbus.registers[0].Fields[i].Name, func(t *testing.T) { - assert.Equal(t, writeValue, modbus.registers[0].Fields[i].value) - writeValue = 1 - writeValue - }) + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func TestReadMultipleHoldingRegisterWithHole(t *testing.T) { + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() + + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) + defer handler.Close() + client := mb.NewClient(handler) + + fcs := []fieldDefinition{} + expectedFields := make(map[string]interface{}) + for i := 0; i < 10; i++ { + fc := fieldDefinition{ + Name: fmt.Sprintf("HoldingRegister-%v", i), + ByteOrder: "AB", + DataType: "INT16", + Scale: 1.0, + Address: []uint16{uint16(i)}, + } + fcs = append(fcs, fc) + + _, err := client.WriteSingleRegister(fc.Address[0], uint16(i)) + require.NoError(t, err) + + expectedFields[fc.Name] = int64(i) } + for i := 20; i < 30; i++ { + fc := fieldDefinition{ + Name: fmt.Sprintf("HoldingRegister-%v", i), + ByteOrder: "AB", + DataType: "INT16", + Scale: 1.0, + Address: []uint16{uint16(i)}, + } + fcs = append(fcs, fc) + + _, err := client.WriteSingleRegister(fc.Address[0], uint16(i)) + require.NoError(t, err) + + expectedFields[fc.Name] = int64(i) + } + require.Len(t, expectedFields, len(fcs)) + + modbus := Modbus{ + Name: "TestHoldingRegister", + Controller: "tcp://localhost:1502", + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.HoldingRegisters = fcs + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cHoldingRegisters, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + expectedFields, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } func TestReadMultipleHoldingRegisterLimit(t *testing.T) { serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") - assert.NoError(t, err) + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() - handler := m.NewTCPClientHandler("localhost:1502") - err = handler.Connect() - assert.NoError(t, err) + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) defer handler.Close() - client := m.NewClient(handler) + client := mb.NewClient(handler) - fcs := []fieldContainer{} + fcs := []fieldDefinition{} + expectedFields := make(map[string]interface{}) for i := 0; i <= 400; i++ { - fc := fieldContainer{} + fc := fieldDefinition{} fc.Name = fmt.Sprintf("HoldingRegister-%v", i) fc.ByteOrder = "AB" fc.DataType = "INT16" @@ -730,28 +917,40 @@ func TestReadMultipleHoldingRegisterLimit(t *testing.T) { fc.Address = []uint16{uint16(i)} fcs = append(fcs, fc) - t.Run(fc.Name, func(t *testing.T) { - _, err = client.WriteSingleRegister(fc.Address[0], uint16(i)) - assert.NoError(t, err) - }) + _, err := client.WriteSingleRegister(fc.Address[0], uint16(i)) + require.NoError(t, err) + + expectedFields[fc.Name] = int64(i) } modbus := Modbus{ - Name: "TestHoldingRegister", - Controller: "tcp://localhost:1502", - SlaveID: 1, - HoldingRegisters: fcs, + Name: "TestHoldingRegister", + Controller: "tcp://localhost:1502", + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.HoldingRegisters = fcs + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cHoldingRegisters, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + expectedFields, + time.Unix(0, 0), + ), } - err = modbus.Init() - assert.NoError(t, err) var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) - for i := 0; i <= 400; i++ { - assert.Equal(t, int16(i), modbus.registers[0].Fields[i].value) - } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } func TestRetrySuccessful(t *testing.T) { @@ -760,8 +959,7 @@ func TestRetrySuccessful(t *testing.T) { value := 1 serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") - assert.NoError(t, err) + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() // Make read on coil-registers fail for some trials by making the device @@ -781,40 +979,47 @@ func TestRetrySuccessful(t *testing.T) { return data, except }) - t.Run("retry_success", func(t *testing.T) { - modbus := Modbus{ - Name: "TestRetry", - Controller: "tcp://localhost:1502", - SlaveID: 1, - Retries: maxretries, - Coils: []fieldContainer{ - { - Name: "retry_success", - Address: []uint16{0}, - }, + modbus := Modbus{ + Name: "TestRetry", + Controller: "tcp://localhost:1502", + Retries: maxretries, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: "retry_success", + Address: []uint16{0}, + }, + } + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cCoils, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, }, - Log: testutil.Logger{}, - } + map[string]interface{}{"retry_success": uint16(value)}, + time.Unix(0, 0), + ), + } - err = modbus.Init() - assert.NoError(t, err) - var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.NoError(t, err) - assert.NotEmpty(t, modbus.registers) + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) - for _, coil := range modbus.registers { - assert.Equal(t, uint16(value), coil.Fields[0].value) - } - }) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } -func TestRetryFail(t *testing.T) { +func TestRetryFailExhausted(t *testing.T) { maxretries := 2 serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") - assert.NoError(t, err) + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() // Make the read on coils fail with busy @@ -827,27 +1032,35 @@ func TestRetryFail(t *testing.T) { return data, &mbserver.SlaveDeviceBusy }) - t.Run("retry_fail", func(t *testing.T) { - modbus := Modbus{ - Name: "TestRetryFail", - Controller: "tcp://localhost:1502", - SlaveID: 1, - Retries: maxretries, - Coils: []fieldContainer{ - { - Name: "retry_fail", - Address: []uint16{0}, - }, - }, - Log: testutil.Logger{}, - } + modbus := Modbus{ + Name: "TestRetryFailExhausted", + Controller: "tcp://localhost:1502", + Retries: maxretries, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: "retry_fail", + Address: []uint16{0}, + }, + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + + err := modbus.Gather(&acc) + require.Error(t, err) + require.Equal(t, "modbus: exception '6' (server device busy), function '129'", err.Error()) +} + +func TestRetryFailIllegal(t *testing.T) { + maxretries := 2 - err = modbus.Init() - assert.NoError(t, err) - var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.Error(t, err) - }) + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() // Make the read on coils fail with illegal function preventing retry counter := 0 @@ -861,26 +1074,26 @@ func TestRetryFail(t *testing.T) { return data, &mbserver.IllegalFunction }) - t.Run("retry_fail", func(t *testing.T) { - modbus := Modbus{ - Name: "TestRetryFail", - Controller: "tcp://localhost:1502", - SlaveID: 1, - Retries: maxretries, - Coils: []fieldContainer{ - { - Name: "retry_fail", - Address: []uint16{0}, - }, - }, - Log: testutil.Logger{}, - } + modbus := Modbus{ + Name: "TestRetryFailExhausted", + Controller: "tcp://localhost:1502", + Retries: maxretries, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: "retry_fail", + Address: []uint16{0}, + }, + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) - err = modbus.Init() - assert.NoError(t, err) - var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.Error(t, err) - assert.Equal(t, counter, 1) - }) + err := modbus.Gather(&acc) + require.Error(t, err) + require.Equal(t, "modbus: exception '1' (illegal function), function '129'", err.Error()) + require.Equal(t, counter, 1) } diff --git a/plugins/inputs/modbus/request.go b/plugins/inputs/modbus/request.go new file mode 100644 index 0000000000000..125aebe2eb8c4 --- /dev/null +++ b/plugins/inputs/modbus/request.go @@ -0,0 +1,58 @@ +package modbus + +import "sort" + +type request struct { + address uint16 + length uint16 + fields []field +} + +func newRequestsFromFields(fields []field, slaveID byte, registerType string, maxBatchSize uint16) []request { + if len(fields) == 0 { + return nil + } + + // Sort the fields by address (ascending) and length + sort.Slice(fields, func(i, j int) bool { + addrI := fields[i].address + addrJ := fields[j].address + return addrI < addrJ || (addrI == addrJ && fields[i].length > fields[j].length) + }) + + // Construct the consecutive register chunks for the addresses and construct Modbus requests. + // For field addresses like [1, 2, 3, 5, 6, 10, 11, 12, 14] we should construct the following + // requests (1, 3) , (5, 2) , (10, 3), (14 , 1). Furthermore, we should respect field boundaries + // and the given maximum chunk sizes. + var requests []request + + current := request{ + address: fields[0].address, + length: fields[0].length, + fields: []field{fields[0]}, + } + + for _, f := range fields[1:] { + // Check if we need to interrupt the current chunk and require a new one + needInterrupt := f.address != current.address+current.length // not consecutive + needInterrupt = needInterrupt || f.length+current.length > maxBatchSize // too large + + if !needInterrupt { + // Still save to add the field to the current request + current.length += f.length + current.fields = append(current.fields, f) // TODO: omit the field with a future flag + continue + } + + // Finish the current request, add it to the list and construct a new one + requests = append(requests, current) + current = request{ + address: f.address, + length: f.length, + fields: []field{f}, + } + } + requests = append(requests, current) + + return requests +} diff --git a/plugins/inputs/modbus/type_conversions.go b/plugins/inputs/modbus/type_conversions.go new file mode 100644 index 0000000000000..556f7b423c13d --- /dev/null +++ b/plugins/inputs/modbus/type_conversions.go @@ -0,0 +1,54 @@ +package modbus + +import "fmt" + +func determineConverter(inType, byteOrder, outType string, scale float64) (fieldConverterFunc, error) { + if scale != 0.0 { + return determineConverterScale(inType, byteOrder, outType, scale) + } + return determineConverterNoScale(inType, byteOrder, outType) +} + +func determineConverterScale(inType, byteOrder, outType string, scale float64) (fieldConverterFunc, error) { + switch inType { + case "INT16": + return determineConverterI16Scale(outType, byteOrder, scale) + case "UINT16": + return determineConverterU16Scale(outType, byteOrder, scale) + case "INT32": + return determineConverterI32Scale(outType, byteOrder, scale) + case "UINT32": + return determineConverterU32Scale(outType, byteOrder, scale) + case "INT64": + return determineConverterI64Scale(outType, byteOrder, scale) + case "UINT64": + return determineConverterU64Scale(outType, byteOrder, scale) + case "FLOAT32": + return determineConverterF32Scale(outType, byteOrder, scale) + case "FLOAT64": + return determineConverterF64Scale(outType, byteOrder, scale) + } + return nil, fmt.Errorf("invalid input data-type: %s", inType) +} + +func determineConverterNoScale(inType, byteOrder, outType string) (fieldConverterFunc, error) { + switch inType { + case "INT16": + return determineConverterI16(outType, byteOrder) + case "UINT16": + return determineConverterU16(outType, byteOrder) + case "INT32": + return determineConverterI32(outType, byteOrder) + case "UINT32": + return determineConverterU32(outType, byteOrder) + case "INT64": + return determineConverterI64(outType, byteOrder) + case "UINT64": + return determineConverterU64(outType, byteOrder) + case "FLOAT32": + return determineConverterF32(outType, byteOrder) + case "FLOAT64": + return determineConverterF64(outType, byteOrder) + } + return nil, fmt.Errorf("invalid input data-type: %s", inType) +} diff --git a/plugins/inputs/modbus/type_conversions16.go b/plugins/inputs/modbus/type_conversions16.go new file mode 100644 index 0000000000000..7766e1d0edafe --- /dev/null +++ b/plugins/inputs/modbus/type_conversions16.go @@ -0,0 +1,138 @@ +package modbus + +import ( + "encoding/binary" + "fmt" +) + +type convert16 func([]byte) uint16 + +func endianessConverter16(byteOrder string) (convert16, error) { + switch byteOrder { + case "ABCD": // Big endian (Motorola) + return binary.BigEndian.Uint16, nil + case "DCBA": // Little endian (Intel) + return binary.LittleEndian.Uint16, nil + } + return nil, fmt.Errorf("invalid byte-order: %s", byteOrder) +} + +// I16 - no scale +func determineConverterI16(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter16(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + return int16(tohost(b)) + }, nil + case "INT64": + return func(b []byte) interface{} { + return int64(int16(tohost(b))) + }, nil + case "UINT64": + return func(b []byte) interface{} { + return uint64(int16(tohost(b))) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(int16(tohost(b))) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U16 - no scale +func determineConverterU16(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter16(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + return tohost(b) + }, nil + case "INT64": + return func(b []byte) interface{} { + return int64(tohost(b)) + }, nil + case "UINT64": + return func(b []byte) interface{} { + return uint64(tohost(b)) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(tohost(b)) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// I16 - scale +func determineConverterI16Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter16(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := int16(tohost(b)) + return int16(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := int16(tohost(b)) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := int16(tohost(b)) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := int16(tohost(b)) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U16 - scale +func determineConverterU16Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter16(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := tohost(b) + return uint16(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := tohost(b) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := tohost(b) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := tohost(b) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} diff --git a/plugins/inputs/modbus/type_conversions32.go b/plugins/inputs/modbus/type_conversions32.go new file mode 100644 index 0000000000000..1a0255ef3e8e0 --- /dev/null +++ b/plugins/inputs/modbus/type_conversions32.go @@ -0,0 +1,200 @@ +package modbus + +import ( + "encoding/binary" + "fmt" + "math" +) + +type convert32 func([]byte) uint32 + +func binaryMSWLEU32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(binary.LittleEndian.Uint16(b[0:]))<<16 | uint32(binary.LittleEndian.Uint16(b[2:])) +} + +func binaryLSWBEU32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(binary.BigEndian.Uint16(b[2:]))<<16 | uint32(binary.BigEndian.Uint16(b[0:])) +} + +func endianessConverter32(byteOrder string) (convert32, error) { + switch byteOrder { + case "ABCD": // Big endian (Motorola) + return binary.BigEndian.Uint32, nil + case "BADC": // Big endian with bytes swapped + return binaryMSWLEU32, nil + case "CDAB": // Little endian with bytes swapped + return binaryLSWBEU32, nil + case "DCBA": // Little endian (Intel) + return binary.LittleEndian.Uint32, nil + } + return nil, fmt.Errorf("invalid byte-order: %s", byteOrder) +} + +// I32 - no scale +func determineConverterI32(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + return int32(tohost(b)) + }, nil + case "INT64": + return func(b []byte) interface{} { + return int64(int32(tohost(b))) + }, nil + case "UINT64": + return func(b []byte) interface{} { + return uint64(int32(tohost(b))) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(int32(tohost(b))) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U32 - no scale +func determineConverterU32(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + return tohost(b) + }, nil + case "INT64": + return func(b []byte) interface{} { + return int64(tohost(b)) + }, nil + case "UINT64": + return func(b []byte) interface{} { + return uint64(tohost(b)) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(tohost(b)) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// F32 - no scale +func determineConverterF32(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + raw := tohost(b) + return math.Float32frombits(raw) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + raw := tohost(b) + in := math.Float32frombits(raw) + return float64(in) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// I32 - scale +func determineConverterI32Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := int32(tohost(b)) + return int32(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := int32(tohost(b)) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := int32(tohost(b)) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := int32(tohost(b)) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U32 - scale +func determineConverterU32Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := tohost(b) + return uint32(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := tohost(b) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := tohost(b) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := tohost(b) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// F32 - scale +func determineConverterF32Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + raw := tohost(b) + in := math.Float32frombits(raw) + return float32(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + raw := tohost(b) + in := math.Float32frombits(raw) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} diff --git a/plugins/inputs/modbus/type_conversions64.go b/plugins/inputs/modbus/type_conversions64.go new file mode 100644 index 0000000000000..f72dfdf3af66d --- /dev/null +++ b/plugins/inputs/modbus/type_conversions64.go @@ -0,0 +1,182 @@ +package modbus + +import ( + "encoding/binary" + "fmt" + "math" +) + +type convert64 func([]byte) uint64 + +func binaryMSWLEU64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(binary.LittleEndian.Uint16(b[0:]))<<48 | uint64(binary.LittleEndian.Uint16(b[2:]))<<32 | uint64(binary.LittleEndian.Uint16(b[4:]))<<16 | uint64(binary.LittleEndian.Uint16(b[6:])) +} + +func binaryLSWBEU64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(binary.BigEndian.Uint16(b[6:]))<<48 | uint64(binary.BigEndian.Uint16(b[4:]))<<32 | uint64(binary.BigEndian.Uint16(b[2:]))<<16 | uint64(binary.BigEndian.Uint16(b[0:])) +} + +func endianessConverter64(byteOrder string) (convert64, error) { + switch byteOrder { + case "ABCD": // Big endian (Motorola) + return binary.BigEndian.Uint64, nil + case "BADC": // Big endian with bytes swapped + return binaryMSWLEU64, nil + case "CDAB": // Little endian with bytes swapped + return binaryLSWBEU64, nil + case "DCBA": // Little endian (Intel) + return binary.LittleEndian.Uint64, nil + } + return nil, fmt.Errorf("invalid byte-order: %s", byteOrder) +} + +// I64 - no scale +func determineConverterI64(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native", "INT64": + return func(b []byte) interface{} { + return int64(tohost(b)) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return uint64(in) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return float64(in) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U64 - no scale +func determineConverterU64(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "INT64": + return func(b []byte) interface{} { + return int64(tohost(b)) + }, nil + case "native", "UINT64": + return func(b []byte) interface{} { + return tohost(b) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(tohost(b)) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// F64 - no scale +func determineConverterF64(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native", "FLOAT64": + return func(b []byte) interface{} { + raw := tohost(b) + return math.Float64frombits(raw) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// I64 - scale +func determineConverterI64Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return int64(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U64 - scale +func determineConverterU64Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := tohost(b) + return uint64(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := tohost(b) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := tohost(b) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := tohost(b) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// F64 - scale +func determineConverterF64Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native", "FLOAT64": + return func(b []byte) interface{} { + raw := tohost(b) + in := math.Float64frombits(raw) + return in * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} From ad6d25aebc26145cdfe6fdc8f97580a54a29cc6c Mon Sep 17 00:00:00 2001 From: Jess Ingrassellino Date: Tue, 1 Jun 2021 17:05:49 -0400 Subject: [PATCH 441/761] Addresses issues in PR 4928 (#9087) --- internal/internal.go | 30 +++++++ internal/internal_test.go | 178 +++++++++++++++++++++++++++++++++++++- 2 files changed, 205 insertions(+), 3 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index 636d7f06a7014..055ea361c3d26 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -332,6 +332,36 @@ func parseTime(format string, timestamp interface{}, location string) (time.Time if err != nil { return time.Unix(0, 0), err } + switch strings.ToLower(format) { + case "ansic": + format = time.ANSIC + case "unixdate": + format = time.UnixDate + case "rubydate": + format = time.RubyDate + case "rfc822": + format = time.RFC822 + case "rfc822z": + format = time.RFC822Z + case "rfc850": + format = time.RFC850 + case "rfc1123": + format = time.RFC1123 + case "rfc1123z": + format = time.RFC1123Z + case "rfc3339": + format = time.RFC3339 + case "rfc3339nano": + format = time.RFC3339Nano + case "stamp": + format = time.Stamp + case "stampmilli": + format = time.StampMilli + case "stampmicro": + format = time.StampMicro + case "stampnano": + format = time.StampNano + } return time.ParseInLocation(format, ts, loc) default: return time.Unix(0, 0), errors.New("unsupported type") diff --git a/internal/internal_test.go b/internal/internal_test.go index 2bed302ee0a11..7cb56d5324f06 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -321,9 +321,84 @@ func TestAlignTime(t *testing.T) { func TestParseTimestamp(t *testing.T) { rfc3339 := func(value string) time.Time { tm, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - panic(err) - } + require.NoError(t, err) + return tm + } + ansic := func(value string) time.Time { + tm, err := time.Parse(time.ANSIC, value) + require.NoError(t, err) + return tm + } + + unixdate := func(value string) time.Time { + tm, err := time.Parse(time.UnixDate, value) + require.NoError(t, err) + return tm + } + + rubydate := func(value string) time.Time { + tm, err := time.Parse(time.RubyDate, value) + require.NoError(t, err) + return tm + } + + rfc822 := func(value string) time.Time { + tm, err := time.Parse(time.RFC822, value) + require.NoError(t, err) + return tm + } + + rfc822z := func(value string) time.Time { + tm, err := time.Parse(time.RFC822Z, value) + require.NoError(t, err) + return tm + } + + rfc850 := func(value string) time.Time { + tm, err := time.Parse(time.RFC850, value) + require.NoError(t, err) + return tm + } + + rfc1123 := func(value string) time.Time { + tm, err := time.Parse(time.RFC1123, value) + require.NoError(t, err) + return tm + } + + rfc1123z := func(value string) time.Time { + tm, err := time.Parse(time.RFC1123Z, value) + require.NoError(t, err) + return tm + } + + rfc3339nano := func(value string) time.Time { + tm, err := time.Parse(time.RFC3339Nano, value) + require.NoError(t, err) + return tm + } + + stamp := func(value string) time.Time { + tm, err := time.Parse(time.Stamp, value) + require.NoError(t, err) + return tm + } + + stampmilli := func(value string) time.Time { + tm, err := time.Parse(time.StampMilli, value) + require.NoError(t, err) + return tm + } + + stampmicro := func(value string) time.Time { + tm, err := time.Parse(time.StampMicro, value) + require.NoError(t, err) + return tm + } + + stampnano := func(value string) time.Time { + tm, err := time.Parse(time.StampNano, value) + require.NoError(t, err) return tm } @@ -421,6 +496,103 @@ func TestParseTimestamp(t *testing.T) { timestamp: "1568338208000000500", expected: rfc3339("2019-09-13T01:30:08.000000500Z"), }, + { + name: "rfc339 test", + format: "RFC3339", + timestamp: "2018-10-26T13:30:33Z", + expected: rfc3339("2018-10-26T13:30:33Z"), + }, + + { + name: "ANSIC", + format: "ANSIC", + timestamp: "Mon Jan 2 15:04:05 2006", + expected: ansic("Mon Jan 2 15:04:05 2006"), + }, + + { + name: "UnixDate", + format: "UnixDate", + timestamp: "Mon Jan 2 15:04:05 MST 2006", + expected: unixdate("Mon Jan 2 15:04:05 MST 2006"), + }, + + { + name: "RubyDate", + format: "RubyDate", + timestamp: "Mon Jan 02 15:04:05 -0700 2006", + expected: rubydate("Mon Jan 02 15:04:05 -0700 2006"), + }, + + { + name: "RFC822", + format: "RFC822", + timestamp: "02 Jan 06 15:04 MST", + expected: rfc822("02 Jan 06 15:04 MST"), + }, + + { + name: "RFC822Z", + format: "RFC822Z", + timestamp: "02 Jan 06 15:04 -0700", + expected: rfc822z("02 Jan 06 15:04 -0700"), + }, + + { + name: "RFC850", + format: "RFC850", + timestamp: "Monday, 02-Jan-06 15:04:05 MST", + expected: rfc850("Monday, 02-Jan-06 15:04:05 MST"), + }, + + { + name: "RFC1123", + format: "RFC1123", + timestamp: "Mon, 02 Jan 2006 15:04:05 MST", + expected: rfc1123("Mon, 02 Jan 2006 15:04:05 MST"), + }, + + { + name: "RFC1123Z", + format: "RFC1123Z", + timestamp: "Mon, 02 Jan 2006 15:04:05 -0700", + expected: rfc1123z("Mon, 02 Jan 2006 15:04:05 -0700"), + }, + + { + name: "RFC3339Nano", + format: "RFC3339Nano", + timestamp: "2006-01-02T15:04:05.999999999-07:00", + expected: rfc3339nano("2006-01-02T15:04:05.999999999-07:00"), + }, + + { + name: "Stamp", + format: "Stamp", + timestamp: "Jan 2 15:04:05", + expected: stamp("Jan 2 15:04:05"), + }, + + { + name: "StampMilli", + format: "StampMilli", + timestamp: "Jan 2 15:04:05.000", + expected: stampmilli("Jan 2 15:04:05.000"), + }, + + { + name: "StampMicro", + format: "StampMicro", + timestamp: "Jan 2 15:04:05.000000", + expected: stampmicro("Jan 2 15:04:05.000000"), + }, + + { + name: "StampNano", + format: "StampNano", + timestamp: "Jan 2 15:04:05.000000000", + expected: stampnano("Jan 2 15:04:05.000000000"), + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From aa837476d7fab25ff633a7c7d748c7572e830bbc Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 1 Jun 2021 14:09:37 -0700 Subject: [PATCH 442/761] update github maintainer docs (#9311) --- docs/maintainers/LABELS.md | 60 +++++++++++++++++++++++++------ docs/maintainers/PULL_REQUESTS.md | 16 +++++---- 2 files changed, 58 insertions(+), 18 deletions(-) diff --git a/docs/maintainers/LABELS.md b/docs/maintainers/LABELS.md index 72840394a94bb..1ee6cc7517c74 100644 --- a/docs/maintainers/LABELS.md +++ b/docs/maintainers/LABELS.md @@ -6,9 +6,9 @@ issue tracker. ## Categories -New issues are usually labeled one of `feature request`, `bug`, or `question`. -If you are unsure what label to apply you can use the `need more info` label -and if there is another issue you can add the duplicate label and close the +New issues are automatically labeled `feature request`, `bug`, or `support`. +If you are unsure what problem the author is proposing, you can use the `need more info` label +and if there is another issue you can add the `closed/duplicate` label and close the new issue. New pull requests are usually labeled one of `enhancement`, `bugfix` or `new @@ -17,18 +17,56 @@ plugin`. ## Additional Labels Apply any of the `area/*` labels that match. If an area doesn't exist, new -ones can be added but it is not a goal to have an area for all issues. +ones can be added but **it is not a goal to have an area for all issues.** If the issue only applies to one platform, you can use a `platform/*` label. These are only applied to single platform issues which are not on Linux. -The `breaking change` label can be added to issues and pull requests that -would result in a breaking change. - -Apply `performance` to issues and pull requests that address performance -issues. - For bugs you may want to add `panic`, `regression`, or `upstream` to provide further detail. -Labels starting with `pm` or `vert` are not applied by maintainers. +Summary of Labels: +| Label | Description | Purpose | +| --- | ----------- | ---| +| `area/*` | These labels each corresponding to a plugin or group of plugins that can be added to identify the affected plugin or group of plugins | categorization | +| `breaking change` | Improvement to Telegraf that requires breaking changes to the plugin or agent; for minor/major releases | triage | +| `bug` | New issue for an existing component of Telegraf | triage | +| `cloud` | Issues or request around cloud environments | categorization | +| `dependencies` | Pull requests that update a dependency file | triage | +| `discussion` | Issues open for discussion | community/categorization | +| `documentation` | Issues related to Telegraf documentation and configuration descriptions | categorization | +| `error handling` | Issues related to error handling | categorization | +| `external plugin` | Plugins that would be ideal external plugin and expedite being able to use plugin w/ Telegraf | categorization | +| `good first issue` | This is a smaller issue suited for getting started in Telegraf, Golang, and contributing to OSS | community | +| `help wanted` | Request for community participation, code, contribution | community | +| `need more info` | Issue triaged but outstanding questions remain | community | +| `performance` | Issues or PRs that address performance issues | categorization| +| `platform/*` | Issues that only apply to one platform | categorization | +| `plugin/*` | 1. Request for new * plugins 2. Issues/PRs that are related to * plugins | categorization | +| `ready for final review` | Pull request has been reviewed and/or tested by multiple users and is ready for a final review | triage | +| `rfc` | Request for comment - larger topics of discussion that are looking for feedback | community | +| `support` |Telegraf questions, may be directed to community site or slack | triage | +| `upstream` | Bug or issues that rely on dependency fixes and we cannot fix independently | triage | +| `waiting for response` | Waiting for response from contributor | community/triage | +| `wip` | PR still Work In Progress, not ready for detailed review | triage | + +Labels starting with `pm` are not applied by maintainers. + +## Closing Issues + +We close issues for the following reasons: + +| Label | Reason | +| --- | ----------- | +| `closed/as-designed` | Labels to be used when closing an issue or PR with short description why it was closed | +| `closed/duplicate` | This issue or pull request already exists | +| `closed/external-candidate` | The feature request is best implemented by an external plugin | +| `closed/external-issue` | The feature request is best implemented by an external plugin | +| `closed/needs more info` | Did not receive the information we need within 3 months from last activity on issue | +| `closed/not-reproducible` | Given the information we have we can't reproduce the issue | +| `closed/out-of-scope` | The feature request is out of scope for Telegraf - highly unlikely to be worked on | +| `closed/question` | This issue is a support question, directed to community site or slack | + + + + diff --git a/docs/maintainers/PULL_REQUESTS.md b/docs/maintainers/PULL_REQUESTS.md index e7b26c10fca69..8273d95e52e91 100644 --- a/docs/maintainers/PULL_REQUESTS.md +++ b/docs/maintainers/PULL_REQUESTS.md @@ -2,8 +2,8 @@ ## Before Review -Ensure that the CLA is signed. The only exemption would be non-copyrightable -changes such as fixing a typo. +Ensure that the CLA is signed (the `telegraf-tiger` bot performs this check). The +only exemption would be non-copyrightable changes such as fixing a typo. Check that all tests are passing. Due to intermittent errors in the CI tests it may be required to check the cause of test failures and restart failed @@ -35,13 +35,15 @@ This method is used because many pull requests do not have a clean change history and this method allows us to normalize commit messages as well as simplifies backporting. +### Rewriting the commit message After selecting "Squash and Merge" you may need to rewrite the commit message. Usually the body of the commit messages should be cleared as well, unless it -is well written and applies to the entire changeset. Use imperative present -tense for the first line of the message: instead of "I added tests for" or -"Adding tests for," use "Add tests for.". The default merge commit messages -include the PR number at the end of the commit message, keep this in the final -message. If applicable mention the plugin in the message. +is well written and applies to the entire changeset. +- Use imperative present tense for the first line of the message: + - Use "Add tests for" (instead of "I added tests for" or "Adding tests for") +- The default merge commit messages include the PR number at the end of the +commit message, keep this in the final message. +- If applicable mention the plugin in the message. **Example Enhancement:** From db0b6de140eccd291e199ca7e50bc80c4540a881 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Tue, 1 Jun 2021 14:18:31 -0700 Subject: [PATCH 443/761] Add HTTP proxy to datadog output (#9297) --- plugins/outputs/datadog/README.md | 3 +++ plugins/outputs/datadog/datadog.go | 12 +++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/datadog/README.md b/plugins/outputs/datadog/README.md index ad1c7a02592e1..f9dd3fb0ef922 100644 --- a/plugins/outputs/datadog/README.md +++ b/plugins/outputs/datadog/README.md @@ -16,6 +16,9 @@ This plugin writes to the [Datadog Metrics API][metrics] and requires an ## Write URL override; useful for debugging. # url = "https://app.datadoghq.com/api/v1/series" + + ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) + # http_proxy_url = "http://localhost:8888" ``` ### Metrics diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 0e019de0eb334..47d8a4e91a43b 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -12,6 +12,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -22,6 +23,7 @@ type Datadog struct { Log telegraf.Logger `toml:"-"` client *http.Client + proxy.HTTPProxy } var sampleConfig = ` @@ -33,6 +35,9 @@ var sampleConfig = ` ## Write URL override; useful for debugging. # url = "https://app.datadoghq.com/api/v1/series" + + ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) + # http_proxy_url = "http://localhost:8888" ` type TimeSeries struct { @@ -55,9 +60,14 @@ func (d *Datadog) Connect() error { return fmt.Errorf("apikey is a required field for datadog output") } + proxyFunc, err := d.Proxy() + if err != nil { + return err + } + d.client = &http.Client{ Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, + Proxy: proxyFunc, }, Timeout: time.Duration(d.Timeout), } From dfed0e8bb7fbc3116e289f9a1b1ea370a1a18f00 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 2 Jun 2021 11:34:50 -0400 Subject: [PATCH 444/761] Clarify Init errors --- docs/developers/REVIEWS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/developers/REVIEWS.md b/docs/developers/REVIEWS.md index d7e016530e4d4..0f036d225b7ba 100644 --- a/docs/developers/REVIEWS.md +++ b/docs/developers/REVIEWS.md @@ -39,6 +39,7 @@ So in case you expect a longer period of inactivity or you want to abandon a pul ``` (in tests, you can do `myPlugin.Log = testutil.Logger{}`) - Initialization and config checking should be done on the `Init() error` function, not in the Connect, Gather, or Start functions. +- `Init() error` should not contain connections to external services. If anything fails in Init, Telegraf will consider it a configuration error and refuse to start. - plugins should avoid synchronization code if they are not starting goroutines. Plugin functions are never called in parallel. - avoid goroutines when you don't need them and removing them would simplify the code - errors should almost always be checked. From b1d6730c9b9bc4cca97a7bc0614559e4527adc12 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 2 Jun 2021 11:45:32 -0400 Subject: [PATCH 445/761] fix link --- docs/maintainers/PULL_REQUESTS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/maintainers/PULL_REQUESTS.md b/docs/maintainers/PULL_REQUESTS.md index 8273d95e52e91..90c49fd5af689 100644 --- a/docs/maintainers/PULL_REQUESTS.md +++ b/docs/maintainers/PULL_REQUESTS.md @@ -19,7 +19,7 @@ contributor to merge or rebase. ## Review -[Review the pull request](docs/developers/REVIEWS.md). +[Review the pull request](https://github.com/influxdata/telegraf/blob/master/docs/developers/REVIEWS.md). ## Merge From aa427ed812cdbacdd6608c4e7697d2a9e713090e Mon Sep 17 00:00:00 2001 From: reimda Date: Wed, 2 Jun 2021 13:19:33 -0600 Subject: [PATCH 446/761] Parallel build fixes (#9326) --- .circleci/config.yml | 5 ++++- Makefile | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 26758b951a26e..4296082e04c08 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -73,7 +73,10 @@ commands: - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 NIGHTLY=1 make package' - run: 'make upload-nightly' - unless: - condition: << parameters.nightly >> + condition: + or: + - << parameters.nightly >> + - << parameters.release >> steps: - run: '<< parameters.type >>=1 make package' - store_artifacts: diff --git a/Makefile b/Makefile index ee9c1b71d9ac9..1537e0f05eceb 100644 --- a/Makefile +++ b/Makefile @@ -247,7 +247,7 @@ endif ifdef amd64 tars += telegraf-$(tar_version)_freebsd_amd64.tar.gz tars += telegraf-$(tar_version)_linux_amd64.tar.gz -debs := telegraf_$(deb_version)_amd64.deb +debs += telegraf_$(deb_version)_amd64.deb rpms += telegraf-$(rpm_version).x86_64.rpm endif From 58a90783f57329dfb87ef53ec016fe06c3415ab0 Mon Sep 17 00:00:00 2001 From: Alexey Kuzyashin <33540273+Kuzyashin@users.noreply.github.com> Date: Thu, 3 Jun 2021 06:22:15 +0300 Subject: [PATCH 447/761] Add telegraf url env var (#8987) --- config/config.go | 10 ++++++++++ config/config_test.go | 16 ++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/config/config.go b/config/config.go index 0391a3c1ad11d..88d6eedcef7df 100644 --- a/config/config.go +++ b/config/config.go @@ -712,6 +712,10 @@ func getDefaultConfigPath() (string, error) { etcfile = programFiles + `\Telegraf\telegraf.conf` } for _, path := range []string{envfile, homefile, etcfile} { + if isURL(path) { + log.Printf("I! Using config url: %s", path) + return path, nil + } if _, err := os.Stat(path); err == nil { log.Printf("I! Using config file: %s", path) return path, nil @@ -723,6 +727,12 @@ func getDefaultConfigPath() (string, error) { " in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile) } +// isURL checks if string is valid url +func isURL(str string) bool { + u, err := url.Parse(str) + return err == nil && u.Scheme != "" && u.Host != "" +} + // LoadConfig loads the given config file and applies it to c func (c *Config) LoadConfig(path string) error { var err error diff --git a/config/config_test.go b/config/config_test.go index 91d0a81e8dc4a..940b84ada7773 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -324,6 +324,22 @@ func TestConfig_URLRetries3FailsThenPasses(t *testing.T) { require.Equal(t, 4, responseCounter) } +func TestConfig_getDefaultConfigPathFromEnvURL(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + c := NewConfig() + err := os.Setenv("TELEGRAF_CONFIG_PATH", ts.URL) + require.NoError(t, err) + configPath, err := getDefaultConfigPath() + require.NoError(t, err) + require.Equal(t, ts.URL, configPath) + err = c.LoadConfig("") + require.NoError(t, err) +} + func TestConfig_URLLikeFileName(t *testing.T) { c := NewConfig() err := c.LoadConfig("http:##www.example.com.conf") From 3ad40df3116b0f123a0e864645e4cbfd6d1a018f Mon Sep 17 00:00:00 2001 From: Pascal Zimmermann Date: Thu, 3 Jun 2021 05:28:16 +0200 Subject: [PATCH 448/761] Update pgx to v4 (#9182) --- docker-compose.yml | 2 +- docs/LICENSE_OF_DEPENDENCIES.md | 10 ++- etc/telegraf.conf | 4 +- go.mod | 4 +- go.sum | 76 ++++++++++++++++++- plugins/inputs/pgbouncer/pgbouncer.go | 2 +- plugins/inputs/pgbouncer/pgbouncer_test.go | 27 ++++--- plugins/inputs/postgresql/postgresql.go | 2 +- plugins/inputs/postgresql/service.go | 46 ++++------- .../postgresql_extensible.go | 2 +- plugins/outputs/cratedb/README.md | 4 +- plugins/outputs/cratedb/cratedb.go | 6 +- 12 files changed, 125 insertions(+), 60 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 3c929f656b7de..bd092d0718388 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -48,7 +48,7 @@ services: ports: - "11211:11211" pgbouncer: - image: mbentley/ubuntu-pgbouncer + image: z9pascal/pgbouncer-container:1.15-latest environment: - PG_ENV_POSTGRESQL_USER=pgbouncer - PG_ENV_POSTGRESQL_PASS=pgbouncer diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 605ee4073b1e0..c1570f7b7badd 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -110,6 +110,13 @@ following works: - github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt) - github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE) - github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE) +- github.com/jackc/chunkreader [MIT License](https://github.com/jackc/chunkreader/blob/master/LICENSE) +- github.com/jackc/pgconn [MIT License](https://github.com/jackc/pgconn/blob/master/LICENSE) +- github.com/jackc/pgio [MIT License](https://github.com/jackc/pgio/blob/master/LICENSE) +- github.com/jackc/pgpassfile [MIT License](https://github.com/jackc/pgpassfile/blob/master/LICENSE) +- github.com/jackc/pgproto3 [MIT License](https://github.com/jackc/pgproto3/blob/master/LICENSE) +- github.com/jackc/pgservicefile [MIT License](https://github.com/jackc/pgservicefile/blob/master/LICENSE) +- github.com/jackc/pgtype [MIT License](https://github.com/jackc/pgtype/blob/master/LICENSE) - github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE) - github.com/jaegertracing/jaeger [Apache License 2.0](https://github.com/jaegertracing/jaeger/blob/master/LICENSE) - github.com/james4k/rcon [MIT License](https://github.com/james4k/rcon/blob/master/LICENSE) @@ -202,6 +209,7 @@ following works: - golang.org/x/term [BSD 3-Clause License](https://pkg.go.dev/golang.org/x/term?tab=licenses) - golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE) - golang.org/x/time [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE) +- golang.org/x/xerrors [BSD 3-Clause Clear License](https://github.com/golang/xerrors/blob/master/LICENSE) - golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE) @@ -237,4 +245,4 @@ following works: - sigs.k8s.io/structured-merge-diff [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - sigs.k8s.io/yaml [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) ## telegraf used and modified code from these projects -- github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) +- github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) \ No newline at end of file diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 1c20bc28e0284..cd38899db22c5 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -455,8 +455,8 @@ # # Configuration for CrateDB to send metrics to. # [[outputs.cratedb]] -# # A github.com/jackc/pgx connection string. -# # See https://godoc.org/github.com/jackc/pgx#ParseDSN +# # A github.com/jackc/pgx/v4 connection string. +# # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig # url = "postgres://user:password@localhost/schema?sslmode=disable" # # Timeout for all CrateDB queries. # timeout = "5s" diff --git a/go.mod b/go.mod index 6405514eb9fce..070e883d7d4e3 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,6 @@ require ( github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 github.com/caio/go-tdigest v3.1.0+incompatible github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 - github.com/cockroachdb/apd v1.1.0 // indirect github.com/containerd/containerd v1.4.1 // indirect github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect @@ -79,8 +78,7 @@ require ( github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 - github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect - github.com/jackc/pgx v3.6.0+incompatible + github.com/jackc/pgx/v4 v4.6.0 github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a github.com/jmespath/go-jmespath v0.4.0 github.com/kardianos/service v1.0.0 diff --git a/go.sum b/go.sum index b21a2fe1a00e4..03c5229f8d46d 100644 --- a/go.sum +++ b/go.sum @@ -236,6 +236,8 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= @@ -337,6 +339,7 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -486,6 +489,7 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -662,10 +666,47 @@ github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q= -github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.5.0 h1:oFSOilzIZkyg787M1fEmyMfOUUvwj0daqYMfaWwNL4o= +github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1 h1:Rdjp4NFjwHnEslx2b66FfCI2S0LhO4itac3hXz6WX9M= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 h1:Q3tB+ExeflWUW7AFcAhXqk40s9mnNYLk1nOkKNZ5GnU= +github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.3.0 h1:l8JvKrby3RI7Kg3bYEeU9TA4vqC38QDpFCfcrC7KuN0= +github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik= +github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o= +github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.6.0 h1:Fh0O9GdlG4gYpjpwOqjdEodJUQM9jzN3Hdv7PN0xmm0= +github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jaegertracing/jaeger v1.15.1 h1:7QzNAXq+4ko9GtCjozDNAp2uonoABu+B2Rk94hjQcp4= github.com/jaegertracing/jaeger v1.15.1/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOOxihXx0+53llXI= github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0= @@ -739,6 +780,7 @@ github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -749,6 +791,8 @@ github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= @@ -765,12 +809,17 @@ github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= @@ -878,6 +927,7 @@ github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= @@ -968,6 +1018,9 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -988,6 +1041,7 @@ github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMT github.com/shirou/gopsutil v3.21.3+incompatible h1:uenXGGa8ESCQq+dbgtl916dmg6PSAz2cXov0uORQ9v8= github.com/shirou/gopsutil v3.21.3+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= @@ -1099,6 +1153,7 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -1116,6 +1171,7 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.starlark.net v0.0.0-20210406145628-7a1108eaa012 h1:4RGobP/iq7S22H0Bb92OEt+M8/cfBQnW+T+a2MC0sQo= go.starlark.net v0.0.0-20210406145628-7a1108eaa012/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= @@ -1135,6 +1191,7 @@ golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1142,6 +1199,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1283,6 +1341,7 @@ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1354,6 +1413,7 @@ golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1363,7 +1423,12 @@ golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190906203814-12febf440ab1/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1399,6 +1464,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1518,6 +1585,7 @@ gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1 gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index 2547a617e14d7..fead359d2271f 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -8,7 +8,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/postgresql" - _ "github.com/jackc/pgx/stdlib" // register driver + _ "github.com/jackc/pgx/v4/stdlib" // register driver ) type PgBouncer struct { diff --git a/plugins/inputs/pgbouncer/pgbouncer_test.go b/plugins/inputs/pgbouncer/pgbouncer_test.go index 7de58a78bd013..7dd75fb4ae487 100644 --- a/plugins/inputs/pgbouncer/pgbouncer_test.go +++ b/plugins/inputs/pgbouncer/pgbouncer_test.go @@ -11,7 +11,7 @@ import ( ) func TestPgBouncerGeneratesMetricsIntegration(t *testing.T) { - t.Skip("Skipping due to not allowed (SQLSTATE 08P01)") + t.Skip("Skipping test, connection refused") p := &PgBouncer{ Service: postgresql.Service{ @@ -27,15 +27,19 @@ func TestPgBouncerGeneratesMetricsIntegration(t *testing.T) { require.NoError(t, p.Start(&acc)) require.NoError(t, p.Gather(&acc)) - intMetrics := []string{ - "total_requests", + // Return value of pgBouncer + // [pgbouncer map[db:pgbouncer server:host=localhost user=pgbouncer dbname=pgbouncer port=6432 ] map[avg_query_count:0 avg_query_time:0 avg_wait_time:0 avg_xact_count:0 avg_xact_time:0 total_query_count:3 total_query_time:0 total_received:0 total_sent:0 total_wait_time:0 total_xact_count:3 total_xact_time:0] 1620163750039747891 pgbouncer_pools map[db:pgbouncer pool_mode:statement server:host=localhost user=pgbouncer dbname=pgbouncer port=6432 user:pgbouncer] map[cl_active:1 cl_waiting:0 maxwait:0 maxwait_us:0 sv_active:0 sv_idle:0 sv_login:0 sv_tested:0 sv_used:0] 1620163750041444466] + + intMetricsPgBouncer := []string{ "total_received", "total_sent", "total_query_time", - "avg_req", - "avg_recv", - "avg_sent", - "avg_query", + "avg_query_count", + "avg_query_time", + "avg_wait_time", + } + + intMetricsPgBouncerPools := []string{ "cl_active", "cl_waiting", "sv_active", @@ -50,16 +54,21 @@ func TestPgBouncerGeneratesMetricsIntegration(t *testing.T) { metricsCounted := 0 - for _, metric := range intMetrics { + for _, metric := range intMetricsPgBouncer { assert.True(t, acc.HasInt64Field("pgbouncer", metric)) metricsCounted++ } + for _, metric := range intMetricsPgBouncerPools { + assert.True(t, acc.HasInt64Field("pgbouncer_pools", metric)) + metricsCounted++ + } + for _, metric := range int32Metrics { assert.True(t, acc.HasInt32Field("pgbouncer", metric)) metricsCounted++ } assert.True(t, metricsCounted > 0) - assert.Equal(t, len(intMetrics)+len(int32Metrics), metricsCounted) + assert.Equal(t, len(intMetricsPgBouncer)+len(intMetricsPgBouncerPools)+len(int32Metrics), metricsCounted) } diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 1f1c22dc00e21..a90f571b7a7a0 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -6,7 +6,7 @@ import ( "strings" // register in driver. - _ "github.com/jackc/pgx/stdlib" + _ "github.com/jackc/pgx/v4/stdlib" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index db4438e416939..2d98f145b892e 100644 --- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -10,9 +10,8 @@ import ( "strings" "time" - "github.com/jackc/pgx" - "github.com/jackc/pgx/pgtype" - "github.com/jackc/pgx/stdlib" + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/stdlib" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ -90,7 +89,7 @@ func parseURL(uri string) (string, error) { // packages. type Service struct { Address string - Outputaddress string + OutputAddress string MaxIdle int MaxOpen int MaxLifetime config.Duration @@ -111,33 +110,16 @@ func (p *Service) Start(telegraf.Accumulator) (err error) { // Specific support to make it work with PgBouncer too // See https://github.com/influxdata/telegraf/issues/3253#issuecomment-357505343 if p.IsPgBouncer { - d := &stdlib.DriverConfig{ - ConnConfig: pgx.ConnConfig{ - PreferSimpleProtocol: true, - RuntimeParams: map[string]string{ - "client_encoding": "UTF8", - }, - CustomConnInfo: func(c *pgx.Conn) (*pgtype.ConnInfo, error) { - info := c.ConnInfo.DeepCopy() - info.RegisterDataType(pgtype.DataType{ - Value: &pgtype.OIDValue{}, - Name: "int8OID", - OID: pgtype.Int8OID, - }) - // Newer versions of pgbouncer need this defined. See the discussion here: - // https://github.com/jackc/pgx/issues/649 - info.RegisterDataType(pgtype.DataType{ - Value: &pgtype.OIDValue{}, - Name: "numericOID", - OID: pgtype.NumericOID, - }) - - return info, nil - }, - }, + // Remove DriveConfig and revert it by the ParseConfig method + // See https://github.com/influxdata/telegraf/issues/9134 + d, err := pgx.ParseConfig(p.Address) + if err != nil { + return err } - stdlib.RegisterDriverConfig(d) - connectionString = d.ConnectionString(p.Address) + + d.PreferSimpleProtocol = true + + connectionString = stdlib.RegisterConnConfig(d) } if p.DB, err = sql.Open("pgx", connectionString); err != nil { @@ -166,8 +148,8 @@ func (p *Service) SanitizedAddress() (sanitizedAddress string, err error) { canonicalizedAddress string ) - if p.Outputaddress != "" { - return p.Outputaddress, nil + if p.OutputAddress != "" { + return p.OutputAddress, nil } if strings.HasPrefix(p.Address, "postgres://") || strings.HasPrefix(p.Address, "postgresql://") { diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index ef66c26cf9e7e..8311064b1f060 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -8,7 +8,7 @@ import ( "strings" "time" - _ "github.com/jackc/pgx/stdlib" //to register stdlib from PostgreSQL Driver and Toolkit + _ "github.com/jackc/pgx/v4/stdlib" //to register stdlib from PostgreSQL Driver and Toolkit "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" diff --git a/plugins/outputs/cratedb/README.md b/plugins/outputs/cratedb/README.md index a8a01fdfe99d7..50386fbbc94d1 100644 --- a/plugins/outputs/cratedb/README.md +++ b/plugins/outputs/cratedb/README.md @@ -26,8 +26,8 @@ config option, see below. ```toml # Configuration for CrateDB to send metrics to. [[outputs.cratedb]] - # A github.com/jackc/pgx connection string. - # See https://godoc.org/github.com/jackc/pgx#ParseDSN + # A github.com/jackc/pgx/v4 connection string. + # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig url = "postgres://user:password@localhost/schema?sslmode=disable" # Timeout for all CrateDB queries. timeout = "5s" diff --git a/plugins/outputs/cratedb/cratedb.go b/plugins/outputs/cratedb/cratedb.go index 6e43b58f71563..a28e29dc0e47c 100644 --- a/plugins/outputs/cratedb/cratedb.go +++ b/plugins/outputs/cratedb/cratedb.go @@ -14,7 +14,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" - _ "github.com/jackc/pgx/stdlib" //to register stdlib from PostgreSQL Driver and Toolkit + _ "github.com/jackc/pgx/v4/stdlib" //to register stdlib from PostgreSQL Driver and Toolkit ) const MaxInt64 = int64(^uint64(0) >> 1) @@ -28,8 +28,8 @@ type CrateDB struct { } var sampleConfig = ` - # A github.com/jackc/pgx connection string. - # See https://godoc.org/github.com/jackc/pgx#ParseDSN + # A github.com/jackc/pgx/v4 connection string. + # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig url = "postgres://user:password@localhost/schema?sslmode=disable" # Timeout for all CrateDB queries. timeout = "5s" From ee44aee1ca5652ce598568bef0c973ed0b19be1a Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 3 Jun 2021 09:53:15 -0400 Subject: [PATCH 449/761] clarify docs around shim plugin loading --- plugins/common/shim/example/cmd/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/common/shim/example/cmd/main.go b/plugins/common/shim/example/cmd/main.go index 7326cc492476c..ddabaa5da2a81 100644 --- a/plugins/common/shim/example/cmd/main.go +++ b/plugins/common/shim/example/cmd/main.go @@ -30,7 +30,7 @@ var err error // // shim.AddInput(myInput) // -// // now the shim.Run() call as below. +// // now the shim.Run() call as below. Note the shim is only intended to run a single plugin. // func main() { // parse command line options @@ -52,7 +52,7 @@ func main() { os.Exit(1) } - // run the input plugin(s) until stdin closes or we receive a termination signal + // run a single plugin until stdin closes or we receive a termination signal if err := shim.Run(*pollInterval); err != nil { fmt.Fprintf(os.Stderr, "Err: %s\n", err) os.Exit(1) From e289612ff31a4e82f311a67892523769d0df2e9e Mon Sep 17 00:00:00 2001 From: reimda Date: Thu, 3 Jun 2021 22:49:55 -0600 Subject: [PATCH 450/761] Add SQL output plugin (#9280) --- README.md | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 16 +- go.mod | 27 +- go.sum | 400 ++++++++++++++++-- plugins/outputs/all/all.go | 1 + plugins/outputs/sql/README.md | 150 +++++++ plugins/outputs/sql/sql.go | 277 ++++++++++++ plugins/outputs/sql/sql_test.go | 329 ++++++++++++++ plugins/outputs/sql/sqlite.go | 15 + plugins/outputs/sql/sqlite_test.go | 134 ++++++ .../outputs/sql/testdata/mariadb/expected.sql | 36 ++ .../sql/testdata/mariadb/initdb/script.sql | 4 + .../sql/testdata/postgres/expected.sql | 41 ++ .../sql/testdata/postgres/initdb/init.sql | 2 + 14 files changed, 1373 insertions(+), 60 deletions(-) create mode 100644 plugins/outputs/sql/README.md create mode 100644 plugins/outputs/sql/sql.go create mode 100644 plugins/outputs/sql/sql_test.go create mode 100644 plugins/outputs/sql/sqlite.go create mode 100644 plugins/outputs/sql/sqlite_test.go create mode 100644 plugins/outputs/sql/testdata/mariadb/expected.sql create mode 100644 plugins/outputs/sql/testdata/mariadb/initdb/script.sql create mode 100644 plugins/outputs/sql/testdata/postgres/expected.sql create mode 100644 plugins/outputs/sql/testdata/postgres/initdb/init.sql diff --git a/README.md b/README.md index d6c3e7fd4c7b0..58254877ca1a0 100644 --- a/README.md +++ b/README.md @@ -326,6 +326,7 @@ For documentation on the latest development code see the [documentation index][d * [solr](./plugins/inputs/solr) * [sql server](./plugins/inputs/sqlserver) (microsoft) * [stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring) +* [sql](./plugins/outputs/sql) (SQL generic output) * [statsd](./plugins/inputs/statsd) * [suricata](./plugins/inputs/suricata) * [swap](./plugins/inputs/swap) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index c1570f7b7badd..5cb8b917be17f 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -10,6 +10,7 @@ following works: - github.com/Azure/azure-event-hubs-go [MIT License](https://github.com/Azure/azure-event-hubs-go/blob/master/LICENSE) - github.com/Azure/azure-pipeline-go [MIT License](https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE) - github.com/Azure/azure-sdk-for-go [Apache License 2.0](https://github.com/Azure/azure-sdk-for-go/blob/master/LICENSE) +- github.com/Azure/azure-storage-blob-go [MIT License](https://github.com/Azure/azure-storage-blob-go/blob/master/LICENSE) - github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE) - github.com/Azure/go-amqp [MIT License](https://github.com/Azure/go-amqp/blob/master/LICENSE) - github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) @@ -23,6 +24,7 @@ following works: - github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) - github.com/antchfx/xmlquery [MIT License](https://github.com/antchfx/xmlquery/blob/master/LICENSE) - github.com/antchfx/xpath [MIT License](https://github.com/antchfx/xpath/blob/master/LICENSE) +- github.com/apache/arrow/go/arrow [Apache License 2.0](https://github.com/apache/arrow/blob/master/LICENSE.txt) - github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) - github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE) - github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING) @@ -32,8 +34,12 @@ following works: - github.com/aws/aws-sdk-go-v2/config [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/credentials [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/ec2/imds [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/ec2/imds/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/feature/s3/manager [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/ec2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/accept-encoding/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/presigned-url/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/internal/s3shared [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/s3shared/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/s3 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/s3/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/sso [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/sts [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/sts/LICENSE.txt) - github.com/aws/smithy-go [Apache License 2.0](https://github.com/aws/smithy-go/blob/main/LICENSE) @@ -80,10 +86,12 @@ following works: - github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE) - github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE) - github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE) +- github.com/google/flatbuffers [Apache License 2.0](https://github.com/google/flatbuffers/blob/master/LICENSE.txt) - github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-cmp/blob/master/LICENSE) - github.com/google/go-github [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-github/blob/master/LICENSE) - github.com/google/go-querystring [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-querystring/blob/master/LICENSE) - github.com/google/gofuzz [Apache License 2.0](https://github.com/google/gofuzz/blob/master/LICENSE) +- github.com/google/uuid [BSD 3-Clause "New" or "Revised" License](https://github.com/google/uuid/blob/master/LICENSE) - github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) - github.com/googleapis/gnostic [Apache License 2.0](https://github.com/google/gnostic/blob/master/LICENSE) - github.com/gopcua/opcua [MIT License](https://github.com/gopcua/opcua/blob/master/LICENSE) @@ -128,10 +136,10 @@ following works: - github.com/karrick/godirwalk [BSD 2-Clause "Simplified" License](https://github.com/karrick/godirwalk/blob/master/LICENSE) - github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) - github.com/klauspost/compress [BSD 3-Clause Clear License](https://github.com/klauspost/compress/blob/master/LICENSE) -- github.com/konsorten/go-windows-terminal-sequences [MIT License](https://github.com/konsorten/go-windows-terminal-sequences/blob/master/LICENSE) - github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) - github.com/mattn/go-colorable [MIT License](https://github.com/mattn/go-colorable/blob/master/LICENSE) +- github.com/mattn/go-ieproxy [MIT License](https://github.com/mattn/go-ieproxy/blob/master/LICENSE) - github.com/mattn/go-isatty [MIT License](https://github.com/mattn/go-isatty/blob/master/LICENSE) - github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) - github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md) @@ -160,6 +168,7 @@ following works: - github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) - github.com/philhofer/fwd [MIT License](https://github.com/philhofer/fwd/blob/master/LICENSE.md) - github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE) +- github.com/pkg/browser [BSD 2-Clause "Simplified" License](https://github.com/pkg/browser/blob/master/LICENSE) - github.com/pkg/errors [BSD 2-Clause "Simplified" License](https://github.com/pkg/errors/blob/master/LICENSE) - github.com/pmezard/go-difflib [BSD 3-Clause Clear License](https://github.com/pmezard/go-difflib/blob/master/LICENSE) - github.com/prometheus/client_golang [Apache License 2.0](https://github.com/prometheus/client_golang/blob/master/LICENSE) @@ -168,6 +177,7 @@ following works: - github.com/prometheus/procfs [Apache License 2.0](https://github.com/prometheus/procfs/blob/master/LICENSE) - github.com/prometheus/prometheus [Apache License 2.0](https://github.com/prometheus/prometheus/blob/master/LICENSE) - github.com/rcrowley/go-metrics [MIT License](https://github.com/rcrowley/go-metrics/blob/master/LICENSE) +- github.com/remyoudompheng/bigfft [BSD 3-Clause "New" or "Revised" License](https://github.com/remyoudompheng/bigfft/blob/master/LICENSE) - github.com/riemann/riemann-go-client [MIT License](https://github.com/riemann/riemann-go-client/blob/master/LICENSE) - github.com/safchain/ethtool [Apache License 2.0](https://github.com/safchain/ethtool/blob/master/LICENSE) - github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) @@ -177,6 +187,7 @@ following works: - github.com/signalfx/golib [Apache License 2.0](https://github.com/signalfx/golib/blob/master/LICENSE) - github.com/signalfx/sapm-proto [Apache License 2.0](https://github.com/signalfx/sapm-proto/blob/master/LICENSE) - github.com/sirupsen/logrus [MIT License](https://github.com/sirupsen/logrus/blob/master/LICENSE) +- github.com/snowflakedb/gosnowflake [Apache License 2.0](https://github.com/snowflakedb/gosnowflake/blob/master/LICENSE) - github.com/streadway/amqp [BSD 2-Clause "Simplified" License](https://github.com/streadway/amqp/blob/master/LICENSE) - github.com/stretchr/objx [MIT License](https://github.com/stretchr/objx/blob/master/LICENSE) - github.com/stretchr/testify [custom -- permissive](https://github.com/stretchr/testify/blob/master/LICENSE) @@ -240,9 +251,10 @@ following works: - k8s.io/klog [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - k8s.io/utils [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - modernc.org/libc [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/libc/-/blob/master/LICENSE) +- modernc.org/mathutil [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/mathutil/-/blob/master/LICENSE) - modernc.org/memory [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/memory/-/blob/master/LICENSE) - modernc.org/sqlite [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/sqlite/-/blob/master/LICENSE) - sigs.k8s.io/structured-merge-diff [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - sigs.k8s.io/yaml [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) ## telegraf used and modified code from these projects -- github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) \ No newline at end of file +- github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) diff --git a/go.mod b/go.mod index 070e883d7d4e3..810f616135f0e 100644 --- a/go.mod +++ b/go.mod @@ -27,25 +27,24 @@ require ( github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/aws/aws-sdk-go v1.34.34 - github.com/aws/aws-sdk-go-v2 v1.1.0 - github.com/aws/aws-sdk-go-v2/config v1.1.0 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.1 + github.com/aws/aws-sdk-go-v2 v1.3.2 + github.com/aws/aws-sdk-go-v2/config v1.1.5 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 - github.com/aws/smithy-go v1.0.0 + github.com/aws/smithy-go v1.3.1 github.com/benbjohnson/clock v1.0.3 github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmatcuk/doublestar/v3 v3.0.0 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 github.com/caio/go-tdigest v3.1.0+incompatible github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 - github.com/containerd/containerd v1.4.1 // indirect github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect github.com/denisenkom/go-mssqldb v0.9.0 github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 - github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible + github.com/docker/docker v20.10.5+incompatible github.com/eclipse/paho.mqtt.golang v1.3.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.5.0 @@ -85,7 +84,7 @@ require ( github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/lib/pq v1.3.0 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b github.com/microsoft/ApplicationInsights-Go v0.4.4 github.com/miekg/dns v1.1.31 @@ -102,7 +101,7 @@ require ( github.com/prometheus/client_golang v1.7.1 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.15.0 - github.com/prometheus/procfs v0.1.3 + github.com/prometheus/procfs v0.2.0 github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 github.com/riemann/riemann-go-client v0.5.0 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 @@ -110,10 +109,12 @@ require ( github.com/shirou/gopsutil v3.21.3+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/signalfx/golib/v3 v3.3.0 - github.com/sirupsen/logrus v1.6.0 + github.com/sirupsen/logrus v1.7.0 + github.com/snowflakedb/gosnowflake v1.5.0 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 + github.com/testcontainers/testcontainers-go v0.10.0 github.com/tidwall/gjson v1.6.0 github.com/tinylib/msgp v1.1.5 github.com/tklauser/go-sysconf v0.3.5 // indirect @@ -127,7 +128,7 @@ require ( github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/multierr v1.6.0 // indirect - golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 + golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa @@ -135,7 +136,7 @@ require ( golang.org/x/tools v0.1.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.29.0 - google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70 + google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a google.golang.org/grpc v1.37.0 gopkg.in/djherbis/times.v1 v1.2.0 gopkg.in/fatih/pool.v2 v2.0.0 // indirect @@ -143,12 +144,12 @@ require ( gopkg.in/ldap.v3 v3.1.0 gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 gopkg.in/olivere/elastic.v5 v5.0.70 - gopkg.in/yaml.v2 v2.3.0 + gopkg.in/yaml.v2 v2.4.0 gotest.tools v2.2.0+incompatible k8s.io/api v0.20.4 k8s.io/apimachinery v0.20.4 k8s.io/client-go v0.20.4 - modernc.org/sqlite v1.7.4 + modernc.org/sqlite v1.10.8 ) // replaced due to https://github.com/satori/go.uuid/issues/73 diff --git a/go.sum b/go.sum index 03c5229f8d46d..3d6d54883f580 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -41,18 +42,23 @@ github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1S github.com/Azure/azure-event-hubs-go/v3 v3.2.0 h1:CQlxKH5a4NX1ZmbdqXUPRwuNGh2XvtgmhkZvkEuWzhs= github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= -github.com/Azure/azure-pipeline-go v0.1.9 h1:u7JFb9fFTE6Y/j8ae2VK33ePrRqJqoCM/IWkQdAZ+rg= github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v45.1.0+incompatible h1:kxtaPD8n2z5Za+9e3sKsYG2IX6PG2R6VXtgS7gAbh3A= github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= +github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= +github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 h1:7MiZ6Th+YTmwUdrKmFg5OMsGYz7IdQwjqL0RPxkhhOQ= github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY= github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= @@ -103,8 +109,21 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= -github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 h1:mw6pDQqv38/WGF1cO/jF5t/jyAJ2yi7CmtFLLO5tGFI= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15 h1:Aof83YILRs2Vx3GhHqlvvfyx1asRJKMFIMeVlHsZKtI= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -112,6 +131,7 @@ github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.27.2 h1:1EyY1dsxNDUQEv0O/4TsjosHI2CgB1uo9H/v56xzTxc= github.com/Shopify/sarama v1.27.2/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= @@ -132,6 +152,7 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 h1:YtaYjXmemIMyySUbs0VGFPqsLpsNHf4TW/L6yqpJQ9s= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= @@ -144,6 +165,8 @@ github.com/antchfx/xpath v1.1.11 h1:WOFtK8TVAjLm3lbgqeP0arlHpvCEeTANeWZ/csPpJkQ= github.com/antchfx/xpath v1.1.11/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 h1:5ultmol0yeX75oh1hY78uAFn3dupBQ/QUNxERCkiaUQ= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -162,31 +185,44 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYKI= github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aws/aws-sdk-go-v2 v1.1.0 h1:sKP6QWxdN1oRYjl+k6S3bpgBI+XUx/0mqVOLIw4lR/Q= github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= -github.com/aws/aws-sdk-go-v2/config v1.1.0 h1:f3QVGpAcKrWpYNhKB8hE/buMjcfei95buQ5xdr/xYcU= -github.com/aws/aws-sdk-go-v2/config v1.1.0/go.mod h1:zfTyI6wH8yiZEvb6hGVza+S5oIB2lts2M7TDB4zMoeo= -github.com/aws/aws-sdk-go-v2/credentials v1.1.0 h1:RV0yzjGSNnJhTBco+01lwvWlc2m8gqBfha3D9dQDk78= -github.com/aws/aws-sdk-go-v2/credentials v1.1.0/go.mod h1:cV0qgln5tz/76IxAV0EsJVmmR5ZzKSQwWixsIvzk6lY= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.1 h1:eoT5e1jJf8Vcacu+mkEe1cgsgEAkuabpjhgq03GiXKc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.1/go.mod h1:b+8dhYiS3m1xpzTZWk5EuQml/vSmPhKlzM/bAm/fttY= +github.com/aws/aws-sdk-go-v2 v1.3.2 h1:RQj8l98yKUm0UV2Wd3w/Ms+TXV9Rs1E6Kr5tRRMfyU4= +github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= +github.com/aws/aws-sdk-go-v2/config v1.1.5 h1:imDWOGwlIrRpHLallJ9mli2SIQ4egtGKtFUFsuGRIaQ= +github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= +github.com/aws/aws-sdk-go-v2/credentials v1.1.5 h1:R9v/eN5cXv5yMLC619xRYl5PgCSuy5SarizmM7+qqSA= +github.com/aws/aws-sdk-go-v2/credentials v1.1.5/go.mod h1:Ir1R6tPiR1/2y1hes8yOijFMz54hzSmgcmCDo6F45Qc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 h1:zoOz5V56jO/rGixsCDnrQtAzYRYM2hGA/43U6jVMFbo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2 h1:Doa5wabOIDA0XZzBX5yCTAPGwDCVZ8Ux0wh29AUDmN4= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 h1:+VnEgB1yp+7KlOsk6FXX/v/fU9uL5oSujIMkKQBBmp8= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0/go.mod h1:/6514fU/SRcY3+ousB1zjUqiXjruSuti2qcfE70osOc= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1 h1:E7zGGgca12s7jA3VqirtaltXj5Wwe5eUIsUlNl1v+d8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4 h1:8yeByqOL6UWBsOOXsHnW93/ukwL66O008tRfxXxnTwA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4/go.mod h1:BCfU3Uo2fhKcMZFp9zU5QQGQxqWCOYmZ/27Dju3S/do= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1/go.mod h1:PISaKWylTYAyruocNk4Lr9miOOJjOcVBd7twCPbydDk= -github.com/aws/aws-sdk-go-v2/service/sso v1.1.0 h1:oQ/FE7bk1MldOs6RBTr+D7uMv1RfQ8WxxBRuH4lYEEo= -github.com/aws/aws-sdk-go-v2/service/sso v1.1.0/go.mod h1:VnS0vieB4YxutHFP9ROJ3ciT3T/XJZjxxv9L39eo8OQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.1.0 h1:X9oTTSm14wc0ef4dit7aIB02UIw1kVi/imV7zLhFDdM= -github.com/aws/aws-sdk-go-v2/service/sts v1.1.0/go.mod h1:A15vQm/MsXL3a410CxwKQ5IBoSvIg+cr10fEFzPgEYs= -github.com/aws/smithy-go v1.0.0 h1:hkhcRKG9rJ4Fn+RbfXY7Tz7b3ITLDyolBnLLBhwbg/c= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6 h1:ldYIsOP4WyjdzW8t6RC/aSieajrlx+3UN3UCZy1KM5Y= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6/go.mod h1:L0KWr0ASo83PRZu9NaZaDsw3koS6PspKv137DMDZjHo= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2 h1:aU8H58DoYxNo8R1TaSPTofkuxfQNnoqZmWL+G3+k/vA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2/go.mod h1:nnutjMLuna0s3GVY/MAkpLX03thyNER06gXvnMAPj5g= +github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 h1:VbwXUI3L0hyhVmrFxbDxrs6cBX8TNFX0YxCpooMNjvY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0/go.mod h1:uwA7gs93Qcss43astPUb1eq4RyceNmYWAQjZFDOAMLo= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 h1:B7ec5wE4+3Ldkurmq0C4gfQFtElGTG+/iTpi/YPMzi4= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/J0tzWCMXHbw6FZ0j1GkWM= +github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 h1:fKw6QSGcFlvZCBPYx3fo4sL0HfTmaT06ZtMHJfQQNQQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= +github.com/aws/smithy-go v1.3.1 h1:xJFO4pK0y9J8fCl34uGsSJX5KNnGbdARDlA5BPhXnwE= +github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -194,12 +230,21 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds= github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= @@ -212,9 +257,13 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= @@ -227,18 +276,71 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102 h1:Qf4HiqfvmB7zS6scsmNgTLmByHbq8n9RTF39v+TzP7A= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY= -github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1 h1:IK6yirB4X7wpKyFSikWiT++nZsyIxGAAgNEv3fEGuls= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7 h1:6ejg6Lkk8dskcM7wQ28gONkukbQkM4qpj4RnYbpFzrI= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/coreos/etcd v3.3.22+incompatible h1:AnRMUyVdVvh1k7lHe61YEd227+CLoNogQuAypztGSK4= github.com/coreos/etcd v3.3.22+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 h1:F8nmbiuX+gCz9xvWMi6Ak8HQntB4ATFXP46gaxifbp4= @@ -246,16 +348,24 @@ github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:sr github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8= github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.9.0 h1:RSohk2RsiZqLZ0zCjtfn3S4Gp4exhpBWHyQ7D0yGjAk= github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU= @@ -266,21 +376,31 @@ github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2x github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible h1:SiUATuP//KecDjpOK2tvZJgeScYAklvyjfK8JZlU6fo= -github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.5+incompatible h1:o5WL5onN4awYGwrW7+oTn5x9AF2prw7V0Ox8ZEkoCdg= +github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -298,6 +418,7 @@ github.com/eclipse/paho.mqtt.golang v1.3.0/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+J github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -326,6 +447,8 @@ github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= @@ -337,6 +460,7 @@ github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1T github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -449,15 +573,22 @@ github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/googleapis v1.3.1 h1:CzMaKrvF6Qa7XtRii064vKBQiyvmY8H8vG1xa1/W1JA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= +github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -509,6 +640,7 @@ github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -518,6 +650,7 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -539,8 +672,9 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -556,7 +690,9 @@ github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU8 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -587,6 +723,7 @@ github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.7.0 h1:H6R9d008jDcHPQPAqPNuydAshJ4v5/8URdFnUvK/+sc= github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -601,6 +738,7 @@ github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= @@ -638,6 +776,8 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e h1:3J1OB4RDKwXs5l8uEV6BP/tucOJOPDQysiT7/9cuXzA= github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= @@ -666,6 +806,7 @@ github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= @@ -714,6 +855,8 @@ github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQD github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -764,14 +907,14 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.3 h1:dB4Bn0tN3wdCzQxnS8r06kV74qN/TAfaIS0bVE8h3jc= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -808,12 +951,15 @@ github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8 github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -826,10 +972,14 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= @@ -844,6 +994,7 @@ github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -858,8 +1009,16 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= +github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= +github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= +github.com/moby/sys/mountinfo v0.4.0 h1:1KInV3Huv18akCu58V7lzNlt+jFmqlu1EaErnEHE/VM= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd h1:aY7OQNf2XqY/JQ6qREWamhI/81os/agb2BAGpcx5yWI= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -867,12 +1026,15 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/multiplay/go-ts3 v1.0.0 h1:loxtEFqvYtpoGh1jOqEt6aDzctYuQsi3vb3dMpvWiWw= github.com/multiplay/go-ts3 v1.0.0/go.mod h1:14S6cS3fLNT3xOytrA/DkRyAFNuQLMLEqOYAsf87IbQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= @@ -893,6 +1055,7 @@ github.com/nats-io/nkeys v0.1.4 h1:aEsHIssIk6ETN5m2/MD8Y4B2X7FfXrBAUdkyRvbVYzA= github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 h1:9YEHXplqlVkOltThchh+RxeODvTb1TBvQ1181aXg3pY= github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= @@ -904,24 +1067,48 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93 h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= @@ -956,7 +1143,10 @@ github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -966,15 +1156,19 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -982,6 +1176,7 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= @@ -991,13 +1186,18 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.13.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 h1:IB/5RJRcJiR/YzKs4Aou86s/RaMepZOZVCArYNHJHWc= github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2/go.mod h1:Td6hjwdXDmVt5CI9T03Sw+yBNxLBq/Yx3ZtmtP8zlCA= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1024,6 +1224,7 @@ github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThC github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -1032,6 +1233,7 @@ github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4 github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sensu/sensu-go/api/core/v2 v2.6.0 h1:hEKPHFZZNDuWTlKr7Kgm2yog65ZdkBUqNesE5qaWEGo= @@ -1057,25 +1259,36 @@ github.com/signalfx/golib/v3 v3.3.0/go.mod h1:GzjWpV0skAXZn7+u9LnkOkiXAx9KKd5XZc github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8KfCSyuwy/VILnROdgCvbQLA5ch0nkbG7lKT0BXw= github.com/signalfx/sapm-proto v0.4.0 h1:5lQX++6FeIjUZEIcnSgBqhOpmSjMkRBW3y/4ZiKMo5E= github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbPU959ZNhM0Fk= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/gosnowflake v1.5.0 h1:Md7P8zbPegXy0+/SZ2nG8whXYkAT44nQ/yEb35LlIKo= +github.com/snowflakedb/gosnowflake v1.5.0/go.mod h1:1kyg2XEduwti88V11PKRHImhXLK5WpGiayY6lFNYb98= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -1084,10 +1297,12 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1097,9 +1312,15 @@ github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= +github.com/testcontainers/testcontainers-go v0.10.0 h1:ASWe0nwTNg5z8K3WSQ8aBNB6j5vrNJocFPEZF4NS0qI= +github.com/testcontainers/testcontainers-go v0.10.0/go.mod h1:zFYk0JndthnMHEwtVRHCpLwIP/Ik1G7mvIAQ2MdZ+Ig= github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= @@ -1114,18 +1335,23 @@ github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITn github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 h1:iBlTJosRsR70amr0zsmSPvaKNH8K/p3YlX/5SdPmSl8= github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330/go.mod h1:7+aWBsUJCo9OQRCgTypRmIQW9KKKcPMjtrdnYIBsS70= github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= @@ -1135,6 +1361,8 @@ github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59b github.com/wavefronthq/wavefront-sdk-go v0.9.7 h1:SrtABcXXeKCW5SerQYsnCzHo15GeggjZmL+DjtTy6CI= github.com/wavefronthq/wavefront-sdk-go v0.9.7/go.mod h1:JTGsu+KKgxx+GitC65VVdftN2iep1nVpQi/8EGR6v4Y= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOFra9xJfRXZcL2pLhMI8oNuDugNxg9Q= github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk= @@ -1144,6 +1372,9 @@ github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhe github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= @@ -1153,9 +1384,14 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -1185,7 +1421,9 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1211,8 +1449,9 @@ golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1254,6 +1493,7 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1267,8 +1507,10 @@ golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1278,6 +1520,7 @@ golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1289,13 +1532,15 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 h1:lwlPPsmjDKK0J6eG6xDWd5XPehI0R024zxjDnw3esPA= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1335,14 +1580,20 @@ golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1351,14 +1602,21 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1370,12 +1628,21 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa h1:ZYxPR6aca/uhfRJyaOAtflSHjJYiktO7QnJC5ut7iY4= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1460,6 +1727,7 @@ golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= @@ -1483,6 +1751,7 @@ gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -1504,11 +1773,13 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -1520,6 +1791,7 @@ google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1529,8 +1801,10 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70 h1:wboULUXGF3c5qdUnKp+6gLAccE6PRpa/czkYvQ4UXv8= google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1540,13 +1814,16 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1563,13 +1840,16 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -1583,6 +1863,7 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= @@ -1604,11 +1885,14 @@ gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE= gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE= gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI= gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1623,8 +1907,9 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -1632,7 +1917,6 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -honnef.co/go/netdb v0.0.0-20150201073656-a416d700ae39/go.mod h1:rbNo0ST5hSazCG4rGfpHrwnwvzP1QX62WbhzD+ghGzs= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1642,14 +1926,21 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4 h1:vhxQ0PPUUU2Ns1b9r4/UFp13UPs8cw2iOoTjnY9faa0= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4 h1:85crgh1IotNkLpKYKZHVNI1JT86nr/iDCvq2iWKsql4= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -1661,26 +1952,45 @@ k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -modernc.org/httpfs v1.0.0 h1:LtuKNg6JMiaBKVQHKd6Phhvk+2GFp+pUcmDQgRjrds0= -modernc.org/httpfs v1.0.0/go.mod h1:BSkfoMUcahSijQD5J/Vu4UMOxzmEf5SNRwyXC4PJBEw= -modernc.org/libc v1.3.1 h1:ZAAaxQZtb94hXvlPMEQybXBLLxEtJlQtVfvLkKOPZ5w= -modernc.org/libc v1.3.1/go.mod h1:f8sp9GAfEyGYh3lsRIKtBh/XwACdFvGznxm6GJmQvXk= -modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE= +modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= +modernc.org/cc/v3 v3.33.5 h1:gfsIOmcv80EelyQyOHn/Xhlzex8xunhQxWiJRMYmPrI= +modernc.org/cc/v3 v3.33.5/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= +modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo= +modernc.org/ccgo/v3 v3.9.4 h1:mt2+HyTZKxva27O6T4C9//0xiNQ/MornL3i8itM5cCs= +modernc.org/ccgo/v3 v3.9.4/go.mod h1:19XAY9uOrYnDhOgfHwCABasBvK69jgC4I8+rizbk3Bc= +modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/libc v1.9.5 h1:zv111ldxmP7DJ5mOIqzRbza7ZDl3kh4ncKfASB2jIYY= +modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.0.1 h1:bhVo78NAdgvRD4N+b2hGnAwL5RP2+QyiEJDsX3jpeDA= -modernc.org/memory v1.0.1/go.mod h1:NSjvC08+g3MLOpcAxQbdctcThAEX4YlJ20WWHYEhvRg= -modernc.org/sqlite v1.7.4 h1:pJVbc3NLKENbO1PJ3/uH+kDeuJiTShqc8eZarwANJgU= -modernc.org/sqlite v1.7.4/go.mod h1:xse4RHCm8Fzw0COf5SJqAyiDrVeDwAQthAS1V/woNIA= -modernc.org/tcl v1.4.1 h1:8ERwg+o+EFtrXmXDOVuGGmo+EkEh8Bkokb/ybI3kXPQ= -modernc.org/tcl v1.4.1/go.mod h1:8YCvzidU9SIwkz7RZwlCWK61mhV8X9UwfkRDRp7y5e0= +modernc.org/mathutil v1.2.2 h1:+yFk8hBprV+4c0U9GjFtL+dV3N8hOJ8JCituQcMShFY= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.0.4 h1:utMBrFcpnQDdNsmM6asmyH/FM9TqLPS7XF7otpJmrwM= +modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= +modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.10.8 h1:tZzV+/FwlSBddiJAHLR+qxsw2nx7jpLMKOCVu6NTjxI= +modernc.org/sqlite v1.10.8/go.mod h1:k45BYY2DU82vbS/dJ24OzHCtjPeMEcZ1DV2POiE8nRs= +modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/tcl v1.5.2 h1:sYNjGr4zK6cDH74USl8wVJRrvDX6UOLpG0j4lFvR0W0= +modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo= +modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +modernc.org/z v1.0.1 h1:WyIDpEpAIx4Hel6q/Pcgj/VhaQV5XPJ2I6ryIYbjnpc= +modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 61270d5ad412e..893ac91f45c50 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -40,6 +40,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/sensu" _ "github.com/influxdata/telegraf/plugins/outputs/signalfx" _ "github.com/influxdata/telegraf/plugins/outputs/socket_writer" + _ "github.com/influxdata/telegraf/plugins/outputs/sql" _ "github.com/influxdata/telegraf/plugins/outputs/stackdriver" _ "github.com/influxdata/telegraf/plugins/outputs/sumologic" _ "github.com/influxdata/telegraf/plugins/outputs/syslog" diff --git a/plugins/outputs/sql/README.md b/plugins/outputs/sql/README.md new file mode 100644 index 0000000000000..6fb215612ecaf --- /dev/null +++ b/plugins/outputs/sql/README.md @@ -0,0 +1,150 @@ +# SQL Output Plugin + +The SQL output plugin saves Telegraf metric data to an SQL database. + +The plugin uses a simple, hard-coded database schema. There is a table +for each metric type and the table name is the metric name. There is a +column per field and a column per tag. There is an optional column for +the metric timestamp. + +A row is written for every input metric. This means multiple metrics +are never merged into a single row, even if they have the same metric +name, tags, and timestamp. + +The plugin uses Golang's generic "database/sql" interface and third +party drivers. See the driver-specific section below for a list of +supported drivers and details. Additional drivers may be added in +future Telegraf releases. + +## Getting started + +To use the plugin, set the driver setting to the driver name +appropriate for your database. Then set the data source name +(DSN). The format of the DSN varies by driver but often includes a +username, password, the database instance to use, and the hostname of +the database server. The user account must have privileges to insert +rows and create tables. + +## Generated SQL + +The plugin generates simple ANSI/ISO SQL that is likely to work on any +DBMS. It doesn't use language features that are specific to a +particular DBMS. If you want to use a feature that is specific to a +particular DBMS, you may be able to set it up manually outside of this +plugin or through the init_sql setting. + +The insert statements generated by the plugin use placeholder +parameters. Most database drivers use question marks as placeholders +but postgres uses indexed dollar signs. The plugin chooses which +placeholder style to use depending on the driver selected. + +## Advanced options + +When the plugin first connects it runs SQL from the init_sql setting, +allowing you to perform custom initialization for the connection. + +Before inserting a row, the plugin checks whether the table exists. If +it doesn't exist, the plugin creates the table. The existence check +and the table creation statements can be changed through template +settings. The template settings allows you to have the plugin create +customized tables or skip table creation entirely by setting the check +template to any query that executes without error, such as "select 1". + +The name of the timestamp column is "timestamp" but it can be changed +with the timestamp\_column setting. The timestamp column can be +completely disabled by setting it to "". + +By changing the table creation template, it's possible with some +databases to save a row insertion timestamp. You can add an additional +column with a default value to the template, like "CREATE TABLE +{TABLE}(insertion_timestamp TIMESTAMP DEFAULT CURRENT\_TIMESTAMP, +{COLUMNS})". + +The mapping of metric types to sql column types can be customized +through the convert settings. + +## Configuration + +``` +# Save metrics to an SQL Database +[[outputs.sql]] + ## Database driver + ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), + ## sqlite (SQLite3), snowflake (snowflake.com) + # driver = "" + + ## Data source name + ## The format of the data source name is different for each database driver. + ## See the plugin readme for details. + # data_source_name = "" + + ## Timestamp column name + # timestamp_column = "timestamp" + + ## Table creation template + ## Available template variables: + ## {TABLE} - table name as a quoted identifier + ## {TABLELITERAL} - table name as a quoted string literal + ## {COLUMNS} - column definitions (list of quoted identifiers and types) + # table_template = "CREATE TABLE {TABLE}({COLUMNS})" + + ## Table existence check template + ## Available template variables: + ## {TABLE} - tablename as a quoted identifier + # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" + + ## Initialization SQL + # init_sql = "" + + ## Metric type to SQL type conversion + #[outputs.sql.convert] + # integer = "INT" + # real = "DOUBLE" + # text = "TEXT" + # timestamp = "TIMESTAMP" + # defaultvalue = "TEXT" + # unsigned = "UNSIGNED" +``` + +## Driver-specific information + +### go-sql-driver/mysql + +MySQL default quoting differs from standard ANSI/ISO SQL quoting. You +must use MySQL's ANSI\_QUOTES mode with this plugin. You can enable +this mode by using the setting `init_sql = "SET +sql_mode='ANSI_QUOTES';"` or through a command-line option when +running MySQL. See MySQL's docs for [details on +ANSI\_QUOTES](https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_ansi_quotes) +and [how to set the SQL +mode](https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sql-mode-setting). + +You can use a DSN of the format +"username:password@tcp(host:port)/dbname". See the [driver +docs](https://github.com/go-sql-driver/mysql) for details. + +### jackc/pgx + +You can use a DSN of the format +"postgres://username:password@host:port/dbname". See the [driver +docs](https://github.com/jackc/pgx) for more details. + +### modernc.org/sqlite + +This driver is not available on all operating systems and +architectures. It is only included in Linux builds on amd64, 386, +arm64, arm, and Darwin on amd64. It is not available for Windows, +FreeBSD, and other Linux and Darwin platforms. + +The DSN is a filename or url with scheme "file:". See the [driver +docs](https://modernc.org/sqlite) for details. + +### denisenkom/go-mssqldb + +Telegraf doesn't have unit tests for go-mssqldb so it should be +treated as experimental. + +### snowflakedb/gosnowflake + +Telegraf doesn't have unit tests for gosnowflake so it should be +treated as experimental. diff --git a/plugins/outputs/sql/sql.go b/plugins/outputs/sql/sql.go new file mode 100644 index 0000000000000..3e003d3309873 --- /dev/null +++ b/plugins/outputs/sql/sql.go @@ -0,0 +1,277 @@ +package sql + +import ( + gosql "database/sql" + "fmt" + "strings" + + //Register sql drivers + _ "github.com/denisenkom/go-mssqldb" // mssql (sql server) + _ "github.com/go-sql-driver/mysql" // mysql + _ "github.com/jackc/pgx/v4/stdlib" // pgx (postgres) + _ "github.com/snowflakedb/gosnowflake" // snowflake + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" +) + +type ConvertStruct struct { + Integer string + Real string + Text string + Timestamp string + Defaultvalue string + Unsigned string +} + +type SQL struct { + Driver string + DataSourceName string + TimestampColumn string + TableTemplate string + TableExistsTemplate string + InitSQL string `toml:"init_sql"` + Convert ConvertStruct + + db *gosql.DB + Log telegraf.Logger `toml:"-"` + tables map[string]bool +} + +func (p *SQL) Connect() error { + db, err := gosql.Open(p.Driver, p.DataSourceName) + if err != nil { + return err + } + + err = db.Ping() + if err != nil { + return err + } + + if p.InitSQL != "" { + _, err = db.Exec(p.InitSQL) + if err != nil { + return err + } + } + + p.db = db + p.tables = make(map[string]bool) + + return nil +} + +func (p *SQL) Close() error { + return p.db.Close() +} + +// Quote an identifier (table or column name) +func quoteIdent(name string) string { + return `"` + strings.Replace(sanitizeQuoted(name), `"`, `""`, -1) + `"` +} + +// Quote a string literal +func quoteStr(name string) string { + return "'" + strings.Replace(name, "'", "''", -1) + "'" +} + +func sanitizeQuoted(in string) string { + // https://dev.mysql.com/doc/refman/8.0/en/identifiers.html + // https://www.postgresql.org/docs/13/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS + + // Whitelist allowed characters + return strings.Map(func(r rune) rune { + switch { + case r >= '\u0001' && r <= '\uFFFF': + return r + default: + return '_' + } + }, in) +} + +func (p *SQL) deriveDatatype(value interface{}) string { + var datatype string + + switch value.(type) { + case int64: + datatype = p.Convert.Integer + case uint64: + datatype = fmt.Sprintf("%s %s", p.Convert.Integer, p.Convert.Unsigned) + case float64: + datatype = p.Convert.Real + case string: + datatype = p.Convert.Text + default: + datatype = p.Convert.Defaultvalue + p.Log.Errorf("Unknown datatype: '%T' %v", value, value) + } + return datatype +} + +var sampleConfig = ` + ## Database driver + ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), + ## sqlite (SQLite3), snowflake (snowflake.com) + # driver = "" + + ## Data source name + ## The format of the data source name is different for each database driver. + ## See the plugin readme for details. + # data_source_name = "" + + ## Timestamp column name + # timestamp_column = "timestamp" + + ## Table creation template + ## Available template variables: + ## {TABLE} - table name as a quoted identifier + ## {TABLELITERAL} - table name as a quoted string literal + ## {COLUMNS} - column definitions (list of quoted identifiers and types) + # table_template = "CREATE TABLE {TABLE}({COLUMNS})" + + ## Table existence check template + ## Available template variables: + ## {TABLE} - tablename as a quoted identifier + # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" + + ## Initialization SQL + # init_sql = "" + + ## Metric type to SQL type conversion + #[outputs.sql.convert] + # integer = "INT" + # real = "DOUBLE" + # text = "TEXT" + # timestamp = "TIMESTAMP" + # defaultvalue = "TEXT" + # unsigned = "UNSIGNED" +` + +func (p *SQL) SampleConfig() string { return sampleConfig } +func (p *SQL) Description() string { return "Send metrics to SQL Database" } + +func (p *SQL) generateCreateTable(metric telegraf.Metric) string { + var columns []string + // ## {KEY_COLUMNS} is a comma-separated list of key columns (timestamp and tags) + //var pk []string + + if p.TimestampColumn != "" { + //pk = append(pk, quoteIdent(p.TimestampColumn)) + columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(p.TimestampColumn), p.Convert.Timestamp)) + } + + for _, tag := range metric.TagList() { + //pk = append(pk, quoteIdent(tag.Key)) + columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(tag.Key), p.Convert.Text)) + } + + var datatype string + for _, field := range metric.FieldList() { + datatype = p.deriveDatatype(field.Value) + columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(field.Key), datatype)) + } + + query := p.TableTemplate + query = strings.Replace(query, "{TABLE}", quoteIdent(metric.Name()), -1) + query = strings.Replace(query, "{TABLELITERAL}", quoteStr(metric.Name()), -1) + query = strings.Replace(query, "{COLUMNS}", strings.Join(columns, ","), -1) + //query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1) + + return query +} + +func (p *SQL) generateInsert(tablename string, columns []string) string { + var placeholders, quotedColumns []string + for _, column := range columns { + quotedColumns = append(quotedColumns, quoteIdent(column)) + } + if p.Driver == "pgx" { + // Postgres uses $1 $2 $3 as placeholders + for i := 0; i < len(columns); i++ { + placeholders = append(placeholders, fmt.Sprintf("$%d", i+1)) + } + } else { + // Everything else uses ? ? ? as placeholders + for i := 0; i < len(columns); i++ { + placeholders = append(placeholders, "?") + } + } + + return fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", + quoteIdent(tablename), + strings.Join(quotedColumns, ","), + strings.Join(placeholders, ",")) +} + +func (p *SQL) tableExists(tableName string) bool { + stmt := strings.Replace(p.TableExistsTemplate, "{TABLE}", quoteIdent(tableName), -1) + + _, err := p.db.Exec(stmt) + return err == nil +} + +func (p *SQL) Write(metrics []telegraf.Metric) error { + for _, metric := range metrics { + tablename := metric.Name() + + // create table if needed + if !p.tables[tablename] && !p.tableExists(tablename) { + createStmt := p.generateCreateTable(metric) + _, err := p.db.Exec(createStmt) + if err != nil { + return err + } + p.tables[tablename] = true + } + + var columns []string + var values []interface{} + + if p.TimestampColumn != "" { + columns = append(columns, p.TimestampColumn) + values = append(values, metric.Time()) + } + + for column, value := range metric.Tags() { + columns = append(columns, column) + values = append(values, value) + } + + for column, value := range metric.Fields() { + columns = append(columns, column) + values = append(values, value) + } + + sql := p.generateInsert(tablename, columns) + _, err := p.db.Exec(sql, values...) + + if err != nil { + // check if insert error was caused by column mismatch + p.Log.Errorf("Error during insert: %v, %v", err, sql) + return err + } + } + return nil +} + +func init() { + outputs.Add("sql", func() telegraf.Output { return newSQL() }) +} + +func newSQL() *SQL { + return &SQL{ + TableTemplate: "CREATE TABLE {TABLE}({COLUMNS})", + TableExistsTemplate: "SELECT 1 FROM {TABLE} LIMIT 1", + TimestampColumn: "timestamp", + Convert: ConvertStruct{ + Integer: "INT", + Real: "DOUBLE", + Text: "TEXT", + Timestamp: "TIMESTAMP", + Defaultvalue: "TEXT", + Unsigned: "UNSIGNED", + }, + } +} diff --git a/plugins/outputs/sql/sql_test.go b/plugins/outputs/sql/sql_test.go new file mode 100644 index 0000000000000..c57570442c617 --- /dev/null +++ b/plugins/outputs/sql/sql_test.go @@ -0,0 +1,329 @@ +package sql + +import ( + "context" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" +) + +func TestSqlQuote(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } +} + +func TestSqlCreateStatement(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } +} + +func TestSqlInsertStatement(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } +} + +func pwgen(n int) string { + charset := []byte("abcdedfghijklmnopqrstABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") + + nchars := len(charset) + buffer := make([]byte, n) + + for i := range buffer { + buffer[i] = charset[rand.Intn(nchars)] + } + + return string(buffer) +} + +func stableMetric( + name string, + tags []telegraf.Tag, + fields []telegraf.Field, + tm time.Time, + tp ...telegraf.ValueType, +) telegraf.Metric { + // We want to compare the output of this plugin with expected + // output. Maps don't preserve order so comparison fails. There's + // no metric constructor that takes a slice of tag and slice of + // field, just the one that takes maps. + // + // To preserve order, construct the metric without tags and fields + // and then add them using AddTag and AddField. Those are stable. + m := metric.New(name, map[string]string{}, map[string]interface{}{}, tm, tp...) + for _, tag := range tags { + m.AddTag(tag.Key, tag.Value) + } + for _, field := range fields { + m.AddField(field.Key, field.Value) + } + return m +} + +var ( + // 2021-05-17T22:04:45+00:00 + // or 2021-05-17T16:04:45-06:00 + ts = time.Unix(1621289085, 0).UTC() + + testMetrics = []telegraf.Metric{ + stableMetric( + "metric_one", + []telegraf.Tag{ + { + Key: "tag_one", + Value: "tag1", + }, + { + Key: "tag_two", + Value: "tag2", + }, + }, + []telegraf.Field{ + { + Key: "int64_one", + Value: int64(1234), + }, + { + Key: "int64_two", + Value: int64(2345), + }, + }, + ts, + ), + stableMetric( + "metric_two", + []telegraf.Tag{ + { + Key: "tag_three", + Value: "tag3", + }, + }, + []telegraf.Field{ + { + Key: "string_one", + Value: "string1", + }, + }, + ts, + ), + stableMetric( //test spaces in metric, tag, and field names + "metric three", + []telegraf.Tag{ + { + Key: "tag four", + Value: "tag4", + }, + }, + []telegraf.Field{ + { + Key: "string two", + Value: "string2", + }, + }, + ts, + ), + } +) + +func TestMysqlIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + initdb, err := filepath.Abs("testdata/mariadb/initdb") + require.NoError(t, err) + + // initdb/script.sql creates this database + const dbname = "foo" + + // The mariadb image lets you set the root password through an env + // var. We'll use root to insert and query test data. + const username = "root" + + password := pwgen(32) + outDir, err := ioutil.TempDir("", "tg-mysql-*") + require.NoError(t, err) + defer os.RemoveAll(outDir) + + ctx := context.Background() + req := testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "mariadb", + Env: map[string]string{ + "MARIADB_ROOT_PASSWORD": password, + }, + BindMounts: map[string]string{ + initdb: "/docker-entrypoint-initdb.d", + outDir: "/out", + }, + ExposedPorts: []string{"3306/tcp"}, + WaitingFor: wait.ForListeningPort("3306/tcp"), + }, + Started: true, + } + mariadbContainer, err := testcontainers.GenericContainer(ctx, req) + require.NoError(t, err, "starting container failed") + defer func() { + require.NoError(t, mariadbContainer.Terminate(ctx), "terminating container failed") + }() + + // Get the connection details from the container + host, err := mariadbContainer.Host(ctx) + require.NoError(t, err, "getting container host address failed") + require.NotEmpty(t, host) + natPort, err := mariadbContainer.MappedPort(ctx, "3306/tcp") + require.NoError(t, err, "getting container host port failed") + port := natPort.Port() + require.NotEmpty(t, port) + + //use the plugin to write to the database + address := fmt.Sprintf("%v:%v@tcp(%v:%v)/%v", + username, password, host, port, dbname, + ) + p := newSQL() + p.Log = testutil.Logger{} + p.Driver = "mysql" + p.DataSourceName = address + //p.Convert.Timestamp = "TEXT" //disable mysql default current_timestamp() + p.InitSQL = "SET sql_mode='ANSI_QUOTES';" + + require.NoError(t, p.Connect()) + require.NoError(t, p.Write( + testMetrics, + )) + + //dump the database + var rc int + rc, err = mariadbContainer.Exec(ctx, []string{ + "bash", + "-c", + "mariadb-dump --user=" + username + + " --password=" + password + + " --compact --skip-opt " + + dbname + + " > /out/dump", + }) + require.NoError(t, err) + require.Equal(t, 0, rc) + dumpfile := filepath.Join(outDir, "dump") + require.FileExists(t, dumpfile) + + //compare the dump to what we expected + expected, err := ioutil.ReadFile("testdata/mariadb/expected.sql") + require.NoError(t, err) + actual, err := ioutil.ReadFile(dumpfile) + require.NoError(t, err) + require.Equal(t, string(expected), string(actual)) +} + +func TestPostgresIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + initdb, err := filepath.Abs("testdata/postgres/initdb") + require.NoError(t, err) + + // initdb/init.sql creates this database + const dbname = "foo" + + // default username for postgres is postgres + const username = "postgres" + + password := pwgen(32) + outDir, err := ioutil.TempDir("", "tg-postgres-*") + require.NoError(t, err) + defer os.RemoveAll(outDir) + + ctx := context.Background() + req := testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "postgres", + Env: map[string]string{ + "POSTGRES_PASSWORD": password, + }, + BindMounts: map[string]string{ + initdb: "/docker-entrypoint-initdb.d", + outDir: "/out", + }, + ExposedPorts: []string{"5432/tcp"}, + WaitingFor: wait.ForListeningPort("5432/tcp"), + }, + Started: true, + } + cont, err := testcontainers.GenericContainer(ctx, req) + require.NoError(t, err, "starting container failed") + defer func() { + require.NoError(t, cont.Terminate(ctx), "terminating container failed") + }() + + // Get the connection details from the container + host, err := cont.Host(ctx) + require.NoError(t, err, "getting container host address failed") + require.NotEmpty(t, host) + natPort, err := cont.MappedPort(ctx, "5432/tcp") + require.NoError(t, err, "getting container host port failed") + port := natPort.Port() + require.NotEmpty(t, port) + + //use the plugin to write to the database + // host, port, username, password, dbname + address := fmt.Sprintf("postgres://%v:%v@%v:%v/%v", + username, password, host, port, dbname, + ) + p := newSQL() + p.Log = testutil.Logger{} + p.Driver = "pgx" + p.DataSourceName = address + + require.NoError(t, p.Connect()) + require.NoError(t, p.Write( + testMetrics, + )) + + //dump the database + //psql -u postgres + var rc int + rc, err = cont.Exec(ctx, []string{ + "bash", + "-c", + "pg_dump" + + " --username=" + username + + //" --password=" + password + + // " --compact --skip-opt " + + " --no-comments" + + //" --data-only" + + " " + dbname + + // pg_dump's output has comments that include build info + // of postgres and pg_dump. The build info changes with + // each release. To prevent these changes from causing the + // test to fail, we strip out comments. Also strip out + // blank lines. + "|grep -E -v '(^--|^$)'" + + " > /out/dump 2>&1", + }) + require.NoError(t, err) + require.Equal(t, 0, rc) + dumpfile := filepath.Join(outDir, "dump") + require.FileExists(t, dumpfile) + + //compare the dump to what we expected + expected, err := ioutil.ReadFile("testdata/postgres/expected.sql") + require.NoError(t, err) + actual, err := ioutil.ReadFile(dumpfile) + require.NoError(t, err) + require.Equal(t, string(expected), string(actual)) +} diff --git a/plugins/outputs/sql/sqlite.go b/plugins/outputs/sql/sqlite.go new file mode 100644 index 0000000000000..2d93cda6b4a7b --- /dev/null +++ b/plugins/outputs/sql/sqlite.go @@ -0,0 +1,15 @@ +// +build !mips +// +build !mipsle +// +build !s390x +// +build !ppc64le +// +build !windows +// +build !freebsd + +package sql + +// The modernc.org sqlite driver isn't supported on all +// platforms. Register it with build constraints to prevent build +// failures on unsupported platforms. +import ( + _ "modernc.org/sqlite" // Register sqlite sql driver +) diff --git a/plugins/outputs/sql/sqlite_test.go b/plugins/outputs/sql/sqlite_test.go new file mode 100644 index 0000000000000..38784013c26cb --- /dev/null +++ b/plugins/outputs/sql/sqlite_test.go @@ -0,0 +1,134 @@ +// +build linux +// +build 386 amd64 arm arm64 + +package sql + +import ( + gosql "database/sql" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestSqlite(t *testing.T) { + outDir, err := ioutil.TempDir("", "tg-sqlite-*") + require.NoError(t, err) + defer os.RemoveAll(outDir) + + dbfile := filepath.Join(outDir, "db") + + // Use the plugin to write to the database address := + // fmt.Sprintf("file:%v", dbfile) + address := dbfile // accepts a path or a file: URI + p := newSQL() + p.Log = testutil.Logger{} + p.Driver = "sqlite" + p.DataSourceName = address + + require.NoError(t, p.Connect()) + require.NoError(t, p.Write( + testMetrics, + )) + + //read directly from the database + db, err := gosql.Open("sqlite", address) + require.NoError(t, err) + defer db.Close() + + var countMetricOne int + require.NoError(t, db.QueryRow("select count(*) from metric_one").Scan(&countMetricOne)) + require.Equal(t, 1, countMetricOne) + + var countMetricTwo int + require.NoError(t, db.QueryRow("select count(*) from metric_one").Scan(&countMetricTwo)) + require.Equal(t, 1, countMetricTwo) + + var rows *gosql.Rows + + // Check that tables were created as expected + rows, err = db.Query("select sql from sqlite_master") + require.NoError(t, err) + var sql string + require.True(t, rows.Next()) + require.NoError(t, rows.Scan(&sql)) + require.Equal(t, + `CREATE TABLE "metric_one"("timestamp" TIMESTAMP,"tag_one" TEXT,"tag_two" TEXT,"int64_one" INT,"int64_two" INT)`, + sql, + ) + require.True(t, rows.Next()) + require.NoError(t, rows.Scan(&sql)) + require.Equal(t, + `CREATE TABLE "metric_two"("timestamp" TIMESTAMP,"tag_three" TEXT,"string_one" TEXT)`, + sql, + ) + require.True(t, rows.Next()) + require.NoError(t, rows.Scan(&sql)) + require.Equal(t, + `CREATE TABLE "metric three"("timestamp" TIMESTAMP,"tag four" TEXT,"string two" TEXT)`, + sql, + ) + require.False(t, rows.Next()) + require.NoError(t, rows.Close()) //nolint:sqlclosecheck + + // sqlite stores dates as strings. They may be in the local + // timezone. The test needs to parse them back into a time.Time to + // check them. + //timeLayout := "2006-01-02 15:04:05 -0700 MST" + timeLayout := "2006-01-02T15:04:05Z" + var actualTime time.Time + + // Check contents of tables + rows, err = db.Query("select timestamp, tag_one, tag_two, int64_one, int64_two from metric_one") + require.NoError(t, err) + require.True(t, rows.Next()) + var ( + a string + b, c string + d, e int64 + ) + require.NoError(t, rows.Scan(&a, &b, &c, &d, &e)) + actualTime, err = time.Parse(timeLayout, a) + require.NoError(t, err) + require.Equal(t, ts, actualTime.UTC()) + require.Equal(t, "tag1", b) + require.Equal(t, "tag2", c) + require.Equal(t, int64(1234), d) + require.Equal(t, int64(2345), e) + require.False(t, rows.Next()) + require.NoError(t, rows.Close()) //nolint:sqlclosecheck + + rows, err = db.Query("select timestamp, tag_three, string_one from metric_two") + require.NoError(t, err) + require.True(t, rows.Next()) + var ( + f, g, h string + ) + require.NoError(t, rows.Scan(&f, &g, &h)) + actualTime, err = time.Parse(timeLayout, f) + require.NoError(t, err) + require.Equal(t, ts, actualTime.UTC()) + require.Equal(t, "tag3", g) + require.Equal(t, "string1", h) + require.False(t, rows.Next()) + require.NoError(t, rows.Close()) //nolint:sqlclosecheck + + rows, err = db.Query(`select timestamp, "tag four", "string two" from "metric three"`) + require.NoError(t, err) + require.True(t, rows.Next()) + var ( + i, j, k string + ) + require.NoError(t, rows.Scan(&i, &j, &k)) + actualTime, err = time.Parse(timeLayout, i) + require.NoError(t, err) + require.Equal(t, ts, actualTime.UTC()) + require.Equal(t, "tag4", j) + require.Equal(t, "string2", k) + require.False(t, rows.Next()) + require.NoError(t, rows.Close()) //nolint:sqlclosecheck +} diff --git a/plugins/outputs/sql/testdata/mariadb/expected.sql b/plugins/outputs/sql/testdata/mariadb/expected.sql new file mode 100644 index 0000000000000..49a3095db4da2 --- /dev/null +++ b/plugins/outputs/sql/testdata/mariadb/expected.sql @@ -0,0 +1,36 @@ +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `bar` ( + `baz` int(11) DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `bar` VALUES (1); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric three` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag four` text DEFAULT NULL, + `string two` text DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric three` VALUES ('2021-05-17 22:04:45','tag4','string2'); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric_one` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag_one` text DEFAULT NULL, + `tag_two` text DEFAULT NULL, + `int64_one` int(11) DEFAULT NULL, + `int64_two` int(11) DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric_one` VALUES ('2021-05-17 22:04:45','tag1','tag2',1234,2345); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric_two` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag_three` text DEFAULT NULL, + `string_one` text DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric_two` VALUES ('2021-05-17 22:04:45','tag3','string1'); diff --git a/plugins/outputs/sql/testdata/mariadb/initdb/script.sql b/plugins/outputs/sql/testdata/mariadb/initdb/script.sql new file mode 100644 index 0000000000000..7e155e105f15a --- /dev/null +++ b/plugins/outputs/sql/testdata/mariadb/initdb/script.sql @@ -0,0 +1,4 @@ +create database foo; +use foo; +create table bar (baz int); +insert into bar (baz) values (1); diff --git a/plugins/outputs/sql/testdata/postgres/expected.sql b/plugins/outputs/sql/testdata/postgres/expected.sql new file mode 100644 index 0000000000000..8bc2b2fc83018 --- /dev/null +++ b/plugins/outputs/sql/testdata/postgres/expected.sql @@ -0,0 +1,41 @@ +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; +SET default_tablespace = ''; +SET default_table_access_method = heap; +CREATE TABLE public."metric three" ( + "timestamp" timestamp without time zone, + "tag four" text, + "string two" text +); +ALTER TABLE public."metric three" OWNER TO postgres; +CREATE TABLE public.metric_one ( + "timestamp" timestamp without time zone, + tag_one text, + tag_two text, + int64_one integer, + int64_two integer +); +ALTER TABLE public.metric_one OWNER TO postgres; +CREATE TABLE public.metric_two ( + "timestamp" timestamp without time zone, + tag_three text, + string_one text +); +ALTER TABLE public.metric_two OWNER TO postgres; +COPY public."metric three" ("timestamp", "tag four", "string two") FROM stdin; +2021-05-17 22:04:45 tag4 string2 +\. +COPY public.metric_one ("timestamp", tag_one, tag_two, int64_one, int64_two) FROM stdin; +2021-05-17 22:04:45 tag1 tag2 1234 2345 +\. +COPY public.metric_two ("timestamp", tag_three, string_one) FROM stdin; +2021-05-17 22:04:45 tag3 string1 +\. diff --git a/plugins/outputs/sql/testdata/postgres/initdb/init.sql b/plugins/outputs/sql/testdata/postgres/initdb/init.sql new file mode 100644 index 0000000000000..0694ada11fbbe --- /dev/null +++ b/plugins/outputs/sql/testdata/postgres/initdb/init.sql @@ -0,0 +1,2 @@ +create database foo; + From 0fd0ae095329b52381d36556f70beb92a9045cf3 Mon Sep 17 00:00:00 2001 From: Alexander Emelin Date: Fri, 4 Jun 2021 07:53:38 +0300 Subject: [PATCH 451/761] Add WebSocket output plugin (#9188) --- README.md | 1 + go.mod | 1 + plugins/outputs/all/all.go | 1 + plugins/outputs/websocket/README.md | 39 ++++ plugins/outputs/websocket/websocket.go | 225 ++++++++++++++++++++ plugins/outputs/websocket/websocket_test.go | 221 +++++++++++++++++++ 6 files changed, 488 insertions(+) create mode 100644 plugins/outputs/websocket/README.md create mode 100644 plugins/outputs/websocket/websocket.go create mode 100644 plugins/outputs/websocket/websocket_test.go diff --git a/README.md b/README.md index 58254877ca1a0..0702d6b4d79eb 100644 --- a/README.md +++ b/README.md @@ -471,5 +471,6 @@ For documentation on the latest development code see the [documentation index][d * [udp](./plugins/outputs/socket_writer) * [warp10](./plugins/outputs/warp10) * [wavefront](./plugins/outputs/wavefront) +* [websocket](./plugins/outputs/websocket) * [sumologic](./plugins/outputs/sumologic) * [yandex_cloud_monitoring](./plugins/outputs/yandex_cloud_monitoring) diff --git a/go.mod b/go.mod index 810f616135f0e..d3f08990d6b14 100644 --- a/go.mod +++ b/go.mod @@ -63,6 +63,7 @@ require ( github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.13 github.com/gorilla/mux v1.7.3 + github.com/gorilla/websocket v1.4.2 github.com/gosnmp/gosnmp v1.32.0 github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 893ac91f45c50..8fc5f8b75ed90 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -47,5 +47,6 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/timestream" _ "github.com/influxdata/telegraf/plugins/outputs/warp10" _ "github.com/influxdata/telegraf/plugins/outputs/wavefront" + _ "github.com/influxdata/telegraf/plugins/outputs/websocket" _ "github.com/influxdata/telegraf/plugins/outputs/yandex_cloud_monitoring" ) diff --git a/plugins/outputs/websocket/README.md b/plugins/outputs/websocket/README.md new file mode 100644 index 0000000000000..577c10e6b0083 --- /dev/null +++ b/plugins/outputs/websocket/README.md @@ -0,0 +1,39 @@ +# Websocket Output Plugin + +This plugin can write to a WebSocket endpoint. + +It can output data in any of the [supported output formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md). + +### Configuration: + +```toml +# A plugin that can transmit metrics over WebSocket. +[[outputs.websocket]] + ## URL is the address to send metrics to. Make sure ws or wss scheme is used. + url = "ws://127.0.0.1:3000/telegraf" + + ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). + # connect_timeout = "30s" + # write_timeout = "30s" + # read_timeout = "30s" + + ## Optionally turn on using text data frames (binary by default). + # use_text_frames = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Data format to output. + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + # data_format = "influx" + + ## Additional HTTP Upgrade headers + # [outputs.websocket.headers] + # Authorization = "Bearer " +``` diff --git a/plugins/outputs/websocket/websocket.go b/plugins/outputs/websocket/websocket.go new file mode 100644 index 0000000000000..17aea0542c6aa --- /dev/null +++ b/plugins/outputs/websocket/websocket.go @@ -0,0 +1,225 @@ +package websocket + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/proxy" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" + + ws "github.com/gorilla/websocket" +) + +var sampleConfig = ` + ## URL is the address to send metrics to. Make sure ws or wss scheme is used. + url = "ws://127.0.0.1:8080/telegraf" + + ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). + # connect_timeout = "30s" + # write_timeout = "30s" + # read_timeout = "30s" + + ## Optionally turn on using text data frames (binary by default). + # use_text_frames = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Data format to output. + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + # data_format = "influx" + + ## Additional HTTP Upgrade headers + # [outputs.websocket.headers] + # Authorization = "Bearer " +` + +const ( + defaultConnectTimeout = 30 * time.Second + defaultWriteTimeout = 30 * time.Second + defaultReadTimeout = 30 * time.Second +) + +// WebSocket can output to WebSocket endpoint. +type WebSocket struct { + URL string `toml:"url"` + ConnectTimeout config.Duration `toml:"connect_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + ReadTimeout config.Duration `toml:"read_timeout"` + Headers map[string]string `toml:"headers"` + UseTextFrames bool `toml:"use_text_frames"` + Log telegraf.Logger `toml:"-"` + proxy.HTTPProxy + tls.ClientConfig + + conn *ws.Conn + serializer serializers.Serializer +} + +// SetSerializer implements serializers.SerializerOutput. +func (w *WebSocket) SetSerializer(serializer serializers.Serializer) { + w.serializer = serializer +} + +// Description of plugin. +func (w *WebSocket) Description() string { + return "Generic WebSocket output writer." +} + +// SampleConfig returns plugin config sample. +func (w *WebSocket) SampleConfig() string { + return sampleConfig +} + +var errInvalidURL = errors.New("invalid websocket URL") + +// Init the output plugin. +func (w *WebSocket) Init() error { + if parsedURL, err := url.Parse(w.URL); err != nil || (parsedURL.Scheme != "ws" && parsedURL.Scheme != "wss") { + return fmt.Errorf("%w: \"%s\"", errInvalidURL, w.URL) + } + return nil +} + +// Connect to the output endpoint. +func (w *WebSocket) Connect() error { + tlsCfg, err := w.ClientConfig.TLSConfig() + if err != nil { + return fmt.Errorf("error creating TLS config: %v", err) + } + + dialProxy, err := w.HTTPProxy.Proxy() + if err != nil { + return fmt.Errorf("error creating proxy: %v", err) + } + + dialer := &ws.Dialer{ + Proxy: dialProxy, + HandshakeTimeout: time.Duration(w.ConnectTimeout), + TLSClientConfig: tlsCfg, + } + + headers := http.Header{} + for k, v := range w.Headers { + headers.Set(k, v) + } + + conn, resp, err := dialer.Dial(w.URL, headers) + if err != nil { + return fmt.Errorf("error dial: %v", err) + } + _ = resp.Body.Close() + if resp.StatusCode != http.StatusSwitchingProtocols { + return fmt.Errorf("wrong status code while connecting to server: %d", resp.StatusCode) + } + + w.conn = conn + go w.read(conn) + + return nil +} + +func (w *WebSocket) read(conn *ws.Conn) { + defer func() { _ = conn.Close() }() + if w.ReadTimeout > 0 { + if err := conn.SetReadDeadline(time.Now().Add(time.Duration(w.ReadTimeout))); err != nil { + w.Log.Errorf("error setting read deadline: %v", err) + return + } + conn.SetPingHandler(func(string) error { + err := conn.SetReadDeadline(time.Now().Add(time.Duration(w.ReadTimeout))) + if err != nil { + w.Log.Errorf("error setting read deadline: %v", err) + return err + } + return conn.WriteControl(ws.PongMessage, nil, time.Now().Add(time.Duration(w.WriteTimeout))) + }) + } + for { + // Need to read a connection (to properly process pings from a server). + _, _, err := conn.ReadMessage() + if err != nil { + // Websocket connection is not readable after first error, it's going to error state. + // In the beginning of this goroutine we have defer section that closes such connection. + // After that connection will be tried to reestablish on next Write. + if ws.IsUnexpectedCloseError(err, ws.CloseGoingAway, ws.CloseAbnormalClosure) { + w.Log.Errorf("error reading websocket connection: %v", err) + } + return + } + if w.ReadTimeout > 0 { + if err := conn.SetReadDeadline(time.Now().Add(time.Duration(w.ReadTimeout))); err != nil { + return + } + } + } +} + +// Write writes the given metrics to the destination. Not thread-safe. +func (w *WebSocket) Write(metrics []telegraf.Metric) error { + if w.conn == nil { + // Previous write failed with error and ws conn was closed. + if err := w.Connect(); err != nil { + return err + } + } + + messageData, err := w.serializer.SerializeBatch(metrics) + if err != nil { + return err + } + + if w.WriteTimeout > 0 { + if err := w.conn.SetWriteDeadline(time.Now().Add(time.Duration(w.WriteTimeout))); err != nil { + return fmt.Errorf("error setting write deadline: %v", err) + } + } + messageType := ws.BinaryMessage + if w.UseTextFrames { + messageType = ws.TextMessage + } + err = w.conn.WriteMessage(messageType, messageData) + if err != nil { + _ = w.conn.Close() + w.conn = nil + return fmt.Errorf("error writing to connection: %v", err) + } + return nil +} + +// Close closes the connection. Noop if already closed. +func (w *WebSocket) Close() error { + if w.conn == nil { + return nil + } + err := w.conn.Close() + w.conn = nil + return err +} + +func newWebSocket() *WebSocket { + return &WebSocket{ + ConnectTimeout: config.Duration(defaultConnectTimeout), + WriteTimeout: config.Duration(defaultWriteTimeout), + ReadTimeout: config.Duration(defaultReadTimeout), + } +} + +func init() { + outputs.Add("websocket", func() telegraf.Output { + return newWebSocket() + }) +} diff --git a/plugins/outputs/websocket/websocket_test.go b/plugins/outputs/websocket/websocket_test.go new file mode 100644 index 0000000000000..a6c74a77dd38a --- /dev/null +++ b/plugins/outputs/websocket/websocket_test.go @@ -0,0 +1,221 @@ +package websocket + +import ( + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" + + ws "github.com/gorilla/websocket" + "github.com/stretchr/testify/require" +) + +// testSerializer serializes to a number of metrics to simplify tests here. +type testSerializer struct{} + +func newTestSerializer() *testSerializer { + return &testSerializer{} +} + +func (t testSerializer) Serialize(_ telegraf.Metric) ([]byte, error) { + return []byte("1"), nil +} + +func (t testSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + return []byte(strconv.Itoa(len(metrics))), nil +} + +type testServer struct { + *httptest.Server + t *testing.T + messages chan []byte + upgradeDelay time.Duration + expectTextFrames bool +} + +func newTestServer(t *testing.T, messages chan []byte, tls bool) *testServer { + s := &testServer{} + s.t = t + if tls { + s.Server = httptest.NewTLSServer(s) + } else { + s.Server = httptest.NewServer(s) + } + s.URL = makeWsProto(s.Server.URL) + s.messages = messages + return s +} + +func makeWsProto(s string) string { + return "ws" + strings.TrimPrefix(s, "http") +} + +const ( + testHeaderName = "X-Telegraf-Test" + testHeaderValue = "1" +) + +var testUpgrader = ws.Upgrader{} + +func (s *testServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Header.Get(testHeaderName) != testHeaderValue { + s.t.Fatalf("expected test header found in request, got: %#v", r.Header) + } + if s.upgradeDelay > 0 { + // Emulate long handshake. + select { + case <-r.Context().Done(): + return + case <-time.After(s.upgradeDelay): + } + } + conn, err := testUpgrader.Upgrade(w, r, http.Header{}) + if err != nil { + return + } + defer func() { _ = conn.Close() }() + + for { + messageType, data, err := conn.ReadMessage() + if err != nil { + break + } + if s.expectTextFrames && messageType != ws.TextMessage { + s.t.Fatalf("unexpected frame type: %d", messageType) + } + select { + case s.messages <- data: + case <-time.After(5 * time.Second): + s.t.Fatal("timeout writing to messages channel, make sure there are readers") + } + } +} + +func initWebSocket(s *testServer) *WebSocket { + w := newWebSocket() + w.Log = testutil.Logger{} + w.URL = s.URL + w.Headers = map[string]string{testHeaderName: testHeaderValue} + w.SetSerializer(newTestSerializer()) + return w +} + +func connect(t *testing.T, w *WebSocket) { + err := w.Connect() + require.NoError(t, err) +} + +func TestWebSocket_NoURL(t *testing.T) { + w := newWebSocket() + err := w.Init() + require.ErrorIs(t, err, errInvalidURL) +} + +func TestWebSocket_Connect_Timeout(t *testing.T) { + s := newTestServer(t, nil, false) + s.upgradeDelay = time.Second + defer s.Close() + w := initWebSocket(s) + w.ConnectTimeout = config.Duration(10 * time.Millisecond) + err := w.Connect() + require.Error(t, err) +} + +func TestWebSocket_Connect_OK(t *testing.T) { + s := newTestServer(t, nil, false) + defer s.Close() + w := initWebSocket(s) + connect(t, w) +} + +func TestWebSocket_ConnectTLS_OK(t *testing.T) { + s := newTestServer(t, nil, true) + defer s.Close() + w := initWebSocket(s) + w.ClientConfig.InsecureSkipVerify = true + connect(t, w) +} + +func TestWebSocket_Write_OK(t *testing.T) { + messages := make(chan []byte, 1) + + s := newTestServer(t, messages, false) + defer s.Close() + + w := initWebSocket(s) + connect(t, w) + + var metrics []telegraf.Metric + metrics = append(metrics, testutil.TestMetric(0.4, "test")) + metrics = append(metrics, testutil.TestMetric(0.5, "test")) + err := w.Write(metrics) + require.NoError(t, err) + + select { + case data := <-messages: + require.Equal(t, []byte("2"), data) + case <-time.After(time.Second): + t.Fatal("timeout receiving data") + } +} + +func TestWebSocket_Write_Error(t *testing.T) { + s := newTestServer(t, nil, false) + defer s.Close() + + w := initWebSocket(s) + connect(t, w) + + require.NoError(t, w.conn.Close()) + + metrics := []telegraf.Metric{testutil.TestMetric(0.4, "test")} + err := w.Write(metrics) + require.Error(t, err) + require.Nil(t, w.conn) +} + +func TestWebSocket_Write_Reconnect(t *testing.T) { + messages := make(chan []byte, 1) + s := newTestServer(t, messages, false) + s.expectTextFrames = true // Also use text frames in this test. + defer s.Close() + + w := initWebSocket(s) + w.UseTextFrames = true + connect(t, w) + + metrics := []telegraf.Metric{testutil.TestMetric(0.4, "test")} + + require.NoError(t, w.conn.Close()) + + err := w.Write(metrics) + require.Error(t, err) + require.Nil(t, w.conn) + + err = w.Write(metrics) + require.NoError(t, err) + + select { + case data := <-messages: + require.Equal(t, []byte("1"), data) + case <-time.After(time.Second): + t.Fatal("timeout receiving data") + } +} + +func TestWebSocket_Close(t *testing.T) { + s := newTestServer(t, nil, false) + defer s.Close() + + w := initWebSocket(s) + connect(t, w) + require.NoError(t, w.Close()) + // Check no error on second close. + require.NoError(t, w.Close()) +} From 6f956411a20a24f39097ee8f9fe77cdf764bc801 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Fri, 4 Jun 2021 14:09:25 -0600 Subject: [PATCH 452/761] Update changelog (cherry picked from commit 81f186baa81f9b09f8221a7a02a482466459f95d) --- CHANGELOG.md | 71 ++++++ etc/telegraf.conf | 564 ++++++++++++++++++++++++++++++++++++---------- 2 files changed, 518 insertions(+), 117 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a85038587f10b..9580dc5976e60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,74 @@ +## v1.19.0-rc0 [2021-06-04] + +#### Release Notes + +- Many linter fixes - thanks @zak-pawel and all! + +#### Bugfixes + +- [#9182](https://github.com/influxdata/telegraf/pull/9182) Update pgx to v4 +- [#9275](https://github.com/influxdata/telegraf/pull/9275) Fix reading config files starting with http: +- [#9196](https://github.com/influxdata/telegraf/pull/9196) `serializers.prometheusremotewrite` Update dependency and remove tags with empty values +- [#9051](https://github.com/influxdata/telegraf/pull/9051) `outputs.kafka` Don't prevent telegraf from starting when there's a connection error +- [#8795](https://github.com/influxdata/telegraf/pull/8795) `parsers.prometheusremotewrite` Update prometheus dependency to v2.21.0 + +#### Features + +- [#8987](https://github.com/influxdata/telegraf/pull/8987) Config file environment variable can be a URL +- [#9297](https://github.com/influxdata/telegraf/pull/9297) `outputs.datadog` Add HTTP proxy to datadog output +- [#9087](https://github.com/influxdata/telegraf/pull/9087) Add named timestamp formats +- [#9276](https://github.com/influxdata/telegraf/pull/9276) `inputs.vsphere` Add config option for the historical interval duration +- [#9274](https://github.com/influxdata/telegraf/pull/9274) `inputs.ping` Add an option to specify packet size +- [#9007](https://github.com/influxdata/telegraf/pull/9007) Allow multiple "--config" and "--config-directory" flags +- [#9249](https://github.com/influxdata/telegraf/pull/9249) `outputs.graphite` Allow more characters in graphite tags +- [#8351](https://github.com/influxdata/telegraf/pull/8351) `inputs.sqlserver` Added login_name +- [#9223](https://github.com/influxdata/telegraf/pull/9223) `inputs.dovecot` Add support for unix domain sockets +- [#9118](https://github.com/influxdata/telegraf/pull/9118) `processors.strings` Add UTF-8 sanitizer +- [#9156](https://github.com/influxdata/telegraf/pull/9156) `inputs.aliyuncms` Add config option list of regions to query +- [#9138](https://github.com/influxdata/telegraf/pull/9138) `common.http` Add OAuth2 to HTTP input +- [#8822](https://github.com/influxdata/telegraf/pull/8822) `inputs.sqlserver` Enable Azure Active Directory (AAD) authentication support +- [#9136](https://github.com/influxdata/telegraf/pull/9136) `inputs.cloudwatch` Add wildcard support in dimensions configuration +- [#5517](https://github.com/influxdata/telegraf/pull/5517) `inputs.mysql` Gather all mysql channels +- [#8911](https://github.com/influxdata/telegraf/pull/8911) `processors.enum` Support float64 +- [#9105](https://github.com/influxdata/telegraf/pull/9105) `processors.starlark` Support nanosecond resolution timestamp +- [#9080](https://github.com/influxdata/telegraf/pull/9080) `inputs.logstash` Add support for version 7 queue stats +- [#9074](https://github.com/influxdata/telegraf/pull/9074) `parsers.prometheusremotewrite` Add starlark script for renaming metrics +- [#9032](https://github.com/influxdata/telegraf/pull/9032) `inputs.couchbase` Add ~200 more Couchbase metrics via Buckets endpoint +- [#8596](https://github.com/influxdata/telegraf/pull/8596) `inputs.sqlserver` input/sqlserver: Add service and save connection pools +- [#9042](https://github.com/influxdata/telegraf/pull/9042) `processors.starlark` Add math module +- [#6952](https://github.com/influxdata/telegraf/pull/6952) `inputs.x509_cert` Wildcard support for cert filenames +- [#9004](https://github.com/influxdata/telegraf/pull/9004) `processors.starlark` Add time module +- [#8891](https://github.com/influxdata/telegraf/pull/8891) `inputs.kinesis_consumer` Add content_encoding option with gzip and zlib support +- [#8996](https://github.com/influxdata/telegraf/pull/8996) `processors.starlark` Add an example showing how to obtain IOPS from diskio input +- [#8966](https://github.com/influxdata/telegraf/pull/8966) `inputs.http_listener_v2` Add support for snappy compression +- [#8661](https://github.com/influxdata/telegraf/pull/8661) `inputs.cisco_telemetry_mdt` Add support for events and class based query +- [#8861](https://github.com/influxdata/telegraf/pull/8861) `inputs.mongodb` Optionally collect top stats +- [#8979](https://github.com/influxdata/telegraf/pull/8979) `parsers.value` Add custom field name config option +- [#8544](https://github.com/influxdata/telegraf/pull/8544) `inputs.sqlserver` Add an optional health metric + +#### New Input Plugins + +- [OpenTelemetry](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry) - contributed by @jacobmarble +- [Intel Data Plane Development Kit (DPDK)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dpdk) - contributed by @p-zak +- [KNX](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/knx_listener) - contributed by @DocLambda + +#### New Output Plugins + +- [Websocket](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/websocket) - contributed by @FZambia +- [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/sql) - contributed by @illuusio +- [AWS Cloudwatch logs](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch_logs) - contributed by @i-prudnikov + +#### New Parser Plugins + +- [Prometheus Remote Write](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/prometheusremotewrite) - contributed by @helenosheaa + +#### New External Plugins + +- [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - contributed by @falon +- [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - contributed by @jcgonnard +- [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - contributed by @machinly +- [Big Blue Button](https://github.com/SLedunois/bigbluebutton-telegraf-plugin) - contributed by @SLedunois + ## v1.18.3 [2021-05-20] #### Release Notes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index cd38899db22c5..c7636e79d728d 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -90,8 +90,8 @@ ## If set to -1, no archives are removed. # logfile_rotation_max_archives = 5 - ## Pick a timezone to use when logging or type 'local' for local time. Example: 'America/Chicago'. - ## See https://socketloop.com/tutorials/golang-display-list-of-timezones-with-gmt for timezone formatting options. + ## Pick a timezone to use when logging or type 'local' for local time. + ## Example: America/Chicago # log_with_timezone = "" ## Override default hostname, if empty use os.Hostname() @@ -99,7 +99,6 @@ ## If set to true, do no set the "host" tag in the telegraf agent. omit_hostname = false - ############################################################################### # OUTPUT PLUGINS # ############################################################################### @@ -347,20 +346,6 @@ # # endpoint_url = "https://monitoring.core.usgovcloudapi.net" -# [[outputs.bigquery]] -# ## GCP Project -# project = "erudite-bloom-151019" -# -# ## The BigQuery dataset -# dataset = "telegraf" -# -# ## Timeout for BigQuery operations. -# # timeout = "5s" -# -# ## Character to replace hyphens on Metric name -# # replace_hyphen_to = "_" - - # # Publish Telegraf metrics to a Google Cloud PubSub topic # [[outputs.cloud_pubsub]] # ## Required. Name of Google Cloud Platform (GCP) Project that owns @@ -453,6 +438,63 @@ # # high_resolution_metrics = false +# # Configuration for AWS CloudWatchLogs output. +# [[outputs.cloudwatch_logs]] +# ## The region is the Amazon region that you wish to connect to. +# ## Examples include but are not limited to: +# ## - us-west-1 +# ## - us-west-2 +# ## - us-east-1 +# ## - ap-southeast-1 +# ## - ap-southeast-2 +# ## ... +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! +# ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place +# log_group = "my-group-name" +# +# ## Log stream in log group +# ## Either log group name or reference to metric attribute, from which it can be parsed: +# ## tag: or field:. If log stream is not exist, it will be created. +# ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) +# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) +# log_stream = "tag:location" +# +# ## Source of log data - metric name +# ## specify the name of the metric, from which the log data should be retrieved. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_metric_name = "docker_log" +# log_data_metric_name = "docker_log" +# +# ## Specify from which metric attribute the log data should be retrieved: +# ## tag: or field:. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_source = "field:message" +# log_data_source = "field:message" + + # # Configuration for CrateDB to send metrics to. # [[outputs.cratedb]] # # A github.com/jackc/pgx/v4 connection string. @@ -476,6 +518,9 @@ # # ## Write URL override; useful for debugging. # # url = "https://app.datadoghq.com/api/v1/series" +# +# ## Set http_proxy (telegraf uses the system wide proxy settings if it isn't set) +# # http_proxy_url = "http://localhost:8888" # # Send metrics to nowhere at all @@ -650,6 +695,11 @@ # ## Enable Graphite tags support # # graphite_tag_support = false # +# ## Define how metric names and tags are sanitized; options are "strict", or "compatible" +# ## strict - Default method, and backwards compatible with previous versionf of Telegraf +# ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec +# # graphite_tag_sanitize_mode = "strict" +# # ## Character for separating metric name and field for Graphite tags # # graphite_separator = "." # @@ -1496,6 +1546,46 @@ # # data_format = "influx" +# # Send metrics to SQL Database +# [[outputs.sql]] +# ## Database driver +# ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), +# ## sqlite (SQLite3), snowflake (snowflake.com) +# # driver = "" +# +# ## Data source name +# ## The format of the data source name is different for each database driver. +# ## See the plugin readme for details. +# # data_source_name = "" +# +# ## Timestamp column name +# # timestamp_column = "timestamp" +# +# ## Table creation template +# ## Available template variables: +# ## {TABLE} - table name as a quoted identifier +# ## {TABLELITERAL} - table name as a quoted string literal +# ## {COLUMNS} - column definitions (list of quoted identifiers and types) +# # table_template = "CREATE TABLE {TABLE}({COLUMNS})" +# +# ## Table existence check template +# ## Available template variables: +# ## {TABLE} - tablename as a quoted identifier +# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" +# +# ## Initialization SQL +# # init_sql = "" +# +# ## Metric type to SQL type conversion +# #[outputs.sql.convert] +# # integer = "INT" +# # real = "DOUBLE" +# # text = "TEXT" +# # timestamp = "TIMESTAMP" +# # defaultvalue = "TEXT" +# # unsigned = "UNSIGNED" + + # # Configuration for Google Cloud Stackdriver to send metrics to # [[outputs.stackdriver]] # ## GCP Project @@ -1845,6 +1935,37 @@ # # red = 0.0 +# # Generic WebSocket output writer. +# [[outputs.websocket]] +# ## URL is the address to send metrics to. Make sure ws or wss scheme is used. +# url = "ws://127.0.0.1:8080/telegraf" +# +# ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). +# # connect_timeout = "30s" +# # write_timeout = "30s" +# # read_timeout = "30s" +# +# ## Optionally turn on using text data frames (binary by default). +# # use_text_frames = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" +# +# ## Additional HTTP Upgrade headers +# # [outputs.websocket.headers] +# # Authorization = "Bearer " + + # # Send aggregated metrics to Yandex.Cloud Monitoring # [[outputs.yandex_cloud_monitoring]] # ## Timeout for HTTP writes. @@ -2367,6 +2488,12 @@ # ## Decode a base64 encoded utf-8 string # # [[processors.strings.base64decode]] # # field = "message" +# +# ## Sanitize a string to ensure it is a valid utf-8 string +# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty +# # [[processors.strings.valid_utf8]] +# # field = "message" +# # replacement = "" # # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit. @@ -2745,7 +2872,7 @@ # # disable_query_namespaces = true # default false # # namespaces = ["namespace1", "namespace2"] # -# # Enable set level telmetry +# # Enable set level telemetry # # query_sets = true # default: false # # Add namespace set combinations to limit sets executed on # # Leave blank to do all sets @@ -2758,6 +2885,8 @@ # # by default, aerospike produces a 100 bucket histogram # # this is not great for most graphing tools, this will allow # # the ability to squash this to a smaller number of buckets +# # To have a balanced histogram, the number of buckets chosen +# # should divide evenly into 100. # # num_histogram_buckets = 100 # default: 10 @@ -2978,7 +3107,14 @@ # ## suffix used to identify socket files # socket_suffix = "asok" # -# ## Ceph user to authenticate as +# ## Ceph user to authenticate as, ceph will search for the corresponding keyring +# ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the +# ## client section of ceph.conf for example: +# ## +# ## [client.telegraf] +# ## keyring = /etc/ceph/client.telegraf.keyring +# ## +# ## Consult the ceph documentation for more detail on keyring generation. # ceph_user = "client.admin" # # ## Ceph configuration to use to locate the cluster @@ -2987,7 +3123,8 @@ # ## Whether to gather statistics via the admin socket # gather_admin_socket_stats = true # -# ## Whether to gather statistics via ceph commands +# ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config +# ## to be specified # gather_cluster_stats = false @@ -3099,6 +3236,7 @@ # # # # ## Dimension filters for Metric. All dimensions defined for the metric names # # ## must be specified in order to retrieve the metric statistics. +# # ## 'value' has wildcard / 'glob' matching support such as 'p-*'. # # [[inputs.cloudwatch.metrics.dimensions]] # # name = "LoadBalancerName" # # value = "p-example" @@ -3158,7 +3296,7 @@ # # tag_delimiter = ":" -# # Read metrics from one or many couchbase clusters +# # Read per-node and per-bucket metrics from Couchbase # [[inputs.couchbase]] # ## specify servers via a url matching: # ## [protocol://][:password]@address[:port] @@ -3170,6 +3308,9 @@ # ## If no protocol is specified, HTTP is used. # ## If no port is specified, 8091 is used. # servers = ["http://localhost:8091"] +# +# ## Filter bucket fields to include only here. +# # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] # # Read CouchDB Stats from one or more servers @@ -3364,6 +3505,40 @@ # filters = [""] +# # Reads metrics from DPDK applications using v2 telemetry interface. +# [[inputs.dpdk]] +# ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK telemetry interface. +# # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" +# +# ## Duration that defines how long the connected socket client will wait for a response before terminating connection. +# ## This includes both writing to and reading from socket. Since it's local socket access +# ## to a fast packet processing application, the timeout should be sufficient for most users. +# ## Setting the value to 0 disables the timeout (not recommended) +# # socket_access_timeout = "200ms" +# +# ## Enables telemetry data collection for selected device types. +# ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status). +# ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats). +# # device_types = ["ethdev"] +# +# ## List of custom, application-specific telemetry commands to query +# ## The list of available commands depend on the application deployed. Applications can register their own commands +# ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands +# ## For e.g. L3 Forwarding with Power Management Sample Application this could be: +# ## additional_commands = ["/l3fwd-power/stats"] +# # additional_commands = [] +# +# ## Allows turning off collecting data for individual "ethdev" commands. +# ## Remove "/ethdev/link_status" from list to start getting link status metrics. +# [inputs.dpdk.ethdev] +# exclude_commands = ["/ethdev/link_status"] +# +# ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify +# ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host. +# ## [inputs.dpdk.tags] +# ## dpdk_instance = "my-fwd-app" + + # # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints. # [[inputs.ecs]] # ## ECS metadata url. @@ -3750,6 +3925,12 @@ # ## HTTP Proxy support # # http_proxy_url = "" # +# ## OAuth2 Client Credentials Grant +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -4620,6 +4801,10 @@ # ## When true, collect per collection stats # # gather_col_stats = false # +# ## When true, collect usage statistics for each collection +# ## (insert, update, queries, remove, getmore, commands etc...). +# # gather_top_stat = false +# # ## List of db where collections stats are collected # ## If empty, all db are concerned # # col_stats_dbs = ["local"] @@ -4723,6 +4908,12 @@ # ## gather metrics from SHOW SLAVE STATUS command output # # gather_slave_status = false # +# ## gather metrics from all channels from SHOW SLAVE STATUS command output +# # gather_all_slave_channels = false +# +# ## use MariaDB dialect for all channels SHOW SLAVE STATUS +# # mariadb_dialect = false +# # ## gather metrics from SHOW BINARY LOGS command output # # gather_binary_logs = false # @@ -5304,6 +5495,10 @@ # # ## Use only IPv6 addresses when resolving a hostname. # # ipv6 = false +# +# ## Number of data bytes to be sent. Corresponds to the "-s" +# ## option of the ping command. This only works with the native method. +# # size = 56 # # Measure postfix queue statistics @@ -5817,74 +6012,6 @@ # # password = "pa$$word" -# # Read metrics from Microsoft SQL Server -# [[inputs.sqlserver]] -# ## Specify instances to monitor with a list of connection strings. -# ## All connection parameters are optional. -# ## By default, the host is localhost, listening on default port, TCP 1433. -# ## for Windows, the user is the currently running AD user (SSO). -# ## See https://github.com/denisenkom/go-mssqldb for detailed connection -# ## parameters, in particular, tls connections can be created like so: -# ## "encrypt=true;certificate=;hostNameInCertificate=" -# servers = [ -# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", -# ] -# -# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 -# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -# ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" -# -# ## Queries enabled by default for database_type = "AzureSQLDB" are - -# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, -# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers -# -# # database_type = "AzureSQLDB" -# -# ## A list of queries to include. If not specified, all the above listed queries are used. -# # include_query = [] -# -# ## A list of queries to explicitly ignore. -# # exclude_query = [] -# -# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - -# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, -# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers -# -# # database_type = "AzureSQLManagedInstance" -# -# # include_query = [] -# -# # exclude_query = [] -# -# ## Queries enabled by default for database_type = "SQLServer" are - -# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, -# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu -# -# database_type = "SQLServer" -# -# include_query = [] -# -# ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default -# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] -# -# ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use -# ## the new mechanism of identifying the database_type there by use it's corresponding queries -# -# ## Optional parameter, setting this to 2 will use a new version -# ## of the collection queries that break compatibility with the original -# ## dashboards. -# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB -# # query_version = 2 -# -# ## If you are using AzureDB, setting this to true will gather resource utilization metrics -# # azuredb = false - -# ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health". -# ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers". -# ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues. -# ## This setting/metric is optional and is disabled by default. -# # health_metric = false - # # Gather timeseries from Google Cloud Platform v3 monitoring API # [[inputs.stackdriver]] # ## GCP Project @@ -6183,7 +6310,9 @@ # # Reads metrics from a SSL certificate # [[inputs.x509_cert]] # ## List certificate sources -# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] +# ## Prefix your entry with 'file://' if you intend to use relative paths +# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443", +# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] # # ## Timeout for SSL connection # # timeout = "5s" @@ -6242,30 +6371,130 @@ ############################################################################### -# # Intel Resource Director Technology plugin -# [[inputs.IntelRDT]] -# ## Optionally set sampling interval to Nx100ms. -# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. -# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. -# # sampling_interval = "10" -# -# ## Optionally specify the path to pqos executable. -# ## If not provided, auto discovery will be performed. -# # pqos_path = "/usr/local/bin/pqos" +# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +# [[inputs.KNXListener]] +# ## Type of KNX-IP interface. +# ## Can be either "tunnel" or "router". +# # service_type = "tunnel" # -# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. -# ## If not provided, default value is false. -# # shortened_metrics = false -# -# ## Specify the list of groups of CPU core(s) to be provided as pqos input. -# ## Mandatory if processes aren't set and forbidden if processes are specified. -# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] -# # cores = ["0-3"] -# -# ## Specify the list of processes for which Metrics will be collected. -# ## Mandatory if cores aren't set and forbidden if cores are specified. -# ## e.g. ["qemu", "pmd"] -# # processes = ["process"] +# ## Address of the KNX-IP interface. +# service_address = "localhost:3671" +# +# ## Measurement definition(s) +# # [[inputs.KNXListener.measurement]] +# # ## Name of the measurement +# # name = "temperature" +# # ## Datapoint-Type (DPT) of the KNX messages +# # dpt = "9.001" +# # ## List of Group-Addresses (GAs) assigned to the measurement +# # addresses = ["5/5/1"] +# +# # [[inputs.KNXListener.measurement]] +# # name = "illumination" +# # dpt = "9.004" +# # addresses = ["5/5/3"] + + +# # Pull Metric Statistics from Aliyun CMS +# [[inputs.aliyuncms]] +# ## Aliyun Credentials +# ## Credentials are loaded in the following order +# ## 1) Ram RoleArn credential +# ## 2) AccessKey STS token credential +# ## 3) AccessKey credential +# ## 4) Ecs Ram Role credential +# ## 5) RSA keypair credential +# ## 6) Environment variables credential +# ## 7) Instance metadata credential +# +# # access_key_id = "" +# # access_key_secret = "" +# # access_key_sts_token = "" +# # role_arn = "" +# # role_session_name = "" +# # private_key = "" +# # public_key_id = "" +# # role_name = "" +# +# ## Specify the ali cloud region list to be queried for metrics and objects discovery +# ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here +# ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm +# ## Default supported regions are: +# ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, +# ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, +# ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 +# ## +# ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich +# ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then +# ## it will be reported on the start - for example for 'acs_cdn' project: +# ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) +# ## Currently, discovery supported for the following projects: +# ## - acs_ecs_dashboard +# ## - acs_rds_dashboard +# ## - acs_slb_dashboard +# ## - acs_vpc_eip +# regions = ["cn-hongkong"] +# +# # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all +# # metrics are made available to the 1 minute period. Some are collected at +# # 3 minute, 5 minute, or larger intervals. +# # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv +# # Note that if a period is configured that is smaller than the minimum for a +# # particular metric, that metric will not be returned by the Aliyun OpenAPI +# # and will not be collected by Telegraf. +# # +# ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) +# period = "5m" +# +# ## Collection Delay (required - must account for metrics availability via AliyunCMS API) +# delay = "1m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Metric Statistic Project (required) +# project = "acs_slb_dashboard" +# +# ## Maximum requests per second, default value is 200 +# ratelimit = 200 +# +# ## How often the discovery API call executed (default 1m) +# #discovery_interval = "1m" +# +# ## Metrics to Pull (Required) +# [[inputs.aliyuncms.metrics]] +# ## Metrics names to be requested, +# ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# names = ["InstanceActiveConnection", "InstanceNewConnection"] +# +# ## Dimension filters for Metric (these are optional). +# ## This allows to get additional metric dimension. If dimension is not specified it can be returned or +# ## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# ## +# ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled) +# ## Values specified here would be added into the list of discovered objects. +# ## You can specify either single dimension: +# #dimensions = '{"instanceId": "p-example"}' +# +# ## Or you can specify several dimensions at once: +# #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' +# +# ## Enrichment tags, can be added from discovery (if supported) +# ## Notation is : +# ## To figure out which fields are available, consult the Describe API per project. +# ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers¶ms={}&tab=MOCK&lang=GO +# #tag_query_path = [ +# # "address:Address", +# # "name:LoadBalancerName", +# # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" +# # ] +# ## The following tags added by default: regionId (if discovery enabled), userId, instanceId. +# +# ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery +# ## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage +# ## of discovery scope vs monitoring scope +# #allow_dps_without_discovery = false # # AMQP consumer plugin @@ -6393,12 +6622,16 @@ # ## Define aliases to map telemetry encoding paths to simple measurement names # [inputs.cisco_telemetry_mdt.aliases] # ifstats = "ietf-interfaces:interfaces-state/interface/statistics" +# ##Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. +# [inputs.cisco_telemetry_mdt.dmes] +# ModTs = "ignore" +# CreateTs = "ignore" # # Read metrics from one or many ClickHouse servers # [[inputs.clickhouse]] # ## Username for authorization on ClickHouse server -# ## example: username = "default"" +# ## example: username = "default" # username = "default" # # ## Password for authorization on ClickHouse server @@ -6674,8 +6907,6 @@ # ## This requires one of the following sets of environment variables to be set: # ## # ## 1) Expected Environment Variables: -# ## - "EVENTHUB_NAMESPACE" -# ## - "EVENTHUB_NAME" # ## - "EVENTHUB_CONNECTION_STRING" # ## # ## 2) Expected Environment Variables: @@ -6684,8 +6915,17 @@ # ## - "EVENTHUB_KEY_NAME" # ## - "EVENTHUB_KEY_VALUE" # +# ## 3) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "AZURE_TENANT_ID" +# ## - "AZURE_CLIENT_ID" +# ## - "AZURE_CLIENT_SECRET" +# # ## Uncommenting the option below will create an Event Hub client based solely on the connection string. # ## This can either be the associated environment variable or hard coded directly. +# ## If this option is uncommented, environment variables will be ignored. +# ## Connection string should contain EventHubName (EntityPath) # # connection_string = "" # # ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister @@ -7227,6 +7467,15 @@ # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" # +# ## +# ## The content encoding of the data from kinesis +# ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip" +# ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws +# ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding +# ## is done automatically by the golang sdk, as data is read from kinesis) +# ## +# # content_encoding = "identity" +# # ## Optional # ## Configuration for a dynamodb checkpoint # [inputs.kinesis_consumer.checkpoint_dynamodb] @@ -7448,6 +7697,20 @@ # data_format = "influx" +# # Receive OpenTelemetry traces, metrics, and logs over gRPC +# [[inputs.opentelemetry]] +# ## Override the OpenTelemetry gRPC service address:port +# # service_address = "0.0.0.0:4317" +# +# ## Override the default request timeout +# # timeout = "5s" +# +# ## Select a schema for metrics: prometheus-v1 or prometheus-v2 +# ## For more information about the alternatives, read the Prometheus input +# ## plugin notes. +# # metrics_schema = "prometheus-v1" + + # # Read metrics from one or many pgbouncer servers # [[inputs.pgbouncer]] # ## specify address via a url matching: @@ -7588,7 +7851,7 @@ # # metric_version = 1 # # ## Url tag name (tag containing scrapped url. optional, default is "url") -# # url_tag = "scrapeUrl" +# # url_tag = "url" # # ## An array of Kubernetes services to scrape metrics from. # # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] @@ -7783,6 +8046,69 @@ # # content_encoding = "identity" +# # Read metrics from Microsoft SQL Server +# [[inputs.sqlserver]] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). +# ## See https://github.com/denisenkom/go-mssqldb for detailed connection +# ## parameters, in particular, tls connections can be created like so: +# ## "encrypt=true;certificate=;hostNameInCertificate=" +# servers = [ +# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# ] +# +# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 +# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. +# ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" +# +# ## Queries enabled by default for database_type = "AzureSQLDB" are - +# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers +# +# # database_type = "AzureSQLDB" +# +# ## A list of queries to include. If not specified, all the above listed queries are used. +# # include_query = [] +# +# ## A list of queries to explicitly ignore. +# # exclude_query = [] +# +# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers +# +# # database_type = "AzureSQLManagedInstance" +# +# # include_query = [] +# +# # exclude_query = [] +# +# ## Queries enabled by default for database_type = "SQLServer" are - +# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, +# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu +# +# database_type = "SQLServer" +# +# include_query = [] +# +# ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default +# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] +# +# ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use +# ## the new mechanism of identifying the database_type there by use it's corresponding queries +# +# ## Optional parameter, setting this to 2 will use a new version +# ## of the collection queries that break compatibility with the original +# ## dashboards. +# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB +# # query_version = 2 +# +# ## If you are using AzureDB, setting this to true will gather resource utilization metrics +# # azuredb = false + + # # Statsd UDP/TCP Server # [[inputs.statsd]] # ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp) @@ -8178,6 +8504,10 @@ # # ssl_key = "/path/to/keyfile" # ## Use SSL but skip chain & host verification # # insecure_skip_verify = false +# +# ## The Historical Interval value must match EXACTLY the interval in the daily +# # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals +# # historical_interval = "5m" # # A Webhooks Event collector From 32b963a4c6a52b024666509d4d8e96effcc35a4e Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Mon, 7 Jun 2021 11:41:39 -0700 Subject: [PATCH 453/761] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9580dc5976e60..0358541414037 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ #### New Input Plugins +- [Alibaba CloudMonitor Service (Aliyun)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aliyuncms) - contributed by @i-prudnikov - [OpenTelemetry](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry) - contributed by @jacobmarble - [Intel Data Plane Development Kit (DPDK)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dpdk) - contributed by @p-zak - [KNX](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/knx_listener) - contributed by @DocLambda From 7a987306e5147526f864d50792e7842d6e912f23 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 7 Jun 2021 19:19:22 -0500 Subject: [PATCH 454/761] Update Go to 1.16.5 (#9331) --- .circleci/config.yml | 46 ++++++++++++++++++++-------------------- Makefile | 10 ++++----- scripts/alpine.docker | 2 +- scripts/buster.docker | 2 +- scripts/ci-1.16.docker | 2 +- scripts/mac_installgo.sh | 2 +- 6 files changed, 32 insertions(+), 32 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 4296082e04c08..c8eab65732085 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,6 @@ version: 2.1 orbs: - win: circleci/windows@2.4.0 + win: circleci/windows@2.4.0 aws-cli: circleci/aws-cli@1.4.0 executors: @@ -13,7 +13,7 @@ executors: go-1_16: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.16.2' + - image: 'quay.io/influxdb/telegraf-ci:1.16.5' environment: GOFLAGS: -p=8 mac: @@ -30,7 +30,7 @@ commands: - run: ./scripts/check-file-changes.sh check-changed-files-or-halt-windows: steps: - - run: + - run: command: ./scripts/check-file-changes.sh shell: bash.exe test-go: @@ -65,11 +65,11 @@ commands: at: '/go' - when: condition: << parameters.release >> - steps: + steps: - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 make package' - when: condition: << parameters.nightly >> - steps: + steps: - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 NIGHTLY=1 make package' - run: 'make upload-nightly' - unless: @@ -150,7 +150,7 @@ jobs: steps: - checkout - check-changed-files-or-halt-windows - - run: choco upgrade golang --version=1.16.2 + - run: choco upgrade golang --version=1.16.5 - run: choco install make - run: git config --system core.longpaths true - run: make test-windows @@ -283,14 +283,14 @@ jobs: command: | echo "Go tests complete." share-artifacts: - executor: aws-cli/default + executor: aws-cli/default steps: - run: command: | PR=${CIRCLE_PULL_REQUEST##*/} printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" - curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" - + curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" + workflows: version: 2 check: @@ -331,47 +331,47 @@ workflows: filters: tags: only: /.*/ - - 'test-awaiter': + - 'test-awaiter': requires: - 'test-go-1_15' - 'test-go-1_15-386' - 'test-go-1_16' - 'test-go-1_16-386' - 'windows-package': - requires: + requires: - 'test-go-windows' - 'darwin-package': - requires: + requires: - 'test-go-mac' - 'i386-package': - requires: + requires: - 'test-awaiter' - 'ppc641e-package': - requires: + requires: - 'test-awaiter' - 's390x-package': - requires: + requires: - 'test-awaiter' - 'armel-package': - requires: + requires: - 'test-awaiter' - 'amd64-package': - requires: + requires: - 'test-awaiter' - 'arm64-package': - requires: + requires: - 'test-awaiter' - 'armhf-package': - requires: + requires: - 'test-awaiter' - 'static-package': requires: - 'test-awaiter' - 'mipsel-package': - requires: + requires: - 'test-awaiter' - 'mips-package': - requires: + requires: - 'test-awaiter' - 'share-artifacts': requires: @@ -412,7 +412,7 @@ workflows: only: /.*/ - 'package-sign-mac': requires: - - 'package-sign-windows' + - 'package-sign-windows' filters: tags: only: /.*/ @@ -448,4 +448,4 @@ workflows: filters: branches: only: - - master \ No newline at end of file + - master diff --git a/Makefile b/Makefile index 1537e0f05eceb..4f6ef13e8e4af 100644 --- a/Makefile +++ b/Makefile @@ -161,7 +161,7 @@ tidy: go mod verify go mod tidy @if ! git diff --quiet go.mod go.sum; then \ - echo "please run go mod tidy and check in changes"; \ + echo "please run go mod tidy and check in changes, you might have to use the same version of Go as the CI"; \ exit 1; \ fi @@ -201,8 +201,8 @@ ci-1.15: .PHONY: ci-1.16 ci-1.16: - docker build -t quay.io/influxdb/telegraf-ci:1.16.2 - < scripts/ci-1.16.docker - docker push quay.io/influxdb/telegraf-ci:1.16.2 + docker build -t quay.io/influxdb/telegraf-ci:1.16.5 - < scripts/ci-1.16.docker + docker push quay.io/influxdb/telegraf-ci:1.16.5 .PHONY: install install: $(buildbin) @@ -251,7 +251,7 @@ debs += telegraf_$(deb_version)_amd64.deb rpms += telegraf-$(rpm_version).x86_64.rpm endif -ifdef static +ifdef static tars += telegraf-$(tar_version)_static_linux_amd64.tar.gz endif @@ -266,7 +266,7 @@ tars += telegraf-$(tar_version)_linux_armhf.tar.gz tars += telegraf-$(tar_version)_freebsd_armv7.tar.gz debs += telegraf_$(deb_version)_armhf.deb rpms += telegraf-$(rpm_version).armv6hl.rpm -endif +endif ifdef s390x tars += telegraf-$(tar_version)_linux_s390x.tar.gz diff --git a/scripts/alpine.docker b/scripts/alpine.docker index 4bd3489bc4463..673498c6f598e 100644 --- a/scripts/alpine.docker +++ b/scripts/alpine.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.2 as builder +FROM golang:1.16.5 as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/buster.docker b/scripts/buster.docker index 65e96acb6efad..4276730b4bf1e 100644 --- a/scripts/buster.docker +++ b/scripts/buster.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.2-buster as builder +FROM golang:1.16.5-buster as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/ci-1.16.docker b/scripts/ci-1.16.docker index cc316dec00dcd..585abc137e060 100644 --- a/scripts/ci-1.16.docker +++ b/scripts/ci-1.16.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.2 +FROM golang:1.16.5 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/mac_installgo.sh b/scripts/mac_installgo.sh index 93d674daf2973..c332230a0aff7 100644 --- a/scripts/mac_installgo.sh +++ b/scripts/mac_installgo.sh @@ -1,6 +1,6 @@ #!/bin/sh -version="1.16.2" +version="1.16.5" # This path is cachable, while saving directly in /usr/local/ will cause issues restoring the cache path="/usr/local/Cellar" From d6ac4abfb89ad56eba25d18a3085d84a504d97d3 Mon Sep 17 00:00:00 2001 From: pierwill <19642016+pierwill@users.noreply.github.com> Date: Tue, 8 Jun 2021 08:24:21 -0700 Subject: [PATCH 455/761] fix: Verify checksum of Go download in mac script (#9335) Adds a SHA256 check, which will we exit the script upon failure (with `set -e`). Also edit comments. Co-authored-by: pierwill --- scripts/mac_installgo.sh | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/scripts/mac_installgo.sh b/scripts/mac_installgo.sh index c332230a0aff7..6d192377304ba 100644 --- a/scripts/mac_installgo.sh +++ b/scripts/mac_installgo.sh @@ -1,15 +1,22 @@ #!/bin/sh -version="1.16.5" -# This path is cachable, while saving directly in /usr/local/ will cause issues restoring the cache +set -eux + +GO_ARCH="darwin-amd64" +GO_VERSION="1.16.2" +GO_VERSION_SHA="c98cde81517c5daf427f3071412f39d5bc58f6120e90a0d94cc51480fa04dbc1" # from https://golang.org/dl + +# This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.) path="/usr/local/Cellar" -# Download Go directly from tar, the reason we aren't using brew: it is slow to update and we can't pull specific minor versions +# Download Go and verify Go tarball. (Note: we aren't using brew because +# it is slow to update and we can't pull specific minor versions.) setup_go () { echo "installing go" - curl -OL https://golang.org/dl/go${version}.darwin-amd64.tar.gz --output go${version}.darwin-amd64.tar.gz + curl -OL https://golang.org/dl/go${GO_VERSION}.${GO_ARCH}.tar.gz --output go${GO_VERSION}.${GO_ARCH}.tar.gz + echo "${GO_SHA} go${GO_VERSION}.${GO_ARCH}.tar.gz" | sha256sum --check sudo rm -rf ${path}/go - sudo tar -C $path -xzf go${version}.darwin-amd64.tar.gz + sudo tar -C $path -xzf go${GO_VERSION}.${GO_ARCH}.tar.gz ln -sf ${path}/go/bin/go /usr/local/bin/go ln -sf ${path}/go/bin/gofmt /usr/local/bin/gofmt } @@ -17,8 +24,8 @@ setup_go () { if command -v go &> /dev/null; then echo "Go is already installed" v=`go version | { read _ _ v _; echo ${v#go}; }` - echo "$v is installed, required version is $version" - if [ "$v" != $version ]; then + echo "$v is installed, required version is ${GO_VERSION}" + if [ "$v" != ${GO_VERSION} ]; then setup_go go version fi From 298670ae1868f64c9eb04aa7ccce80ddd8e15b3d Mon Sep 17 00:00:00 2001 From: Daniel Dyla Date: Tue, 8 Jun 2021 17:27:39 -0400 Subject: [PATCH 456/761] Use dynatrace-metric-utils (#9295) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 1 + go.sum | 20 +- plugins/outputs/dynatrace/README.md | 120 +++++++-- plugins/outputs/dynatrace/dynatrace.go | 267 +++++++++----------- plugins/outputs/dynatrace/dynatrace_test.go | 137 ++++++---- 6 files changed, 313 insertions(+), 233 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 5cb8b917be17f..9e5e9386c1198 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -63,6 +63,7 @@ following works: - github.com/docker/docker [Apache License 2.0](https://github.com/docker/docker/blob/master/LICENSE) - github.com/docker/go-connections [Apache License 2.0](https://github.com/docker/go-connections/blob/master/LICENSE) - github.com/docker/go-units [Apache License 2.0](https://github.com/docker/go-units/blob/master/LICENSE) +- github.com/dynatrace-oss/dynatrace-metric-utils-go [Apache License 2.0](https://github.com/dynatrace-oss/dynatrace-metric-utils-go/blob/master/LICENSE) - github.com/eapache/go-resiliency [MIT License](https://github.com/eapache/go-resiliency/blob/master/LICENSE) - github.com/eapache/go-xerial-snappy [MIT License](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE) - github.com/eapache/queue [MIT License](https://github.com/eapache/queue/blob/master/LICENSE) diff --git a/go.mod b/go.mod index d3f08990d6b14..dda464d48ad55 100644 --- a/go.mod +++ b/go.mod @@ -45,6 +45,7 @@ require ( github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 github.com/docker/docker v20.10.5+incompatible + github.com/dynatrace-oss/dynatrace-metric-utils-go v0.1.0 github.com/eclipse/paho.mqtt.golang v1.3.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.5.0 diff --git a/go.sum b/go.sum index 3d6d54883f580..288fa5ff30c59 100644 --- a/go.sum +++ b/go.sum @@ -239,7 +239,6 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= @@ -401,6 +400,8 @@ github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97h github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dynatrace-oss/dynatrace-metric-utils-go v0.1.0 h1:ldKn47mFgWCoiJRXA32psdEACPKffX9O1Msh1K8M+f0= +github.com/dynatrace-oss/dynatrace-metric-utils-go v0.1.0/go.mod h1:qw0E9EJ0PnSlhWawDNuqE0zhc1hqOBUCFIAj3dd9DNw= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -463,7 +464,6 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -620,7 +620,6 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -629,7 +628,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= @@ -1114,7 +1112,6 @@ github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= @@ -1265,9 +1262,6 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -1287,7 +1281,6 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1602,8 +1595,6 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1690,10 +1681,6 @@ golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190906203814-12febf440ab1/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1729,7 +1716,6 @@ golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1800,7 +1786,6 @@ google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1836,7 +1821,6 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= diff --git a/plugins/outputs/dynatrace/README.md b/plugins/outputs/dynatrace/README.md index e0c1e17635183..5f25c70026177 100644 --- a/plugins/outputs/dynatrace/README.md +++ b/plugins/outputs/dynatrace/README.md @@ -1,49 +1,135 @@ # Dynatrace Output Plugin -This plugin is sending telegraf metrics to [Dynatrace](https://www.dynatrace.com). It has two operational modes. +This plugin sends Telegraf metrics to [Dynatrace](https://www.dynatrace.com) via the [Dynatrace Metrics API V2](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/metric-v2/). It may be run alongside the Dynatrace OneAgent for automatic authentication or it may be run standalone on a host without a OneAgent by specifying a URL and API Token. +More information on the plugin can be found in the [Dynatrace documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/ingestion-methods/telegraf/). -Telegraf minimum version: Telegraf 1.16 -Plugin minimum tested version: 1.16 +## Requirements + +You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf; or a Dynatrace environment with version 1.202 or higher. Monotonic counters (e.g. `diskio.reads`, `system.uptime`) require Dynatrace 208 or later. + +- Telegraf minimum version: Telegraf 1.16 + +## Getting Started -## Running alongside Dynatrace OneAgent +Setting up Telegraf is explained in the [Telegraf Documentation](https://docs.influxdata.com/telegraf/latest/introduction/getting-started/). +The Dynatrace exporter may be enabled by adding an `[[outputs.dynatrace]]` section to your `telegraf.conf` config file. +All configurations are optional, but if a `url` other than the OneAgent metric ingestion endpoint is specified then an `api_token` is required. +To see all available options, see [Configuration](#configuration) below. -if you run the Telegraf agent on a host or VM that is monitored by the Dynatrace OneAgent then you only need to enable the plugin but need no further configuration. The Dynatrace telegraf output plugin will send all metrics to the OneAgent which will use its secure and load balanced connection to send the metrics to your Dynatrace SaaS or Managed environment. +### Running alongside Dynatrace OneAgent + +If you run the Telegraf agent on a host or VM that is monitored by the Dynatrace OneAgent then you only need to enable the plugin, but need no further configuration. The Dynatrace Telegraf output plugin will send all metrics to the OneAgent which will use its secure and load balanced connection to send the metrics to your Dynatrace SaaS or Managed environment. +Depending on your environment, you might have to enable metrics ingestion on the OneAgent first as described in the [Dynatrace documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/ingestion-methods/telegraf/). + +Note: The name and identifier of the host running Telegraf will be added as a dimension to every metric. If this is undesirable, then the output plugin may be used in standalone mode using the directions below. + +```toml +[[outputs.dynatrace]] + ## No options are required. By default, metrics will be exported via the OneAgent on the local host. +``` ## Running standalone If you run the Telegraf agent on a host or VM without a OneAgent you will need to configure the environment API endpoint to send the metrics to and an API token for security. -The endpoint for the Dynatrace Metrics API is +You will also need to configure an API token for secure access. Find out how to create a token in the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/basics/dynatrace-api-authentication/) or simply navigate to **Settings > Integration > Dynatrace API** in your Dynatrace environment and create a token with Dynatrace API and create a new token with +'Ingest metrics' (`metrics.ingest`) scope enabled. It is recommended to limit Token scope to only this permission. -* Managed https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest -* SaaS https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest +The endpoint for the Dynatrace Metrics API v2 is -You can learn more about how to use the Dynatrace API [here](https://www.dynatrace.com/support/help/dynatrace-api/) +* on Dynatrace Managed: `https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest` +* on Dynatrace SaaS: `https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest` + +```toml +[[outputs.dynatrace]] + ## If no OneAgent is running on the host, url and api_token need to be set -You will also need to configure an API token for secure access. Find out how to create a token [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/tokens/) or simply navigate to **Settings > Integration > Dynatrace API** in your Dynatrace environment and create a token with Dynatrace API and create a new token with -'Ingest metrics data points' access scope enabled. + ## Dynatrace Metrics Ingest v2 endpoint to receive metrics + url = "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" + + ## API token is required if a URL is specified and should be restricted to the 'Ingest metrics' scope + api_token = "your API token here" // hard-coded for illustration only, should be read from environment +``` + +You can learn more about how to use the Dynatrace API [here](https://www.dynatrace.com/support/help/dynatrace-api/). ## Configuration +### `url` + +*required*: `false` + +*default*: Local OneAgent endpoint + +Set your Dynatrace environment URL (e.g.: `https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest`) if you do not use a OneAgent or wish to export metrics directly to a Dynatrace metrics v2 endpoint. If a URL is set to anything other than the local OneAgent endpoint, then an API token is required. + ```toml [[outputs.dynatrace]] ## Leave empty or use the local ingest endpoint of your OneAgent monitored host (e.g.: http://127.0.0.1:14499/metrics/ingest). ## Set Dynatrace environment URL (e.g.: https://YOUR_DOMAIN/api/v2/metrics/ingest) if you do not use a OneAgent url = "" api_token = "" - ## Optional prefix for metric names (e.g.: "telegraf.") - prefix = "telegraf." + ## Optional prefix for metric names (e.g.: "telegraf") + prefix = "telegraf" ## Flag for skipping the tls certificate check, just for testing purposes, should be false by default insecure_skip_verify = false ## If you want to convert values represented as gauges to counters, add the metric names here additional_counters = [ ] +url = "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" +``` + +### `api_token` +*required*: `false` unless `url` is specified + +API token is required if a URL other than the OneAgent endpoint is specified and it should be restricted to the 'Ingest metrics' scope. + +```toml +api_token = "your API token here" ``` -## Requirements +### `prefix` + +*required*: `false` + +Optional prefix to be prepended to all metric names (will be separated with a `.`). + +```toml +prefix = "telegraf" +``` + +### `insecure_skip_verify` + +*required*: `false` + +Setting this option to true skips TLS verification for testing or when using self-signed certificates. + +```toml +insecure_skip_verify = false +``` -You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf; or a Dynatrace environment with version 1.202 or higher. Monotonic counters (e.g. diskio.reads, system.uptime) require release 208 or later. -You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf; or a Dynatrace environment with version 1.202 or higher +### `additional_counters` + +*required*: `false` + +If you want to convert values represented as gauges to counters, add the metric names here. + +```toml +additional_counters = [ ] +``` + +### `default_dimensions` + +*required*: `false` + +Default dimensions that will be added to every exported metric. + +```toml +default_dimensions = { + key = "value" +} +``` ## Limitations -Telegraf measurements which can't be converted to a float64 are skipped. + +Telegraf measurements which can't be converted to a number are skipped. diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 0b13f5886fd83..fd012d0e1c6f5 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -4,11 +4,7 @@ import ( "bytes" "fmt" "io/ioutil" - "math" "net/http" - "regexp" - "sort" - "strconv" "strings" "time" @@ -16,17 +12,10 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" -) - -const ( - oneAgentMetricsURL = "http://127.0.0.1:14499/metrics/ingest" - dtIngestAPILineLimit = 1000 -) -var ( - reNameAllowedCharList = regexp.MustCompile("[^A-Za-z0-9.-]+") - maxDimKeyLen = 100 - maxMetricKeyLen = 250 + dtMetric "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric" + "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/apiconstants" + "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" ) // Dynatrace Configuration for the Dynatrace output plugin @@ -37,12 +26,12 @@ type Dynatrace struct { Log telegraf.Logger `toml:"-"` Timeout config.Duration `toml:"timeout"` AddCounterMetrics []string `toml:"additional_counters"` - State map[string]string - SendCounter int tls.ClientConfig client *http.Client + + loggedMetrics map[string]bool // New empty set } const sampleConfig = ` @@ -61,8 +50,8 @@ const sampleConfig = ` ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. api_token = "" - ## Optional prefix for metric names (e.g.: "telegraf.") - prefix = "telegraf." + ## Optional prefix for metric names (e.g.: "telegraf") + prefix = "telegraf" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" @@ -101,163 +90,97 @@ func (d *Dynatrace) Description() string { return "Send telegraf metrics to a Dynatrace environment" } -// Normalizes a metric keys or metric dimension identifiers -// according to Dynatrace format. -func (d *Dynatrace) normalize(s string, max int) (string, error) { - s = reNameAllowedCharList.ReplaceAllString(s, "_") - - // Strip Digits and underscores if they are at the beginning of the string - normalizedString := strings.TrimLeft(s, "_0123456789") - - for strings.HasPrefix(normalizedString, "_") { - normalizedString = normalizedString[1:] - } - - if len(normalizedString) > max { - normalizedString = normalizedString[:max] - } - - for strings.HasSuffix(normalizedString, "_") { - normalizedString = normalizedString[:len(normalizedString)-1] - } - - normalizedString = strings.ReplaceAll(normalizedString, "..", "_") - - if len(normalizedString) == 0 { - return "", fmt.Errorf("error normalizing the string: %s", s) - } - return normalizedString, nil -} - -func (d *Dynatrace) escape(v string) string { - return strconv.Quote(v) -} - func (d *Dynatrace) Write(metrics []telegraf.Metric) error { - var buf bytes.Buffer - metricCounter := 1 - var tagb bytes.Buffer if len(metrics) == 0 { return nil } - for _, metric := range metrics { - // first write the tags into a buffer - tagb.Reset() - if len(metric.Tags()) > 0 { - keys := make([]string, 0, len(metric.Tags())) - for k := range metric.Tags() { - keys = append(keys, k) - } - // sort tag keys to expect the same order in ech run - sort.Strings(keys) + lines := []string{} - for _, k := range keys { - tagKey, err := d.normalize(k, maxDimKeyLen) - if err != nil { + for _, tm := range metrics { + dims := []dimensions.Dimension{} + for _, tag := range tm.TagList() { + // Ignore special tags for histogram and summary types. + switch tm.Type() { + case telegraf.Histogram: + if tag.Key == "le" || tag.Key == "gt" { continue } - if len(metric.Tags()[k]) > 0 { - fmt.Fprintf(&tagb, ",%s=%s", strings.ToLower(tagKey), d.escape(metric.Tags()[k])) + case telegraf.Summary: + if tag.Key == "quantile" { + continue } } + dims = append(dims, dimensions.NewDimension(tag.Key, tag.Value)) } - if len(metric.Fields()) > 0 { - for k, v := range metric.Fields() { - var value string - switch v := v.(type) { - case string: - continue - case float64: - if !math.IsNaN(v) && !math.IsInf(v, 0) { - value = fmt.Sprintf("%f", v) - } else { - continue - } - case uint64: - value = strconv.FormatUint(v, 10) - case int64: - value = strconv.FormatInt(v, 10) - case bool: - if v { - value = "1" - } else { - value = "0" - } - default: - d.Log.Debugf("Dynatrace type not supported! %s", v) - continue - } - // metric name - metricKey, err := d.normalize(k, maxMetricKeyLen) - if err != nil { - continue + metricType := tm.Type() + for _, field := range tm.FieldList() { + metricName := tm.Name() + "." + field.Key + for _, i := range d.AddCounterMetrics { + if metricName == i { + metricType = telegraf.Counter } + } - metricID, err := d.normalize(d.Prefix+metric.Name()+"."+metricKey, maxMetricKeyLen) - // write metric name combined with its field - if err != nil { - continue - } - // write metric id,tags and value + typeOpt := getTypeOption(metricType, field) - metricType := metric.Type() - for _, i := range d.AddCounterMetrics { - if metric.Name()+"."+metricKey == i { - metricType = telegraf.Counter - } + if typeOpt == nil { + // Unsupported type. Log only once per unsupported metric name + if !d.loggedMetrics[metricName] { + d.Log.Warnf("Unsupported type for %s", metricName) + d.loggedMetrics[metricName] = true } + continue + } - switch metricType { - case telegraf.Counter: - var delta float64 - - // Check if LastValue exists - if lastvalue, ok := d.State[metricID+tagb.String()]; ok { - // Convert Strings to Floats - floatLastValue, err := strconv.ParseFloat(lastvalue, 32) - if err != nil { - d.Log.Debugf("Could not parse last value: %s", lastvalue) - } - floatCurrentValue, err := strconv.ParseFloat(value, 32) - if err != nil { - d.Log.Debugf("Could not parse current value: %s", value) - } - if floatCurrentValue >= floatLastValue { - delta = floatCurrentValue - floatLastValue - fmt.Fprintf(&buf, "%s%s count,delta=%f\n", metricID, tagb.String(), delta) - } - } - d.State[metricID+tagb.String()] = value - - default: - fmt.Fprintf(&buf, "%s%s %v\n", metricID, tagb.String(), value) - } + name := tm.Name() + "." + field.Key + dm, err := dtMetric.NewMetric( + name, + dtMetric.WithPrefix(d.Prefix), + dtMetric.WithDimensions( + dimensions.MergeLists( + // dimensions.NewNormalizedDimensionList(e.opts.DefaultDimensions...), + dimensions.NewNormalizedDimensionList(dims...), + ), + ), + typeOpt, + ) + + if err != nil { + d.Log.Warn(fmt.Sprintf("failed to normalize metric: %s - %s", name, err.Error())) + continue + } - if metricCounter%dtIngestAPILineLimit == 0 { - err = d.send(buf.Bytes()) - if err != nil { - return err - } - buf.Reset() - } - metricCounter++ + line, err := dm.Serialize() + + if err != nil { + d.Log.Warn(fmt.Sprintf("failed to serialize metric: %s - %s", name, err.Error())) + continue } + + lines = append(lines, line) } } - d.SendCounter++ - // in typical interval of 10s, we will clean the counter state once in 24h which is 8640 iterations - if d.SendCounter%8640 == 0 { - d.State = make(map[string]string) + limit := apiconstants.GetPayloadLinesLimit() + for i := 0; i < len(lines); i += limit { + batch := lines[i:min(i+limit, len(lines))] + + output := strings.Join(batch, "\n") + if output != "" { + if err := d.send(output); err != nil { + return fmt.Errorf("error processing data:, %s", err.Error()) + } + } } - return d.send(buf.Bytes()) + + return nil } -func (d *Dynatrace) send(msg []byte) error { +func (d *Dynatrace) send(msg string) error { var err error - req, err := http.NewRequest("POST", d.URL, bytes.NewBuffer(msg)) + req, err := http.NewRequest("POST", d.URL, bytes.NewBufferString(msg)) if err != nil { d.Log.Errorf("Dynatrace error: %s", err.Error()) return fmt.Errorf("error while creating HTTP request:, %s", err.Error()) @@ -292,12 +215,11 @@ func (d *Dynatrace) send(msg []byte) error { } func (d *Dynatrace) Init() error { - d.State = make(map[string]string) if len(d.URL) == 0 { d.Log.Infof("Dynatrace URL is empty, defaulting to OneAgent metrics interface") - d.URL = oneAgentMetricsURL + d.URL = apiconstants.GetDefaultOneAgentEndpoint() } - if d.URL != oneAgentMetricsURL && len(d.APIToken) == 0 { + if d.URL != apiconstants.GetDefaultOneAgentEndpoint() && len(d.APIToken) == 0 { d.Log.Errorf("Dynatrace api_token is a required field for Dynatrace output") return fmt.Errorf("api_token is a required field for Dynatrace output") } @@ -320,8 +242,45 @@ func (d *Dynatrace) Init() error { func init() { outputs.Add("dynatrace", func() telegraf.Output { return &Dynatrace{ - Timeout: config.Duration(time.Second * 5), - SendCounter: 0, + Timeout: config.Duration(time.Second * 5), } }) } + +func getTypeOption(metricType telegraf.ValueType, field *telegraf.Field) dtMetric.MetricOption { + if metricType == telegraf.Counter { + switch v := field.Value.(type) { + case float64: + return dtMetric.WithFloatCounterValueTotal(v) + case uint64: + return dtMetric.WithIntCounterValueTotal(int64(v)) + case int64: + return dtMetric.WithIntCounterValueTotal(v) + default: + return nil + } + } + + switch v := field.Value.(type) { + case float64: + return dtMetric.WithFloatGaugeValue(v) + case uint64: + return dtMetric.WithIntGaugeValue(int64(v)) + case int64: + return dtMetric.WithIntGaugeValue(32) + case bool: + if v { + return dtMetric.WithIntGaugeValue(1) + } + return dtMetric.WithIntGaugeValue(0) + } + + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index a930a542d3692..ae0e3390fa557 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -5,9 +5,11 @@ import ( "io/ioutil" "net/http" "net/http/httptest" + "regexp" "testing" "time" + "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/apiconstants" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" @@ -18,7 +20,8 @@ import ( func TestNilMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + err := json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -42,7 +45,8 @@ func TestNilMetrics(t *testing.T) { func TestEmptyMetricsSlice(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + err := json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -65,7 +69,8 @@ func TestEmptyMetricsSlice(t *testing.T) { func TestMockURL(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + err := json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -88,9 +93,10 @@ func TestMissingURL(t *testing.T) { d.Log = testutil.Logger{} err := d.Init() - require.Equal(t, oneAgentMetricsURL, d.URL) + require.NoError(t, err) + require.Equal(t, apiconstants.GetDefaultOneAgentEndpoint(), d.URL) err = d.Connect() - require.Equal(t, oneAgentMetricsURL, d.URL) + require.Equal(t, apiconstants.GetDefaultOneAgentEndpoint(), d.URL) require.NoError(t, err) } @@ -99,9 +105,10 @@ func TestMissingAPITokenMissingURL(t *testing.T) { d.Log = testutil.Logger{} err := d.Init() - require.Equal(t, oneAgentMetricsURL, d.URL) + require.NoError(t, err) + require.Equal(t, apiconstants.GetDefaultOneAgentEndpoint(), d.URL) err = d.Connect() - require.Equal(t, oneAgentMetricsURL, d.URL) + require.Equal(t, apiconstants.GetDefaultOneAgentEndpoint(), d.URL) require.NoError(t, err) } @@ -118,16 +125,15 @@ func TestSendMetric(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - require.NoError(t, err) - } + require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield,host=\"192.168.0.1\",nix=\"nix\" 3.140000\nmymeasurement.value,host=\"192.168.0.1\" 3.140000\n" + expected := "mymeasurement.myfield,host=192.168.0.1 gauge,3.14\nmymeasurement.value,host=192.168.0.2 count,3.14" if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString) + t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expected, bodyString) } w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + err = json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -145,16 +151,17 @@ func TestSendMetric(t *testing.T) { m1 := metric.New( "mymeasurement", - map[string]string{"host": "192.168.0.1", "nix": "nix"}, + map[string]string{"host": "192.168.0.1"}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) m2 := metric.New( "mymeasurement", - map[string]string{"host": "192.168.0.1"}, + map[string]string{"host": "192.168.0.2"}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + telegraf.Counter, ) metrics := []telegraf.Metric{m1, m2} @@ -167,16 +174,16 @@ func TestSendSingleMetricWithUnorderedTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - require.NoError(t, err) - } + require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield,a=\"test\",b=\"test\",c=\"test\" 3.140000\n" - if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString) - } + require.Regexp(t, regexp.MustCompile(`^mymeasurement\.myfield`), bodyString) + require.Regexp(t, regexp.MustCompile(`a=test`), bodyString) + require.Regexp(t, regexp.MustCompile(`b=test`), bodyString) + require.Regexp(t, regexp.MustCompile(`c=test`), bodyString) + require.Regexp(t, regexp.MustCompile(`gauge,3.14$`), bodyString) w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -210,15 +217,14 @@ func TestSendMetricWithoutTags(t *testing.T) { w.WriteHeader(http.StatusOK) // check the encoded result bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - require.NoError(t, err) - } + require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield 3.140000\n" + expected := "mymeasurement.myfield gauge,3.14" if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString) + t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expected, bodyString) } - json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -252,15 +258,19 @@ func TestSendMetricWithUpperCaseTagKeys(t *testing.T) { w.WriteHeader(http.StatusOK) // check the encoded result bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - require.NoError(t, err) - } + require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield,aaa=\"test\",b_b=\"test\",ccc=\"test\" 3.140000\n" - if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString) - } - json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + + // expected := "mymeasurement.myfield,b_b=test,ccc=test,aaa=test gauge,3.14" + // use regex because dimension order isn't guaranteed + require.Regexp(t, regexp.MustCompile(`^mymeasurement\.myfield`), bodyString) + require.Regexp(t, regexp.MustCompile(`aaa=test`), bodyString) + require.Regexp(t, regexp.MustCompile(`b_b=test`), bodyString) + require.Regexp(t, regexp.MustCompile(`ccc=test`), bodyString) + require.Regexp(t, regexp.MustCompile(`gauge,3.14$`), bodyString) + + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -294,15 +304,54 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { w.WriteHeader(http.StatusOK) // check the encoded result bodyBytes, err := ioutil.ReadAll(r.Body) - if err != nil { - require.NoError(t, err) - } + require.NoError(t, err) + bodyString := string(bodyBytes) + // use regex because field order isn't guaranteed + require.Contains(t, bodyString, "mymeasurement.yes gauge,1") + require.Contains(t, bodyString, "mymeasurement.no gauge,0") + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) + })) + defer ts.Close() + + d := &Dynatrace{} + + d.URL = ts.URL + d.APIToken = "123" + d.Log = testutil.Logger{} + err := d.Init() + require.NoError(t, err) + err = d.Connect() + require.NoError(t, err) + + // Init metrics + + m1 := metric.New( + "mymeasurement", + map[string]string{}, + map[string]interface{}{"yes": true, "no": false}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1} + + err = d.Write(metrics) + require.NoError(t, err) +} + +func TestSendCounterMetricWithoutTags(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + // check the encoded result + bodyBytes, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield 1\n" + expected := "mymeasurement.value gauge,32" if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %s but got: %s", expected, bodyString) + t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expected, bodyString) } - json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) })) defer ts.Close() @@ -321,7 +370,7 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { m1 := metric.New( "mymeasurement", map[string]string{}, - map[string]interface{}{"myfield": bool(true)}, + map[string]interface{}{"value": 32}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) From 62715d158bfc0bb1d135b746df2b565ab350ccc4 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 10 Jun 2021 11:12:14 -0500 Subject: [PATCH 457/761] Fix Mac script to install go (#9345) Co-authored-by: reimda --- scripts/check-file-changes.sh | 8 ++++++-- scripts/mac_installgo.sh | 8 ++++---- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/scripts/check-file-changes.sh b/scripts/check-file-changes.sh index 4897f2d3fd73f..fa141afc4a23e 100755 --- a/scripts/check-file-changes.sh +++ b/scripts/check-file-changes.sh @@ -1,7 +1,11 @@ #!/bin/bash +# To prevent the tests/builds to run for only a doc change, this script checks what files have changed in a pull request. BRANCH="$(git rev-parse --abbrev-ref HEAD)" echo $BRANCH -if [[ "$BRANCH" != "master" ]]; then - git diff master --name-only --no-color | egrep -e "^(\.circleci\/.*)|(.*\.(go|mod|sum))|Makefile$" || circleci step halt; +if [[ "$BRANCH" != "master" ]] && [[ "$BRANCH" != release* ]]; then # This should never skip for master and release branches + # Ask git for all the differences between this branch and master + # Then use grep to look for changes in the .circleci/ directory, anything named *.go or *.mod or *.sum or *.sh or Makefile + # If no match is found, then circleci step halt will stop the CI job but mark it successful + git diff master --name-only --no-color | egrep -e "^(\.circleci\/.*)$|^(.*\.(go|mod|sum|sh))$|^Makefile$" || circleci step halt; fi diff --git a/scripts/mac_installgo.sh b/scripts/mac_installgo.sh index 6d192377304ba..285db8b315fc2 100644 --- a/scripts/mac_installgo.sh +++ b/scripts/mac_installgo.sh @@ -3,8 +3,8 @@ set -eux GO_ARCH="darwin-amd64" -GO_VERSION="1.16.2" -GO_VERSION_SHA="c98cde81517c5daf427f3071412f39d5bc58f6120e90a0d94cc51480fa04dbc1" # from https://golang.org/dl +GO_VERSION="1.16.5" +GO_VERSION_SHA="be761716d5bfc958a5367440f68ba6563509da2f539ad1e1864bd42fe553f277" # from https://golang.org/dl # This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.) path="/usr/local/Cellar" @@ -13,8 +13,8 @@ path="/usr/local/Cellar" # it is slow to update and we can't pull specific minor versions.) setup_go () { echo "installing go" - curl -OL https://golang.org/dl/go${GO_VERSION}.${GO_ARCH}.tar.gz --output go${GO_VERSION}.${GO_ARCH}.tar.gz - echo "${GO_SHA} go${GO_VERSION}.${GO_ARCH}.tar.gz" | sha256sum --check + curl -L https://golang.org/dl/go${GO_VERSION}.${GO_ARCH}.tar.gz --output go${GO_VERSION}.${GO_ARCH}.tar.gz + echo "${GO_VERSION_SHA} go${GO_VERSION}.${GO_ARCH}.tar.gz" | shasum -a 256 --check sudo rm -rf ${path}/go sudo tar -C $path -xzf go${GO_VERSION}.${GO_ARCH}.tar.gz ln -sf ${path}/go/bin/go /usr/local/bin/go From 885252d388e4c99deaaf93bec06c0fc8f2e2e98f Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 10 Jun 2021 14:22:18 -0500 Subject: [PATCH 458/761] New JSON Parser (#9246) --- .circleci/config.yml | 42 +- config/config.go | 65 +- go.mod | 12 +- go.sum | 97 ++- plugins/parsers/json_v2/README.md | 187 ++++++ plugins/parsers/json_v2/parser.go | 587 ++++++++++++++++++ plugins/parsers/json_v2/parser_test.go | 130 ++++ .../testdata/fields_and_tags/expected.out | 2 + .../testdata/fields_and_tags/input.json | 46 ++ .../testdata/fields_and_tags/telegraf.conf | 14 + .../fields_and_tags_complex/expected.out | 5 + .../fields_and_tags_complex/input.json | 87 +++ .../fields_and_tags_complex/telegraf.conf | 10 + .../measurement_name_int/expected.out | 1 + .../testdata/measurement_name_int/input.json | 19 + .../measurement_name_int/telegraf.conf | 9 + .../multiple_arrays_in_object/expected.out | 6 + .../multiple_arrays_in_object/input.json | 24 + .../multiple_arrays_in_object/telegraf.conf | 11 + .../testdata/multiple_timestamps/expected.out | 2 + .../testdata/multiple_timestamps/input.json | 12 + .../multiple_timestamps/telegraf.conf | 10 + .../nested_and_nonnested_tags/expected.out | 12 + .../nested_and_nonnested_tags/input.json | 174 ++++++ .../nested_and_nonnested_tags/telegraf.conf | 18 + .../nested_array_of_objects/expected.out | 2 + .../nested_array_of_objects/input.json | 36 ++ .../nested_array_of_objects/telegraf.conf | 15 + .../json_v2/testdata/nested_tags/expected.out | 2 + .../json_v2/testdata/nested_tags/input.json | 16 + .../testdata/nested_tags/telegraf.conf | 12 + .../testdata/nested_tags_complex/expected.out | 3 + .../testdata/nested_tags_complex/input.json | 35 ++ .../nested_tags_complex/telegraf.conf | 14 + .../json_v2/testdata/object/expected.out | 5 + .../json_v2/testdata/object/input.json | 87 +++ .../json_v2/testdata/object/telegraf.conf | 12 + .../json_v2/testdata/timestamp/expected.out | 4 + .../json_v2/testdata/timestamp/input.json | 25 + .../json_v2/testdata/timestamp/telegraf.conf | 11 + .../json_v2/testdata/types/expected.out | 4 + .../parsers/json_v2/testdata/types/input.json | 22 + .../json_v2/testdata/types/telegraf.conf | 105 ++++ plugins/parsers/registry.go | 32 + 44 files changed, 1981 insertions(+), 43 deletions(-) create mode 100644 plugins/parsers/json_v2/README.md create mode 100644 plugins/parsers/json_v2/parser.go create mode 100644 plugins/parsers/json_v2/parser_test.go create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags/expected.out create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags/input.json create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags_complex/expected.out create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags_complex/input.json create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags_complex/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/measurement_name_int/expected.out create mode 100644 plugins/parsers/json_v2/testdata/measurement_name_int/input.json create mode 100644 plugins/parsers/json_v2/testdata/measurement_name_int/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/multiple_arrays_in_object/input.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_arrays_in_object/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/multiple_timestamps/expected.out create mode 100644 plugins/parsers/json_v2/testdata/multiple_timestamps/input.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_timestamps/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/expected.out create mode 100644 plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/input.json create mode 100644 plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/nested_array_of_objects/expected.out create mode 100644 plugins/parsers/json_v2/testdata/nested_array_of_objects/input.json create mode 100644 plugins/parsers/json_v2/testdata/nested_array_of_objects/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/nested_tags/expected.out create mode 100644 plugins/parsers/json_v2/testdata/nested_tags/input.json create mode 100644 plugins/parsers/json_v2/testdata/nested_tags/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/nested_tags_complex/expected.out create mode 100644 plugins/parsers/json_v2/testdata/nested_tags_complex/input.json create mode 100644 plugins/parsers/json_v2/testdata/nested_tags_complex/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/object/input.json create mode 100644 plugins/parsers/json_v2/testdata/object/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/timestamp/expected.out create mode 100644 plugins/parsers/json_v2/testdata/timestamp/input.json create mode 100644 plugins/parsers/json_v2/testdata/timestamp/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/types/expected.out create mode 100644 plugins/parsers/json_v2/testdata/types/input.json create mode 100644 plugins/parsers/json_v2/testdata/types/telegraf.conf diff --git a/.circleci/config.yml b/.circleci/config.yml index c8eab65732085..03fe58b17a739 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,6 @@ version: 2.1 orbs: - win: circleci/windows@2.4.0 + win: circleci/windows@2.4.0 aws-cli: circleci/aws-cli@1.4.0 executors: @@ -30,7 +30,7 @@ commands: - run: ./scripts/check-file-changes.sh check-changed-files-or-halt-windows: steps: - - run: + - run: command: ./scripts/check-file-changes.sh shell: bash.exe test-go: @@ -65,11 +65,11 @@ commands: at: '/go' - when: condition: << parameters.release >> - steps: + steps: - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 make package' - when: condition: << parameters.nightly >> - steps: + steps: - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 NIGHTLY=1 make package' - run: 'make upload-nightly' - unless: @@ -283,14 +283,14 @@ jobs: command: | echo "Go tests complete." share-artifacts: - executor: aws-cli/default + executor: aws-cli/default steps: - run: command: | PR=${CIRCLE_PULL_REQUEST##*/} printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" - curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" - + curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" + workflows: version: 2 check: @@ -331,47 +331,47 @@ workflows: filters: tags: only: /.*/ - - 'test-awaiter': + - 'test-awaiter': requires: - 'test-go-1_15' - 'test-go-1_15-386' - 'test-go-1_16' - 'test-go-1_16-386' - 'windows-package': - requires: + requires: - 'test-go-windows' - 'darwin-package': - requires: + requires: - 'test-go-mac' - 'i386-package': - requires: + requires: - 'test-awaiter' - 'ppc641e-package': - requires: + requires: - 'test-awaiter' - 's390x-package': - requires: + requires: - 'test-awaiter' - 'armel-package': - requires: + requires: - 'test-awaiter' - 'amd64-package': - requires: + requires: - 'test-awaiter' - 'arm64-package': - requires: + requires: - 'test-awaiter' - 'armhf-package': - requires: + requires: - 'test-awaiter' - 'static-package': requires: - 'test-awaiter' - 'mipsel-package': - requires: + requires: - 'test-awaiter' - 'mips-package': - requires: + requires: - 'test-awaiter' - 'share-artifacts': requires: @@ -412,7 +412,7 @@ workflows: only: /.*/ - 'package-sign-mac': requires: - - 'package-sign-windows' + - 'package-sign-windows' filters: tags: only: /.*/ @@ -448,4 +448,4 @@ workflows: filters: branches: only: - - master + - master \ No newline at end of file diff --git a/config/config.go b/config/config.go index 88d6eedcef7df..0c990078ed0a6 100644 --- a/config/config.go +++ b/config/config.go @@ -24,6 +24,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/json_v2" "github.com/influxdata/telegraf/plugins/processors" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/toml" @@ -1387,6 +1388,68 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, } } + //for JSONPath parser + if node, ok := tbl.Fields["json_v2"]; ok { + if metricConfigs, ok := node.([]*ast.Table); ok { + pc.JSONV2Config = make([]parsers.JSONV2Config, len(metricConfigs)) + for i, metricConfig := range metricConfigs { + mc := pc.JSONV2Config[i] + c.getFieldString(metricConfig, "measurement_name", &mc.MeasurementName) + if mc.MeasurementName == "" { + mc.MeasurementName = name + } + c.getFieldString(metricConfig, "measurement_name_path", &mc.MeasurementNamePath) + c.getFieldString(metricConfig, "timestamp_path", &mc.TimestampPath) + c.getFieldString(metricConfig, "timestamp_format", &mc.TimestampFormat) + c.getFieldString(metricConfig, "timestamp_timezone", &mc.TimestampTimezone) + + if fieldConfigs, ok := metricConfig.Fields["field"]; ok { + if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { + for _, fieldconfig := range fieldConfigs { + var f json_v2.DataSet + c.getFieldString(fieldconfig, "path", &f.Path) + c.getFieldString(fieldconfig, "rename", &f.Rename) + c.getFieldString(fieldconfig, "type", &f.Type) + mc.Fields = append(mc.Fields, f) + } + } + } + if fieldConfigs, ok := metricConfig.Fields["tag"]; ok { + if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { + for _, fieldconfig := range fieldConfigs { + var t json_v2.DataSet + c.getFieldString(fieldconfig, "path", &t.Path) + c.getFieldString(fieldconfig, "rename", &t.Rename) + t.Type = "string" + mc.Tags = append(mc.Tags, t) + } + } + } + + if objectconfigs, ok := metricConfig.Fields["object"]; ok { + if objectconfigs, ok := objectconfigs.([]*ast.Table); ok { + for _, objectConfig := range objectconfigs { + var o json_v2.JSONObject + c.getFieldString(objectConfig, "path", &o.Path) + c.getFieldString(objectConfig, "timestamp_key", &o.TimestampKey) + c.getFieldString(objectConfig, "timestamp_format", &o.TimestampFormat) + c.getFieldString(objectConfig, "timestamp_timezone", &o.TimestampTimezone) + c.getFieldBool(objectConfig, "disable_prepend_keys", &o.DisablePrependKeys) + c.getFieldStringSlice(objectConfig, "included_keys", &o.IncludedKeys) + c.getFieldStringSlice(objectConfig, "excluded_keys", &o.ExcludedKeys) + c.getFieldStringSlice(objectConfig, "tags", &o.Tags) + c.getFieldStringMap(objectConfig, "renames", &o.Renames) + c.getFieldStringMap(objectConfig, "fields", &o.Fields) + mc.JSONObjects = append(mc.JSONObjects, o) + } + } + } + + pc.JSONV2Config[i] = mc + } + } + } + pc.MetricName = name if c.hasErrs() { @@ -1494,7 +1557,7 @@ func (c *Config) missingTomlField(_ reflect.Type, key string) error { "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", "tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates", - "value_field_name", "wavefront_source_override", "wavefront_use_strict", "xml": + "value_field_name", "wavefront_source_override", "wavefront_use_strict", "xml", "json_v2": // ignore fields that are common to all plugins. default: diff --git a/go.mod b/go.mod index dda464d48ad55..3892479cdeeec 100644 --- a/go.mod +++ b/go.mod @@ -44,14 +44,14 @@ require ( github.com/denisenkom/go-mssqldb v0.9.0 github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 - github.com/docker/docker v20.10.5+incompatible + github.com/docker/docker v20.10.6+incompatible github.com/dynatrace-oss/dynatrace-metric-utils-go v0.1.0 github.com/eclipse/paho.mqtt.golang v1.3.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.5.0 github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible - github.com/go-sql-driver/mysql v1.5.0 + github.com/go-sql-driver/mysql v1.6.0 github.com/goburrow/modbus v0.1.0 // indirect github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 @@ -103,7 +103,7 @@ require ( github.com/prometheus/client_golang v1.7.1 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.15.0 - github.com/prometheus/procfs v0.2.0 + github.com/prometheus/procfs v0.6.0 github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 github.com/riemann/riemann-go-client v0.5.0 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 @@ -116,7 +116,7 @@ require ( github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/testcontainers/testcontainers-go v0.10.0 + github.com/testcontainers/testcontainers-go v0.11.0 github.com/tidwall/gjson v1.6.0 github.com/tinylib/msgp v1.1.5 github.com/tklauser/go-sysconf v0.3.5 // indirect @@ -132,8 +132,8 @@ require ( go.uber.org/multierr v1.6.0 // indirect golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 - golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a + golang.org/x/sys v0.0.0-20210324051608-47abb6519492 golang.org/x/text v0.3.4 golang.org/x/tools v0.1.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 diff --git a/go.sum b/go.sum index 288fa5ff30c59..f4184b3ce9510 100644 --- a/go.sum +++ b/go.sum @@ -121,9 +121,11 @@ github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3h github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= -github.com/Microsoft/hcsshim v0.8.15 h1:Aof83YILRs2Vx3GhHqlvvfyx1asRJKMFIMeVlHsZKtI= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16 h1:8/auA4LFIZFTGrqfKhGBSXwM6/4X1fHa/xniyEHu8ac= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -239,10 +241,16 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds= @@ -276,13 +284,17 @@ github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMe github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102 h1:Qf4HiqfvmB7zS6scsmNgTLmByHbq8n9RTF39v+TzP7A= github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 h1:hkGVFjz+plgr5UfxZUTPFbUFIF/Km6/s+RVRIRHLrrY= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -294,24 +306,34 @@ github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1: github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.5.0-beta.1 h1:IK6yirB4X7wpKyFSikWiT++nZsyIxGAAgNEv3fEGuls= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4 h1:zjz4MOAOFgdBlwid2nNUlJ3YLpVi/97L36lfMYJex60= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= -github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7 h1:6ejg6Lkk8dskcM7wQ28gONkukbQkM4qpj4RnYbpFzrI= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e h1:6JKvHHt396/qabvMhnhUZvWaHZzfVfldxE60TK8YLhg= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -321,10 +343,13 @@ github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kw github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/coreos/etcd v3.3.22+incompatible h1:AnRMUyVdVvh1k7lHe61YEd227+CLoNogQuAypztGSK4= github.com/coreos/etcd v3.3.22+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= @@ -350,6 +375,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= @@ -381,8 +408,8 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.5+incompatible h1:o5WL5onN4awYGwrW7+oTn5x9AF2prw7V0Ox8ZEkoCdg= -github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= +github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -539,8 +566,9 @@ github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGK github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -776,6 +804,7 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e h1:3J1OB4RDKwXs5l8uEV6BP/tucOJOPDQysiT7/9cuXzA= github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= @@ -990,6 +1019,7 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= @@ -1009,14 +1039,20 @@ github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= +github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= +github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= +github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= +github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= -github.com/moby/sys/mountinfo v0.4.0 h1:1KInV3Huv18akCu58V7lzNlt+jFmqlu1EaErnEHE/VM= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd h1:aY7OQNf2XqY/JQ6qREWamhI/81os/agb2BAGpcx5yWI= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1193,8 +1229,9 @@ github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 h1:IB/5RJRcJiR/YzKs4Aou86s/RaMepZOZVCArYNHJHWc= github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2/go.mod h1:Td6hjwdXDmVt5CI9T03Sw+yBNxLBq/Yx3ZtmtP8zlCA= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1262,6 +1299,13 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -1281,11 +1325,14 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1312,8 +1359,8 @@ github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOs github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/testcontainers/testcontainers-go v0.10.0 h1:ASWe0nwTNg5z8K3WSQ8aBNB6j5vrNJocFPEZF4NS0qI= -github.com/testcontainers/testcontainers-go v0.10.0/go.mod h1:zFYk0JndthnMHEwtVRHCpLwIP/Ik1G7mvIAQ2MdZ+Ig= +github.com/testcontainers/testcontainers-go v0.11.0 h1:HO5YOx2DYBHqcg4MzVWPj3FuHAv7USWVu94vCSsgiaM= +github.com/testcontainers/testcontainers-go v0.11.0/go.mod h1:HztBCODzuA+YpMXGK8amjO8j50jz2gcT0BOzSKUiYIs= github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= @@ -1343,6 +1390,10 @@ github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYM github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= +github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= +github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= @@ -1390,6 +1441,7 @@ go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1440,6 +1492,7 @@ golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= @@ -1532,6 +1585,7 @@ golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1549,8 +1603,9 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1618,10 +1673,13 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1630,12 +1688,18 @@ golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa h1:ZYxPR6aca/uhfRJyaOAtflSHjJYiktO7QnJC5ut7iY4= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1808,6 +1872,7 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= @@ -1877,6 +1942,7 @@ gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI= gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1899,8 +1965,9 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclp gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/plugins/parsers/json_v2/README.md b/plugins/parsers/json_v2/README.md new file mode 100644 index 0000000000000..a1effd5940614 --- /dev/null +++ b/plugins/parsers/json_v2/README.md @@ -0,0 +1,187 @@ +# JSON Parser - Version 2 + +This parser takes valid JSON input and turns it into metrics. The query syntax supported is [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md), you can go to this playground to test out your GJSON path here: https://gjson.dev/. You can find multiple examples under the `testdata` folder. + +## Configuration + +You configure this parser by describing the metric you want by defining the fields and tags from the input. The configuration is divided into config sub-tables called `field`, `tag`, and `object`. In the example below you can see all the possible configuration keys you can define for each config table. In the sections that follow these configuration keys are defined in more detail. + +**Example configuration:** + +```toml + [[inputs.file]] + urls = [] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "" # A string that will become the new measurement name + measurement_name_path = "" # A string with valid GJSON path syntax, will override measurement_name + timestamp_path = "" # A string with valid GJSON path syntax to a valid timestamp (single value) + timestamp_format = "" # A string with a valid timestamp format (see below for possible values) + timestamp_timezone = "" # A string with with a valid timezone (see below for possible values) + [[inputs.file.json_v2.tag]] + path = "" # A string with valid GJSON path syntax + rename = "new name" # A string with a new name for the tag key + [[inputs.file.json_v2.field]] + path = "" # A string with valid GJSON path syntax + rename = "new name" # A string with a new name for the tag key + type = "int" # A string specifying the type (int,uint,float,string,bool) + [[inputs.file.json_v2.object]] + path = "" # A string with valid GJSON path syntax + timestamp_key = "" # A JSON key (for a nested key, prepend the parent keys with underscores) to a valid timestamp + timestamp_format = "" # A string with a valid timestamp format (see below for possible values) + timestamp_timezone = "" # A string with with a valid timezone (see below for possible values) + disable_prepend_keys = false (or true, just not both) + included_keys = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that should be only included in result + excluded_keys = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that shouldn't be included in result + tags = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field + [inputs.file.json_v2.object.renames] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) with a new name for the tag key + key = "new name" + [inputs.file.json_v2.object.fields] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) with a type (int,uint,float,string,bool) + key = "int" +``` +--- +### root config options + +* **measurement_name (OPTIONAL)**: Will set the measurement name to the provided string. +* **measurement_name_path (OPTIONAL)**: You can define a query with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) to set a measurement name from the JSON input. The query must return a single data value or it will use the default measurement name. This takes precedence over `measurement_name`. +* **timestamp_path (OPTIONAL)**: You can define a query with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) to set a timestamp from the JSON input. The query must return a single data value or it will default to the current time. +* **timestamp_format (OPTIONAL, but REQUIRED when timestamp_query is defined**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or +the Go "reference time" which is defined to be the specific time: +`Mon Jan 2 15:04:05 MST 2006` +* **timestamp_timezone (OPTIONAL, but REQUIRES timestamp_query**: This option should be set to a +[Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), +such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`. Defaults to `UTC` + +--- + +### `field` and `tag` config options + +`field` and `tag` represent the elements of [line protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/), which is used to define a `metric`. You can use the `field` and `tag` config tables to gather a single value or an array of values that all share the same type and name. With this you can add a field or tag to a metric from data stored anywhere in your JSON. If you define the GJSON path to return a single value then you will get a single resutling metric that contains the field/tag. If you define the GJSON path to return an array of values, then each field/tag will be put into a separate metric (you use the # character to retrieve JSON arrays, find examples [here](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md#arrays)). + +Note that objects are handled separately, therefore if you provide a path that returns a object it will be ignored. You will need use the `object` config table to parse objects, because `field` and `tag` doesn't handle relationships between data. Each `field` and `tag` you define is handled as a separate data point. + +The notable difference between `field` and `tag`, is that `tag` values will always be type string while `field` can be multiple types. You can define the type of `field` to be any [type that line protocol supports](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/#data-types-and-format), which are: +* float +* int +* uint +* string +* bool + + +#### **field** + +* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md). +* **name (OPTIONAL)**: You can define a string value to set the field name. If not defined it will use the trailing word from the provided query. +* **type (OPTIONAL)**: You can define a string value to set the desired type (float, int, uint, string, bool). If not defined it won't enforce a type and default to using the original type defined in the JSON (bool, float, or string). + +#### **tag** + +* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md). +* **name (OPTIONAL)**: You can define a string value to set the field name. If not defined it will use the trailing word from the provided query. + +For good examples in using `field` and `tag` you can reference the following example configs: + +* [fields_and_tags](testdata/fields_and_tags/telegraf.conf) +--- +### object + +With the configuration section `object`, you can gather metrics from [JSON objects](https://www.w3schools.com/js/js_json_objects.asp). + +The following keys can be set for `object`: + +* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) +* **timestamp_key(OPTIONAL)**: You can define a json key (for a nested key, prepend the parent keys with underscores) for the value to be set as the timestamp from the JSON input. +* **timestamp_format (OPTIONAL, but REQUIRED when timestamp_query is defined**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or +the Go "reference time" which is defined to be the specific time: +`Mon Jan 2 15:04:05 MST 2006` +* **timestamp_timezone (OPTIONAL, but REQUIRES timestamp_query**: This option should be set to a +[Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), +such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`. Defaults to `UTC` +* **disable_prepend_keys (OPTIONAL)**: Set to true to prevent resulting nested data to contain the parent key prepended to its key **NOTE**: duplicate names can overwrite each other when this is enabled +* **included_keys (OPTIONAL)**: You can define a list of key's that should be the only data included in the metric, by default it will include everything. +* **excluded_keys (OPTIONAL)**: You can define json keys to be excluded in the metric, for a nested key, prepend the parent keys with underscores +* **tags (OPTIONAL)**: You can define json keys to be set as tags instead of fields, if you define a key that is an array or object then all nested values will become a tag +* **renames (OPTIONAL)**: A table matching the json key with the desired name (oppossed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results +* **fields (OPTIONAL)**: A table matching the json key with the desired type (int,string,bool,float), if you define a key that is an array or object then all nested values will become that type + +## Arrays and Objects + +The following describes the high-level approach when parsing arrays and objects: + +**Array**: Every element in an array is treated as a *separate* metric + +**Object**: Every key/value in a object is treated as a *single* metric + +When handling nested arrays and objects, these above rules continue to apply as the parser creates metrics. When an object has multiple array's as values, the array's will become separate metrics containing only non-array values from the obejct. Below you can see an example of this behavior, with an input json containing an array of book objects that has a nested array of characters. + +Example JSON: + +```json +{ + "book": { + "title": "The Lord Of The Rings", + "chapters": [ + "A Long-expected Party", + "The Shadow of the Past" + ], + "author": "Tolkien", + "characters": [ + { + "name": "Bilbo", + "species": "hobbit" + }, + { + "name": "Frodo", + "species": "hobbit" + } + ], + "random": [ + 1, + 2 + ] + } +} + +``` + +Example configuration: + +```toml +[[inputs.file]] + files = ["./testdata/multiple_arrays_in_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "book" + tags = ["title"] + disable_prepend_keys = true +``` + +Expected metrics: + +``` +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Bilbo",species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Frodo",species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=1 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=2 + +``` + +You can find more complicated examples under the folder `testdata`. + +## Types + +For each field you have the option to define the types for each metric. The following rules are in place for this configuration: + +* If a type is explicitly defined, the parser will enforce this type and convert the data to the defined type if possible. If the type can't be converted then the parser will fail. +* If a type isn't defined, the parser will use the default type defined in the JSON (int, float, string) + +The type values you can set: + +* `int`, bool, floats or strings (with valid numbers) can be converted to a int. +* `uint`, bool, floats or strings (with valid numbers) can be converted to a uint. +* `string`, any data can be formatted as a string. +* `float`, string values (with valid numbers) or integers can be converted to a float. +* `bool`, the string values "true" or "false" (regardless of capitalization) or the integer values `0` or `1` can be turned to a bool. diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go new file mode 100644 index 0000000000000..9ba7de2aa7cf9 --- /dev/null +++ b/plugins/parsers/json_v2/parser.go @@ -0,0 +1,587 @@ +package json_v2 + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" + "github.com/tidwall/gjson" +) + +type Parser struct { + Configs []Config + DefaultTags map[string]string + Log telegraf.Logger + Timestamp time.Time + + measurementName string + + iterateObjects bool + currentSettings JSONObject +} + +type Config struct { + MeasurementName string `toml:"measurement_name"` // OPTIONAL + MeasurementNamePath string `toml:"measurement_name_path"` // OPTIONAL + TimestampPath string `toml:"timestamp_path"` // OPTIONAL + TimestampFormat string `toml:"timestamp_format"` // OPTIONAL, but REQUIRED when timestamp_path is defined + TimestampTimezone string `toml:"timestamp_timezone"` // OPTIONAL, but REQUIRES timestamp_path + + Fields []DataSet + Tags []DataSet + JSONObjects []JSONObject +} + +type DataSet struct { + Path string `toml:"path"` // REQUIRED + Type string `toml:"type"` // OPTIONAL, can't be set for tags they will always be a string + Rename string `toml:"rename"` // OPTIONAL +} + +type JSONObject struct { + Path string `toml:"path"` // REQUIRED + TimestampKey string `toml:"timestamp_key"` // OPTIONAL + TimestampFormat string `toml:"timestamp_format"` // OPTIONAL, but REQUIRED when timestamp_path is defined + TimestampTimezone string `toml:"timestamp_timezone"` // OPTIONAL, but REQUIRES timestamp_path + Renames map[string]string `toml:"renames"` // OPTIONAL + Fields map[string]string `toml:"fields"` // OPTIONAL + Tags []string `toml:"tags"` // OPTIONAL + IncludedKeys []string `toml:"included_keys"` // OPTIONAL + ExcludedKeys []string `toml:"excluded_keys"` // OPTIONAL + DisablePrependKeys bool `toml:"disable_prepend_keys"` // OPTIONAL +} + +type MetricNode struct { + OutputName string + SetName string + Tag bool + DesiredType string // Can be "int", "uint", "float", "bool", "string" + + Metric telegraf.Metric + gjson.Result +} + +func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { + // Only valid JSON is supported + if !gjson.Valid(string(input)) { + return nil, fmt.Errorf("Invalid JSON provided, unable to parse") + } + + var metrics []telegraf.Metric + + for _, c := range p.Configs { + // Measurement name configuration + p.measurementName = c.MeasurementName + if c.MeasurementNamePath != "" { + result := gjson.GetBytes(input, c.MeasurementNamePath) + if !result.IsArray() && !result.IsObject() { + p.measurementName = result.String() + } + } + + // Timestamp configuration + p.Timestamp = time.Now() + if c.TimestampPath != "" { + result := gjson.GetBytes(input, c.TimestampPath) + if !result.IsArray() && !result.IsObject() { + if c.TimestampFormat == "" { + err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") + return nil, err + } + + var err error + p.Timestamp, err = internal.ParseTimestamp(c.TimestampFormat, result.Value(), c.TimestampTimezone) + if err != nil { + return nil, err + } + } + } + + fields, err := p.processMetric(c.Fields, input, false) + if err != nil { + return nil, err + } + + tags, err := p.processMetric(c.Tags, input, true) + if err != nil { + return nil, err + } + + objects, err := p.processObjects(c.JSONObjects, input) + if err != nil { + return nil, err + } + + metrics = append(metrics, cartesianProduct(tags, fields)...) + + if len(objects) != 0 && len(metrics) != 0 { + metrics = append(metrics, cartesianProduct(objects, metrics)...) + } else { + metrics = append(metrics, objects...) + } + } + + for k, v := range p.DefaultTags { + for _, t := range metrics { + t.AddTag(k, v) + } + } + + return metrics, nil +} + +// processMetric will iterate over all 'field' or 'tag' configs and create metrics for each +// A field/tag can either be a single value or an array of values, each resulting in its own metric +// For multiple configs, a set of metrics is created from the cartesian product of each separate config +func (p *Parser) processMetric(data []DataSet, input []byte, tag bool) ([]telegraf.Metric, error) { + if len(data) == 0 { + return nil, nil + } + + p.iterateObjects = false + var metrics [][]telegraf.Metric + + for _, c := range data { + if c.Path == "" { + return nil, fmt.Errorf("GJSON path is required") + } + result := gjson.GetBytes(input, c.Path) + + if result.IsObject() { + p.Log.Debugf("Found object in the path: %s, ignoring it please use 'object' to gather metrics from objects", c.Path) + continue + } + + setName := c.Rename + // Default to the last path word, should be the upper key name + if setName == "" { + s := strings.Split(c.Path, ".") + setName = s[len(s)-1] + } + setName = strings.ReplaceAll(setName, " ", "_") + + mNode := MetricNode{ + OutputName: setName, + SetName: setName, + DesiredType: c.Type, + Tag: tag, + Metric: metric.New( + p.measurementName, + map[string]string{}, + map[string]interface{}{}, + p.Timestamp, + ), + Result: result, + } + + // Expand all array's and nested arrays into separate metrics + nodes, err := p.expandArray(mNode) + if err != nil { + return nil, err + } + + var m []telegraf.Metric + for _, n := range nodes { + m = append(m, n.Metric) + } + metrics = append(metrics, m) + } + + for i := 1; i < len(metrics); i++ { + metrics[i] = cartesianProduct(metrics[i-1], metrics[i]) + } + + return metrics[len(metrics)-1], nil +} + +func cartesianProduct(a, b []telegraf.Metric) []telegraf.Metric { + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + p := make([]telegraf.Metric, len(a)*len(b)) + i := 0 + for _, a := range a { + for _, b := range b { + m := a.Copy() + mergeMetric(b, m) + p[i] = m + i++ + } + } + + return p +} + +func mergeMetric(a telegraf.Metric, m telegraf.Metric) { + for _, f := range a.FieldList() { + m.AddField(f.Key, f.Value) + } + for _, t := range a.TagList() { + m.AddTag(t.Key, t.Value) + } +} + +// expandArray will recursively create a new MetricNode for each element in a JSON array or single value +func (p *Parser) expandArray(result MetricNode) ([]MetricNode, error) { + var results []MetricNode + + if result.IsObject() { + if !p.iterateObjects { + p.Log.Debugf("Found object in query ignoring it please use 'object' to gather metrics from objects") + return results, nil + } + r, err := p.combineObject(result) + if err != nil { + return nil, err + } + results = append(results, r...) + return results, nil + } + + if result.IsArray() { + var err error + result.ForEach(func(_, val gjson.Result) bool { + m := metric.New( + p.measurementName, + map[string]string{}, + map[string]interface{}{}, + p.Timestamp, + ) + + if val.IsObject() { + if p.iterateObjects { + n := MetricNode{ + SetName: result.SetName, + Metric: m, + Result: val, + } + var r []MetricNode + r, err = p.combineObject(n) + if err != nil { + return false + } + + results = append(results, r...) + } else { + p.Log.Debugf("Found object in query ignoring it please use 'object' to gather metrics from objects") + } + if len(results) != 0 { + for _, newResult := range results { + mergeMetric(result.Metric, newResult.Metric) + } + } + return true + } + + for _, f := range result.Metric.FieldList() { + m.AddField(f.Key, f.Value) + } + for _, f := range result.Metric.TagList() { + m.AddTag(f.Key, f.Value) + } + n := MetricNode{ + Tag: result.Tag, + DesiredType: result.DesiredType, + OutputName: result.OutputName, + SetName: result.SetName, + Metric: m, + Result: val, + } + var r []MetricNode + r, err = p.expandArray(n) + if err != nil { + return false + } + results = append(results, r...) + return true + }) + if err != nil { + return nil, err + } + } else { + if !result.Tag && !result.IsObject() { + if result.SetName == p.currentSettings.TimestampKey { + if p.currentSettings.TimestampFormat == "" { + err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") + return nil, err + } + timestamp, err := internal.ParseTimestamp(p.currentSettings.TimestampFormat, result.Value(), p.currentSettings.TimestampTimezone) + if err != nil { + return nil, err + } + result.Metric.SetTime(timestamp) + } else { + v, err := p.convertType(result.Value(), result.DesiredType, result.SetName) + if err != nil { + return nil, err + } + result.Metric.AddField(result.OutputName, v) + } + } else if !result.IsObject() { + v, err := p.convertType(result.Value(), "string", result.SetName) + if err != nil { + return nil, err + } + result.Metric.AddTag(result.OutputName, v.(string)) + } + results = append(results, result) + } + + return results, nil +} + +// processObjects will iterate over all 'object' configs and create metrics for each +func (p *Parser) processObjects(objects []JSONObject, input []byte) ([]telegraf.Metric, error) { + p.iterateObjects = true + var t []telegraf.Metric + for _, c := range objects { + p.currentSettings = c + if c.Path == "" { + return nil, fmt.Errorf("GJSON path is required") + } + result := gjson.GetBytes(input, c.Path) + + if result.Type == gjson.Null { + return nil, fmt.Errorf("GJSON Path returned null") + } + + rootObject := MetricNode{ + Metric: metric.New( + p.measurementName, + map[string]string{}, + map[string]interface{}{}, + p.Timestamp, + ), + Result: result, + } + metrics, err := p.expandArray(rootObject) + if err != nil { + return nil, err + } + for _, m := range metrics { + t = append(t, m.Metric) + } + } + + return t, nil +} + +// combineObject will add all fields/tags to a single metric +// If the object has multiple array's as elements it won't comine those, they will remain separate metrics +func (p *Parser) combineObject(result MetricNode) ([]MetricNode, error) { + var results []MetricNode + if result.IsArray() || result.IsObject() { + var err error + var prevArray bool + result.ForEach(func(key, val gjson.Result) bool { + // Determine if field/tag set name is configured + var setName string + if result.SetName != "" { + setName = result.SetName + "_" + strings.ReplaceAll(key.String(), " ", "_") + } else { + setName = strings.ReplaceAll(key.String(), " ", "_") + } + + if p.isExcluded(setName) || !p.isIncluded(setName, val) { + return true + } + + var outputName string + if p.currentSettings.DisablePrependKeys { + outputName = strings.ReplaceAll(key.String(), " ", "_") + } else { + outputName = setName + } + for k, n := range p.currentSettings.Renames { + if k == setName { + outputName = n + break + } + } + + arrayNode := MetricNode{ + DesiredType: result.DesiredType, + Tag: result.Tag, + OutputName: outputName, + SetName: setName, + Metric: result.Metric, + Result: val, + } + + for k, t := range p.currentSettings.Fields { + if setName == k { + arrayNode.DesiredType = t + break + } + } + + tag := false + for _, t := range p.currentSettings.Tags { + if setName == t { + tag = true + break + } + } + + arrayNode.Tag = tag + if val.IsObject() { + prevArray = false + _, err = p.combineObject(arrayNode) + if err != nil { + return false + } + } else { + var r []MetricNode + r, err = p.expandArray(arrayNode) + if err != nil { + return false + } + if prevArray { + if !arrayNode.IsArray() { + // If another non-array element was found, merge it into all previous gathered metrics + if len(results) != 0 { + for _, newResult := range results { + mergeMetric(result.Metric, newResult.Metric) + } + } + } else { + // Multiple array's won't be merged but kept separate, add additional metrics gathered from an array + results = append(results, r...) + } + } else { + // Continue using the same metric if its an object + results = r + } + + if val.IsArray() { + prevArray = true + } + } + + return true + }) + + if err != nil { + return nil, err + } + } + + return results, nil +} + +func (p *Parser) isIncluded(key string, val gjson.Result) bool { + if len(p.currentSettings.IncludedKeys) == 0 { + return true + } + for _, i := range p.currentSettings.IncludedKeys { + if i == key { + return true + } + if val.IsArray() || val.IsObject() { + // Check if the included key is a sub element + if strings.HasPrefix(i, key) { + return true + } + } + } + return false +} + +func (p *Parser) isExcluded(key string) bool { + for _, i := range p.currentSettings.ExcludedKeys { + if i == key { + return true + } + } + return false +} + +func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { + return nil, fmt.Errorf("ParseLine is designed for parsing influx line protocol, therefore not implemented for parsing JSON") +} + +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +// convertType will convert the value parsed from the input JSON to the specified type in the config +func (p *Parser) convertType(input interface{}, desiredType string, name string) (interface{}, error) { + switch inputType := input.(type) { + case string: + if desiredType != "string" { + switch desiredType { + case "uint": + r, err := strconv.ParseUint(inputType, 10, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type uint: %v", name, err) + } + return r, nil + case "int": + r, err := strconv.Atoi(inputType) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type int: %v", name, err) + } + return r, nil + case "float": + r, err := strconv.ParseFloat(inputType, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type float: %v", name, err) + } + return r, nil + case "bool": + r, err := strconv.ParseBool(inputType) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type bool: %v", name, err) + } + return r, nil + } + } + case bool: + switch desiredType { + case "string": + return strconv.FormatBool(inputType), nil + case "int": + if inputType { + return int64(1), nil + } + + return int64(0), nil + case "uint": + if inputType { + return uint64(1), nil + } + + return uint64(0), nil + } + case float64: + if desiredType != "float" { + switch desiredType { + case "string": + return fmt.Sprint(inputType), nil + case "int": + return int64(inputType), nil + case "uint": + return uint64(inputType), nil + case "bool": + if inputType == 0 { + return false, nil + } else if inputType == 1 { + return true, nil + } else { + return nil, fmt.Errorf("Unable to convert field '%s' to type bool", name) + } + } + } + default: + return nil, fmt.Errorf("unknown format '%T' for field '%s'", inputType, name) + } + + return input, nil +} diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go new file mode 100644 index 0000000000000..c8deda29edd94 --- /dev/null +++ b/plugins/parsers/json_v2/parser_test.go @@ -0,0 +1,130 @@ +package json_v2_test + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/file" + "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestData(t *testing.T) { + var tests = []struct { + name string + test string + }{ + { + name: "Test using just fields and tags", + test: "fields_and_tags", + }, + { + name: "Test gathering from array of nested objects", + test: "nested_array_of_objects", + }, + { + name: "Test setting timestamp", + test: "timestamp", + }, + { + name: "Test setting measurement name from int", + test: "measurement_name_int", + }, + { + name: "Test multiple types", + test: "types", + }, + { + name: "Test settings tags in nested object", + test: "nested_tags", + }, + { + name: "Test settings tags in nested and non-nested objects", + test: "nested_and_nonnested_tags", + }, + { + name: "Test a more complex nested tag retrieval", + test: "nested_tags_complex", + }, + { + name: "Test multiple arrays in object", + test: "multiple_arrays_in_object", + }, + { + name: "Test fields and tags complex", + test: "fields_and_tags_complex", + }, + { + name: "Test object", + test: "object", + }, + { + name: "Test multiple timestamps", + test: "multiple_timestamps", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + // Process the telegraf config file for the test + buf, err := ioutil.ReadFile(fmt.Sprintf("testdata/%s/telegraf.conf", tc.test)) + require.NoError(t, err) + inputs.Add("file", func() telegraf.Input { + return &file.File{} + }) + cfg := config.NewConfig() + err = cfg.LoadConfigData(buf) + require.NoError(t, err) + + // Gather the metrics from the input file configure + acc := testutil.Accumulator{} + for _, i := range cfg.Inputs { + err = i.Init() + require.NoError(t, err) + err = i.Gather(&acc) + require.NoError(t, err) + } + require.NoError(t, err) + + // Process expected metrics and compare with resulting metrics + expectedOutputs, err := readMetricFile(fmt.Sprintf("testdata/%s/expected.out", tc.test)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expectedOutputs, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} + +func readMetricFile(path string) ([]telegraf.Metric, error) { + var metrics []telegraf.Metric + expectedFile, err := os.Open(path) + if err != nil { + return metrics, err + } + defer expectedFile.Close() + + parser := influx.NewParser(influx.NewMetricHandler()) + scanner := bufio.NewScanner(expectedFile) + for scanner.Scan() { + line := scanner.Text() + if line != "" { + m, err := parser.ParseLine(line) + if err != nil { + return nil, fmt.Errorf("unable to parse metric in %q failed: %v", line, err) + } + metrics = append(metrics, m) + } + } + err = expectedFile.Close() + if err != nil { + return metrics, err + } + + return metrics, nil +} diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags/expected.out b/plugins/parsers/json_v2/testdata/fields_and_tags/expected.out new file mode 100644 index 0000000000000..2b7f6c16834c4 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags/expected.out @@ -0,0 +1,2 @@ +file,status=200 duration=2i,json_duration=100 +file,status=200 duration=2i,json_duration=60 diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags/input.json b/plugins/parsers/json_v2/testdata/fields_and_tags/input.json new file mode 100644 index 0000000000000..e8be29f955ca0 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags/input.json @@ -0,0 +1,46 @@ +{ + "message": "abc", + "fields": { + "status": 200, + "key": 1, + "json": [ + { + "duration": 100, + "code": 1, + "label": 2, + "line": 3, + "many": 4, + "more": 5, + "numerical": 6, + "fields": 7, + "nest": { + "label": 2, + "line": 3, + "many": 4, + "more": 5, + "numerical": 4, + "fields": 7 + } + }, + { + "duration": 60, + "code": 1, + "label": 2, + "line": 3, + "many": 4, + "more": 5, + "numerical": 6, + "fields": 7, + "nest": { + "label": 2, + "line": 3, + "many": 4, + "more": 5, + "numerical": 6, + "fields": 7 + } + } + ], + "duration": 2 + } +} diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags/telegraf.conf b/plugins/parsers/json_v2/testdata/fields_and_tags/telegraf.conf new file mode 100644 index 0000000000000..ceec731f991bd --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags/telegraf.conf @@ -0,0 +1,14 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/1363 + +[[inputs.file]] + files = ["./testdata/fields_and_tags/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.tag]] + path = "fields.status" + [[inputs.file.json_v2.field]] + path = "fields.json.#.duration" + rename = "json_duration" + [[inputs.file.json_v2.field]] + path = "fields.duration" + type = "int" diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags_complex/expected.out b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/expected.out new file mode 100644 index 0000000000000..02edaba46ff2f --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/expected.out @@ -0,0 +1,5 @@ +bart_json_v2,name=Powell\ St. minutes=9i +bart_json_v2,name=Powell\ St. minutes=40i +bart_json_v2,name=Powell\ St. minutes=70i +bart_json_v2,name=Powell\ St. minutes=12i +bart_json_v2,name=Powell\ St. minutes=42i diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags_complex/input.json b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/input.json new file mode 100644 index 0000000000000..15a0dab9519cb --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/input.json @@ -0,0 +1,87 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=POWL&json=y" + }, + "date": "06/03/2021", + "time": "09:46:01 AM PDT", + "station": [ + { + "name": "Powell St.", + "abbr": "POWL", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "9", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "40", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "70", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Berryessa", + "abbreviation": "BERY", + "limited": "0", + "estimate": [ + { + "minutes": "12", + "platform": "2", + "direction": "North", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "42", + "platform": "2", + "direction": "North", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags_complex/telegraf.conf b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/telegraf.conf new file mode 100644 index 0000000000000..e2b655930acce --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/telegraf.conf @@ -0,0 +1,10 @@ +[[inputs.file]] + files = ["./testdata/fields_and_tags_complex/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "bart_json_v2" + [[inputs.file.json_v2.tag]] + path = "root.station.#.name" + [[inputs.file.json_v2.field]] + path = "root.station.#.etd.#.estimate.#.minutes" + type = "int" diff --git a/plugins/parsers/json_v2/testdata/measurement_name_int/expected.out b/plugins/parsers/json_v2/testdata/measurement_name_int/expected.out new file mode 100644 index 0000000000000..4afd678a4b71a --- /dev/null +++ b/plugins/parsers/json_v2/testdata/measurement_name_int/expected.out @@ -0,0 +1 @@ +32 label="Basic" diff --git a/plugins/parsers/json_v2/testdata/measurement_name_int/input.json b/plugins/parsers/json_v2/testdata/measurement_name_int/input.json new file mode 100644 index 0000000000000..34dccc621ed15 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/measurement_name_int/input.json @@ -0,0 +1,19 @@ +{ + "value_id": "52-32-1-0", + "node_id": 52, + "class_id": 32, + "type": "byte", + "genre": "basic", + "instance": 1, + "index": 0, + "label": "Basic", + "units": "", + "help": "Basic status of the node", + "read_only": false, + "write_only": false, + "min": 0, + "max": 255, + "is_polled": false, + "value": 0, + "lastUpdate": 1584636017962 +} diff --git a/plugins/parsers/json_v2/testdata/measurement_name_int/telegraf.conf b/plugins/parsers/json_v2/testdata/measurement_name_int/telegraf.conf new file mode 100644 index 0000000000000..db6a86ca197f2 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/measurement_name_int/telegraf.conf @@ -0,0 +1,9 @@ +# Example taken from: https://github.com/influxdata/feature-requests/issues/160 + +[[inputs.file]] + files = ["./testdata/measurement_name_int/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name_path = "class_id" + [[inputs.file.json_v2.field]] + path = "label" diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out new file mode 100644 index 0000000000000..814d044ce6b6f --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out @@ -0,0 +1,6 @@ +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Bilbo",species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Frodo",species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=1 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=2 diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/input.json b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/input.json new file mode 100644 index 0000000000000..271638a4f6a33 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/input.json @@ -0,0 +1,24 @@ +{ + "book": { + "title": "The Lord Of The Rings", + "chapters": [ + "A Long-expected Party", + "The Shadow of the Past" + ], + "author": "Tolkien", + "characters": [ + { + "name": "Bilbo", + "species": "hobbit" + }, + { + "name": "Frodo", + "species": "hobbit" + } + ], + "random": [ + 1, + 2 + ] + } +} diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/telegraf.conf b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/telegraf.conf new file mode 100644 index 0000000000000..b83e383adbcc5 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/telegraf.conf @@ -0,0 +1,11 @@ +# Example getting nested fields with duplicate names +# Example taken from: https://github.com/influxdata/telegraf/issues/1363 + +[[inputs.file]] + files = ["./testdata/multiple_arrays_in_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "book" + tags = ["title"] + disable_prepend_keys = true diff --git a/plugins/parsers/json_v2/testdata/multiple_timestamps/expected.out b/plugins/parsers/json_v2/testdata/multiple_timestamps/expected.out new file mode 100644 index 0000000000000..0cc5bb93aafcf --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_timestamps/expected.out @@ -0,0 +1,2 @@ +file name="fire" 1555745371450794118 +file name="flood" 1555745371450794118 diff --git a/plugins/parsers/json_v2/testdata/multiple_timestamps/input.json b/plugins/parsers/json_v2/testdata/multiple_timestamps/input.json new file mode 100644 index 0000000000000..7931dca6635ab --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_timestamps/input.json @@ -0,0 +1,12 @@ +{ + "events": [ + { + "name": "fire", + "time": "1555745371410" + }, + { + "name": "flood", + "time": "1555745371410" + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/multiple_timestamps/telegraf.conf b/plugins/parsers/json_v2/testdata/multiple_timestamps/telegraf.conf new file mode 100644 index 0000000000000..da3bae2d373b7 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_timestamps/telegraf.conf @@ -0,0 +1,10 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/5940 + +[[inputs.file]] + files = ["./testdata/multiple_timestamps/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "events" + timestamp_key = "time" + timestamp_format = "unix_ms" diff --git a/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/expected.out b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/expected.out new file mode 100644 index 0000000000000..d48b7660e28c3 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/expected.out @@ -0,0 +1,12 @@ +file,hostname=testhost1,outputname=1A-CC01-PC01 systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=2A-CC01-KA01 systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=3A-CC01-CC02 systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=4A systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=5A systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=6A-CC01-88-INV01-A systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost2,outputname=1A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=2A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=3A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=4A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=5A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=6A systemVoltage=27.5,systemCurrent=9.5 diff --git a/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/input.json b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/input.json new file mode 100644 index 0000000000000..60d7f24821297 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/input.json @@ -0,0 +1,174 @@ +[ + { + "hostname": "testhost1", + "systemVoltage": -54.1, + "systemCurrent": -3.8, + "tables": [ + { + "outputnumber": 0.0, + "outputname": "1A-CC01-PC01", + "outputcurrent": -2.7, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 1.0, + "outputname": "2A-CC01-KA01", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 2.0, + "outputname": "3A-CC01-CC02", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 3.0, + "outputname": "4A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 4.0, + "outputname": "5A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 5.0, + "outputname": "6A-CC01-88-INV01-A", + "outputcurrent": -1.1, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "busnumber": 0.0, + "busname": "A--48A", + "busvoltage": -54.1, + "buscurrent": -3.8 + }, + { + "busnumber": 1.0, + "busname": "B--48B", + "busvoltage": -53.9, + "buscurrent": -4.2 + }, + { + "alarmnumber": 0.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 1.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 2.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 3.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 4.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + } + ] + }, + { + "hostname": "testhost2", + "systemVoltage": 27.5, + "systemCurrent": 9.5, + "tables": [ + { + "outputnumber": 0.0, + "outputname": "1A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 1.0, + "outputname": "2A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 2.0, + "outputname": "3A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 3.0, + "outputname": "4A", + "outputcurrent": 0.6, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 4.0, + "outputname": "5A", + "outputcurrent": 6.5, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 5.0, + "outputname": "6A", + "outputcurrent": 0.0, + "outputfusestatus": 2.0, + "outputenable": 1.0 + }, + { + "busnumber": 0.0, + "busname": "A-24V", + "busvoltage": 27.6, + "buscurrent": 0.6 + }, + { + "busnumber": 1.0, + "busname": "B-12V", + "busvoltage": 13.8, + "buscurrent": 0.0 + }, + { + "alarmnumber": 0.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 1.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 2.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 3.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 4.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + } + ] + } +] diff --git a/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/telegraf.conf b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/telegraf.conf new file mode 100644 index 0000000000000..45692dc5df0e2 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/telegraf.conf @@ -0,0 +1,18 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/6437 + +# Parse String types from JSON +[[inputs.file]] +files = ["./testdata/nested_and_nonnested_tags/input.json"] +data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + disable_prepend_keys = true + path = "@this" + included_keys = [ + "hostname", + "systemVoltage", + "systemCurrent", + "tables", + "tables_outputname", + ] + tags = ["hostname", "tables_outputname"] diff --git a/plugins/parsers/json_v2/testdata/nested_array_of_objects/expected.out b/plugins/parsers/json_v2/testdata/nested_array_of_objects/expected.out new file mode 100644 index 0000000000000..972ea5eadd30b --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_array_of_objects/expected.out @@ -0,0 +1,2 @@ +new_metric,name=partition LogEndOffset=339238i,LogStartOffset=339238i,NumLogSegments=1i,Size=0i,UnderReplicatedPartitions=0i 1610056029037925000 +new_metric,name=partition LogEndOffset=33914i,LogStartOffset=33238i,NumLogSegments=1i,Size=2i,UnderReplicatedPartitions=5i 1610056029037956000 diff --git a/plugins/parsers/json_v2/testdata/nested_array_of_objects/input.json b/plugins/parsers/json_v2/testdata/nested_array_of_objects/input.json new file mode 100644 index 0000000000000..86ded773af73b --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_array_of_objects/input.json @@ -0,0 +1,36 @@ +[ + { + "data": { + "LogEndOffset": 339238, + "LogStartOffset": 339238, + "NumLogSegments": 1, + "Size": 0, + "UnderReplicatedPartitions": 0 + }, + "name": "partition", + "tags": { + "host": "CUD1-001559", + "jolokia_agent_url": "http://localhost:7777/jolokia", + "partition": "1", + "topic": "qa-kafka-connect-logs" + }, + "timestamp": 1591124461 + }, + { + "data": { + "LogEndOffset": 33914, + "LogStartOffset": 33238, + "NumLogSegments": 1, + "Size": 2, + "UnderReplicatedPartitions": 5 + }, + "name": "partition", + "tags": { + "host": "CUD1-001559", + "jolokia_agent_url": "http://localhost:7777/jolokia", + "partition": "1", + "topic": "qa-kafka-connect-logs" + }, + "timestamp": 1591124461 + } +] diff --git a/plugins/parsers/json_v2/testdata/nested_array_of_objects/telegraf.conf b/plugins/parsers/json_v2/testdata/nested_array_of_objects/telegraf.conf new file mode 100644 index 0000000000000..0dd7960d4ec36 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_array_of_objects/telegraf.conf @@ -0,0 +1,15 @@ +# Example taken from: https://github.com/influxdata/feature-requests/issues/160 + +[[inputs.file]] + files = ["./testdata/nested_array_of_objects/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "new_metric" + [[inputs.file.json_v2.object]] + path = "@this" + disable_prepend_keys = true + excluded_keys = ["tags", "timestamp"] + tags = ["name"] + [inputs.file.json_v2.object.fields] + data = "int" + diff --git a/plugins/parsers/json_v2/testdata/nested_tags/expected.out b/plugins/parsers/json_v2/testdata/nested_tags/expected.out new file mode 100644 index 0000000000000..7b31560a594bb --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags/expected.out @@ -0,0 +1,2 @@ +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHNGTUT Count=0,Errors=0 +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHLPW9T Errors=0,Count=0 diff --git a/plugins/parsers/json_v2/testdata/nested_tags/input.json b/plugins/parsers/json_v2/testdata/nested_tags/input.json new file mode 100644 index 0000000000000..c3226f34d8e14 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags/input.json @@ -0,0 +1,16 @@ +{ + "device0": { + "Count": 0, + "Errors": 0, + "Serial": "9JHNGTUT", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + }, + "device1": { + "Count": 0, + "Errors": 0, + "Serial": "9JHLPW9T", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + } +} diff --git a/plugins/parsers/json_v2/testdata/nested_tags/telegraf.conf b/plugins/parsers/json_v2/testdata/nested_tags/telegraf.conf new file mode 100644 index 0000000000000..e03bd032552ba --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags/telegraf.conf @@ -0,0 +1,12 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/6853 + +[[inputs.file]] + files = ["./testdata/nested_tags/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "device0" + tags = ["Firmware", "Model", "Serial"] + [[inputs.file.json_v2.object]] + path = "device1" + tags = ["Firmware", "Model", "Serial"] diff --git a/plugins/parsers/json_v2/testdata/nested_tags_complex/expected.out b/plugins/parsers/json_v2/testdata/nested_tags_complex/expected.out new file mode 100644 index 0000000000000..92757bada156d --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags_complex/expected.out @@ -0,0 +1,3 @@ +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHNGTUT Count=0,Errors=0 +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHNGHJBT Errors=0,Count=2 +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHLPW9T Errors=0,Count=0 diff --git a/plugins/parsers/json_v2/testdata/nested_tags_complex/input.json b/plugins/parsers/json_v2/testdata/nested_tags_complex/input.json new file mode 100644 index 0000000000000..b373d90a387b8 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags_complex/input.json @@ -0,0 +1,35 @@ +{ + "Group A": [ + { + "Sub-group 1": [ + { + "Count": 0, + "Errors": 0, + "Serial": "9JHNGTUT", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + }, + { + "Count": 2, + "Errors": 0, + "Serial": "9JHNGHJBT", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + } + ] + } + ], + "Group B": [ + { + "Sub-group 1": [ + { + "Count": 0, + "Errors": 0, + "Serial": "9JHLPW9T", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + } + ] + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/nested_tags_complex/telegraf.conf b/plugins/parsers/json_v2/testdata/nested_tags_complex/telegraf.conf new file mode 100644 index 0000000000000..61fba304a4f27 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags_complex/telegraf.conf @@ -0,0 +1,14 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/6853 + +[[inputs.file]] + files = ["./testdata/nested_tags_complex/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "Group A" + disable_prepend_keys = true + tags = ["Sub-group_1_Firmware", "Sub-group_1_Model", "Sub-group_1_Serial"] + [[inputs.file.json_v2.object]] + path = "Group B" + disable_prepend_keys = true + tags = ["Sub-group_1_Firmware", "Sub-group_1_Model", "Sub-group_1_Serial"] diff --git a/plugins/parsers/json_v2/testdata/object/expected.out b/plugins/parsers/json_v2/testdata/object/expected.out new file mode 100644 index 0000000000000..8832d32bf0b6c --- /dev/null +++ b/plugins/parsers/json_v2/testdata/object/expected.out @@ -0,0 +1,5 @@ +bart_json_v2,destination=Antioch,name=Colma minutes=13i +bart_json_v2,destination=Antioch,name=Colma minutes=43i +bart_json_v2,destination=Millbrae,name=Colma minutes=19i +bart_json_v2,destination=Millbrae,name=Colma minutes=49i +bart_json_v2,destination=Millbrae,name=Colma minutes=79i diff --git a/plugins/parsers/json_v2/testdata/object/input.json b/plugins/parsers/json_v2/testdata/object/input.json new file mode 100644 index 0000000000000..cc8b0851f81db --- /dev/null +++ b/plugins/parsers/json_v2/testdata/object/input.json @@ -0,0 +1,87 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=COLM&json=y" + }, + "date": "06/03/2021", + "time": "12:54:31 PM PDT", + "station": [ + { + "name": "Colma", + "abbr": "COLM", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "13", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "43", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Millbrae", + "abbreviation": "MLBR", + "limited": "0", + "estimate": [ + { + "minutes": "19", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "49", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "79", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/object/telegraf.conf b/plugins/parsers/json_v2/testdata/object/telegraf.conf new file mode 100644 index 0000000000000..6ad244fd71418 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/object/telegraf.conf @@ -0,0 +1,12 @@ +[[inputs.file]] + files = ["./testdata/object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "bart_json_v2" + [[inputs.file.json_v2.object]] + path = "root.station" + disable_prepend_keys = true + included_keys = ["name", "etd_destination", "etd_estimate_minutes"] + tags = ["name", "etd_destination"] + [inputs.file.json_v2.object.fields] + etd_estimate_minutes = "int" diff --git a/plugins/parsers/json_v2/testdata/timestamp/expected.out b/plugins/parsers/json_v2/testdata/timestamp/expected.out new file mode 100644 index 0000000000000..e2e7415171b27 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp/expected.out @@ -0,0 +1,4 @@ +file,name=temperature,units=℃ value=23.4 1555745371450794118 +file,name=moisture,units=% value=5 1555745371450794118 +file,name=light,units=lux value=10118 1555745371450794118 +file,name=fertility,units=us/cm value=0 1555745371450794118 diff --git a/plugins/parsers/json_v2/testdata/timestamp/input.json b/plugins/parsers/json_v2/testdata/timestamp/input.json new file mode 100644 index 0000000000000..356d986e1f193 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp/input.json @@ -0,0 +1,25 @@ +{ + "time": 1555745371410, + "measurements": [ + { + "name": "temperature", + "value": 23.4, + "units": "℃" + }, + { + "name": "moisture", + "value": 5, + "units": "%" + }, + { + "name": "light", + "value": 10118, + "units": "lux" + }, + { + "name": "fertility", + "value": 0, + "units": "us/cm" + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/timestamp/telegraf.conf b/plugins/parsers/json_v2/testdata/timestamp/telegraf.conf new file mode 100644 index 0000000000000..ffea2d652ffc7 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp/telegraf.conf @@ -0,0 +1,11 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/5940 + +[[inputs.file]] + files = ["./testdata/timestamp/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + timestamp_path = "time" + timestamp_format = "unix_ms" + [[inputs.file.json_v2.object]] + path = "measurements" + tags = ["name", "units"] diff --git a/plugins/parsers/json_v2/testdata/types/expected.out b/plugins/parsers/json_v2/testdata/types/expected.out new file mode 100644 index 0000000000000..87cee38ee0e83 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/types/expected.out @@ -0,0 +1,4 @@ +file explicitstringtypeName="Bilbo",defaultstringtypeName="Baggins",convertbooltostringName="true",convertinttostringName="1",convertfloattostringName="1.1" +file defaultinttypeName=2,convertfloatointName=3i,convertstringtointName=4i,convertbooltointName=0i,explicitinttypeName=1i,uinttype=1u +file convertstringtofloatName=4.1,explicitfloattypeName=1.1,defaultfloattypeName=2.1,convertintotfloatName=3 +file explicitbooltypeName=true,defaultbooltypeName=false,convertinttoboolName=true,convertstringtoboolName=false,convertintstringtoboolTrueName=true,convertintstringtoboolFalseName=false diff --git a/plugins/parsers/json_v2/testdata/types/input.json b/plugins/parsers/json_v2/testdata/types/input.json new file mode 100644 index 0000000000000..bb85fc9eaa9e3 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/types/input.json @@ -0,0 +1,22 @@ +{ + "explicitstringtype": "Bilbo", + "defaultstringtype": "Baggins", + "convertbooltostring": true, + "convertinttostring": 1, + "convertfloattostring": 1.1, + "explicitinttype": 1, + "defaultinttype": 2, + "convertfloatoint": 3.1, + "convertstringtoint": "4", + "convertbooltoint": false, + "explicitfloattype": 1.1, + "defaultfloattype": 2.1, + "convertintotfloat": 3, + "convertstringtofloat": "4.1", + "explicitbooltype": true, + "defaultbooltype": false, + "convertinttobool": 1, + "convertstringtobool": "false", + "convertintstringtoboolTrue": "1", + "convertintstringtoboolFalse": "0" +} diff --git a/plugins/parsers/json_v2/testdata/types/telegraf.conf b/plugins/parsers/json_v2/testdata/types/telegraf.conf new file mode 100644 index 0000000000000..6a23818193b9d --- /dev/null +++ b/plugins/parsers/json_v2/testdata/types/telegraf.conf @@ -0,0 +1,105 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/7097 + +# Parse String types from JSON +[[inputs.file]] + files = ["./testdata/types/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.field]] + rename = "explicitstringtypeName" + path = "explicitstringtype" + type = "string" + [[inputs.file.json_v2.field]] + rename = "defaultstringtypeName" + path = "defaultstringtype" + [[inputs.file.json_v2.field]] + rename = "convertbooltostringName" + path = "convertbooltostring" + type = "string" + [[inputs.file.json_v2.field]] + rename = "convertinttostringName" + path = "convertinttostring" + type = "string" + [[inputs.file.json_v2.field]] + rename = "convertfloattostringName" + path = "convertfloattostring" + type = "string" + +# Parse int typess from JSON +[[inputs.file]] + files = ["./testdata/types/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.field]] + rename = "explicitinttypeName" + path = "explicitinttype" + type = "int" + [[inputs.file.json_v2.field]] + rename = "uinttype" + path = "explicitinttype" + type = "uint" + [[inputs.file.json_v2.field]] + rename = "defaultinttypeName" + path = "defaultinttype" + [[inputs.file.json_v2.field]] + rename = "convertfloatointName" + path = "convertfloatoint" + type = "int" + [[inputs.file.json_v2.field]] + rename = "convertstringtointName" + path = "convertstringtoint" + type = "int" + [[inputs.file.json_v2.field]] + rename = "convertbooltointName" + path = "convertbooltoint" + type = "int" + +# Parse float types from JSON +[[inputs.file]] + files = ["./testdata/types/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.field]] + rename = "explicitfloattypeName" + path = "explicitfloattype" + type = "float" + [[inputs.file.json_v2.field]] + rename = "defaultfloattypeName" + path = "defaultfloattype" + [[inputs.file.json_v2.field]] + rename = "convertintotfloatName" + path = "convertintotfloat" + type = "float" + [[inputs.file.json_v2.field]] + rename = "convertstringtofloatName" + path = "convertstringtofloat" + type = "float" + +# Parse bool types from JSON +[[inputs.file]] + files = ["./testdata/types/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.field]] + rename = "explicitbooltypeName" + path = "explicitbooltype" + type = "bool" + [[inputs.file.json_v2.field]] + rename = "defaultbooltypeName" + path = "defaultbooltype" + [[inputs.file.json_v2.field]] + rename = "convertinttoboolName" + path = "convertinttobool" + type = "bool" + [[inputs.file.json_v2.field]] + rename = "convertstringtoboolName" + path = "convertstringtobool" + type = "bool" + [[inputs.file.json_v2.field]] + rename = "convertintstringtoboolTrueName" + path = "convertintstringtoboolTrue" + type = "bool" + [[inputs.file.json_v2.field]] + rename = "convertintstringtoboolFalseName" + path = "convertintstringtoboolFalse" + type = "bool" diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index b2e66636cb1b8..4e01fb0a630fe 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -12,6 +12,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/grok" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/influxdata/telegraf/plugins/parsers/json_v2" "github.com/influxdata/telegraf/plugins/parsers/logfmt" "github.com/influxdata/telegraf/plugins/parsers/nagios" "github.com/influxdata/telegraf/plugins/parsers/prometheus" @@ -51,6 +52,8 @@ type Parser interface { // and parses it into a telegraf metric. // // Must be thread-safe. + // This function is only called by plugins that expect line based protocols + // Doesn't need to be implemented by non-linebased parsers (e.g. json, xml) ParseLine(line string) (telegraf.Metric, error) // SetDefaultTags tells the parser to add all of the given tags @@ -158,12 +161,19 @@ type Config struct { // XML configuration XMLConfig []XMLConfig `toml:"xml"` + + // JSONPath configuration + JSONV2Config []JSONV2Config `toml:"json_v2"` } type XMLConfig struct { xml.Config } +type JSONV2Config struct { + json_v2.Config +} + // NewParser returns a Parser interface based on the given config. func NewParser(config *Config) (Parser, error) { var err error @@ -253,6 +263,8 @@ func NewParser(config *Config) (Parser, error) { parser, err = NewPrometheusRemoteWriteParser(config.DefaultTags) case "xml": parser, err = NewXMLParser(config.MetricName, config.DefaultTags, config.XMLConfig) + case "json_v2": + parser, err = NewJSONPathParser(config.JSONV2Config) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } @@ -395,3 +407,23 @@ func NewXMLParser(metricName string, defaultTags map[string]string, xmlConfigs [ DefaultTags: defaultTags, }, nil } + +func NewJSONPathParser(jsonv2config []JSONV2Config) (Parser, error) { + configs := make([]json_v2.Config, len(jsonv2config)) + for i, cfg := range jsonv2config { + configs[i].MeasurementName = cfg.MeasurementName + configs[i].MeasurementNamePath = cfg.MeasurementNamePath + + configs[i].TimestampPath = cfg.TimestampPath + configs[i].TimestampFormat = cfg.TimestampFormat + configs[i].TimestampTimezone = cfg.TimestampTimezone + + configs[i].Fields = cfg.Fields + configs[i].Tags = cfg.Tags + + configs[i].JSONObjects = cfg.JSONObjects + } + return &json_v2.Parser{ + Configs: configs, + }, nil +} From c10bb9fa5906870e81775623be2944df8e16d54a Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 10 Jun 2021 13:41:19 -0600 Subject: [PATCH 459/761] Update changelog (cherry picked from commit 1751f5fa22644f744257fcfedf3b5f4de6d0f283) --- CHANGELOG.md | 5 ++++- etc/telegraf.conf | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0358541414037..2a40e857433be 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,9 @@ -## v1.19.0-rc0 [2021-06-04] +## v1.19.0-rc1 [2021-06-10] #### Release Notes - Many linter fixes - thanks @zak-pawel and all! +- [#9331](https://github.com/influxdata/telegraf/pull/9331) Update Go to 1.16.5 #### Bugfixes @@ -11,6 +12,7 @@ - [#9196](https://github.com/influxdata/telegraf/pull/9196) `serializers.prometheusremotewrite` Update dependency and remove tags with empty values - [#9051](https://github.com/influxdata/telegraf/pull/9051) `outputs.kafka` Don't prevent telegraf from starting when there's a connection error - [#8795](https://github.com/influxdata/telegraf/pull/8795) `parsers.prometheusremotewrite` Update prometheus dependency to v2.21.0 +- [#9295](https://github.com/influxdata/telegraf/pull/9295) `outputs.dynatrace` Use dynatrace-metric-utils #### Features @@ -62,6 +64,7 @@ #### New Parser Plugins - [Prometheus Remote Write](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/prometheusremotewrite) - contributed by @helenosheaa +- [JSON V2](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/json_v2) - contributed by @sspaink #### New External Plugins diff --git a/etc/telegraf.conf b/etc/telegraf.conf index c7636e79d728d..42e7d22b54b8f 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -545,8 +545,8 @@ # ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. # api_token = "" # -# ## Optional prefix for metric names (e.g.: "telegraf.") -# prefix = "telegraf." +# ## Optional prefix for metric names (e.g.: "telegraf") +# prefix = "telegraf" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" From f6a9d104f8fd934b10e0cbff871979a8f1e1e897 Mon Sep 17 00:00:00 2001 From: Mya Longmire Date: Thu, 10 Jun 2021 14:51:33 -0600 Subject: [PATCH 460/761] Feature powermode smartctl (#9306) Co-authored-by: Steven Soroka --- plugins/inputs/smart/smart.go | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index 4533ea768432d..b0f189d69fbf9 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -25,19 +25,23 @@ var ( // Device Model: APPLE SSD SM256E // Product: HUH721212AL5204 // Model Number: TS128GMTE850 - modelInfo = regexp.MustCompile("^(Device Model|Product|Model Number):\\s+(.*)$") + modelInfo = regexp.MustCompile(`^(Device Model|Product|Model Number):\s+(.*)$`) // Serial Number: S0X5NZBC422720 - serialInfo = regexp.MustCompile("(?i)^Serial Number:\\s+(.*)$") + serialInfo = regexp.MustCompile(`(?i)^Serial Number:\s+(.*)$`) // LU WWN Device Id: 5 002538 655584d30 - wwnInfo = regexp.MustCompile("^LU WWN Device Id:\\s+(.*)$") + wwnInfo = regexp.MustCompile(`^LU WWN Device Id:\s+(.*)$`) // User Capacity: 251,000,193,024 bytes [251 GB] - userCapacityInfo = regexp.MustCompile("^User Capacity:\\s+([0-9,]+)\\s+bytes.*$") + userCapacityInfo = regexp.MustCompile(`^User Capacity:\s+([0-9,]+)\s+bytes.*$`) // SMART support is: Enabled - smartEnabledInfo = regexp.MustCompile("^SMART support is:\\s+(\\w+)$") + smartEnabledInfo = regexp.MustCompile(`^SMART support is:\s+(\w+)$`) + // Power mode is: ACTIVE or IDLE or Power mode was: STANDBY + powermodeInfo = regexp.MustCompile(`^Power mode \w+:\s+(\w+)`) + // Device is in STANDBY mode + standbyInfo = regexp.MustCompile(`^Device is in\s+(\w+)`) // SMART overall-health self-assessment test result: PASSED // SMART Health Status: OK // PASSED, FAILED, UNKNOWN - smartOverallHealth = regexp.MustCompile("^(SMART overall-health self-assessment test result|SMART Health Status):\\s+(\\w+).*$") + smartOverallHealth = regexp.MustCompile(`^(SMART overall-health self-assessment test result|SMART Health Status):\s+(\w+).*$`) // sasNvmeAttr is a SAS or NVME SMART attribute sasNvmeAttr = regexp.MustCompile(`^([^:]+):\s+(.+)$`) @@ -46,7 +50,7 @@ var ( // 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0 // 5 Reallocated_Sector_Ct PO--CK 100 100 000 - 0 // 192 Power-Off_Retract_Count -O--C- 097 097 000 - 14716 - attribute = regexp.MustCompile("^\\s*([0-9]+)\\s(\\S+)\\s+([-P][-O][-S][-R][-C][-K])\\s+([0-9]+)\\s+([0-9]+)\\s+([0-9-]+)\\s+([-\\w]+)\\s+([\\w\\+\\.]+).*$") + attribute = regexp.MustCompile(`^\s*([0-9]+)\s(\S+)\s+([-P][-O][-S][-R][-C][-K])\s+([0-9]+)\s+([0-9]+)\s+([0-9-]+)\s+([-\w]+)\s+([\w\+\.]+).*$`) // Additional Smart Log for NVME device:nvme0 namespace-id:ffffffff // key normalized raw @@ -693,11 +697,24 @@ func gatherDisk(acc telegraf.Accumulator, timeout config.Duration, usesudo, coll deviceFields["health_ok"] = health[2] == "PASSED" || health[2] == "OK" } + // checks to see if there is a power mode to print to user + // if not look for Device is in STANDBY which happens when + // nocheck is set to standby (will exit to not spin up the disk) + // otherwise nothing is found so nothing is printed (NVMe does not show power) + if power := powermodeInfo.FindStringSubmatch(line); len(power) > 1 { + deviceTags["power"] = power[1] + } else { + if power := standbyInfo.FindStringSubmatch(line); len(power) > 1 { + deviceTags["power"] = power[1] + } + } + tags := map[string]string{} fields := make(map[string]interface{}) if collectAttributes { - keys := [...]string{"device", "model", "serial_no", "wwn", "capacity", "enabled"} + //add power mode + keys := [...]string{"device", "model", "serial_no", "wwn", "capacity", "enabled", "power"} for _, key := range keys { if value, ok := deviceTags[key]; ok { tags[key] = value From da7f2c7a934b5dc763ab10c7db02fe65b201686e Mon Sep 17 00:00:00 2001 From: Mya Longmire Date: Thu, 10 Jun 2021 15:05:43 -0600 Subject: [PATCH 461/761] Bugfix outputs influxdb endless retires (#9296) --- plugins/outputs/influxdb/http.go | 30 +++++++---- plugins/outputs/influxdb/http_test.go | 75 ++++++++++++++++++++------- plugins/outputs/influxdb/influxdb.go | 13 ++++- testutil/capturelog.go | 60 +++++++++++++++++++++ 4 files changed, 149 insertions(+), 29 deletions(-) create mode 100644 testutil/capturelog.go diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 21265ba44def8..5c11d2821d2f1 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -22,13 +22,14 @@ import ( ) const ( - defaultRequestTimeout = time.Second * 5 - defaultDatabase = "telegraf" - errStringDatabaseNotFound = "database not found" - errStringHintedHandoffNotEmpty = "hinted handoff queue not empty" - errStringPartialWrite = "partial write" - errStringPointsBeyondRP = "points beyond retention policy" - errStringUnableToParse = "unable to parse" + defaultRequestTimeout = time.Second * 5 + defaultDatabase = "telegraf" + errStringDatabaseNotFound = "database not found" + errStringRetentionPolicyNotFound = "retention policy not found" + errStringHintedHandoffNotEmpty = "hinted handoff queue not empty" + errStringPartialWrite = "partial write" + errStringPointsBeyondRP = "points beyond retention policy" + errStringUnableToParse = "unable to parse" ) var ( @@ -356,7 +357,7 @@ func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []te body, err := c.validateResponse(resp.Body) - // Check for poorly formatted response (can't be decoded) + // Check for poorly formatted response that can't be decoded if err != nil { return &APIError{ StatusCode: resp.StatusCode, @@ -373,7 +374,6 @@ func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []te if err == nil { desc = writeResp.Err } - if strings.Contains(desc, errStringDatabaseNotFound) { return &DatabaseNotFoundError{ APIError: APIError{ @@ -385,6 +385,18 @@ func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []te } } + //checks for any 4xx code and drops metric and retrying will not make the request work + if len(resp.Status) > 0 && resp.Status[0] == '4' { + c.log.Errorf("E! [outputs.influxdb] Failed to write metric (will be dropped: %s): %s\n", resp.Status, desc) + return nil + } + + // This error handles if there is an invaild or missing retention policy + if strings.Contains(desc, errStringRetentionPolicyNotFound) { + c.log.Errorf("When writing to [%s]: received error %v", c.URL(), desc) + return nil + } + // This "error" is an informational message about the state of the // InfluxDB cluster. if strings.Contains(desc, errStringHintedHandoffNotEmpty) { diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 2f46e2441e937..39ac2b108da91 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -1,3 +1,4 @@ +//nolint package influxdb_test import ( @@ -13,7 +14,6 @@ import ( "net/url" "os" "path" - "strings" "testing" "time" @@ -386,7 +386,7 @@ func TestHTTP_Write(t *testing.T) { }, }, { - name: "hinted handoff not empty no log no error", + name: "hinted handoff not empty no error", config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", @@ -396,8 +396,8 @@ func TestHTTP_Write(t *testing.T) { w.WriteHeader(http.StatusBadRequest) w.Write([]byte(`{"error": "write failed: hinted handoff queue not empty"}`)) }, - logFunc: func(t *testing.T, str string) { - require.False(t, strings.Contains(str, "hinted handoff queue not empty")) + errFunc: func(t *testing.T, err error) { + require.NoError(t, err) }, }, { @@ -1077,19 +1077,6 @@ func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) { handlers := &MockHandlerChain{ handlers: []http.HandlerFunc{ - func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/query": - if r.FormValue("q") != `CREATE DATABASE "telegraf"` { - w.WriteHeader(http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusForbidden) - w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`)) - default: - w.WriteHeader(http.StatusInternalServerError) - } - }, func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/write": @@ -1147,9 +1134,61 @@ func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) { err = output.Connect() require.NoError(t, err) err = output.Write(metrics) - require.Error(t, err) + require.NoError(t, err) err = output.Write(metrics) require.NoError(t, err) require.True(t, handlers.Done(), "all handlers not called") } + +func TestDBNotFoundShouldDropMetricWhenSkipDatabaseCreateIsTrue(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + f := func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte(`{"error": "database not found: \"telegraf\""}`)) + default: + w.WriteHeader(http.StatusInternalServerError) + } + } + + ts.Config.Handler = http.HandlerFunc(f) + + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + } + + logger := &testutil.CaptureLogger{} + output := influxdb.InfluxDB{ + URL: u.String(), + Database: "telegraf", + DatabaseTag: "database", + SkipDatabaseCreation: true, + Log: logger, + CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { + return influxdb.NewHTTPClient(*config) + }, + } + + err = output.Connect() + require.NoError(t, err) + err = output.Write(metrics) + require.Contains(t, logger.LastError, "database not found") + require.NoError(t, err) + + err = output.Write(metrics) + require.Contains(t, logger.LastError, "database not found") + require.NoError(t, err) +} diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 0bb4c01cc6996..36b38a9c906c5 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -210,6 +210,7 @@ func (i *InfluxDB) SampleConfig() string { func (i *InfluxDB) Write(metrics []telegraf.Metric) error { ctx := context.Background() + allErrorsAreDatabaseNotFoundErrors := true var err error p := rand.Perm(len(i.clients)) for _, n := range p { @@ -219,20 +220,28 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { return nil } + i.Log.Errorf("When writing to [%s]: %v", client.URL(), err) + switch apiError := err.(type) { case *DatabaseNotFoundError: if !i.SkipDatabaseCreation { + allErrorsAreDatabaseNotFoundErrors = false err := client.CreateDatabase(ctx, apiError.Database) if err != nil { i.Log.Errorf("When writing to [%s]: database %q not found and failed to recreate", client.URL(), apiError.Database) + } else { + // try another client, if all clients fail with this error, do not return error + continue } } } - - i.Log.Errorf("When writing to [%s]: %v", client.URL(), err) } + if allErrorsAreDatabaseNotFoundErrors { + // return nil because we should not be retrying this + return nil + } return errors.New("could not write any address") } diff --git a/testutil/capturelog.go b/testutil/capturelog.go new file mode 100644 index 0000000000000..d26609fffd6ab --- /dev/null +++ b/testutil/capturelog.go @@ -0,0 +1,60 @@ +package testutil + +import ( + "fmt" + "log" //nolint + + "github.com/influxdata/telegraf" +) + +var _ telegraf.Logger = &CaptureLogger{} + +// CaptureLogger defines a logging structure for plugins. +type CaptureLogger struct { + Name string // Name is the plugin name, will be printed in the `[]`. + LastError string +} + +// Errorf logs an error message, patterned after log.Printf. +func (l *CaptureLogger) Errorf(format string, args ...interface{}) { + s := fmt.Sprintf("E! ["+l.Name+"] "+format, args...) + l.LastError = s + log.Print(s) +} + +// Error logs an error message, patterned after log.Print. +func (l *CaptureLogger) Error(args ...interface{}) { + s := fmt.Sprint(append([]interface{}{"E! [" + l.Name + "] "}, args...)...) + l.LastError = s + log.Print(s) +} + +// Debugf logs a debug message, patterned after log.Printf. +func (l *CaptureLogger) Debugf(format string, args ...interface{}) { + log.Printf("D! ["+l.Name+"] "+format, args...) +} + +// Debug logs a debug message, patterned after log.Print. +func (l *CaptureLogger) Debug(args ...interface{}) { + log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...) +} + +// Warnf logs a warning message, patterned after log.Printf. +func (l *CaptureLogger) Warnf(format string, args ...interface{}) { + log.Printf("W! ["+l.Name+"] "+format, args...) +} + +// Warn logs a warning message, patterned after log.Print. +func (l *CaptureLogger) Warn(args ...interface{}) { + log.Print(append([]interface{}{"W! [" + l.Name + "] "}, args...)...) +} + +// Infof logs an information message, patterned after log.Printf. +func (l *CaptureLogger) Infof(format string, args ...interface{}) { + log.Printf("I! ["+l.Name+"] "+format, args...) +} + +// Info logs an information message, patterned after log.Print. +func (l *CaptureLogger) Info(args ...interface{}) { + log.Print(append([]interface{}{"I! [" + l.Name + "] "}, args...)...) +} From 1d4b8d62f5e4bb0caff3973bdb3b2c52b87d5b60 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 14 Jun 2021 10:07:36 -0500 Subject: [PATCH 462/761] Support new Suricata JSON format which includes arrays and strings (#9338) --- plugins/inputs/suricata/suricata.go | 9 +++++ plugins/inputs/suricata/suricata_test.go | 38 +++++++++++++++++++++ plugins/inputs/suricata/testdata/test2.json | 21 ++++++++++++ 3 files changed, 68 insertions(+) create mode 100644 plugins/inputs/suricata/testdata/test2.json diff --git a/plugins/inputs/suricata/suricata.go b/plugins/inputs/suricata/suricata.go index 631c6af0a05b2..8fd48b5cfd747 100644 --- a/plugins/inputs/suricata/suricata.go +++ b/plugins/inputs/suricata/suricata.go @@ -148,6 +148,15 @@ func flexFlatten(outmap map[string]interface{}, field string, v interface{}, del return err } } + case []interface{}: + for _, v := range t { + err := flexFlatten(outmap, field, v, delimiter) + if err != nil { + return err + } + } + case string: + outmap[field] = v case float64: outmap[field] = v.(float64) default: diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index f3204f29e5631..ab03de057c18c 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -296,3 +296,41 @@ func TestSuricataStartStop(t *testing.T) { require.NoError(t, s.Start(&acc)) s.Stop() } + +func TestSuricataParse(t *testing.T) { + tests := []struct { + filename string + expected []telegraf.Metric + }{{ + filename: "test2.json", + expected: []telegraf.Metric{ + testutil.MustMetric( + "suricata", + map[string]string{ + "thread": "W#01-ens2f1", + }, + map[string]interface{}{ + "detect_alert": float64(0), + "detect_engines_id": float64(0), + "detect_engines_last_reload": "2021-06-08T06:33:05.084872+0000", + "detect_engines_rules_failed": float64(0), + "detect_engines_rules_loaded": float64(22712), + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tc := range tests { + data, err := ioutil.ReadFile("testdata/" + tc.filename) + require.NoError(t, err) + s := Suricata{ + Delimiter: "_", + } + acc := testutil.Accumulator{} + s.parse(&acc, data) + + testutil.RequireMetricsEqual(t, tc.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + } +} diff --git a/plugins/inputs/suricata/testdata/test2.json b/plugins/inputs/suricata/testdata/test2.json new file mode 100644 index 0000000000000..edb7d245df1fd --- /dev/null +++ b/plugins/inputs/suricata/testdata/test2.json @@ -0,0 +1,21 @@ +{ + "timestamp": "2021-06-08T06:34:49.237367+0000", + "event_type": "stats", + "stats": { + "threads": { + "W#01-ens2f1": { + "detect": { + "engines": [ + { + "id": 0, + "last_reload": "2021-06-08T06:33:05.084872+0000", + "rules_loaded": 22712, + "rules_failed": 0 + } + ], + "alert": 0 + } + } + } + } +} From 905b22cac9d3f781fc12279e11f455ee2185a464 Mon Sep 17 00:00:00 2001 From: Vyacheslav Stepanov Date: Tue, 15 Jun 2021 07:23:39 +0300 Subject: [PATCH 463/761] Closing all idle connections in docker input plugin (#9243) This prevents error "too many open files" in most cases --- plugins/inputs/docker/client.go | 4 ++++ plugins/inputs/docker/docker.go | 9 ++++++--- plugins/inputs/docker/docker_test.go | 11 +++++++++++ 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/docker/client.go b/plugins/inputs/docker/client.go index 3ea24ea742530..14e4396980b9a 100644 --- a/plugins/inputs/docker/client.go +++ b/plugins/inputs/docker/client.go @@ -23,6 +23,7 @@ type Client interface { ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + Close() error } func NewEnvClient() (Client, error) { @@ -76,3 +77,6 @@ func (c *SocketClient) TaskList(ctx context.Context, options types.TaskListOptio func (c *SocketClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { return c.client.NodeList(ctx, options) } +func (c *SocketClient) Close() error { + return c.client.Close() +} diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 1b44351178d41..47eab7ce2430e 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -123,7 +123,7 @@ var sampleConfig = ` ## Whether to report for each container per-device blkio (8:0, 8:1...), ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. - ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting + ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting ## is honored. perdevice = true @@ -134,12 +134,12 @@ var sampleConfig = ` ## Whether to report for each container total blkio and network stats or not. ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. - ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting + ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting ## is honored. total = false ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. - ## Possible values are 'cpu', 'blkio' and 'network' + ## Possible values are 'cpu', 'blkio' and 'network' ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. ## Please note that this setting has no effect if 'total' is set to 'false' # total_include = ["cpu", "blkio", "network"] @@ -213,6 +213,9 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { d.client = c } + // Close any idle connections in the end of gathering + defer d.client.Close() + // Create label filters if not already created if !d.filtersCreated { err := d.createLabelFilters() diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 88adc600e77eb..f5a8ff7a89b83 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -26,6 +26,7 @@ type MockClient struct { ServiceListF func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) TaskListF func(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) NodeListF func(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + CloseF func() error } func (c *MockClient) Info(ctx context.Context) (types.Info, error) { @@ -75,6 +76,10 @@ func (c *MockClient) NodeList( return c.NodeListF(ctx, options) } +func (c *MockClient) Close() error { + return c.CloseF() +} + var baseClient = MockClient{ InfoF: func(context.Context) (types.Info, error) { return info, nil @@ -97,6 +102,9 @@ var baseClient = MockClient{ NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) { return NodeList, nil }, + CloseF: func() error { + return nil + }, } func newClient(_ string, _ *tls.Config) (Client, error) { @@ -279,6 +287,9 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) { NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) { return NodeList, nil }, + CloseF: func() error { + return nil + }, }, nil }, } From 908ad2f6ce5dcd5bded597c8e0d840666acfb297 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 15 Jun 2021 21:10:52 +0200 Subject: [PATCH 464/761] Generic SQL input (#8735) --- README.md | 1 + docs/SQL_DRIVERS_INPUT.md | 43 ++ filter/filter.go | 22 +- go.sum | 70 +++ internal/internal.go | 17 + internal/type_conversions.go | 198 +++++++ plugins/inputs/all/all.go | 1 + plugins/inputs/sql/README.md | 153 +++++ plugins/inputs/sql/drivers.go | 8 + plugins/inputs/sql/drivers_sqlite.go | 8 + plugins/inputs/sql/sql.go | 542 ++++++++++++++++++ plugins/inputs/sql/sql_test.go | 272 +++++++++ .../inputs/sql/testdata/mariadb/expected.sql | 36 ++ .../inputs/sql/testdata/postgres/expected.sql | 41 ++ 14 files changed, 1409 insertions(+), 3 deletions(-) create mode 100644 docs/SQL_DRIVERS_INPUT.md create mode 100644 internal/type_conversions.go create mode 100644 plugins/inputs/sql/README.md create mode 100644 plugins/inputs/sql/drivers.go create mode 100644 plugins/inputs/sql/drivers_sqlite.go create mode 100644 plugins/inputs/sql/sql.go create mode 100644 plugins/inputs/sql/sql_test.go create mode 100644 plugins/inputs/sql/testdata/mariadb/expected.sql create mode 100644 plugins/inputs/sql/testdata/postgres/expected.sql diff --git a/README.md b/README.md index 0702d6b4d79eb..b579cdd811cf8 100644 --- a/README.md +++ b/README.md @@ -324,6 +324,7 @@ For documentation on the latest development code see the [documentation index][d * [snmp_trap](./plugins/inputs/snmp_trap) * [socket_listener](./plugins/inputs/socket_listener) * [solr](./plugins/inputs/solr) +* [sql](./plugins/inputs/sql) (generic SQL query plugin) * [sql server](./plugins/inputs/sqlserver) (microsoft) * [stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring) * [sql](./plugins/outputs/sql) (SQL generic output) diff --git a/docs/SQL_DRIVERS_INPUT.md b/docs/SQL_DRIVERS_INPUT.md new file mode 100644 index 0000000000000..81049fcee9f99 --- /dev/null +++ b/docs/SQL_DRIVERS_INPUT.md @@ -0,0 +1,43 @@ +# Available SQL drivers for the SQL input plugin + +This is a list of available drivers for the SQL input plugin. The data-source-name (DSN) is driver specific and +might change between versions. Please check the driver documentation for available options and the format. + +database | driver | aliases | example DSN | comment +---------------------| ------------------------------------------------------| --------------- | -------------------------------------------------------------------------------------- | ------- +CockroachDB | [cockroach](https://github.com/jackc/pgx) | postgres
pgx | see _postgres_ driver | uses PostgresQL driver +MariaDB | [maria](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver +Microsoft SQL Server | [sqlserver](https://github.com/denisenkom/go-mssqldb) | mssql | `username:password@host/instance?param1=value¶m2=value` | uses newer _sqlserver_ driver +MySQL | [mysql](https://github.com/go-sql-driver/mysql) | | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]` | see [driver docs](https://github.com/go-sql-driver/mysql) for more information +PostgreSQL | [postgres](https://github.com/jackc/pgx) | pgx | `[user[:password]@][netloc][:port][,...][/dbname][?param1=value1&...]` | see [postgres docs](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) for more information +SQLite | [sqlite](https://gitlab.com/cznic/sqlite) | | `filename` | see [driver docu](https://pkg.go.dev/modernc.org/sqlite) for more information +TiDB | [tidb](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver + +## Comments + +### Driver aliases +Some database drivers are supported though another driver (e.g. CockroachDB). For other databases we provide a more +obvious name (e.g. postgres) compared to the driver name. For all of those drivers you might use an _alias_ name +during configuration. + +### Example data-source-name DSN +The given examples are just that, so please check the driver documentation for the exact format +and available options and parameters. Please note that the format of a DSN might also change +between driver version. + +### Type conversions +Telegraf relies on type conversion of the database driver and/or the golang sql framework. In case you find +any problem, please open an issue! + +## Help +If nothing seems to work, you might find help in the telegraf forum or in the chat. + +### The documentation is wrong +Please open an issue or even better send a pull-request! + +### I found a bug +Please open an issue or even better send a pull-request! + +### My database is not supported +We currently cannot support CGO drivers in telegraf! Please check if a **pure Go** driver for the [golang sql framework](https://golang.org/pkg/database/sql/) exists. +If you found such a driver, please let us know by opening an issue or even better by sending a pull-request! diff --git a/filter/filter.go b/filter/filter.go index 29fcb8c4fafcc..984fa3ed08f70 100644 --- a/filter/filter.go +++ b/filter/filter.go @@ -79,13 +79,24 @@ func compileFilterNoGlob(filters []string) Filter { } type IncludeExcludeFilter struct { - include Filter - exclude Filter + include Filter + exclude Filter + includeDefault bool + excludeDefault bool } func NewIncludeExcludeFilter( include []string, exclude []string, +) (Filter, error) { + return NewIncludeExcludeFilterDefaults(include, exclude, true, false) +} + +func NewIncludeExcludeFilterDefaults( + include []string, + exclude []string, + includeDefault bool, + excludeDefault bool, ) (Filter, error) { in, err := Compile(include) if err != nil { @@ -97,7 +108,7 @@ func NewIncludeExcludeFilter( return nil, err } - return &IncludeExcludeFilter{in, ex}, nil + return &IncludeExcludeFilter{in, ex, includeDefault, excludeDefault}, nil } func (f *IncludeExcludeFilter) Match(s string) bool { @@ -105,12 +116,17 @@ func (f *IncludeExcludeFilter) Match(s string) bool { if !f.include.Match(s) { return false } + } else if !f.includeDefault { + return false } if f.exclude != nil { if f.exclude.Match(s) { return false } + } else if f.excludeDefault { + return false } + return true } diff --git a/go.sum b/go.sum index f4184b3ce9510..bd20f28427fa8 100644 --- a/go.sum +++ b/go.sum @@ -110,23 +110,39 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 h1:mw6pDQqv38/WGF1cO/jF5t/jyAJ2yi7CmtFLLO5tGFI= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 h1:mw6pDQqv38/WGF1cO/jF5t/jyAJ2yi7CmtFLLO5tGFI= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16 h1:8/auA4LFIZFTGrqfKhGBSXwM6/4X1fHa/xniyEHu8ac= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -246,10 +262,13 @@ github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= @@ -257,6 +276,8 @@ github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6L github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -287,12 +308,14 @@ github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABA github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 h1:hkGVFjz+plgr5UfxZUTPFbUFIF/Km6/s+RVRIRHLrrY= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= @@ -309,14 +332,17 @@ github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go. github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4 h1:zjz4MOAOFgdBlwid2nNUlJ3YLpVi/97L36lfMYJex60= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e h1:6JKvHHt396/qabvMhnhUZvWaHZzfVfldxE60TK8YLhg= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= @@ -330,9 +356,11 @@ github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= @@ -405,16 +433,23 @@ github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -427,6 +462,7 @@ github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97h github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dynatrace-oss/dynatrace-metric-utils-go v0.1.0 h1:ldKn47mFgWCoiJRXA32psdEACPKffX9O1Msh1K8M+f0= github.com/dynatrace-oss/dynatrace-metric-utils-go v0.1.0/go.mod h1:qw0E9EJ0PnSlhWawDNuqE0zhc1hqOBUCFIAj3dd9DNw= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -614,6 +650,7 @@ github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9 github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -885,6 +922,7 @@ github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJS github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -1045,11 +1083,16 @@ github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hx github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= +github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= +github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= @@ -1107,6 +1150,7 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1131,17 +1175,30 @@ github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVo github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93 h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM= +github.com/opencontainers/runc v1.0.0-rc93 h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= @@ -1230,6 +1287,7 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 h1:IB/5RJRcJiR/YzKs4Aou86s/RaMepZOZVCArYNHJHWc= @@ -1296,6 +1354,7 @@ github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbP github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -1307,6 +1366,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -1330,6 +1391,7 @@ github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bd github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= @@ -1586,6 +1648,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1650,6 +1713,8 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1678,6 +1743,7 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1689,11 +1755,15 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/internal/internal.go b/internal/internal.go index 055ea361c3d26..4441e9acfbf03 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -297,8 +297,25 @@ func parseComponents(timestamp interface{}) (int64, int64, error) { return 0, 0, err } return integer, 0, nil + case int8: + return int64(ts), 0, nil + case int16: + return int64(ts), 0, nil + case int32: + return int64(ts), 0, nil case int64: return ts, 0, nil + case uint8: + return int64(ts), 0, nil + case uint16: + return int64(ts), 0, nil + case uint32: + return int64(ts), 0, nil + case uint64: + return int64(ts), 0, nil + case float32: + integer, fractional := math.Modf(float64(ts)) + return int64(integer), int64(fractional * 1e9), nil case float64: integer, fractional := math.Modf(ts) return int64(integer), int64(fractional * 1e9), nil diff --git a/internal/type_conversions.go b/internal/type_conversions.go new file mode 100644 index 0000000000000..ed4ed374a3ffd --- /dev/null +++ b/internal/type_conversions.go @@ -0,0 +1,198 @@ +package internal + +import ( + "fmt" + "strconv" +) + +func ToString(value interface{}) (string, error) { + switch v := value.(type) { + case string: + return v, nil + case []byte: + return string(v), nil + case int: + return strconv.FormatInt(int64(v), 10), nil + case int8: + return strconv.FormatInt(int64(v), 10), nil + case int16: + return strconv.FormatInt(int64(v), 10), nil + case int32: + return strconv.FormatInt(int64(v), 10), nil + case int64: + return strconv.FormatInt(v, 10), nil + case uint: + return strconv.FormatUint(uint64(v), 10), nil + case uint8: + return strconv.FormatUint(uint64(v), 10), nil + case uint16: + return strconv.FormatUint(uint64(v), 10), nil + case uint32: + return strconv.FormatUint(uint64(v), 10), nil + case uint64: + return strconv.FormatUint(v, 10), nil + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32), nil + case float64: + return strconv.FormatFloat(v, 'f', -1, 64), nil + case bool: + return strconv.FormatBool(v), nil + case fmt.Stringer: + return v.String(), nil + case nil: + return "", nil + } + return "", fmt.Errorf("type \"%T\" unsupported", value) +} + +func ToFloat64(value interface{}) (float64, error) { + switch v := value.(type) { + case string: + return strconv.ParseFloat(v, 64) + case []byte: + return strconv.ParseFloat(string(v), 64) + case fmt.Stringer: + return strconv.ParseFloat(v.String(), 64) + case int: + return float64(v), nil + case int8: + return float64(v), nil + case int16: + return float64(v), nil + case int32: + return float64(v), nil + case int64: + return float64(v), nil + case uint: + return float64(v), nil + case uint8: + return float64(v), nil + case uint16: + return float64(v), nil + case uint32: + return float64(v), nil + case uint64: + return float64(v), nil + case float32: + return float64(v), nil + case float64: + return v, nil + case nil: + return 0, nil + } + return 0, fmt.Errorf("type \"%T\" unsupported", value) +} + +func ToInt64(value interface{}) (int64, error) { + switch v := value.(type) { + case string: + return strconv.ParseInt(v, 10, 64) + case []byte: + return strconv.ParseInt(string(v), 10, 64) + case fmt.Stringer: + return strconv.ParseInt(v.String(), 10, 64) + case int: + return int64(v), nil + case int8: + return int64(v), nil + case int16: + return int64(v), nil + case int32: + return int64(v), nil + case int64: + return v, nil + case uint: + return int64(v), nil + case uint8: + return int64(v), nil + case uint16: + return int64(v), nil + case uint32: + return int64(v), nil + case uint64: + return int64(v), nil + case float32: + return int64(v), nil + case float64: + return int64(v), nil + case nil: + return 0, nil + } + return 0, fmt.Errorf("type \"%T\" unsupported", value) +} + +func ToUint64(value interface{}) (uint64, error) { + switch v := value.(type) { + case string: + return strconv.ParseUint(v, 10, 64) + case []byte: + return strconv.ParseUint(string(v), 10, 64) + case fmt.Stringer: + return strconv.ParseUint(v.String(), 10, 64) + case int: + return uint64(v), nil + case int8: + return uint64(v), nil + case int16: + return uint64(v), nil + case int32: + return uint64(v), nil + case int64: + return uint64(v), nil + case uint: + return uint64(v), nil + case uint8: + return uint64(v), nil + case uint16: + return uint64(v), nil + case uint32: + return uint64(v), nil + case uint64: + return v, nil + case float32: + return uint64(v), nil + case float64: + return uint64(v), nil + case nil: + return 0, nil + } + return 0, fmt.Errorf("type \"%T\" unsupported", value) +} + +func ToBool(value interface{}) (bool, error) { + switch v := value.(type) { + case string: + return strconv.ParseBool(v) + case []byte: + return strconv.ParseBool(string(v)) + case fmt.Stringer: + return strconv.ParseBool(v.String()) + case int: + return v > 0, nil + case int8: + return v > 0, nil + case int16: + return v > 0, nil + case int32: + return v > 0, nil + case int64: + return v > 0, nil + case uint: + return v > 0, nil + case uint8: + return v > 0, nil + case uint16: + return v > 0, nil + case uint32: + return v > 0, nil + case uint64: + return v > 0, nil + case float32: + return v > 0, nil + case float64: + return v > 0, nil + case nil: + return false, nil + } + return false, fmt.Errorf("type \"%T\" unsupported", value) +} diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index aa273a4aa7fb5..7c4e0bcf45c76 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -167,6 +167,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/snmp_trap" _ "github.com/influxdata/telegraf/plugins/inputs/socket_listener" _ "github.com/influxdata/telegraf/plugins/inputs/solr" + _ "github.com/influxdata/telegraf/plugins/inputs/sql" _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" _ "github.com/influxdata/telegraf/plugins/inputs/stackdriver" _ "github.com/influxdata/telegraf/plugins/inputs/statsd" diff --git a/plugins/inputs/sql/README.md b/plugins/inputs/sql/README.md new file mode 100644 index 0000000000000..9c002df18dbd3 --- /dev/null +++ b/plugins/inputs/sql/README.md @@ -0,0 +1,153 @@ +# SQL Input Plugin + +This plugin reads metrics from performing SQL queries against a SQL server. Different server +types are supported and their settings might differ (especially the connection parameters). +Please check the list of [supported SQL drivers](../../../docs/SQL_DRIVERS_INPUT.md) for the +`driver` name and options for the data-source-name (`dsn`) options. + +### Configuration + +This section contains the default TOML to configure the plugin. You can +generate it using `telegraf --usage `. + +```toml +[[inputs.sql]] + ## Database Driver + ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for + ## a list of supported drivers. + driver = "mysql" + + ## Data source name for connecting + ## The syntax and supported options depends on selected driver. + dsn = "username:password@mysqlserver:3307/dbname?param=value" + + ## Timeout for any operation + # timeout = "5s" + + ## Connection time limits + ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections + ## will not be closed automatically. If you specify a positive time, the connections will be closed after + ## idleing or existing for at least that amount of time, respectively. + # connection_max_idle_time = "0s" + # connection_max_life_time = "0s" + + ## Connection count limits + ## By default the number of open connections is not limited and the number of maximum idle connections + ## will be inferred from the number of queries specified. If you specify a positive number for any of the + ## two options, connections will be closed when reaching the specified limit. The number of idle connections + ## will be clipped to the maximum number of connections limit if any. + # connection_max_open = 0 + # connection_max_idle = auto + + [[inputs.sql.query]] + ## Query to perform on the server + query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" + ## Alternatively to specifying the query directly you can select a file here containing the SQL query. + ## Only one of 'query' and 'query_script' can be specified! + # query_script = "/path/to/sql/script.sql" + + ## Name of the measurement + ## In case both measurement and 'measurement_col' are given, the latter takes precedence. + # measurement = "sql" + + ## Column name containing the name of the measurement + ## If given, this will take precedence over the 'measurement' setting. In case a query result + ## does not contain the specified column, we fall-back to the 'measurement' setting. + # measurement_column = "" + + ## Column name containing the time of the measurement + ## If ommited, the time of the query will be used. + # time_column = "" + + ## Format of the time contained in 'time_col' + ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. + ## See https://golang.org/pkg/time/#Time.Format for details. + # time_format = "unix" + + ## Column names containing tags + ## An empty include list will reject all columns and an empty exclude list will not exclude any column. + ## I.e. by default no columns will be returned as tag and the tags are empty. + # tag_columns_include = [] + # tag_columns_exclude = [] + + ## Column names containing fields (explicit types) + ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over + ## the automatic (driver-based) conversion below. + ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. + # field_columns_float = [] + # field_columns_int = [] + # field_columns_uint = [] + # field_columns_bool = [] + # field_columns_string = [] + + ## Column names containing fields (automatic types) + ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty + ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. + ## NOTE: We rely on the database driver to perform automatic datatype conversion. + # field_columns_include = [] + # field_columns_exclude = [] +``` + +### Options +#### Driver +The `driver` and `dsn` options specify how to connect to the database. As especially the `dsn` format and +values vary with the `driver` refer to the list of [supported SQL drivers](../../../docs/SQL_DRIVERS_INPUT.md) for possible values and more details. + +#### Connection limits +With these options you can limit the number of connections kept open by this plugin. Details about the exact +workings can be found in the [golang sql documentation](https://golang.org/pkg/database/sql/#DB.SetConnMaxIdleTime). + +#### Query sections +Multiple `query` sections can be specified for this plugin. Each specified query will first be prepared on the server +and then executed in every interval using the column mappings specified. Please note that `tag` and `field` columns +are not exclusive, i.e. a column can be added to both. When using both `include` and `exclude` lists, the `exclude` +list takes precedence over the `include` list. I.e. given you specify `foo` in both lists, `foo` will _never_ pass +the filter. In case any the columns specified in `measurement_col` or `time_col` are _not_ returned by the query, +the plugin falls-back to the documented defaults. Fields or tags specified in the includes of the options but missing +in the returned query are silently ignored. + +### Types +This plugin relies on the driver to do the type conversion. For the different properties of the metric the following +types are accepted. + +#### Measurement +Only columns of type `string` are accepted. + +#### Time +For the metric time columns of type `time` are accepted directly. For numeric columns, `time_format` should be set +to any of `unix`, `unix_ms`, `unix_ns` or `unix_us` accordingly. By default the a timestamp in `unix` format is +expected. For string columns, please specify the `time_format` accordingly. +See the [golang time documentation](https://golang.org/pkg/time/#Time.Format) for details. + +#### Tags +For tags columns with textual values (`string` and `bytes`), signed and unsigned integers (8, 16, 32 and 64 bit), +floating-point (32 and 64 bit), `boolean` and `time` values are accepted. Those values will be converted to string. + +#### Fields +For fields columns with textual values (`string` and `bytes`), signed and unsigned integers (8, 16, 32 and 64 bit), +floating-point (32 and 64 bit), `boolean` and `time` values are accepted. Here `bytes` will be converted to `string`, +signed and unsigned integer values will be converted to `int64` or `uint64` respectively. Floating-point values are converted to `float64` and `time` is converted to a nanosecond timestamp of type `int64`. + +### Example Output +Using the [MariaDB sample database](https://www.mariadbtutorial.com/getting-started/mariadb-sample-database) and the +configuration +```toml +[[inputs.sql]] + driver = "mysql" + dsn = "root:password@/nation" + + [[inputs.sql.query]] + query="SELECT * FROM guests" + measurement = "nation" + tag_cols_include = ["name"] + field_cols_exclude = ["name"] +``` + +Telegraf will output the following metrics +``` +nation,host=Hugin,name=John guest_id=1i 1611332164000000000 +nation,host=Hugin,name=Jane guest_id=2i 1611332164000000000 +nation,host=Hugin,name=Jean guest_id=3i 1611332164000000000 +nation,host=Hugin,name=Storm guest_id=4i 1611332164000000000 +nation,host=Hugin,name=Beast guest_id=5i 1611332164000000000 +``` diff --git a/plugins/inputs/sql/drivers.go b/plugins/inputs/sql/drivers.go new file mode 100644 index 0000000000000..09af9bfc890f8 --- /dev/null +++ b/plugins/inputs/sql/drivers.go @@ -0,0 +1,8 @@ +package sql + +import ( + // Blank imports to register the drivers + _ "github.com/denisenkom/go-mssqldb" + _ "github.com/go-sql-driver/mysql" + _ "github.com/jackc/pgx/v4/stdlib" +) diff --git a/plugins/inputs/sql/drivers_sqlite.go b/plugins/inputs/sql/drivers_sqlite.go new file mode 100644 index 0000000000000..4c9e56a8cd736 --- /dev/null +++ b/plugins/inputs/sql/drivers_sqlite.go @@ -0,0 +1,8 @@ +// +build linux,freebsd +// +build !mips !mips64 + +package sql + +import ( + _ "modernc.org/sqlite" +) diff --git a/plugins/inputs/sql/sql.go b/plugins/inputs/sql/sql.go new file mode 100644 index 0000000000000..383f04c40c454 --- /dev/null +++ b/plugins/inputs/sql/sql.go @@ -0,0 +1,542 @@ +package sql + +import ( + "context" + dbsql "database/sql" + "errors" + "fmt" + "io/ioutil" + "sort" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const sampleConfig = ` + ## Database Driver + ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for + ## a list of supported drivers. + driver = "mysql" + + ## Data source name for connecting + ## The syntax and supported options depends on selected driver. + dsn = "username:password@mysqlserver:3307/dbname?param=value" + + ## Timeout for any operation + # timeout = "5s" + + ## Connection time limits + ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections + ## will not be closed automatically. If you specify a positive time, the connections will be closed after + ## idleing or existing for at least that amount of time, respectively. + # connection_max_idle_time = "0s" + # connection_max_life_time = "0s" + + ## Connection count limits + ## By default the number of open connections is not limited and the number of maximum idle connections + ## will be inferred from the number of queries specified. If you specify a positive number for any of the + ## two options, connections will be closed when reaching the specified limit. The number of idle connections + ## will be clipped to the maximum number of connections limit if any. + # connection_max_open = 0 + # connection_max_idle = auto + + [[inputs.sql.query]] + ## Query to perform on the server + query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" + ## Alternatively to specifying the query directly you can select a file here containing the SQL query. + ## Only one of 'query' and 'query_script' can be specified! + # query_script = "/path/to/sql/script.sql" + + ## Name of the measurement + ## In case both measurement and 'measurement_col' are given, the latter takes precedence. + # measurement = "sql" + + ## Column name containing the name of the measurement + ## If given, this will take precedence over the 'measurement' setting. In case a query result + ## does not contain the specified column, we fall-back to the 'measurement' setting. + # measurement_column = "" + + ## Column name containing the time of the measurement + ## If ommited, the time of the query will be used. + # time_column = "" + + ## Format of the time contained in 'time_col' + ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. + ## See https://golang.org/pkg/time/#Time.Format for details. + # time_format = "unix" + + ## Column names containing tags + ## An empty include list will reject all columns and an empty exclude list will not exclude any column. + ## I.e. by default no columns will be returned as tag and the tags are empty. + # tag_columns_include = [] + # tag_columns_exclude = [] + + ## Column names containing fields (explicit types) + ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over + ## the automatic (driver-based) conversion below. + ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. + # field_columns_float = [] + # field_columns_int = [] + # field_columns_uint = [] + # field_columns_bool = [] + # field_columns_string = [] + + ## Column names containing fields (automatic types) + ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty + ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. + ## NOTE: We rely on the database driver to perform automatic datatype conversion. + # field_columns_include = [] + # field_columns_exclude = [] +` + +const magicIdleCount int = (-int(^uint(0) >> 1)) + +type Query struct { + Query string `toml:"query"` + Script string `toml:"query_script"` + Measurement string `toml:"measurement"` + MeasurementColumn string `toml:"measurement_column"` + TimeColumn string `toml:"time_column"` + TimeFormat string `toml:"time_format"` + TagColumnsInclude []string `toml:"tag_columns_include"` + TagColumnsExclude []string `toml:"tag_columns_exclude"` + FieldColumnsInclude []string `toml:"field_columns_include"` + FieldColumnsExclude []string `toml:"field_columns_exclude"` + FieldColumnsFloat []string `toml:"field_columns_float"` + FieldColumnsInt []string `toml:"field_columns_int"` + FieldColumnsUint []string `toml:"field_columns_uint"` + FieldColumnsBool []string `toml:"field_columns_bool"` + FieldColumnsString []string `toml:"field_columns_string"` + + statement *dbsql.Stmt + tagFilter filter.Filter + fieldFilter filter.Filter + fieldFilterFloat filter.Filter + fieldFilterInt filter.Filter + fieldFilterUint filter.Filter + fieldFilterBool filter.Filter + fieldFilterString filter.Filter +} + +func (q *Query) parse(ctx context.Context, acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time) (int, error) { + columnNames, err := rows.Columns() + if err != nil { + return 0, err + } + + // Prepare the list of datapoints according to the received row + columnData := make([]interface{}, len(columnNames)) + columnDataPtr := make([]interface{}, len(columnNames)) + + for i := range columnData { + columnDataPtr[i] = &columnData[i] + } + + rowCount := 0 + for rows.Next() { + measurement := q.Measurement + timestamp := t + tags := make(map[string]string) + fields := make(map[string]interface{}, len(columnNames)) + + // Do the parsing with (hopefully) automatic type conversion + if err := rows.Scan(columnDataPtr...); err != nil { + return 0, err + } + + for i, name := range columnNames { + if q.MeasurementColumn != "" && name == q.MeasurementColumn { + var ok bool + if measurement, ok = columnData[i].(string); !ok { + return 0, fmt.Errorf("measurement column type \"%T\" unsupported", columnData[i]) + } + } + + if q.TimeColumn != "" && name == q.TimeColumn { + var fieldvalue interface{} + var skipParsing bool + + switch v := columnData[i].(type) { + case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64: + fieldvalue = v + case []byte: + fieldvalue = string(v) + case time.Time: + timestamp = v + skipParsing = true + case fmt.Stringer: + fieldvalue = v.String() + default: + return 0, fmt.Errorf("time column %q of type \"%T\" unsupported", name, columnData[i]) + } + if !skipParsing { + if timestamp, err = internal.ParseTimestamp(q.TimeFormat, fieldvalue, ""); err != nil { + return 0, fmt.Errorf("parsing time failed: %v", err) + } + } + } + + if q.tagFilter.Match(name) { + tagvalue, err := internal.ToString(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting tag column %q failed: %v", name, err) + } + if v := strings.TrimSpace(tagvalue); v != "" { + tags[name] = v + } + } + + // Explicit type conversions take precedence + if q.fieldFilterFloat.Match(name) { + v, err := internal.ToFloat64(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to float failed: %v", name, err) + } + fields[name] = v + continue + } + + if q.fieldFilterInt.Match(name) { + v, err := internal.ToInt64(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to int failed: %v", name, err) + } + fields[name] = v + continue + } + + if q.fieldFilterUint.Match(name) { + v, err := internal.ToUint64(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to uint failed: %v", name, err) + } + fields[name] = v + continue + } + + if q.fieldFilterBool.Match(name) { + v, err := internal.ToBool(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to bool failed: %v", name, err) + } + fields[name] = v + continue + } + + if q.fieldFilterString.Match(name) { + v, err := internal.ToString(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to string failed: %v", name, err) + } + fields[name] = v + continue + } + + // Try automatic conversion for all remaining fields + if q.fieldFilter.Match(name) { + var fieldvalue interface{} + switch v := columnData[i].(type) { + case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool: + fieldvalue = v + case []byte: + fieldvalue = string(v) + case time.Time: + fieldvalue = v.UnixNano() + case nil: + fieldvalue = nil + case fmt.Stringer: + fieldvalue = v.String() + default: + return 0, fmt.Errorf("field column %q of type \"%T\" unsupported", name, columnData[i]) + } + if fieldvalue != nil { + fields[name] = fieldvalue + } + } + } + acc.AddFields(measurement, fields, tags, timestamp) + rowCount++ + } + + if err := rows.Err(); err != nil { + return rowCount, err + } + + return rowCount, nil +} + +type SQL struct { + Driver string `toml:"driver"` + Dsn string `toml:"dsn"` + Timeout config.Duration `toml:"timeout"` + MaxIdleTime config.Duration `toml:"connection_max_idle_time"` + MaxLifetime config.Duration `toml:"connection_max_life_time"` + MaxOpenConnections int `toml:"connection_max_open"` + MaxIdleConnections int `toml:"connection_max_idle"` + Queries []Query `toml:"query"` + Log telegraf.Logger `toml:"-"` + + driverName string + db *dbsql.DB +} + +func (s *SQL) Description() string { + return `Read metrics from SQL queries` +} + +func (s *SQL) SampleConfig() string { + return sampleConfig +} + +func (s *SQL) Init() error { + // Option handling + if s.Driver == "" { + return errors.New("missing SQL driver option") + } + + if s.Dsn == "" { + return errors.New("missing data source name (DSN) option") + } + + if s.Timeout <= 0 { + s.Timeout = config.Duration(5 * time.Second) + } + + if s.MaxIdleConnections == magicIdleCount { + // Determine the number by the number of queries + the golang default value + s.MaxIdleConnections = len(s.Queries) + 2 + } + + for i, q := range s.Queries { + if q.Query == "" && q.Script == "" { + return errors.New("neither 'query' nor 'query_script' specified") + } + + if q.Query != "" && q.Script != "" { + return errors.New("only one of 'query' and 'query_script' can be specified") + } + + // In case we got a script, we should read the query now. + if q.Script != "" { + query, err := ioutil.ReadFile(q.Script) + if err != nil { + return fmt.Errorf("reading script %q failed: %v", q.Script, err) + } + s.Queries[i].Query = string(query) + } + + // Time format + if q.TimeFormat == "" { + s.Queries[i].TimeFormat = "unix" + } + + // Compile the tag-filter + tagfilter, err := filter.NewIncludeExcludeFilterDefaults(q.TagColumnsInclude, q.TagColumnsExclude, false, false) + if err != nil { + return fmt.Errorf("creating tag filter failed: %v", err) + } + s.Queries[i].tagFilter = tagfilter + + // Compile the explicit type field-filter + fieldfilterFloat, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsFloat, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for float failed: %v", err) + } + s.Queries[i].fieldFilterFloat = fieldfilterFloat + + fieldfilterInt, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsInt, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for int failed: %v", err) + } + s.Queries[i].fieldFilterInt = fieldfilterInt + + fieldfilterUint, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsUint, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for uint failed: %v", err) + } + s.Queries[i].fieldFilterUint = fieldfilterUint + + fieldfilterBool, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsBool, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for bool failed: %v", err) + } + s.Queries[i].fieldFilterBool = fieldfilterBool + + fieldfilterString, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsString, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for string failed: %v", err) + } + s.Queries[i].fieldFilterString = fieldfilterString + + // Compile the field-filter + fieldfilter, err := filter.NewIncludeExcludeFilter(q.FieldColumnsInclude, q.FieldColumnsExclude) + if err != nil { + return fmt.Errorf("creating field filter failed: %v", err) + } + s.Queries[i].fieldFilter = fieldfilter + + if q.Measurement == "" { + s.Queries[i].Measurement = "sql" + } + } + + // Derive the sql-framework driver name from our config name. This abstracts the actual driver + // from the database-type the user wants. + aliases := map[string]string{ + "cockroach": "pgx", + "tidb": "mysql", + "mssql": "sqlserver", + "maria": "mysql", + "postgres": "pgx", + } + s.driverName = s.Driver + if driver, ok := aliases[s.Driver]; ok { + s.driverName = driver + } + + availDrivers := dbsql.Drivers() + if !choice.Contains(s.driverName, availDrivers) { + for d, r := range aliases { + if choice.Contains(r, availDrivers) { + availDrivers = append(availDrivers, d) + } + } + + // Sort the list of drivers and make them unique + sort.Strings(availDrivers) + last := 0 + for _, d := range availDrivers { + if d != availDrivers[last] { + last++ + availDrivers[last] = d + } + } + availDrivers = availDrivers[:last+1] + + return fmt.Errorf("driver %q not supported use one of %v", s.Driver, availDrivers) + } + + return nil +} + +func (s *SQL) Start(_ telegraf.Accumulator) error { + var err error + + // Connect to the database server + s.Log.Debugf("Connecting to %q...", s.Dsn) + s.db, err = dbsql.Open(s.driverName, s.Dsn) + if err != nil { + return err + } + + // Set the connection limits + // s.db.SetConnMaxIdleTime(time.Duration(s.MaxIdleTime)) // Requires go >= 1.15 + s.db.SetConnMaxLifetime(time.Duration(s.MaxLifetime)) + s.db.SetMaxOpenConns(s.MaxOpenConnections) + s.db.SetMaxIdleConns(s.MaxIdleConnections) + + // Test if the connection can be established + s.Log.Debugf("Testing connectivity...") + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout)) + err = s.db.PingContext(ctx) + cancel() + if err != nil { + return fmt.Errorf("connecting to database failed: %v", err) + } + + // Prepare the statements + for i, q := range s.Queries { + s.Log.Debugf("Preparing statement %q...", q.Query) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout)) + stmt, err := s.db.PrepareContext(ctx, q.Query) //nolint:sqlclosecheck // Closed in Stop() + cancel() + if err != nil { + return fmt.Errorf("preparing query %q failed: %v", q.Query, err) + } + s.Queries[i].statement = stmt + } + + return nil +} + +func (s *SQL) Stop() { + // Free the statements + for _, q := range s.Queries { + if q.statement != nil { + if err := q.statement.Close(); err != nil { + s.Log.Errorf("closing statement for query %q failed: %v", q.Query, err) + } + } + } + + // Close the connection to the server + if s.db != nil { + if err := s.db.Close(); err != nil { + s.Log.Errorf("closing database connection failed: %v", err) + } + } +} + +func (s *SQL) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout)) + defer cancel() + + tstart := time.Now() + for _, query := range s.Queries { + wg.Add(1) + + go func(q Query) { + defer wg.Done() + if err := s.executeQuery(ctx, acc, q, tstart); err != nil { + acc.AddError(err) + } + }(query) + } + wg.Wait() + s.Log.Debugf("Executed %d queries in %s", len(s.Queries), time.Since(tstart).String()) + + return nil +} + +func init() { + inputs.Add("sql", func() telegraf.Input { + return &SQL{ + MaxIdleTime: config.Duration(0), // unlimited + MaxLifetime: config.Duration(0), // unlimited + MaxOpenConnections: 0, // unlimited + MaxIdleConnections: magicIdleCount, // will trigger auto calculation + } + }) +} + +func (s *SQL) executeQuery(ctx context.Context, acc telegraf.Accumulator, q Query, tquery time.Time) error { + if q.statement == nil { + return fmt.Errorf("statement is nil for query %q", q.Query) + } + + // Execute the query + rows, err := q.statement.QueryContext(ctx) + if err != nil { + return err + } + defer rows.Close() + + // Handle the rows + columnNames, err := rows.Columns() + if err != nil { + return err + } + rowCount, err := q.parse(ctx, acc, rows, tquery) + s.Log.Debugf("Received %d rows and %d columns for query %q", rowCount, len(columnNames), q.Query) + + return err +} diff --git a/plugins/inputs/sql/sql_test.go b/plugins/inputs/sql/sql_test.go new file mode 100644 index 0000000000000..35010eeb5ecdf --- /dev/null +++ b/plugins/inputs/sql/sql_test.go @@ -0,0 +1,272 @@ +package sql + +import ( + "context" + "flag" + "fmt" + "testing" + "time" + + "math/rand" + "path/filepath" + + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +func pwgen(n int) string { + charset := []byte("abcdedfghijklmnopqrstABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") + + nchars := len(charset) + buffer := make([]byte, n) + + for i := range buffer { + buffer[i] = charset[rand.Intn(nchars)] + } + + return string(buffer) +} + +var spinup = flag.Bool("spinup", false, "Spin-up the required test containers") + +func TestMariaDB(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + logger := testutil.Logger{} + + addr := "127.0.0.1" + port := "3306" + passwd := "" + database := "foo" + + if *spinup { + logger.Infof("Spinning up container...") + + // Generate a random password + passwd = pwgen(32) + + // Determine the test-data mountpoint + testdata, err := filepath.Abs("testdata/mariadb") + require.NoError(t, err, "determining absolute path of test-data failed") + + // Spin-up the container + ctx := context.Background() + req := testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "mariadb", + Env: map[string]string{ + "MYSQL_ROOT_PASSWORD": passwd, + "MYSQL_DATABASE": database, + }, + BindMounts: map[string]string{ + testdata: "/docker-entrypoint-initdb.d", + }, + ExposedPorts: []string{"3306/tcp"}, + WaitingFor: wait.ForListeningPort("3306/tcp"), + }, + Started: true, + } + container, err := testcontainers.GenericContainer(ctx, req) + require.NoError(t, err, "starting container failed") + defer func() { + require.NoError(t, container.Terminate(ctx), "terminating container failed") + }() + + // Get the connection details from the container + addr, err = container.Host(ctx) + require.NoError(t, err, "getting container host address failed") + p, err := container.MappedPort(ctx, "3306/tcp") + require.NoError(t, err, "getting container host port failed") + port = p.Port() + } + + // Define the testset + var testset = []struct { + name string + queries []Query + expected []telegraf.Metric + }{ + { + name: "metric_one", + queries: []Query{ + { + Query: "SELECT * FROM metric_one", + TagColumnsInclude: []string{"tag_*"}, + FieldColumnsExclude: []string{"tag_*", "timestamp"}, + TimeColumn: "timestamp", + TimeFormat: "2006-01-02 15:04:05", + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "sql", + map[string]string{ + "tag_one": "tag1", + "tag_two": "tag2", + }, + map[string]interface{}{ + "int64_one": int64(1234), + "int64_two": int64(2345), + }, + time.Date(2021, 5, 17, 22, 4, 45, 0, time.UTC), + ), + }, + }, + } + + for _, tt := range testset { + t.Run(tt.name, func(t *testing.T) { + // Setup the plugin-under-test + plugin := &SQL{ + Driver: "maria", + Dsn: fmt.Sprintf("root:%s@tcp(%s:%s)/%s", passwd, addr, port, database), + Queries: tt.queries, + Log: logger, + } + + var acc testutil.Accumulator + + // Startup the plugin + err := plugin.Init() + require.NoError(t, err) + err = plugin.Start(&acc) + require.NoError(t, err) + + // Gather + err = plugin.Gather(&acc) + require.NoError(t, err) + require.Len(t, acc.Errors, 0) + + // Stopping the plugin + plugin.Stop() + + // Do the comparison + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} + +func TestPostgreSQL(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + logger := testutil.Logger{} + + addr := "127.0.0.1" + port := "5432" + passwd := "" + database := "foo" + + if *spinup { + logger.Infof("Spinning up container...") + + // Generate a random password + passwd = pwgen(32) + + // Determine the test-data mountpoint + testdata, err := filepath.Abs("testdata/postgres") + require.NoError(t, err, "determining absolute path of test-data failed") + + // Spin-up the container + ctx := context.Background() + req := testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: "postgres", + Env: map[string]string{ + "POSTGRES_PASSWORD": passwd, + "POSTGRES_DB": database, + }, + BindMounts: map[string]string{ + testdata: "/docker-entrypoint-initdb.d", + }, + ExposedPorts: []string{"5432/tcp"}, + WaitingFor: wait.ForListeningPort("5432/tcp"), + }, + Started: true, + } + container, err := testcontainers.GenericContainer(ctx, req) + require.NoError(t, err, "starting container failed") + defer func() { + require.NoError(t, container.Terminate(ctx), "terminating container failed") + }() + + // Get the connection details from the container + addr, err = container.Host(ctx) + require.NoError(t, err, "getting container host address failed") + p, err := container.MappedPort(ctx, "5432/tcp") + require.NoError(t, err, "getting container host port failed") + port = p.Port() + } + + // Define the testset + var testset = []struct { + name string + queries []Query + expected []telegraf.Metric + }{ + { + name: "metric_one", + queries: []Query{ + { + Query: "SELECT * FROM metric_one", + TagColumnsInclude: []string{"tag_*"}, + FieldColumnsExclude: []string{"tag_*", "timestamp"}, + TimeColumn: "timestamp", + TimeFormat: "2006-01-02 15:04:05", + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "sql", + map[string]string{ + "tag_one": "tag1", + "tag_two": "tag2", + }, + map[string]interface{}{ + "int64_one": int64(1234), + "int64_two": int64(2345), + }, + time.Date(2021, 5, 17, 22, 4, 45, 0, time.UTC), + ), + }, + }, + } + + for _, tt := range testset { + t.Run(tt.name, func(t *testing.T) { + // Setup the plugin-under-test + plugin := &SQL{ + Driver: "pgx", + Dsn: fmt.Sprintf("postgres://postgres:%v@%v:%v/%v", passwd, addr, port, database), + Queries: tt.queries, + Log: logger, + } + + var acc testutil.Accumulator + + // Startup the plugin + err := plugin.Init() + require.NoError(t, err) + err = plugin.Start(&acc) + require.NoError(t, err) + + // Gather + err = plugin.Gather(&acc) + require.NoError(t, err) + require.Len(t, acc.Errors, 0) + + // Stopping the plugin + plugin.Stop() + + // Do the comparison + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} diff --git a/plugins/inputs/sql/testdata/mariadb/expected.sql b/plugins/inputs/sql/testdata/mariadb/expected.sql new file mode 100644 index 0000000000000..49a3095db4da2 --- /dev/null +++ b/plugins/inputs/sql/testdata/mariadb/expected.sql @@ -0,0 +1,36 @@ +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `bar` ( + `baz` int(11) DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `bar` VALUES (1); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric three` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag four` text DEFAULT NULL, + `string two` text DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric three` VALUES ('2021-05-17 22:04:45','tag4','string2'); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric_one` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag_one` text DEFAULT NULL, + `tag_two` text DEFAULT NULL, + `int64_one` int(11) DEFAULT NULL, + `int64_two` int(11) DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric_one` VALUES ('2021-05-17 22:04:45','tag1','tag2',1234,2345); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric_two` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag_three` text DEFAULT NULL, + `string_one` text DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric_two` VALUES ('2021-05-17 22:04:45','tag3','string1'); diff --git a/plugins/inputs/sql/testdata/postgres/expected.sql b/plugins/inputs/sql/testdata/postgres/expected.sql new file mode 100644 index 0000000000000..8bc2b2fc83018 --- /dev/null +++ b/plugins/inputs/sql/testdata/postgres/expected.sql @@ -0,0 +1,41 @@ +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; +SET default_tablespace = ''; +SET default_table_access_method = heap; +CREATE TABLE public."metric three" ( + "timestamp" timestamp without time zone, + "tag four" text, + "string two" text +); +ALTER TABLE public."metric three" OWNER TO postgres; +CREATE TABLE public.metric_one ( + "timestamp" timestamp without time zone, + tag_one text, + tag_two text, + int64_one integer, + int64_two integer +); +ALTER TABLE public.metric_one OWNER TO postgres; +CREATE TABLE public.metric_two ( + "timestamp" timestamp without time zone, + tag_three text, + string_one text +); +ALTER TABLE public.metric_two OWNER TO postgres; +COPY public."metric three" ("timestamp", "tag four", "string two") FROM stdin; +2021-05-17 22:04:45 tag4 string2 +\. +COPY public.metric_one ("timestamp", tag_one, tag_two, int64_one, int64_two) FROM stdin; +2021-05-17 22:04:45 tag1 tag2 1234 2345 +\. +COPY public.metric_two ("timestamp", tag_three, string_one) FROM stdin; +2021-05-17 22:04:45 tag3 string1 +\. From 6cc942fa6e70ea1e93711822e1429bf33fea9630 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 15 Jun 2021 21:20:47 +0200 Subject: [PATCH 465/761] Fix segfault in processors/parser (#9283) --- plugins/processors/parser/parser.go | 25 ++++++++++++------------ plugins/processors/parser/parser_test.go | 24 +++++++++++++---------- 2 files changed, 27 insertions(+), 22 deletions(-) diff --git a/plugins/processors/parser/parser.go b/plugins/processors/parser/parser.go index 63230763ab02b..a7f5b47a1597c 100644 --- a/plugins/processors/parser/parser.go +++ b/plugins/processors/parser/parser.go @@ -1,19 +1,19 @@ package parser import ( - "log" - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/processors" ) type Parser struct { parsers.Config - DropOriginal bool `toml:"drop_original"` - Merge string `toml:"merge"` - ParseFields []string `toml:"parse_fields"` - Parser parsers.Parser + DropOriginal bool `toml:"drop_original"` + Merge string `toml:"merge"` + ParseFields []string `toml:"parse_fields"` + Log telegraf.Logger `toml:"-"` + parser parsers.Parser } var SampleConfig = ` @@ -43,13 +43,14 @@ func (p *Parser) Description() string { } func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric { - if p.Parser == nil { + if p.parser == nil { var err error - p.Parser, err = parsers.NewParser(&p.Config) + p.parser, err = parsers.NewParser(&p.Config) if err != nil { - log.Printf("E! [processors.parser] could not create parser: %v", err) + p.Log.Errorf("could not create parser: %v", err) return metrics } + models.SetLoggerOnPlugin(p.parser, p.Log) } results := []telegraf.Metric{} @@ -67,7 +68,7 @@ func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric { case string: fromFieldMetric, err := p.parseField(value) if err != nil { - log.Printf("E! [processors.parser] could not parse field %s: %v", key, err) + p.Log.Errorf("could not parse field %s: %v", key, err) } for _, m := range fromFieldMetric { @@ -81,7 +82,7 @@ func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric { // prior to returning. newMetrics = append(newMetrics, fromFieldMetric...) default: - log.Printf("E! [processors.parser] field '%s' not a string, skipping", key) + p.Log.Errorf("field '%s' not a string, skipping", key) } } } @@ -114,7 +115,7 @@ func merge(base telegraf.Metric, metrics []telegraf.Metric) telegraf.Metric { } func (p *Parser) parseField(value string) ([]telegraf.Metric, error) { - return p.Parser.Parse([]byte(value)) + return p.parser.Parse([]byte(value)) } func init() { diff --git a/plugins/processors/parser/parser_test.go b/plugins/processors/parser/parser_test.go index 512a6118dd0f4..dedf15bf71506 100644 --- a/plugins/processors/parser/parser_test.go +++ b/plugins/processors/parser/parser_test.go @@ -7,17 +7,19 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/stretchr/testify/assert" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) //compares metrics without comparing time func compareMetrics(t *testing.T, expected, actual []telegraf.Metric) { - assert.Equal(t, len(expected), len(actual)) - for i, metric := range actual { - require.Equal(t, expected[i].Name(), metric.Name()) - require.Equal(t, expected[i].Fields(), metric.Fields()) - require.Equal(t, expected[i].Tags(), metric.Tags()) + require.Equal(t, len(expected), len(actual)) + for i, m := range actual { + require.Equal(t, expected[i].Name(), m.Name()) + require.Equal(t, expected[i].Fields(), m.Fields()) + require.Equal(t, expected[i].Tags(), m.Tags()) } } @@ -503,6 +505,7 @@ func TestApply(t *testing.T) { ParseFields: tt.parseFields, DropOriginal: tt.dropOriginal, Merge: tt.merge, + Log: testutil.Logger{Name: "processor.parser"}, } output := parser.Apply(tt.input) @@ -573,6 +576,7 @@ func TestBadApply(t *testing.T) { parser := Parser{ Config: tt.config, ParseFields: tt.parseFields, + Log: testutil.Logger{Name: "processor.parser"}, } output := parser.Apply(tt.input) @@ -584,17 +588,17 @@ func TestBadApply(t *testing.T) { // Benchmarks -func getMetricFields(metric telegraf.Metric) interface{} { +func getMetricFields(m telegraf.Metric) interface{} { key := "field3" - if value, ok := metric.Fields()[key]; ok { + if value, ok := m.Fields()[key]; ok { return value } return nil } -func getMetricFieldList(metric telegraf.Metric) interface{} { +func getMetricFieldList(m telegraf.Metric) interface{} { key := "field3" - fields := metric.FieldList() + fields := m.FieldList() for _, field := range fields { if field.Key == key { return field.Value From 55fa78c600f56cd1d1ce58b6db6cd97955465b10 Mon Sep 17 00:00:00 2001 From: Aleksandr Cupacenko Date: Tue, 15 Jun 2021 23:57:13 +0300 Subject: [PATCH 466/761] upgrade denisenkom go-mssql to v0.10.0 (#9358) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3892479cdeeec..c7d9335f67631 100644 --- a/go.mod +++ b/go.mod @@ -41,7 +41,7 @@ require ( github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect - github.com/denisenkom/go-mssqldb v0.9.0 + github.com/denisenkom/go-mssqldb v0.10.0 github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 github.com/docker/docker v20.10.6+incompatible diff --git a/go.sum b/go.sum index bd20f28427fa8..3e850d4b99485 100644 --- a/go.sum +++ b/go.sum @@ -414,8 +414,8 @@ github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhr github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.9.0 h1:RSohk2RsiZqLZ0zCjtfn3S4Gp4exhpBWHyQ7D0yGjAk= -github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= +github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= From d7afebf7e6ad11b5975c687bbc30c08014ae3500 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Tue, 15 Jun 2021 14:04:22 -0700 Subject: [PATCH 467/761] Fix connecting to the wrong url (#9329) --- plugins/inputs/kube_inventory/client.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/kube_inventory/client.go b/plugins/inputs/kube_inventory/client.go index 5b53dd1fb98d1..66455b004f918 100644 --- a/plugins/inputs/kube_inventory/client.go +++ b/plugins/inputs/kube_inventory/client.go @@ -29,6 +29,7 @@ func newClient(baseURL, namespace, bearerToken string, timeout time.Duration, tl CertFile: tlsConfig.TLSCert, KeyFile: tlsConfig.TLSKey, }, + Host: baseURL, BearerToken: bearerToken, ContentConfig: rest.ContentConfig{}, }) From 769f582245fe444448e42bc0c46c3ee77a59de00 Mon Sep 17 00:00:00 2001 From: Daniel Dyla Date: Tue, 15 Jun 2021 17:13:34 -0400 Subject: [PATCH 468/761] Update dynatrace output (#9363) - export timestamps - enrich dimensions with OneAgent data - Add default dimensions feature --- plugins/outputs/dynatrace/README.md | 32 ++-- plugins/outputs/dynatrace/dynatrace.go | 32 +++- plugins/outputs/dynatrace/dynatrace_test.go | 158 ++++++++++++++++++-- 3 files changed, 191 insertions(+), 31 deletions(-) diff --git a/plugins/outputs/dynatrace/README.md b/plugins/outputs/dynatrace/README.md index 5f25c70026177..666f821f6356c 100644 --- a/plugins/outputs/dynatrace/README.md +++ b/plugins/outputs/dynatrace/README.md @@ -16,7 +16,7 @@ The Dynatrace exporter may be enabled by adding an `[[outputs.dynatrace]]` secti All configurations are optional, but if a `url` other than the OneAgent metric ingestion endpoint is specified then an `api_token` is required. To see all available options, see [Configuration](#configuration) below. -### Running alongside Dynatrace OneAgent +### Running alongside Dynatrace OneAgent (preferred) If you run the Telegraf agent on a host or VM that is monitored by the Dynatrace OneAgent then you only need to enable the plugin, but need no further configuration. The Dynatrace Telegraf output plugin will send all metrics to the OneAgent which will use its secure and load balanced connection to send the metrics to your Dynatrace SaaS or Managed environment. Depending on your environment, you might have to enable metrics ingestion on the OneAgent first as described in the [Dynatrace documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/ingestion-methods/telegraf/). @@ -28,7 +28,7 @@ Note: The name and identifier of the host running Telegraf will be added as a di ## No options are required. By default, metrics will be exported via the OneAgent on the local host. ``` -## Running standalone +### Running standalone If you run the Telegraf agent on a host or VM without a OneAgent you will need to configure the environment API endpoint to send the metrics to and an API token for security. @@ -55,14 +55,6 @@ You can learn more about how to use the Dynatrace API [here](https://www.dynatra ## Configuration -### `url` - -*required*: `false` - -*default*: Local OneAgent endpoint - -Set your Dynatrace environment URL (e.g.: `https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest`) if you do not use a OneAgent or wish to export metrics directly to a Dynatrace metrics v2 endpoint. If a URL is set to anything other than the local OneAgent endpoint, then an API token is required. - ```toml [[outputs.dynatrace]] ## Leave empty or use the local ingest endpoint of your OneAgent monitored host (e.g.: http://127.0.0.1:14499/metrics/ingest). @@ -75,6 +67,21 @@ Set your Dynatrace environment URL (e.g.: `https://{your-environment-id}.live.dy insecure_skip_verify = false ## If you want to convert values represented as gauges to counters, add the metric names here additional_counters = [ ] + + ## Optional dimensions to be added to every metric + [outputs.dynatrace.default_dimensions] + default_key = "default value" +``` + +### `url` + +*required*: `false` + +*default*: Local OneAgent endpoint + +Set your Dynatrace environment URL (e.g.: `https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest`, see the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/metric-v2/post-ingest-metrics/) for details) if you do not use a OneAgent or wish to export metrics directly to a Dynatrace metrics v2 endpoint. If a URL is set to anything other than the local OneAgent endpoint, then an API token is required. + +```toml url = "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" ``` @@ -125,9 +132,8 @@ additional_counters = [ ] Default dimensions that will be added to every exported metric. ```toml -default_dimensions = { - key = "value" -} +[outputs.dynatrace.default_dimensions] +default_key = "default value" ``` ## Limitations diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index fd012d0e1c6f5..0cca17985598d 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -20,12 +20,16 @@ import ( // Dynatrace Configuration for the Dynatrace output plugin type Dynatrace struct { - URL string `toml:"url"` - APIToken string `toml:"api_token"` - Prefix string `toml:"prefix"` - Log telegraf.Logger `toml:"-"` - Timeout config.Duration `toml:"timeout"` - AddCounterMetrics []string `toml:"additional_counters"` + URL string `toml:"url"` + APIToken string `toml:"api_token"` + Prefix string `toml:"prefix"` + Log telegraf.Logger `toml:"-"` + Timeout config.Duration `toml:"timeout"` + AddCounterMetrics []string `toml:"additional_counters"` + DefaultDimensions map[string]string `toml:"default_dimensions"` + + normalizedDefaultDimensions dimensions.NormalizedDimensionList + normalizedStaticDimensions dimensions.NormalizedDimensionList tls.ClientConfig @@ -67,6 +71,10 @@ const sampleConfig = ` ## If you want to convert values represented as gauges to counters, add the metric names here additional_counters = [ ] + + ## Optional dimensions to be added to every metric + # [outputs.dynatrace.default_dimensions] + # default_key = "default value" ` // Connect Connects the Dynatrace output plugin to the Telegraf stream @@ -140,10 +148,12 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { dtMetric.WithPrefix(d.Prefix), dtMetric.WithDimensions( dimensions.MergeLists( - // dimensions.NewNormalizedDimensionList(e.opts.DefaultDimensions...), + d.normalizedDefaultDimensions, dimensions.NewNormalizedDimensionList(dims...), + d.normalizedStaticDimensions, ), ), + dtMetric.WithTimestamp(tm.Time()), typeOpt, ) @@ -236,6 +246,14 @@ func (d *Dynatrace) Init() error { }, Timeout: time.Duration(d.Timeout), } + + dims := []dimensions.Dimension{} + for key, value := range d.DefaultDimensions { + dims = append(dims, dimensions.NewDimension(key, value)) + } + d.normalizedDefaultDimensions = dimensions.NewNormalizedDimensionList(dims...) + d.normalizedStaticDimensions = dimensions.NewNormalizedDimensionList(dimensions.NewDimension("dt.metrics.source", "telegraf")) + return nil } diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index ae0e3390fa557..a994f0ef569f6 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/apiconstants" + "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" @@ -121,13 +122,13 @@ func TestMissingAPIToken(t *testing.T) { require.Error(t, err) } -func TestSendMetric(t *testing.T) { +func TestSendMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result bodyBytes, err := ioutil.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield,host=192.168.0.1 gauge,3.14\nmymeasurement.value,host=192.168.0.2 count,3.14" + expected := "mymeasurement.myfield,dt.metrics.source=telegraf gauge,3.14 1289430000000\nmymeasurement.value,dt.metrics.source=telegraf count,3.14 1289430000000" if bodyString != expected { t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expected, bodyString) } @@ -151,14 +152,14 @@ func TestSendMetric(t *testing.T) { m1 := metric.New( "mymeasurement", - map[string]string{"host": "192.168.0.1"}, + map[string]string{}, map[string]interface{}{"myfield": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) m2 := metric.New( "mymeasurement", - map[string]string{"host": "192.168.0.2"}, + map[string]string{}, map[string]interface{}{"value": float64(3.14)}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), telegraf.Counter, @@ -176,11 +177,14 @@ func TestSendSingleMetricWithUnorderedTags(t *testing.T) { bodyBytes, err := ioutil.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) + // use regex because dimension order isn't guaranteed + require.Equal(t, len(bodyString), 94) require.Regexp(t, regexp.MustCompile(`^mymeasurement\.myfield`), bodyString) require.Regexp(t, regexp.MustCompile(`a=test`), bodyString) require.Regexp(t, regexp.MustCompile(`b=test`), bodyString) require.Regexp(t, regexp.MustCompile(`c=test`), bodyString) - require.Regexp(t, regexp.MustCompile(`gauge,3.14$`), bodyString) + require.Regexp(t, regexp.MustCompile(`dt.metrics.source=telegraf`), bodyString) + require.Regexp(t, regexp.MustCompile(`gauge,3.14 1289430000000$`), bodyString) w.WriteHeader(http.StatusOK) err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) require.NoError(t, err) @@ -219,7 +223,7 @@ func TestSendMetricWithoutTags(t *testing.T) { bodyBytes, err := ioutil.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield gauge,3.14" + expected := "mymeasurement.myfield,dt.metrics.source=telegraf gauge,3.14 1289430000000" if bodyString != expected { t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expected, bodyString) } @@ -261,13 +265,14 @@ func TestSendMetricWithUpperCaseTagKeys(t *testing.T) { require.NoError(t, err) bodyString := string(bodyBytes) - // expected := "mymeasurement.myfield,b_b=test,ccc=test,aaa=test gauge,3.14" // use regex because dimension order isn't guaranteed + require.Equal(t, len(bodyString), 100) require.Regexp(t, regexp.MustCompile(`^mymeasurement\.myfield`), bodyString) require.Regexp(t, regexp.MustCompile(`aaa=test`), bodyString) require.Regexp(t, regexp.MustCompile(`b_b=test`), bodyString) require.Regexp(t, regexp.MustCompile(`ccc=test`), bodyString) - require.Regexp(t, regexp.MustCompile(`gauge,3.14$`), bodyString) + require.Regexp(t, regexp.MustCompile(`dt.metrics.source=telegraf`), bodyString) + require.Regexp(t, regexp.MustCompile(`gauge,3.14 1289430000000$`), bodyString) err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) require.NoError(t, err) @@ -307,8 +312,9 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed - require.Contains(t, bodyString, "mymeasurement.yes gauge,1") - require.Contains(t, bodyString, "mymeasurement.no gauge,0") + require.Equal(t, len(bodyString), 132) + require.Contains(t, bodyString, "mymeasurement.yes,dt.metrics.source=telegraf gauge,1 1289430000000") + require.Contains(t, bodyString, "mymeasurement.no,dt.metrics.source=telegraf gauge,0 1289430000000") err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) require.NoError(t, err) })) @@ -339,6 +345,136 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { require.NoError(t, err) } +func TestSendMetricWithDefaultDimensions(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + // check the encoded result + bodyBytes, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + bodyString := string(bodyBytes) + // use regex because field order isn't guaranteed + require.Equal(t, len(bodyString), 79) + require.Regexp(t, regexp.MustCompile("^mymeasurement.value"), bodyString) + require.Regexp(t, regexp.MustCompile("dt.metrics.source=telegraf"), bodyString) + require.Regexp(t, regexp.MustCompile("dim=value"), bodyString) + require.Regexp(t, regexp.MustCompile("gauge,32 1289430000000$"), bodyString) + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) + })) + defer ts.Close() + + d := &Dynatrace{DefaultDimensions: map[string]string{"dim": "value"}} + + d.URL = ts.URL + d.APIToken = "123" + d.Log = testutil.Logger{} + err := d.Init() + require.NoError(t, err) + err = d.Connect() + require.NoError(t, err) + + // Init metrics + + m1 := metric.New( + "mymeasurement", + map[string]string{}, + map[string]interface{}{"value": 32}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1} + + err = d.Write(metrics) + require.NoError(t, err) +} + +func TestMetricDimensionsOverrideDefault(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + // check the encoded result + bodyBytes, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + bodyString := string(bodyBytes) + // use regex because field order isn't guaranteed + require.Equal(t, len(bodyString), 80) + require.Regexp(t, regexp.MustCompile("^mymeasurement.value"), bodyString) + require.Regexp(t, regexp.MustCompile("dt.metrics.source=telegraf"), bodyString) + require.Regexp(t, regexp.MustCompile("dim=metric"), bodyString) + require.Regexp(t, regexp.MustCompile("gauge,32 1289430000000$"), bodyString) + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) + })) + defer ts.Close() + + d := &Dynatrace{DefaultDimensions: map[string]string{"dim": "default"}} + + d.URL = ts.URL + d.APIToken = "123" + d.Log = testutil.Logger{} + err := d.Init() + require.NoError(t, err) + err = d.Connect() + require.NoError(t, err) + + // Init metrics + + m1 := metric.New( + "mymeasurement", + map[string]string{"dim": "metric"}, + map[string]interface{}{"value": 32}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1} + + err = d.Write(metrics) + require.NoError(t, err) +} + +func TestStaticDimensionsOverrideMetric(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + // check the encoded result + bodyBytes, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + bodyString := string(bodyBytes) + // use regex because field order isn't guaranteed + require.Equal(t, len(bodyString), 53) + require.Regexp(t, regexp.MustCompile("^mymeasurement.value"), bodyString) + require.Regexp(t, regexp.MustCompile("dim=static"), bodyString) + require.Regexp(t, regexp.MustCompile("gauge,32 1289430000000$"), bodyString) + err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) + require.NoError(t, err) + })) + defer ts.Close() + + d := &Dynatrace{DefaultDimensions: map[string]string{"dim": "default"}} + + d.URL = ts.URL + d.APIToken = "123" + d.Log = testutil.Logger{} + err := d.Init() + require.NoError(t, err) + err = d.Connect() + require.NoError(t, err) + + d.normalizedStaticDimensions = dimensions.NewNormalizedDimensionList(dimensions.NewDimension("dim", "static")) + + // Init metrics + + m1 := metric.New( + "mymeasurement", + map[string]string{"dim": "metric"}, + map[string]interface{}{"value": 32}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1} + + err = d.Write(metrics) + require.NoError(t, err) +} + func TestSendCounterMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) @@ -346,7 +482,7 @@ func TestSendCounterMetricWithoutTags(t *testing.T) { bodyBytes, err := ioutil.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.value gauge,32" + expected := "mymeasurement.value,dt.metrics.source=telegraf gauge,32 1289430000000" if bodyString != expected { t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expected, bodyString) } From cc82c7ccf9b055d13aec5132dd81e7bf03c62c06 Mon Sep 17 00:00:00 2001 From: Harkamal Singh <41585091+hsinghkalsi@users.noreply.github.com> Date: Tue, 15 Jun 2021 17:15:31 -0400 Subject: [PATCH 469/761] Added support to override metric_url in newrelic output plugin (#9342) --- plugins/outputs/newrelic/README.md | 4 ++++ plugins/outputs/newrelic/newrelic.go | 8 ++++++++ plugins/outputs/newrelic/newrelic_test.go | 8 ++++++++ 3 files changed, 20 insertions(+) diff --git a/plugins/outputs/newrelic/README.md b/plugins/outputs/newrelic/README.md index 462c0c3152f55..e15bedb4bdcb4 100644 --- a/plugins/outputs/newrelic/README.md +++ b/plugins/outputs/newrelic/README.md @@ -21,6 +21,10 @@ Telegraf minimum version: Telegraf 1.15.0 ## HTTP Proxy override. If unset use values from the standard ## proxy environment variables to determine proxy, if any. # http_proxy = "http://corporate.proxy:3128" + + ## Metric URL override to enable geographic location endpoints. + # If not set use values from the standard + # metric_url = "https://metric-api.newrelic.com/metric/v1" ``` [Metrics API]: https://docs.newrelic.com/docs/data-ingest-apis/get-data-new-relic/metric-api/introduction-metric-api diff --git a/plugins/outputs/newrelic/newrelic.go b/plugins/outputs/newrelic/newrelic.go index 4f67c5de06914..02b2b9c3ff0ae 100644 --- a/plugins/outputs/newrelic/newrelic.go +++ b/plugins/outputs/newrelic/newrelic.go @@ -21,6 +21,7 @@ type NewRelic struct { MetricPrefix string `toml:"metric_prefix"` Timeout config.Duration `toml:"timeout"` HTTPProxy string `toml:"http_proxy"` + MetricURL string `toml:"metric_url"` harvestor *telemetry.Harvester dc *cumulative.DeltaCalculator @@ -49,6 +50,10 @@ func (nr *NewRelic) SampleConfig() string { ## HTTP Proxy override. If unset use values from the standard ## proxy environment variables to determine proxy, if any. # http_proxy = "http://corporate.proxy:3128" + + ## Metric URL override to enable geographic location endpoints. + # If not set use values from the standard + # metric_url = "https://metric-api.newrelic.com/metric/v1" ` } @@ -77,6 +82,9 @@ func (nr *NewRelic) Connect() error { nr.errorCount++ nr.savedErrors[nr.errorCount] = errorString } + if nr.MetricURL != "" { + cfg.MetricsURLOverride = nr.MetricURL + } }) if err != nil { return fmt.Errorf("unable to connect to newrelic %v", err) diff --git a/plugins/outputs/newrelic/newrelic_test.go b/plugins/outputs/newrelic/newrelic_test.go index 7071176fcfae8..e545a1ac94e03 100644 --- a/plugins/outputs/newrelic/newrelic_test.go +++ b/plugins/outputs/newrelic/newrelic_test.go @@ -173,6 +173,14 @@ func TestNewRelic_Connect(t *testing.T) { }, wantErr: false, }, + { + name: "Test: Metric URL ", + newrelic: &NewRelic{ + InsightsKey: "12121212", + MetricURL: "https://test.nr.com", + }, + wantErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 07778c57d5d043c01652a20f153240fffb2f5785 Mon Sep 17 00:00:00 2001 From: nicolasme Date: Wed, 16 Jun 2021 00:11:23 +0200 Subject: [PATCH 470/761] Add s7comm external input plugin (#9360) --- EXTERNAL_PLUGINS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index 66e9143da9aee..225497e84ef53 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -19,6 +19,7 @@ Pull requests welcome. - [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - Gather dnsmasq statistics from dnsmasq - [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - Gather statistics from 389ds and from LDAP trees. - [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - Gather information from your X509 CRL files +- [s7comm](https://github.com/nicolasme/s7comm) - Gather information from Siemens PLC ## Outputs - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. From ee0a86c4aee12003820d4198ed2548286bb9dd08 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 16 Jun 2021 00:46:53 +0200 Subject: [PATCH 471/761] Fix import of sqlite and ignore it on all platforms that require CGO. (#9359) --- plugins/inputs/sql/drivers_sqlite.go | 2 +- plugins/outputs/sql/sqlite.go | 8 ++------ plugins/outputs/sql/sqlite_test.go | 4 ++-- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/sql/drivers_sqlite.go b/plugins/inputs/sql/drivers_sqlite.go index 4c9e56a8cd736..26cf7e08b5170 100644 --- a/plugins/inputs/sql/drivers_sqlite.go +++ b/plugins/inputs/sql/drivers_sqlite.go @@ -1,4 +1,4 @@ -// +build linux,freebsd +// +build linux,freebsd,darwin // +build !mips !mips64 package sql diff --git a/plugins/outputs/sql/sqlite.go b/plugins/outputs/sql/sqlite.go index 2d93cda6b4a7b..3703f42923ac1 100644 --- a/plugins/outputs/sql/sqlite.go +++ b/plugins/outputs/sql/sqlite.go @@ -1,9 +1,5 @@ -// +build !mips -// +build !mipsle -// +build !s390x -// +build !ppc64le -// +build !windows -// +build !freebsd +// +build linux,freebsd,darwin +// +build !mips !mips64 package sql diff --git a/plugins/outputs/sql/sqlite_test.go b/plugins/outputs/sql/sqlite_test.go index 38784013c26cb..6ed08a2570662 100644 --- a/plugins/outputs/sql/sqlite_test.go +++ b/plugins/outputs/sql/sqlite_test.go @@ -1,5 +1,5 @@ -// +build linux -// +build 386 amd64 arm arm64 +// +build linux,freebsd +// +build !mips !mips64 package sql From daec1040c6411494414fc7a674932c7f607859f0 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 15 Jun 2021 17:50:20 -0500 Subject: [PATCH 472/761] Update json_v2 parser to handle null types (#9368) --- plugins/parsers/json_v2/parser.go | 40 ++++++++++--------- plugins/parsers/json_v2/parser_test.go | 4 ++ .../json_v2/testdata/null/expected.out | 1 + .../parsers/json_v2/testdata/null/input.json | 40 +++++++++++++++++++ .../json_v2/testdata/null/telegraf.conf | 8 ++++ 5 files changed, 75 insertions(+), 18 deletions(-) create mode 100644 plugins/parsers/json_v2/testdata/null/expected.out create mode 100644 plugins/parsers/json_v2/testdata/null/input.json create mode 100644 plugins/parsers/json_v2/testdata/null/telegraf.conf diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index 9ba7de2aa7cf9..e586b35ebddc2 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -306,31 +306,35 @@ func (p *Parser) expandArray(result MetricNode) ([]MetricNode, error) { return nil, err } } else { - if !result.Tag && !result.IsObject() { - if result.SetName == p.currentSettings.TimestampKey { - if p.currentSettings.TimestampFormat == "" { - err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") - return nil, err - } - timestamp, err := internal.ParseTimestamp(p.currentSettings.TimestampFormat, result.Value(), p.currentSettings.TimestampTimezone) - if err != nil { - return nil, err + if result.SetName == p.currentSettings.TimestampKey { + if p.currentSettings.TimestampFormat == "" { + err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") + return nil, err + } + timestamp, err := internal.ParseTimestamp(p.currentSettings.TimestampFormat, result.Value(), p.currentSettings.TimestampTimezone) + if err != nil { + return nil, err + } + result.Metric.SetTime(timestamp) + } else { + switch result.Value().(type) { + case nil: // Ignore JSON values that are set as null + default: + if result.Tag { + result.DesiredType = "string" } - result.Metric.SetTime(timestamp) - } else { v, err := p.convertType(result.Value(), result.DesiredType, result.SetName) if err != nil { return nil, err } - result.Metric.AddField(result.OutputName, v) - } - } else if !result.IsObject() { - v, err := p.convertType(result.Value(), "string", result.SetName) - if err != nil { - return nil, err + if result.Tag { + result.Metric.AddTag(result.OutputName, v.(string)) + } else { + result.Metric.AddField(result.OutputName, v) + } } - result.Metric.AddTag(result.OutputName, v.(string)) } + results = append(results, result) } diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index c8deda29edd94..b53eac0fe0ee8 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -69,6 +69,10 @@ func TestData(t *testing.T) { name: "Test multiple timestamps", test: "multiple_timestamps", }, + { + name: "Test field with null", + test: "null", + }, } for _, tc := range tests { diff --git a/plugins/parsers/json_v2/testdata/null/expected.out b/plugins/parsers/json_v2/testdata/null/expected.out new file mode 100644 index 0000000000000..4f99713cb069f --- /dev/null +++ b/plugins/parsers/json_v2/testdata/null/expected.out @@ -0,0 +1 @@ +file,id=ak0217l8ue0x,type=Feature detail="https://earthquake.usgs.gov/earthquakes/feed/v1.0/detail/ak0217l8ue0x.geojson",mag=1.5,place="63 km N of Petersville, Alaska",status="automatic",time=1623708726566,updated=1623709998223,url="https://earthquake.usgs.gov/earthquakes/eventpage/ak0217l8ue0x" diff --git a/plugins/parsers/json_v2/testdata/null/input.json b/plugins/parsers/json_v2/testdata/null/input.json new file mode 100644 index 0000000000000..757f5483c7ebe --- /dev/null +++ b/plugins/parsers/json_v2/testdata/null/input.json @@ -0,0 +1,40 @@ +{ + "type": "FeatureCollection", + "metadata": { + "generated": 1623710450000, + "url": "https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_hour.geojson", + "title": "USGS All Earthquakes, Past Hour", + "status": 200, + "api": "1.10.3", + "count": 10 + }, + "features": [ + { + "type": "Feature", + "properties": { + "mag": 1.5, + "place": "63 km N of Petersville, Alaska", + "time": 1623708726566, + "updated": 1623709998223, + "tz": null, + "url": "https://earthquake.usgs.gov/earthquakes/eventpage/ak0217l8ue0x", + "detail": "https://earthquake.usgs.gov/earthquakes/feed/v1.0/detail/ak0217l8ue0x.geojson", + "felt": null, + "cdi": null, + "mmi": null, + "alert": null, + "status": "automatic" + }, + "id": "ak0217l8ue0x" + } + ], + "bbox": [ + -157.5749, + 32.9001667, + 0.25, + -115.6211667, + 66.331, + 132.5 + ] + } + \ No newline at end of file diff --git a/plugins/parsers/json_v2/testdata/null/telegraf.conf b/plugins/parsers/json_v2/testdata/null/telegraf.conf new file mode 100644 index 0000000000000..a9e55ad1edc41 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/null/telegraf.conf @@ -0,0 +1,8 @@ +[[inputs.file]] + files = ["./testdata/null/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "features" + tags = ["type", "id"] + disable_prepend_keys = true From 28fbdd8fba3f4c2bc734902ed33b3ad252d30bd1 Mon Sep 17 00:00:00 2001 From: Mya Date: Wed, 16 Jun 2021 10:20:33 -0600 Subject: [PATCH 473/761] Use gosmi for SNMP traps (#9343) Use gosmi for snmp_trap plugin Co-authored-by: Logan McNaughton Co-authored-by: reimda --- docs/LICENSE_OF_DEPENDENCIES.md | 2 + go.mod | 1 + go.sum | 8 ++ plugins/inputs/snmp_trap/README.md | 4 + plugins/inputs/snmp_trap/snmp_trap.go | 130 ++++++++++--------- plugins/inputs/snmp_trap/snmp_trap_test.go | 140 ++++++++++++++++----- plugins/inputs/snmp_trap/testdata/test.mib | 40 ++++++ plugins/inputs/snmp_trap/testdata/test2 | 97 ++++++++++++++ 8 files changed, 323 insertions(+), 99 deletions(-) create mode 100644 plugins/inputs/snmp_trap/testdata/test.mib create mode 100644 plugins/inputs/snmp_trap/testdata/test2 diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 9e5e9386c1198..92965418ebe32 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -19,6 +19,7 @@ following works: - github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE) - github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE) - github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE) +- github.com/alecthomas/participle [MIT License](https://github.com/alecthomas/participle/blob/master/COPYING) - github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING) - github.com/aliyun/alibaba-cloud-sdk-go [Apache License 2.0](https://github.com/aliyun/alibaba-cloud-sdk-go/blob/master/LICENSE) - github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) @@ -188,6 +189,7 @@ following works: - github.com/signalfx/golib [Apache License 2.0](https://github.com/signalfx/golib/blob/master/LICENSE) - github.com/signalfx/sapm-proto [Apache License 2.0](https://github.com/signalfx/sapm-proto/blob/master/LICENSE) - github.com/sirupsen/logrus [MIT License](https://github.com/sirupsen/logrus/blob/master/LICENSE) +- github.com/sleepinggenius2/gosmi [MIT License](https://github.com/sleepinggenius2/gosmi/blob/master/LICENSE) - github.com/snowflakedb/gosnowflake [Apache License 2.0](https://github.com/snowflakedb/gosnowflake/blob/master/LICENSE) - github.com/streadway/amqp [BSD 2-Clause "Simplified" License](https://github.com/streadway/amqp/blob/master/LICENSE) - github.com/stretchr/objx [MIT License](https://github.com/stretchr/objx/blob/master/LICENSE) diff --git a/go.mod b/go.mod index c7d9335f67631..6d11a213eac73 100644 --- a/go.mod +++ b/go.mod @@ -112,6 +112,7 @@ require ( github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/signalfx/golib/v3 v3.3.0 github.com/sirupsen/logrus v1.7.0 + github.com/sleepinggenius2/gosmi v0.4.3 github.com/snowflakedb/gosnowflake v1.5.0 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 github.com/stretchr/testify v1.7.0 diff --git a/go.sum b/go.sum index 3e850d4b99485..98ee98e484110 100644 --- a/go.sum +++ b/go.sum @@ -164,6 +164,12 @@ github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaR github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/go-thrift v0.0.0-20170109061633-7914173639b2/go.mod h1:CxCgO+NdpMdi9SsTlGbc0W+/UNxO3I0AabOEJZ3w61w= +github.com/alecthomas/kong v0.2.1/go.mod h1:+inYUSluD+p4L8KdviBSgzcqEjUQOfC5fQDRFuc36lI= +github.com/alecthomas/participle v0.4.1 h1:P2PJWzwrSpuCWXKnzqvw0b0phSfH1kJo4p2HvLynVsI= +github.com/alecthomas/participle v0.4.1/go.mod h1:T8u4bQOSMwrkTWOSyt8/jSFPEnRtd0FKFMjVfYBlqPs= +github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= +github.com/alecthomas/repr v0.0.0-20210301060118-828286944d6a/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -1370,6 +1376,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sleepinggenius2/gosmi v0.4.3 h1:99Zwzy1Cvgsh396sw07oR2G4ab88ILGZFMxSlGWnR6o= +github.com/sleepinggenius2/gosmi v0.4.3/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bTY2CNivIhsnDT0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md index 0680376c400db..f117c35cbeb56 100644 --- a/plugins/inputs/snmp_trap/README.md +++ b/plugins/inputs/snmp_trap/README.md @@ -31,6 +31,10 @@ information. ## 1024. See README.md for details ## # service_address = "udp://:162" + ## + ## Path to mib files + # path = ["/usr/share/snmp/mibs"] + ## ## Timeout running snmptranslate command # timeout = "5s" ## Snmp version diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index 32107eb5ffe71..9fffd8968d593 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -1,28 +1,25 @@ package snmp_trap import ( - "bufio" - "bytes" "fmt" "net" - "os/exec" + "os" + "path/filepath" "strconv" "strings" - "sync" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/sleepinggenius2/gosmi" + "github.com/sleepinggenius2/gosmi/types" "github.com/gosnmp/gosnmp" ) var defaultTimeout = config.Duration(time.Second * 5) -type execer func(config.Duration, string, ...string) ([]byte, error) - type mibEntry struct { mibName string oidText string @@ -32,6 +29,7 @@ type SnmpTrap struct { ServiceAddress string `toml:"service_address"` Timeout config.Duration `toml:"timeout"` Version string `toml:"version"` + Path []string `toml:"path"` // Settings for version 3 // Values: "noAuthNoPriv", "authNoPriv", "authPriv" @@ -44,19 +42,15 @@ type SnmpTrap struct { PrivProtocol string `toml:"priv_protocol"` PrivPassword string `toml:"priv_password"` - acc telegraf.Accumulator - listener *gosnmp.TrapListener - timeFunc func() time.Time - errCh chan error + acc telegraf.Accumulator + listener *gosnmp.TrapListener + timeFunc func() time.Time + lookupFunc func(string) (mibEntry, error) + errCh chan error makeHandlerWrapper func(gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc Log telegraf.Logger `toml:"-"` - - cacheLock sync.Mutex - cache map[string]mibEntry - - execCmd execer } var sampleConfig = ` @@ -68,6 +62,10 @@ var sampleConfig = ` ## 1024. See README.md for details ## # service_address = "udp://:162" + ## + ## Path to mib files + # path = ["/usr/share/snmp/mibs"] + ## ## Timeout running snmptranslate command # timeout = "5s" ## Snmp version, defaults to 2c @@ -104,6 +102,7 @@ func init() { inputs.Add("snmp_trap", func() telegraf.Input { return &SnmpTrap{ timeFunc: time.Now, + lookupFunc: lookup, ServiceAddress: "udp://:162", Timeout: defaultTimeout, Version: "2c", @@ -111,20 +110,50 @@ func init() { }) } -func realExecCmd(timeout config.Duration, arg0 string, args ...string) ([]byte, error) { - cmd := exec.Command(arg0, args...) - var out bytes.Buffer - cmd.Stdout = &out - err := internal.RunTimeout(cmd, time.Duration(timeout)) +func (s *SnmpTrap) Init() error { + // must init, append path for each directory, load module for every file + // or gosmi will fail without saying why + gosmi.Init() + err := s.getMibsPath() if err != nil { - return nil, err + s.Log.Errorf("Could not get path %v", err) } - return out.Bytes(), nil + return nil } -func (s *SnmpTrap) Init() error { - s.cache = map[string]mibEntry{} - s.execCmd = realExecCmd +func (s *SnmpTrap) getMibsPath() error { + var folders []string + for _, mibPath := range s.Path { + gosmi.AppendPath(mibPath) + folders = append(folders, mibPath) + err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error { + if info.Mode()&os.ModeSymlink != 0 { + s, _ := os.Readlink(path) + folders = append(folders, s) + } + return nil + }) + if err != nil { + s.Log.Errorf("Filepath could not be walked %v", err) + } + for _, folder := range folders { + err := filepath.Walk(folder, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + gosmi.AppendPath(path) + } else if info.Mode()&os.ModeSymlink == 0 { + _, err := gosmi.LoadModule(info.Name()) + if err != nil { + s.Log.Errorf("Module could not be loaded %v", err) + } + } + return nil + }) + if err != nil { + s.Log.Errorf("Filepath could not be walked %v", err) + } + } + folders = []string{} + } return nil } @@ -248,6 +277,7 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { func (s *SnmpTrap) Stop() { s.listener.Close() + defer gosmi.Exit() err := <-s.errCh if nil != err { s.Log.Errorf("Error stopping trap listener %v", err) @@ -281,7 +311,7 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { } if trapOid != "" { - e, err := s.lookup(trapOid) + e, err := s.lookupFunc(trapOid) if err != nil { s.Log.Errorf("Error resolving V1 OID, oid=%s, source=%s: %v", trapOid, tags["source"], err) return @@ -319,7 +349,7 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { var e mibEntry var err error - e, err = s.lookup(val) + e, err = s.lookupFunc(val) if nil != err { s.Log.Errorf("Error resolving value OID, oid=%s, source=%s: %v", val, tags["source"], err) return @@ -337,7 +367,7 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { value = v.Value } - e, err := s.lookup(v.Name) + e, err := s.lookupFunc(v.Name) if nil != err { s.Log.Errorf("Error resolving OID oid=%s, source=%s: %v", v.Name, tags["source"], err) return @@ -366,48 +396,16 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { } } -func (s *SnmpTrap) lookup(oid string) (e mibEntry, err error) { - s.cacheLock.Lock() - defer s.cacheLock.Unlock() - var ok bool - if e, ok = s.cache[oid]; !ok { - // cache miss. exec snmptranslate - e, err = s.snmptranslate(oid) - if err == nil { - s.cache[oid] = e - } - return e, err - } - return e, nil -} - -func (s *SnmpTrap) clear() { - s.cacheLock.Lock() - defer s.cacheLock.Unlock() - s.cache = map[string]mibEntry{} -} - -func (s *SnmpTrap) load(oid string, e mibEntry) { - s.cacheLock.Lock() - defer s.cacheLock.Unlock() - s.cache[oid] = e -} - -func (s *SnmpTrap) snmptranslate(oid string) (e mibEntry, err error) { - var out []byte - out, err = s.execCmd(s.Timeout, "snmptranslate", "-Td", "-Ob", "-m", "all", oid) +func lookup(oid string) (e mibEntry, err error) { + var node gosmi.SmiNode + node, err = gosmi.GetNodeByOID(types.OidMustFromString(oid)) + // ensure modules are loaded or node will be empty (might not error) if err != nil { return e, err } - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - ok := scanner.Scan() - if err = scanner.Err(); !ok && err != nil { - return e, err - } - - e.oidText = scanner.Text() + e.oidText = node.RenderQualified() i := strings.Index(e.oidText, "::") if i == -1 { diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index 98e3d7f09b2e5..f917a7bbff918 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -3,6 +3,7 @@ package snmp_trap import ( "fmt" "net" + "path/filepath" "strconv" "strings" "testing" @@ -11,35 +12,11 @@ import ( "github.com/gosnmp/gosnmp" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -func TestLoad(t *testing.T) { - s := &SnmpTrap{} - require.Nil(t, s.Init()) - - defer s.clear() - s.load( - ".1.3.6.1.6.3.1.1.5.1", - mibEntry{ - "SNMPv2-MIB", - "coldStart", - }, - ) - - e, err := s.lookup(".1.3.6.1.6.3.1.1.5.1") - require.NoError(t, err) - require.Equal(t, "SNMPv2-MIB", e.mibName) - require.Equal(t, "coldStart", e.oidText) -} - -func fakeExecCmd(_ config.Duration, x string, y ...string) ([]byte, error) { - return nil, fmt.Errorf("mock " + x + " " + strings.Join(y, " ")) -} - func newMsgFlagsV3(secLevel string) gosnmp.SnmpV3MsgFlags { var msgFlags gosnmp.SnmpV3MsgFlags switch strings.ToLower(secLevel) { @@ -1284,6 +1261,15 @@ func TestReceiveTrap(t *testing.T) { timeFunc: func() time.Time { return fakeTime }, + lookupFunc: func(input string) (mibEntry, error) { + for _, entry := range tt.entries { + if input == entry.oid { + return mibEntry{entry.e.mibName, entry.e.oidText}, nil + } + } + return mibEntry{}, fmt.Errorf("Unexpected oid") + }, + //if cold start be answer otherwise err Log: testutil.Logger{}, Version: tt.version.String(), SecName: tt.secName, @@ -1293,19 +1279,13 @@ func TestReceiveTrap(t *testing.T) { PrivProtocol: tt.privProto, PrivPassword: tt.privPass, } - require.Nil(t, s.Init()) - // Don't look up oid with snmptranslate. - s.execCmd = fakeExecCmd + + require.NoError(t, s.Init()) + var acc testutil.Accumulator require.Nil(t, s.Start(&acc)) defer s.Stop() - // Preload the cache with the oids we'll use in this test - // so snmptranslate and mibs don't need to be installed. - for _, entry := range tt.entries { - s.load(entry.oid, entry.e) - } - var goSNMP gosnmp.GoSNMP if tt.version == gosnmp.Version3 { msgFlags := newMsgFlagsV3(tt.secLevel) @@ -1331,4 +1311,98 @@ func TestReceiveTrap(t *testing.T) { testutil.SortMetrics()) }) } + +} + +func TestGosmiSingleMib(t *testing.T) { + // We would prefer to specify port 0 and let the network + // stack choose an unused port for us but TrapListener + // doesn't have a way to return the autoselected port. + // Instead, we'll use an unusual port and hope it's + // unused. + const port = 12399 + + // Hook into the trap handler so the test knows when the + // trap has been received + received := make(chan int) + wrap := func(f gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc { + return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { + f(p, a) + received <- 0 + } + } + + fakeTime := time.Unix(456456456, 456) + now := uint32(123123123) + + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + trap := gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + } + + metrics := []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "2c", + "source": "127.0.0.1", + "community": "public", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + } + + // Set up the service input plugin + s := &SnmpTrap{ + ServiceAddress: "udp://:" + strconv.Itoa(port), + makeHandlerWrapper: wrap, + timeFunc: func() time.Time { + return fakeTime + }, + lookupFunc: lookup, + Log: testutil.Logger{}, + Version: "2c", + Path: []string{testDataPath}, + } + require.NoError(t, s.Init()) + + var acc testutil.Accumulator + require.Nil(t, s.Start(&acc)) + defer s.Stop() + + goSNMP := newGoSNMP(gosnmp.Version2c, port) + + // Send the trap + sendTrap(t, goSNMP, trap) + + // Wait for trap to be received + select { + case <-received: + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for trap to be received") + } + + // Verify plugin output + testutil.RequireMetricsEqual(t, + metrics, acc.GetTelegrafMetrics(), + testutil.SortMetrics()) } diff --git a/plugins/inputs/snmp_trap/testdata/test.mib b/plugins/inputs/snmp_trap/testdata/test.mib new file mode 100644 index 0000000000000..d8ff17af04eba --- /dev/null +++ b/plugins/inputs/snmp_trap/testdata/test.mib @@ -0,0 +1,40 @@ +SNMPv2-MIB DEFINITIONS ::= BEGIN + +IMPORTS + NOTIFICATION-TYPE, NOTIFICATION-GROUP + FROM test2; + + +snmpMIB MODULE-IDENTITY + LAST-UPDATED "2021060900Z" + ORGANIZATION "testing" + CONTACT-INFO + "EMail: testing@emai.com" + DESCRIPTION + "MIB module for testing snmp_trap plugin + for telegraf + " + ::={ coldStart 1 } + +snmpMIBObjects OBJECT IDENTIFIER ::= { snmpMIB 1 } + +system OBJECT IDENTIFIER ::= { sysUpTimeInstance 1 } + +coldStart NOTIFICATION-TYPE + STATUS current + DESCRIPTION + "A coldStart trap signifies that the SNMP entity, + supporting a notification originator application, is + reinitializing itself and that its configuration may + have been altered." + ::= { snmpTraps 1 } + +snmpBasicNotificationsGroup NOTIFICATION-GROUP + NOTIFICATIONS { coldStart, authenticationFailure } + STATUS current + DESCRIPTION + "The basic notifications implemented by an SNMP entity + supporting command responder applications." + ::= { snmpMIBGroups 7 } + +END diff --git a/plugins/inputs/snmp_trap/testdata/test2 b/plugins/inputs/snmp_trap/testdata/test2 new file mode 100644 index 0000000000000..e4950b902d803 --- /dev/null +++ b/plugins/inputs/snmp_trap/testdata/test2 @@ -0,0 +1,97 @@ +SNMPv2-MIB DEFINITIONS ::= BEGIN + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 2 } +sysUpTimeInstance OBJECT IDENTIFIER ::= { mgmt 1 } +transmission OBJECT IDENTIFIER ::= { sysUpTimeInstance 10 } + +experimental OBJECT IDENTIFIER ::= { internet 3 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +security OBJECT IDENTIFIER ::= { internet 5 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } + +-- transport domains +snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } + +-- transport proxies +snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } + +-- module identities +coldStart OBJECT IDENTIFIER ::= { snmpV2 3 } + +NOTIFICATION-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE NotificationName) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + | empty + Objects ::= + Object + + | Objects "," Object + Object ::= + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +NOTIFICATION-GROUP MACRO ::= +BEGIN + TYPE NOTATION ::= + NotificationsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + NotificationsPart ::= + "NOTIFICATIONS" "{" Notifications "}" + Notifications ::= + Notification + | Notifications "," Notification + Notification ::= + value(NotificationName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) +END + +END \ No newline at end of file From 775d4c2970de6d5fdb426093738f3f5daee6263a Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 17 Jun 2021 10:28:59 -0600 Subject: [PATCH 474/761] Update changelog (cherry picked from commit f4531151409d84cf27dd8bcc00dcda4c79701af6) --- CHANGELOG.md | 11 ++++++- etc/telegraf.conf | 78 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a40e857433be..8f0a59529a9ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v1.19.0-rc1 [2021-06-10] +## v1.19.0 [2021-06-17] #### Release Notes @@ -13,6 +13,14 @@ - [#9051](https://github.com/influxdata/telegraf/pull/9051) `outputs.kafka` Don't prevent telegraf from starting when there's a connection error - [#8795](https://github.com/influxdata/telegraf/pull/8795) `parsers.prometheusremotewrite` Update prometheus dependency to v2.21.0 - [#9295](https://github.com/influxdata/telegraf/pull/9295) `outputs.dynatrace` Use dynatrace-metric-utils +- [#9368](https://github.com/influxdata/telegraf/pull/9368) `parsers.json_v2` Update json_v2 parser to handle null types +- [#9359](https://github.com/influxdata/telegraf/pull/9359) `inputs.sql` Fix import of sqlite and ignore it on all platforms that require CGO. +- [#9329](https://github.com/influxdata/telegraf/pull/9329) `inputs.kube_inventory` Fix connecting to the wrong url +- [#9358](https://github.com/influxdata/telegraf/pull/9358) upgrade denisenkom go-mssql to v0.10.0 +- [#9283](https://github.com/influxdata/telegraf/pull/9283) `processors.parser` Fix segfault +- [#9243](https://github.com/influxdata/telegraf/pull/9243) `inputs.docker` Close all idle connections +- [#9338](https://github.com/influxdata/telegraf/pull/9338) `inputs.suricata` Support new JSON format +- [#9296](https://github.com/influxdata/telegraf/pull/9296) `outputs.influxdb` Fix endless retries #### Features @@ -54,6 +62,7 @@ - [OpenTelemetry](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry) - contributed by @jacobmarble - [Intel Data Plane Development Kit (DPDK)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dpdk) - contributed by @p-zak - [KNX](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/knx_listener) - contributed by @DocLambda +- [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sql) - contributed by @srebhan #### New Output Plugins diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 42e7d22b54b8f..492bf704087db 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -8046,6 +8046,84 @@ # # content_encoding = "identity" +# # Read metrics from SQL queries +# [[inputs.sql]] +# ## Database Driver +# ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for +# ## a list of supported drivers. +# driver = "mysql" +# +# ## Data source name for connecting +# ## The syntax and supported options depends on selected driver. +# dsn = "username:password@mysqlserver:3307/dbname?param=value" +# +# ## Timeout for any operation +# # timeout = "5s" +# +# ## Connection time limits +# ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections +# ## will not be closed automatically. If you specify a positive time, the connections will be closed after +# ## idleing or existing for at least that amount of time, respectively. +# # connection_max_idle_time = "0s" +# # connection_max_life_time = "0s" +# +# ## Connection count limits +# ## By default the number of open connections is not limited and the number of maximum idle connections +# ## will be inferred from the number of queries specified. If you specify a positive number for any of the +# ## two options, connections will be closed when reaching the specified limit. The number of idle connections +# ## will be clipped to the maximum number of connections limit if any. +# # connection_max_open = 0 +# # connection_max_idle = auto +# +# [[inputs.sql.query]] +# ## Query to perform on the server +# query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" +# ## Alternatively to specifying the query directly you can select a file here containing the SQL query. +# ## Only one of 'query' and 'query_script' can be specified! +# # query_script = "/path/to/sql/script.sql" +# +# ## Name of the measurement +# ## In case both measurement and 'measurement_col' are given, the latter takes precedence. +# # measurement = "sql" +# +# ## Column name containing the name of the measurement +# ## If given, this will take precedence over the 'measurement' setting. In case a query result +# ## does not contain the specified column, we fall-back to the 'measurement' setting. +# # measurement_column = "" +# +# ## Column name containing the time of the measurement +# ## If ommited, the time of the query will be used. +# # time_column = "" +# +# ## Format of the time contained in 'time_col' +# ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. +# ## See https://golang.org/pkg/time/#Time.Format for details. +# # time_format = "unix" +# +# ## Column names containing tags +# ## An empty include list will reject all columns and an empty exclude list will not exclude any column. +# ## I.e. by default no columns will be returned as tag and the tags are empty. +# # tag_columns_include = [] +# # tag_columns_exclude = [] +# +# ## Column names containing fields (explicit types) +# ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over +# ## the automatic (driver-based) conversion below. +# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. +# # field_columns_float = [] +# # field_columns_int = [] +# # field_columns_uint = [] +# # field_columns_bool = [] +# # field_columns_string = [] +# +# ## Column names containing fields (automatic types) +# ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty +# ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. +# ## NOTE: We rely on the database driver to perform automatic datatype conversion. +# # field_columns_include = [] +# # field_columns_exclude = [] + + # # Read metrics from Microsoft SQL Server # [[inputs.sqlserver]] # ## Specify instances to monitor with a list of connection strings. From 1ba865f0f0253821661c4673b57d048e3139b516 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 17 Jun 2021 15:24:34 -0600 Subject: [PATCH 475/761] Update build version to 1.20.0 --- build_version.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_version.txt b/build_version.txt index 815d5ca06d530..3989355915568 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.19.0 +1.20.0 From 9a794919e3393e4c0fe30d6e3ab1b690719baf23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Mon, 21 Jun 2021 17:07:52 +0200 Subject: [PATCH 476/761] Linter fixes for plugins/inputs/[de]* (#9379) --- .golangci.yml | 2 +- plugins/inputs/dcos/client.go | 34 ++++++------ plugins/inputs/dcos/dcos.go | 5 +- .../directory_monitor/directory_monitor.go | 22 ++++---- .../directory_monitor_test.go | 6 ++- plugins/inputs/disk/disk.go | 6 +-- plugins/inputs/disk/disk_test.go | 52 ++++++++++--------- plugins/inputs/diskio/diskio.go | 4 +- plugins/inputs/diskio/diskio_linux_test.go | 16 +++--- plugins/inputs/disque/disque.go | 4 +- plugins/inputs/dns_query/dns_query.go | 6 +-- plugins/inputs/docker/client.go | 16 +++--- plugins/inputs/docker/docker.go | 52 +++++++++---------- plugins/inputs/docker/docker_test.go | 18 +++++-- plugins/inputs/docker_log/docker_log.go | 16 +++--- plugins/inputs/ecs/ecs.go | 12 ++--- plugins/inputs/elasticsearch/elasticsearch.go | 3 +- .../elasticsearch/elasticsearch_test.go | 12 ++--- plugins/inputs/ethtool/ethtool_linux.go | 9 ++-- plugins/inputs/ethtool/ethtool_test.go | 17 +++--- .../eventhub_consumer/eventhub_consumer.go | 35 +++++++------ plugins/inputs/exec/exec.go | 7 +-- plugins/inputs/execd/execd_test.go | 4 +- 23 files changed, 187 insertions(+), 171 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 23218a5c7ff1b..47bfdae26e95f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -50,7 +50,7 @@ linters-settings: - name: error-return - name: error-strings - name: errorf - - name: flag-parameter +# - name: flag-parameter #disable for now - name: function-result-limit arguments: [ 3 ] - name: identical-branches diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index 534c2fcb1eab7..fcb976e311ccf 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -156,7 +156,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok return nil, err } - loc := c.url("/acs/api/v1/auth/login") + loc := c.toURL("/acs/api/v1/auth/login") req, err := http.NewRequest("POST", loc, bytes.NewBuffer(octets)) if err != nil { return nil, err @@ -208,7 +208,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok func (c *ClusterClient) GetSummary(ctx context.Context) (*Summary, error) { summary := &Summary{} - err := c.doGet(ctx, c.url("/mesos/master/state-summary"), summary) + err := c.doGet(ctx, c.toURL("/mesos/master/state-summary"), summary) if err != nil { return nil, err } @@ -220,7 +220,7 @@ func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Conta list := []string{} path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers", node) - err := c.doGet(ctx, c.url(path), &list) + err := c.doGet(ctx, c.toURL(path), &list) if err != nil { return nil, err } @@ -233,10 +233,10 @@ func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Conta return containers, nil } -func (c *ClusterClient) getMetrics(ctx context.Context, url string) (*Metrics, error) { +func (c *ClusterClient) getMetrics(ctx context.Context, address string) (*Metrics, error) { metrics := &Metrics{} - err := c.doGet(ctx, url, metrics) + err := c.doGet(ctx, address, metrics) if err != nil { return nil, err } @@ -246,21 +246,21 @@ func (c *ClusterClient) getMetrics(ctx context.Context, url string) (*Metrics, e func (c *ClusterClient) GetNodeMetrics(ctx context.Context, node string) (*Metrics, error) { path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/node", node) - return c.getMetrics(ctx, c.url(path)) + return c.getMetrics(ctx, c.toURL(path)) } func (c *ClusterClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) { path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s", node, container) - return c.getMetrics(ctx, c.url(path)) + return c.getMetrics(ctx, c.toURL(path)) } func (c *ClusterClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) { path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s/app", node, container) - return c.getMetrics(ctx, c.url(path)) + return c.getMetrics(ctx, c.toURL(path)) } -func createGetRequest(url string, token string) (*http.Request, error) { - req, err := http.NewRequest("GET", url, nil) +func createGetRequest(address string, token string) (*http.Request, error) { + req, err := http.NewRequest("GET", address, nil) if err != nil { return nil, err } @@ -273,8 +273,8 @@ func createGetRequest(url string, token string) (*http.Request, error) { return req, nil } -func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) error { - req, err := createGetRequest(url, c.token) +func (c *ClusterClient) doGet(ctx context.Context, address string, v interface{}) error { + req, err := createGetRequest(address, c.token) if err != nil { return err } @@ -304,7 +304,7 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er if resp.StatusCode < 200 || resp.StatusCode >= 300 { return &APIError{ - URL: url, + URL: address, StatusCode: resp.StatusCode, Title: resp.Status, } @@ -318,10 +318,10 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er return err } -func (c *ClusterClient) url(path string) string { - url := *c.clusterURL - url.Path = path - return url.String() +func (c *ClusterClient) toURL(path string) string { + clusterURL := *c.clusterURL + clusterURL.Path = path + return clusterURL.String() } func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) { diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index 25e4e4755cc30..8fcb321ff36cf 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -10,6 +10,7 @@ import ( "time" jwt "github.com/dgrijalva/jwt-go/v4" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" @@ -352,13 +353,13 @@ func (d *DCOS) createClient() (Client, error) { return nil, err } - url, err := url.Parse(d.ClusterURL) + address, err := url.Parse(d.ClusterURL) if err != nil { return nil, err } client := NewClusterClient( - url, + address, time.Duration(d.ResponseTimeout), d.MaxConnections, tlsCfg, diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go index d8ed8acf04764..45acd1c062ba9 100644 --- a/plugins/inputs/directory_monitor/directory_monitor.go +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -2,27 +2,27 @@ package directory_monitor import ( "bufio" + "compress/gzip" "context" "errors" "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" "regexp" + "sync" "time" + "golang.org/x/sync/semaphore" + "gopkg.in/djherbis/times.v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/selfstat" - "golang.org/x/sync/semaphore" - "gopkg.in/djherbis/times.v1" - - "compress/gzip" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" ) const sampleConfig = ` @@ -263,9 +263,7 @@ func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Read if err != nil { return err } - if firstLine { - firstLine = false - } + firstLine = false if err := monitor.sendMetrics(metrics); err != nil { return err diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go index 3cad4ee6857b9..2ad504637c6c2 100644 --- a/plugins/inputs/directory_monitor/directory_monitor_test.go +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -8,9 +8,10 @@ import ( "path/filepath" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestCSVGZImport(t *testing.T) { @@ -77,8 +78,9 @@ func TestCSVGZImport(t *testing.T) { // File should have gone back to the test directory, as we configured. _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile)) - _, err = os.Stat(filepath.Join(finishedDirectory, testCsvGzFile)) + require.NoError(t, err) + _, err = os.Stat(filepath.Join(finishedDirectory, testCsvGzFile)) require.NoError(t, err) } diff --git a/plugins/inputs/disk/disk.go b/plugins/inputs/disk/disk.go index 0ceea27167389..0a0fbf6f728a3 100644 --- a/plugins/inputs/disk/disk.go +++ b/plugins/inputs/disk/disk.go @@ -13,7 +13,7 @@ type DiskStats struct { ps system.PS // Legacy support - Mountpoints []string `toml:"mountpoints"` + LegacyMountPoints []string `toml:"mountpoints"` MountPoints []string `toml:"mount_points"` IgnoreFS []string `toml:"ignore_fs"` @@ -38,8 +38,8 @@ func (ds *DiskStats) SampleConfig() string { func (ds *DiskStats) Gather(acc telegraf.Accumulator) error { // Legacy support: - if len(ds.Mountpoints) != 0 { - ds.MountPoints = ds.Mountpoints + if len(ds.LegacyMountPoints) != 0 { + ds.MountPoints = ds.LegacyMountPoints } disks, partitions, err := ds.ps.DiskUsage(ds.MountPoints, ds.IgnoreFS) diff --git a/plugins/inputs/disk/disk_test.go b/plugins/inputs/disk/disk_test.go index 13180fffb1c37..47a822b4410bf 100644 --- a/plugins/inputs/disk/disk_test.go +++ b/plugins/inputs/disk/disk_test.go @@ -5,12 +5,12 @@ import ( "os" "testing" - "github.com/influxdata/telegraf/plugins/inputs/system" - "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/disk" - "github.com/stretchr/testify/assert" + diskUtil "github.com/shirou/gopsutil/disk" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/inputs/system" + "github.com/influxdata/telegraf/testutil" ) type MockFileInfo struct { @@ -25,7 +25,7 @@ func TestDiskUsage(t *testing.T) { var acc testutil.Accumulator var err error - psAll := []disk.PartitionStat{ + psAll := []diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -39,7 +39,7 @@ func TestDiskUsage(t *testing.T) { Opts: "rw,noatime,nodiratime,errors=remount-ro", }, } - duAll := []disk.UsageStat{ + duAll := []diskUtil.UsageStat{ { Path: "/", Fstype: "ext4", @@ -72,7 +72,7 @@ func TestDiskUsage(t *testing.T) { numDiskMetrics := acc.NFields() expectedAllDiskMetrics := 14 - assert.Equal(t, expectedAllDiskMetrics, numDiskMetrics) + require.Equal(t, expectedAllDiskMetrics, numDiskMetrics) tags1 := map[string]string{ "path": string(os.PathSeparator), @@ -111,26 +111,28 @@ func TestDiskUsage(t *testing.T) { // We expect 6 more DiskMetrics to show up with an explicit match on "/" // and /home not matching the /dev in MountPoints err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) - assert.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) + require.NoError(t, err) + require.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) // We should see all the diskpoints as MountPoints includes both // / and /home err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc) - assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) + require.NoError(t, err) + require.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) } func TestDiskUsageHostMountPrefix(t *testing.T) { tests := []struct { name string - partitionStats []disk.PartitionStat - usageStats []*disk.UsageStat + partitionStats []diskUtil.PartitionStat + usageStats []*diskUtil.UsageStat hostMountPrefix string expectedTags map[string]string expectedFields map[string]interface{} }{ { name: "no host mount prefix", - partitionStats: []disk.PartitionStat{ + partitionStats: []diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -138,7 +140,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Opts: "ro", }, }, - usageStats: []*disk.UsageStat{ + usageStats: []*diskUtil.UsageStat{ { Path: "/", Total: 42, @@ -162,7 +164,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, { name: "host mount prefix", - partitionStats: []disk.PartitionStat{ + partitionStats: []diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/hostfs/var", @@ -170,7 +172,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Opts: "ro", }, }, - usageStats: []*disk.UsageStat{ + usageStats: []*diskUtil.UsageStat{ { Path: "/hostfs/var", Total: 42, @@ -195,7 +197,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, { name: "host mount prefix exact match", - partitionStats: []disk.PartitionStat{ + partitionStats: []diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/hostfs", @@ -203,7 +205,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Opts: "ro", }, }, - usageStats: []*disk.UsageStat{ + usageStats: []*diskUtil.UsageStat{ { Path: "/hostfs", Total: 42, @@ -259,7 +261,7 @@ func TestDiskStats(t *testing.T) { var acc testutil.Accumulator var err error - duAll := []*disk.UsageStat{ + duAll := []*diskUtil.UsageStat{ { Path: "/", Fstype: "ext4", @@ -281,7 +283,7 @@ func TestDiskStats(t *testing.T) { InodesUsed: 2000, }, } - duFiltered := []*disk.UsageStat{ + duFiltered := []*diskUtil.UsageStat{ { Path: "/", Fstype: "ext4", @@ -294,7 +296,7 @@ func TestDiskStats(t *testing.T) { }, } - psAll := []*disk.PartitionStat{ + psAll := []*diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -309,7 +311,7 @@ func TestDiskStats(t *testing.T) { }, } - psFiltered := []*disk.PartitionStat{ + psFiltered := []*diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", @@ -327,7 +329,7 @@ func TestDiskStats(t *testing.T) { numDiskMetrics := acc.NFields() expectedAllDiskMetrics := 14 - assert.Equal(t, expectedAllDiskMetrics, numDiskMetrics) + require.Equal(t, expectedAllDiskMetrics, numDiskMetrics) tags1 := map[string]string{ "path": "/", @@ -366,10 +368,12 @@ func TestDiskStats(t *testing.T) { // We expect 6 more DiskMetrics to show up with an explicit match on "/" // and /home not matching the /dev in MountPoints err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) - assert.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) + require.NoError(t, err) + require.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) // We should see all the diskpoints as MountPoints includes both // / and /home err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc) - assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) + require.NoError(t, err) + require.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) } diff --git a/plugins/inputs/diskio/diskio.go b/plugins/inputs/diskio/diskio.go index c347e90a36526..9458b2af7a68f 100644 --- a/plugins/inputs/diskio/diskio.go +++ b/plugins/inputs/diskio/diskio.go @@ -74,11 +74,11 @@ func hasMeta(s string) bool { func (d *DiskIO) init() error { for _, device := range d.Devices { if hasMeta(device) { - filter, err := filter.Compile(d.Devices) + deviceFilter, err := filter.Compile(d.Devices) if err != nil { return fmt.Errorf("error compiling device pattern: %s", err.Error()) } - d.deviceFilter = filter + d.deviceFilter = deviceFilter } } d.initialized = true diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index 222cb783f1870..ede35b5befead 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -92,12 +92,14 @@ func TestDiskIOStats_diskName(t *testing.T) { } for _, tc := range tests { - s := DiskIO{ - NameTemplates: tc.templates, - } - defer setupNullDisk(t, &s, "null")() - name, _ := s.diskName("null") - require.Equal(t, tc.expected, name, "Templates: %#v", tc.templates) + func() { + s := DiskIO{ + NameTemplates: tc.templates, + } + defer setupNullDisk(t, &s, "null")() //nolint:revive // done on purpose, cleaning will be executed properly + name, _ := s.diskName("null") + require.Equal(t, tc.expected, name, "Templates: %#v", tc.templates) + }() } } @@ -107,7 +109,7 @@ func TestDiskIOStats_diskTags(t *testing.T) { s := &DiskIO{ DeviceTags: []string{"MY_PARAM_2"}, } - defer setupNullDisk(t, s, "null")() + defer setupNullDisk(t, s, "null")() //nolint:revive // done on purpose, cleaning will be executed properly dt := s.diskTags("null") require.Equal(t, map[string]string{"MY_PARAM_2": "myval2"}, dt) } diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index 6c2606af4ad94..6fa63ec8bd874 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -65,10 +65,10 @@ var ErrProtocolError = errors.New("disque protocol error") // Returns one of the errors encountered while gather stats (if any). func (d *Disque) Gather(acc telegraf.Accumulator) error { if len(d.Servers) == 0 { - url := &url.URL{ + address := &url.URL{ Host: ":7711", } - return d.gatherServer(url, acc) + return d.gatherServer(address, acc) } var wg sync.WaitGroup diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index 4c721a0964776..a3b2f262ba7e0 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -16,9 +16,9 @@ import ( type ResultType uint64 const ( - Success ResultType = 0 - Timeout = 1 - Error = 2 + Success ResultType = iota + Timeout + Error ) type DNSQuery struct { diff --git a/plugins/inputs/docker/client.go b/plugins/inputs/docker/client.go index 14e4396980b9a..6abba44c549d6 100644 --- a/plugins/inputs/docker/client.go +++ b/plugins/inputs/docker/client.go @@ -7,7 +7,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - docker "github.com/docker/docker/client" + dockerClient "github.com/docker/docker/client" ) var ( @@ -27,7 +27,7 @@ type Client interface { } func NewEnvClient() (Client, error) { - client, err := docker.NewClientWithOpts(docker.FromEnv) + client, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv) if err != nil { return nil, err } @@ -40,11 +40,11 @@ func NewClient(host string, tlsConfig *tls.Config) (Client, error) { } httpClient := &http.Client{Transport: transport} - client, err := docker.NewClientWithOpts( - docker.WithHTTPHeaders(defaultHeaders), - docker.WithHTTPClient(httpClient), - docker.WithVersion(version), - docker.WithHost(host)) + client, err := dockerClient.NewClientWithOpts( + dockerClient.WithHTTPHeaders(defaultHeaders), + dockerClient.WithHTTPClient(httpClient), + dockerClient.WithVersion(version), + dockerClient.WithHost(host)) if err != nil { return nil, err } @@ -53,7 +53,7 @@ func NewClient(host string, tlsConfig *tls.Config) (Client, error) { } type SocketClient struct { - client *docker.Client + client *dockerClient.Client } func (c *SocketClient) Info(ctx context.Context) (types.Info, error) { diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 47eab7ce2430e..4e6dc5ad4d221 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -15,11 +15,12 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal/choice" - "github.com/influxdata/telegraf/internal/docker" + dockerint "github.com/influxdata/telegraf/internal/docker" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -513,7 +514,7 @@ func (d *Docker) gatherContainer( return nil } - imageName, imageVersion := docker.ParseImage(container.Image) + imageName, imageVersion := dockerint.ParseImage(container.Image) tags := map[string]string{ "engine_host": d.engineHost, @@ -628,18 +629,16 @@ func (d *Docker) gatherContainerInspect( } } - parseContainerStats(v, acc, tags, container.ID, d.PerDeviceInclude, d.TotalInclude, daemonOSType) + d.parseContainerStats(v, acc, tags, container.ID, daemonOSType) return nil } -func parseContainerStats( +func (d *Docker) parseContainerStats( stat *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, id string, - perDeviceInclude []string, - totalInclude []string, daemonOSType string, ) { tm := stat.Read @@ -708,7 +707,7 @@ func parseContainerStats( acc.AddFields("docker_container_mem", memfields, tags, tm) - if choice.Contains("cpu", totalInclude) { + if choice.Contains("cpu", d.TotalInclude) { cpufields := map[string]interface{}{ "usage_total": stat.CPUStats.CPUUsage.TotalUsage, "usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode, @@ -735,7 +734,7 @@ func parseContainerStats( acc.AddFields("docker_container_cpu", cpufields, cputags, tm) } - if choice.Contains("cpu", perDeviceInclude) && len(stat.CPUStats.CPUUsage.PercpuUsage) > 0 { + if choice.Contains("cpu", d.PerDeviceInclude) && len(stat.CPUStats.CPUUsage.PercpuUsage) > 0 { // If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs // (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400) var percpuusage []uint64 @@ -770,12 +769,12 @@ func parseContainerStats( "container_id": id, } // Create a new network tag dictionary for the "network" tag - if choice.Contains("network", perDeviceInclude) { + if choice.Contains("network", d.PerDeviceInclude) { nettags := copyTags(tags) nettags["network"] = network acc.AddFields("docker_container_net", netfields, nettags, tm) } - if choice.Contains("network", totalInclude) { + if choice.Contains("network", d.TotalInclude) { for field, value := range netfields { if field == "container_id" { continue @@ -802,17 +801,14 @@ func parseContainerStats( } // totalNetworkStatMap could be empty if container is running with --net=host. - if choice.Contains("network", totalInclude) && len(totalNetworkStatMap) != 0 { + if choice.Contains("network", d.TotalInclude) && len(totalNetworkStatMap) != 0 { nettags := copyTags(tags) nettags["network"] = "total" totalNetworkStatMap["container_id"] = id acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, tm) } - perDeviceBlkio := choice.Contains("blkio", perDeviceInclude) - totalBlkio := choice.Contains("blkio", totalInclude) - - gatherBlockIOMetrics(stat, acc, tags, tm, id, perDeviceBlkio, totalBlkio) + d.gatherBlockIOMetrics(acc, stat, tags, tm, id) } // Make a map of devices to their block io stats @@ -877,27 +873,27 @@ func getDeviceStatMap(blkioStats types.BlkioStats) map[string]map[string]interfa return deviceStatMap } -func gatherBlockIOMetrics( - stat *types.StatsJSON, +func (d *Docker) gatherBlockIOMetrics( acc telegraf.Accumulator, + stat *types.StatsJSON, tags map[string]string, tm time.Time, id string, - perDevice bool, - total bool, ) { + perDeviceBlkio := choice.Contains("blkio", d.PerDeviceInclude) + totalBlkio := choice.Contains("blkio", d.TotalInclude) blkioStats := stat.BlkioStats deviceStatMap := getDeviceStatMap(blkioStats) totalStatMap := make(map[string]interface{}) for device, fields := range deviceStatMap { fields["container_id"] = id - if perDevice { + if perDeviceBlkio { iotags := copyTags(tags) iotags["device"] = device acc.AddFields("docker_container_blkio", fields, iotags, tm) } - if total { + if totalBlkio { for field, value := range fields { if field == "container_id" { continue @@ -922,7 +918,7 @@ func gatherBlockIOMetrics( } } } - if total { + if totalBlkio { totalStatMap["container_id"] = id iotags := copyTags(tags) iotags["device"] = "total" @@ -965,20 +961,20 @@ func (d *Docker) createContainerFilters() error { d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...) } - filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) + containerFilter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) if err != nil { return err } - d.containerFilter = filter + d.containerFilter = containerFilter return nil } func (d *Docker) createLabelFilters() error { - filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) + labelFilter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) if err != nil { return err } - d.labelFilter = filter + d.labelFilter = labelFilter return nil } @@ -986,11 +982,11 @@ func (d *Docker) createContainerStateFilters() error { if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 { d.ContainerStateInclude = []string{"running"} } - filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) + stateFilter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) if err != nil { return err } - d.stateFilter = filter + d.stateFilter = stateFilter return nil } diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index f5a8ff7a89b83..599adae409e99 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -12,10 +12,11 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) type MockClient struct { @@ -120,7 +121,12 @@ func TestDockerGatherContainerStats(t *testing.T) { "container_image": "redis/image", } - parseContainerStats(stats, &acc, tags, "123456789", containerMetricClasses, containerMetricClasses, "linux") + d := &Docker{ + Log: testutil.Logger{}, + PerDeviceInclude: containerMetricClasses, + TotalInclude: containerMetricClasses, + } + d.parseContainerStats(stats, &acc, tags, "123456789", "linux") // test docker_container_net measurement netfields := map[string]interface{}{ @@ -1270,8 +1276,12 @@ func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - parseContainerStats(tt.args.stat, &acc, tt.args.tags, tt.args.id, tt.args.perDeviceInclude, - tt.args.totalInclude, tt.args.daemonOSType) + d := &Docker{ + Log: testutil.Logger{}, + PerDeviceInclude: tt.args.perDeviceInclude, + TotalInclude: tt.args.totalInclude, + } + d.parseContainerStats(tt.args.stat, &acc, tt.args.tags, tt.args.id, tt.args.daemonOSType) actual := FilterMetrics(acc.GetTelegrafMetrics(), func(m telegraf.Metric) bool { return choice.Contains(m.Name(), diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index f877961ba2676..622f9924e4236 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -15,6 +15,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/pkg/stdcopy" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" @@ -307,8 +308,7 @@ func (d *DockerLogs) tailContainerLogs( func parseLine(line []byte) (time.Time, string, error) { parts := bytes.SplitN(line, []byte(" "), 2) - switch len(parts) { - case 1: + if len(parts) == 1 { parts = append(parts, []byte("")) } @@ -421,20 +421,20 @@ func (d *DockerLogs) Stop() { // Following few functions have been inherited from telegraf docker input plugin func (d *DockerLogs) createContainerFilters() error { - filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) + containerFilter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) if err != nil { return err } - d.containerFilter = filter + d.containerFilter = containerFilter return nil } func (d *DockerLogs) createLabelFilters() error { - filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) + labelFilter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) if err != nil { return err } - d.labelFilter = filter + d.labelFilter = labelFilter return nil } @@ -442,11 +442,11 @@ func (d *DockerLogs) createContainerStateFilters() error { if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 { d.ContainerStateInclude = []string{"running"} } - filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) + stateFilter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) if err != nil { return err } - d.stateFilter = filter + d.stateFilter = stateFilter return nil } diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go index d563fef5038d5..f044e8d2cb7fe 100644 --- a/plugins/inputs/ecs/ecs.go +++ b/plugins/inputs/ecs/ecs.go @@ -220,20 +220,20 @@ func mergeTags(a map[string]string, b map[string]string) map[string]string { } func (ecs *Ecs) createContainerNameFilters() error { - filter, err := filter.NewIncludeExcludeFilter(ecs.ContainerNameInclude, ecs.ContainerNameExclude) + containerNameFilter, err := filter.NewIncludeExcludeFilter(ecs.ContainerNameInclude, ecs.ContainerNameExclude) if err != nil { return err } - ecs.containerNameFilter = filter + ecs.containerNameFilter = containerNameFilter return nil } func (ecs *Ecs) createLabelFilters() error { - filter, err := filter.NewIncludeExcludeFilter(ecs.LabelInclude, ecs.LabelExclude) + labelFilter, err := filter.NewIncludeExcludeFilter(ecs.LabelInclude, ecs.LabelExclude) if err != nil { return err } - ecs.labelFilter = filter + ecs.labelFilter = labelFilter return nil } @@ -250,11 +250,11 @@ func (ecs *Ecs) createContainerStatusFilters() error { ecs.ContainerStatusExclude[i] = strings.ToUpper(exclude) } - filter, err := filter.NewIncludeExcludeFilter(ecs.ContainerStatusInclude, ecs.ContainerStatusExclude) + statusFilter, err := filter.NewIncludeExcludeFilter(ecs.ContainerStatusInclude, ecs.ContainerStatusExclude) if err != nil { return err } - ecs.statusFilter = filter + ecs.statusFilter = statusFilter return nil } diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index aac23d707edba..0bd4ce677cd9e 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -644,7 +644,8 @@ func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now // determine shard tag and primary/replica designation shardType := "replica" - if flattened.Fields["routing_primary"] == true { + routingPrimary, _ := flattened.Fields["routing_primary"].(bool) + if routingPrimary { shardType = "primary" } delete(flattened.Fields, "routing_primary") diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index 1a24d3caaf66e..8248d063b6883 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -6,9 +6,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func defaultTags() map[string]string { @@ -206,8 +206,8 @@ func TestGatherClusterStatsMaster(t *testing.T) { info.masterID = masterID es.serverInfo["http://example.com:9200"] = info - IsMasterResultTokens := strings.Split(string(IsMasterResult), " ") - require.Equal(t, masterID, IsMasterResultTokens[0], "catmaster is incorrect") + isMasterResultTokens := strings.Split(IsMasterResult, " ") + require.Equal(t, masterID, isMasterResultTokens[0], "catmaster is incorrect") // now get node status, which determines whether we're master var acc testutil.Accumulator @@ -244,8 +244,8 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { masterID, err := es.getCatMaster("junk") require.NoError(t, err) - IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ") - require.Equal(t, masterID, IsNotMasterResultTokens[0], "catmaster is incorrect") + isNotMasterResultTokens := strings.Split(IsNotMasterResult, " ") + require.Equal(t, masterID, isNotMasterResultTokens[0], "catmaster is incorrect") // now get node status, which determines whether we're master var acc testutil.Accumulator diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go index 13dabd2f8a6b6..08e21db50dede 100644 --- a/plugins/inputs/ethtool/ethtool_linux.go +++ b/plugins/inputs/ethtool/ethtool_linux.go @@ -6,15 +6,16 @@ import ( "net" "sync" + "github.com/pkg/errors" + ethtoolLib "github.com/safchain/ethtool" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/pkg/errors" - "github.com/safchain/ethtool" ) type CommandEthtool struct { - ethtool *ethtool.Ethtool + ethtool *ethtoolLib.Ethtool } func (e *Ethtool) Gather(acc telegraf.Accumulator) error { @@ -98,7 +99,7 @@ func (c *CommandEthtool) Init() error { return nil } - e, err := ethtool.NewEthtool() + e, err := ethtoolLib.NewEthtool() if err == nil { c.ethtool = e } diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go index ac5527733ce73..87bc136d2db11 100644 --- a/plugins/inputs/ethtool/ethtool_test.go +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -6,9 +6,10 @@ import ( "net" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/pkg/errors" "github.com/stretchr/testify/assert" + + "github.com/influxdata/telegraf/testutil" ) var command *Ethtool @@ -31,13 +32,12 @@ func (c *CommandEthtoolMock) Init() error { return nil } -func (c *CommandEthtoolMock) DriverName(intf string) (driverName string, err error) { +func (c *CommandEthtoolMock) DriverName(intf string) (string, error) { i := c.InterfaceMap[intf] if i != nil { - driverName = i.DriverName - return + return i.DriverName, nil } - return driverName, errors.New("interface not found") + return "", errors.New("interface not found") } func (c *CommandEthtoolMock) Interfaces() ([]net.Interface, error) { @@ -66,13 +66,12 @@ func (c *CommandEthtoolMock) Interfaces() ([]net.Interface, error) { return interfaceNames, nil } -func (c *CommandEthtoolMock) Stats(intf string) (stat map[string]uint64, err error) { +func (c *CommandEthtoolMock) Stats(intf string) (map[string]uint64, error) { i := c.InterfaceMap[intf] if i != nil { - stat = i.Stat - return + return i.Stat, nil } - return stat, errors.New("interface not found") + return nil, errors.New("interface not found") } func setup() { diff --git a/plugins/inputs/eventhub_consumer/eventhub_consumer.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go index 114a6335060ca..064502b0ed831 100644 --- a/plugins/inputs/eventhub_consumer/eventhub_consumer.go +++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go @@ -6,8 +6,9 @@ import ( "sync" "time" - eventhub "github.com/Azure/azure-event-hubs-go/v3" + eventhubClient "github.com/Azure/azure-event-hubs-go/v3" "github.com/Azure/azure-event-hubs-go/v3/persist" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" @@ -54,7 +55,7 @@ type EventHub struct { Log telegraf.Logger `toml:"-"` // Azure - hub *eventhub.Hub + hub *eventhubClient.Hub cancel context.CancelFunc wg sync.WaitGroup @@ -172,7 +173,7 @@ func (e *EventHub) Init() (err error) { } // Set hub options - hubOpts := []eventhub.HubOption{} + hubOpts := []eventhubClient.HubOption{} if e.PersistenceDir != "" { persister, err := persist.NewFilePersister(e.PersistenceDir) @@ -180,20 +181,20 @@ func (e *EventHub) Init() (err error) { return err } - hubOpts = append(hubOpts, eventhub.HubWithOffsetPersistence(persister)) + hubOpts = append(hubOpts, eventhubClient.HubWithOffsetPersistence(persister)) } if e.UserAgent != "" { - hubOpts = append(hubOpts, eventhub.HubWithUserAgent(e.UserAgent)) + hubOpts = append(hubOpts, eventhubClient.HubWithUserAgent(e.UserAgent)) } else { - hubOpts = append(hubOpts, eventhub.HubWithUserAgent(internal.ProductToken())) + hubOpts = append(hubOpts, eventhubClient.HubWithUserAgent(internal.ProductToken())) } // Create event hub connection if e.ConnectionString != "" { - e.hub, err = eventhub.NewHubFromConnectionString(e.ConnectionString, hubOpts...) + e.hub, err = eventhubClient.NewHubFromConnectionString(e.ConnectionString, hubOpts...) } else { - e.hub, err = eventhub.NewHubFromEnvironment(hubOpts...) + e.hub, err = eventhubClient.NewHubFromEnvironment(hubOpts...) } return err @@ -236,25 +237,25 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error { return nil } -func (e *EventHub) configureReceiver() []eventhub.ReceiveOption { - receiveOpts := []eventhub.ReceiveOption{} +func (e *EventHub) configureReceiver() []eventhubClient.ReceiveOption { + receiveOpts := []eventhubClient.ReceiveOption{} if e.ConsumerGroup != "" { - receiveOpts = append(receiveOpts, eventhub.ReceiveWithConsumerGroup(e.ConsumerGroup)) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithConsumerGroup(e.ConsumerGroup)) } if !e.FromTimestamp.IsZero() { - receiveOpts = append(receiveOpts, eventhub.ReceiveFromTimestamp(e.FromTimestamp)) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveFromTimestamp(e.FromTimestamp)) } else if e.Latest { - receiveOpts = append(receiveOpts, eventhub.ReceiveWithLatestOffset()) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithLatestOffset()) } if e.PrefetchCount != 0 { - receiveOpts = append(receiveOpts, eventhub.ReceiveWithPrefetchCount(e.PrefetchCount)) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithPrefetchCount(e.PrefetchCount)) } if e.Epoch != 0 { - receiveOpts = append(receiveOpts, eventhub.ReceiveWithEpoch(e.Epoch)) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithEpoch(e.Epoch)) } return receiveOpts @@ -263,7 +264,7 @@ func (e *EventHub) configureReceiver() []eventhub.ReceiveOption { // OnMessage handles an Event. When this function returns without error the // Event is immediately accepted and the offset is updated. If an error is // returned the Event is marked for redelivery. -func (e *EventHub) onMessage(ctx context.Context, event *eventhub.Event) error { +func (e *EventHub) onMessage(ctx context.Context, event *eventhubClient.Event) error { metrics, err := e.createMetrics(event) if err != nil { return err @@ -345,7 +346,7 @@ func deepCopyMetrics(in []telegraf.Metric) []telegraf.Metric { } // CreateMetrics returns the Metrics from the Event. -func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, error) { +func (e *EventHub) createMetrics(event *eventhubClient.Event) ([]telegraf.Metric, error) { metrics, err := e.parser.Parse(event.Data) if err != nil { return nil, err diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index afc6beb6a7a80..e8ba23db44522 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -4,20 +4,21 @@ import ( "bytes" "fmt" "io" - "os/exec" + osExec "os/exec" "path/filepath" "runtime" "strings" "sync" "time" + "github.com/kballard/go-shellquote" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/nagios" - "github.com/kballard/go-shellquote" ) const sampleConfig = ` @@ -76,7 +77,7 @@ func (c CommandRunner) Run( return nil, nil, fmt.Errorf("exec: unable to parse command, %s", err) } - cmd := exec.Command(splitCmd[0], splitCmd[1:]...) + cmd := osExec.Command(splitCmd[0], splitCmd[1:]...) var ( out bytes.Buffer diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index 72c84e1d12cc6..a8c8364394480 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -139,8 +139,8 @@ func (tm *TestMetricMaker) LogName() string { return tm.Name() } -func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { - return metric +func (tm *TestMetricMaker) MakeMetric(aMetric telegraf.Metric) telegraf.Metric { + return aMetric } func (tm *TestMetricMaker) Log() telegraf.Logger { From 5f6c37bb8630cef888db8c2e76c9c149f6e90035 Mon Sep 17 00:00:00 2001 From: Jack Henschel Date: Mon, 21 Jun 2021 18:50:19 +0300 Subject: [PATCH 477/761] Adjust link to ceph documentation (#9378) --- plugins/inputs/ceph/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index dc58adb0ffe6b..5d5afadc19fad 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -2,7 +2,7 @@ Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. -Ceph has introduced a Telegraf and Influx plugin in the 13.x Mimic release. The Telegraf module sends to a Telegraf configured with a socket_listener. [Learn more in their docs](http://docs.ceph.com/docs/mimic/mgr/telegraf/) +Ceph has introduced a Telegraf and Influx plugin in the 13.x Mimic release. The Telegraf module sends to a Telegraf configured with a socket_listener. [Learn more in their docs](https://docs.ceph.com/en/latest/mgr/telegraf/) *Admin Socket Stats* From cf616939f19f738cc97ccb1cea8bbfde2cc79c5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Mon, 21 Jun 2021 18:56:16 +0200 Subject: [PATCH 478/761] kube_inventory: expand tls key/tls certificate documentation (#9357) --- plugins/inputs/kube_inventory/README.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index c9d6fb0be467d..7803d4fc4e9eb 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -68,8 +68,11 @@ avoid cardinality issues: selector_exclude = ["*"] ## Optional TLS Config + ## Trusted root certificates for server # tls_ca = "/path/to/cafile" + ## Used for TLS client certificate authentication # tls_cert = "/path/to/certfile" + ## Used for TLS client certificate authentication # tls_key = "/path/to/keyfile" ## Use TLS but skip chain & host verification # insecure_skip_verify = false @@ -127,6 +130,26 @@ subjects: namespace: default ``` +## Quickstart in k3s + +When monitoring [k3s](https://k3s.io) server instances one can re-use already generated administration token. +This is less secure than using the more restrictive dedicated telegraf user but more convienient to set up. + +```console +# an empty token will make telegraf use the client cert/key files instead +$ touch /run/telegraf-kubernetes-token +# replace `telegraf` with the user the telegraf process is running as +$ install -o telegraf -m400 /var/lib/rancher/k3s/server/tls/client-admin.crt /run/telegraf-kubernetes-cert +$ install -o telegraf -m400 /var/lib/rancher/k3s/server/tls/client-admin.key /run/telegraf-kubernetes-key +``` + +```toml +[kube_inventory] +bearer_token = "/run/telegraf-kubernetes-token" +tls_cert = "/run/telegraf-kubernetes-cert" +tls_key = "/run/telegraf-kubernetes-key" +``` + ### Metrics: - kubernetes_daemonset From 1453c47f017b26601b475ad42ab7ecd88f0d15d1 Mon Sep 17 00:00:00 2001 From: Mya Date: Mon, 21 Jun 2021 11:09:17 -0600 Subject: [PATCH 479/761] gjson dependancy updated to v1.8.0 (#9372) --- go.mod | 2 +- go.sum | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 6d11a213eac73..676bebd58e7a3 100644 --- a/go.mod +++ b/go.mod @@ -118,7 +118,7 @@ require ( github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/testcontainers/testcontainers-go v0.11.0 - github.com/tidwall/gjson v1.6.0 + github.com/tidwall/gjson v1.8.0 github.com/tinylib/msgp v1.1.5 github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 diff --git a/go.sum b/go.sum index 98ee98e484110..6470644bf5567 100644 --- a/go.sum +++ b/go.sum @@ -1431,12 +1431,13 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/testcontainers/testcontainers-go v0.11.0 h1:HO5YOx2DYBHqcg4MzVWPj3FuHAv7USWVu94vCSsgiaM= github.com/testcontainers/testcontainers-go v0.11.0/go.mod h1:HztBCODzuA+YpMXGK8amjO8j50jz2gcT0BOzSKUiYIs= -github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= -github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= -github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= -github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/gjson v1.8.0 h1:Qt+orfosKn0rbNTZqHYDqBrmm3UDA4KRkv70fDzG+PQ= +github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= +github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= +github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8= +github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= From 81f882a271817ef8613f5e640a637b5437416bf9 Mon Sep 17 00:00:00 2001 From: Leandro Piccilli Date: Mon, 21 Jun 2021 23:35:26 +0200 Subject: [PATCH 480/761] Add Elasticsearch query input plugin (#3536) --- .gitignore | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/elasticsearch_query/README.md | 158 ++++ .../elasticsearch_query/aggregation_parser.go | 153 ++++ .../elasticsearch_query/aggregation_query.go | 206 +++++ .../elasticsearch_query.go | 314 ++++++++ .../elasticsearch_query_test.go | 713 ++++++++++++++++++ .../elasticsearch_query/testdata/nginx_logs | 500 ++++++++++++ 8 files changed, 2046 insertions(+) create mode 100755 plugins/inputs/elasticsearch_query/README.md create mode 100644 plugins/inputs/elasticsearch_query/aggregation_parser.go create mode 100644 plugins/inputs/elasticsearch_query/aggregation_query.go create mode 100644 plugins/inputs/elasticsearch_query/elasticsearch_query.go create mode 100644 plugins/inputs/elasticsearch_query/elasticsearch_query_test.go create mode 100644 plugins/inputs/elasticsearch_query/testdata/nginx_logs diff --git a/.gitignore b/.gitignore index c733e317ce1a7..7c3fbd21c3535 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ /vendor .DS_Store process.yml +/.vscode diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 7c4e0bcf45c76..95cfcf6626444 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -44,6 +44,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/dpdk" _ "github.com/influxdata/telegraf/plugins/inputs/ecs" _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" + _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch_query" _ "github.com/influxdata/telegraf/plugins/inputs/ethtool" _ "github.com/influxdata/telegraf/plugins/inputs/eventhub_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/exec" diff --git a/plugins/inputs/elasticsearch_query/README.md b/plugins/inputs/elasticsearch_query/README.md new file mode 100755 index 0000000000000..881cb6609b5b0 --- /dev/null +++ b/plugins/inputs/elasticsearch_query/README.md @@ -0,0 +1,158 @@ +# Elasticsearch query input plugin + +This [elasticsearch](https://www.elastic.co/) query plugin queries endpoints to obtain metrics from data stored in an Elasticsearch cluster. + +The following is supported: + +- return number of hits for a search query +- calculate the avg/max/min/sum for a numeric field, filtered by a query, aggregated per tag +- count number of terms for a particular field + +## Elasticsearch support + +This plugins is tested against Elasticsearch 5.x and 6.x releases. +Currently it is known to break on 7.x or greater versions. + +## Configuration + +```toml +[[inputs.elasticsearch_query]] + ## The full HTTP endpoint URL for your Elasticsearch instance + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be written to each interval. + urls = [ "http://node1.es.example.com:9200" ] # required. + + ## Elasticsearch client timeout, defaults to "5s". + # timeout = "5s" + + ## Set to true to ask Elasticsearch a list of all cluster nodes, + ## thus it is not necessary to list all nodes in the urls config option + # enable_sniffer = false + + ## Set the interval to check if the Elasticsearch nodes are available + ## This option is only used if enable_sniffer is also set (0s to disable it) + # health_check_interval = "10s" + + ## HTTP basic authentication details (eg. when using x-pack) + # username = "telegraf" + # password = "mypassword" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + [[inputs.elasticsearch_query.aggregation]] + ## measurement name for the results of the aggregation query + measurement_name = "measurement" + + ## Elasticsearch indexes to query (accept wildcards). + index = "index-*" + + ## The date/time field in the Elasticsearch index (mandatory). + date_field = "@timestamp" + + ## Time window to query (eg. "1m" to query documents from last minute). + ## Normally should be set to same as collection interval + query_period = "1m" + + ## Lucene query to filter results + # filter_query = "*" + + ## Fields to aggregate values (must be numeric fields) + # metric_fields = ["metric"] + + ## Aggregation function to use on the metric fields + ## Must be set if 'metric_fields' is set + ## Valid values are: avg, sum, min, max, sum + # metric_function = "avg" + + ## Fields to be used as tags + ## Must be text, non-analyzed fields. Metric aggregations are performed per tag + # tags = ["field.keyword", "field2.keyword"] + + ## Set to true to not ignore documents when the tag(s) above are missing + # include_missing_tag = false + + ## String value of the tag when the tag does not exist + ## Used when include_missing_tag is true + # missing_tag_value = "null" +``` + +## Examples + +Please note that the `[[inputs.elasticsearch_query]]` is still required for all of the examples below. + +### Search the average response time, per URI and per response status code + +```toml +[[inputs.elasticsearch_query.aggregation]] + measurement_name = "http_logs" + index = "my-index-*" + filter_query = "*" + metric_fields = ["response_time"] + metric_function = "avg" + tags = ["URI.keyword", "response.keyword"] + include_missing_tag = true + missing_tag_value = "null" + date_field = "@timestamp" + query_period = "1m" +``` + +### Search the maximum response time per method and per URI + +```toml +[[inputs.elasticsearch_query.aggregation]] + measurement_name = "http_logs" + index = "my-index-*" + filter_query = "*" + metric_fields = ["response_time"] + metric_function = "max" + tags = ["method.keyword","URI.keyword"] + include_missing_tag = false + missing_tag_value = "null" + date_field = "@timestamp" + query_period = "1m" +``` + +### Search number of documents matching a filter query in all indices + +```toml +[[inputs.elasticsearch_query.aggregation]] + measurement_name = "http_logs" + index = "*" + filter_query = "product_1 AND HEAD" + query_period = "1m" + date_field = "@timestamp" +``` + +### Search number of documents matching a filter query, returning per response status code + +```toml +[[inputs.elasticsearch_query.aggregation]] + measurement_name = "http_logs" + index = "*" + filter_query = "downloads" + tags = ["response.keyword"] + include_missing_tag = false + date_field = "@timestamp" + query_period = "1m" +``` + +### Required parameters + +- `measurement_name`: The target measurement to be stored the results of the aggregation query. +- `index`: The index name to query on Elasticsearch +- `query_period`: The time window to query (eg. "1m" to query documents from last minute). Normally should be set to same as collection +- `date_field`: The date/time field in the Elasticsearch index + +### Optional parameters + +- `filter_query`: Lucene query to filter the results (default: "\*") +- `metric_fields`: The list of fields to perform metric aggregation (these must be indexed as numeric fields) +- `metric_funcion`: The single-value metric aggregation function to be performed on the `metric_fields` defined. Currently supported aggregations are "avg", "min", "max", "sum". (see [https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html) +- `tags`: The list of fields to be used as tags (these must be indexed as non-analyzed fields). A "terms aggregation" will be done per tag defined +- `include_missing_tag`: Set to true to not ignore documents where the tag(s) specified above does not exist. (If false, documents without the specified tag field will be ignored in `doc_count` and in the metric aggregation) +- `missing_tag_value`: The value of the tag that will be set for documents in which the tag field does not exist. Only used when `include_missing_tag` is set to `true`. diff --git a/plugins/inputs/elasticsearch_query/aggregation_parser.go b/plugins/inputs/elasticsearch_query/aggregation_parser.go new file mode 100644 index 0000000000000..c4dff05ee6fee --- /dev/null +++ b/plugins/inputs/elasticsearch_query/aggregation_parser.go @@ -0,0 +1,153 @@ +package elasticsearch_query + +import ( + "fmt" + + "github.com/influxdata/telegraf" + elastic5 "gopkg.in/olivere/elastic.v5" +) + +type resultMetric struct { + name string + fields map[string]interface{} + tags map[string]string +} + +func parseSimpleResult(acc telegraf.Accumulator, measurement string, searchResult *elastic5.SearchResult) { + fields := make(map[string]interface{}) + tags := make(map[string]string) + + fields["doc_count"] = searchResult.Hits.TotalHits + + acc.AddFields(measurement, fields, tags) +} + +func parseAggregationResult(acc telegraf.Accumulator, aggregationQueryList []aggregationQueryData, searchResult *elastic5.SearchResult) error { + measurements := map[string]map[string]string{} + + // organize the aggregation query data by measurement + for _, aggregationQuery := range aggregationQueryList { + if measurements[aggregationQuery.measurement] == nil { + measurements[aggregationQuery.measurement] = map[string]string{ + aggregationQuery.name: aggregationQuery.function, + } + } else { + t := measurements[aggregationQuery.measurement] + t[aggregationQuery.name] = aggregationQuery.function + measurements[aggregationQuery.measurement] = t + } + } + + // recurse over query aggregation results per measurement + for measurement, aggNameFunction := range measurements { + var m resultMetric + + m.fields = make(map[string]interface{}) + m.tags = make(map[string]string) + m.name = measurement + + _, err := recurseResponse(acc, aggNameFunction, searchResult.Aggregations, m) + if err != nil { + return err + } + } + return nil +} + +func recurseResponse(acc telegraf.Accumulator, aggNameFunction map[string]string, bucketResponse elastic5.Aggregations, m resultMetric) (resultMetric, error) { + var err error + + aggNames := getAggNames(bucketResponse) + if len(aggNames) == 0 { + // we've reached a single bucket or response without aggregation, nothing here + return m, nil + } + + // metrics aggregations response can contain multiple field values, so we iterate over them + for _, aggName := range aggNames { + aggFunction, found := aggNameFunction[aggName] + if !found { + return m, fmt.Errorf("child aggregation function '%s' not found %v", aggName, aggNameFunction) + } + + resp := getResponseAggregation(aggFunction, aggName, bucketResponse) + if resp == nil { + return m, fmt.Errorf("child aggregation '%s' not found", aggName) + } + + switch resp := resp.(type) { + case *elastic5.AggregationBucketKeyItems: + // we've found a terms aggregation, iterate over the buckets and try to retrieve the inner aggregation values + for _, bucket := range resp.Buckets { + var s string + var ok bool + m.fields["doc_count"] = bucket.DocCount + if s, ok = bucket.Key.(string); !ok { + return m, fmt.Errorf("bucket key is not a string (%s, %s)", aggName, aggFunction) + } + m.tags[aggName] = s + + // we need to recurse down through the buckets, as it may contain another terms aggregation + m, err = recurseResponse(acc, aggNameFunction, bucket.Aggregations, m) + if err != nil { + return m, err + } + + // if there are fields present after finishing the bucket, it is a complete metric + // store it and clean the fields to start a new metric + if len(m.fields) > 0 { + acc.AddFields(m.name, m.fields, m.tags) + m.fields = make(map[string]interface{}) + } + + // after finishing the bucket, remove its tag from the tags map + delete(m.tags, aggName) + } + + case *elastic5.AggregationValueMetric: + if resp.Value != nil { + m.fields[aggName] = *resp.Value + } else { + m.fields[aggName] = float64(0) + } + + default: + return m, fmt.Errorf("aggregation type %T not supported", resp) + } + } + + // if there are fields here it comes from a metrics aggregation without a parent terms aggregation + if len(m.fields) > 0 { + acc.AddFields(m.name, m.fields, m.tags) + m.fields = make(map[string]interface{}) + } + return m, nil +} + +func getResponseAggregation(function string, aggName string, aggs elastic5.Aggregations) (agg interface{}) { + switch function { + case "avg": + agg, _ = aggs.Avg(aggName) + case "sum": + agg, _ = aggs.Sum(aggName) + case "min": + agg, _ = aggs.Min(aggName) + case "max": + agg, _ = aggs.Max(aggName) + case "terms": + agg, _ = aggs.Terms(aggName) + } + + return agg +} + +// getAggNames returns the aggregation names from a response aggregation +func getAggNames(agg elastic5.Aggregations) (aggs []string) { + for k := range agg { + if (k != "key") && (k != "doc_count") { + aggs = append(aggs, k) + } + } + + return aggs +} diff --git a/plugins/inputs/elasticsearch_query/aggregation_query.go b/plugins/inputs/elasticsearch_query/aggregation_query.go new file mode 100644 index 0000000000000..b5fa9db3c667a --- /dev/null +++ b/plugins/inputs/elasticsearch_query/aggregation_query.go @@ -0,0 +1,206 @@ +package elasticsearch_query + +import ( + "context" + "fmt" + "strings" + "time" + + elastic5 "gopkg.in/olivere/elastic.v5" +) + +type aggKey struct { + measurement string + name string + function string + field string +} + +type aggregationQueryData struct { + aggKey + isParent bool + aggregation elastic5.Aggregation +} + +func (e *ElasticsearchQuery) runAggregationQuery(ctx context.Context, aggregation esAggregation) (*elastic5.SearchResult, error) { + now := time.Now().UTC() + from := now.Add(time.Duration(-aggregation.QueryPeriod)) + filterQuery := aggregation.FilterQuery + if filterQuery == "" { + filterQuery = "*" + } + + query := elastic5.NewBoolQuery() + query = query.Filter(elastic5.NewQueryStringQuery(filterQuery)) + query = query.Filter(elastic5.NewRangeQuery(aggregation.DateField).From(from).To(now)) + + search := e.esClient.Search().Index(aggregation.Index).Query(query).Size(0) + + // add only parent elastic.Aggregations to the search request, all the rest are subaggregations of these + for _, v := range aggregation.aggregationQueryList { + if v.isParent && v.aggregation != nil { + search.Aggregation(v.aggKey.name, v.aggregation) + } + } + + searchResult, err := search.Do(ctx) + if err != nil && searchResult != nil { + return searchResult, fmt.Errorf("%s - %s", searchResult.Error.Type, searchResult.Error.Reason) + } + + return searchResult, err +} + +// getMetricFields function returns a map of fields and field types on Elasticsearch that matches field.MetricFields +func (e *ElasticsearchQuery) getMetricFields(ctx context.Context, aggregation esAggregation) (map[string]string, error) { + mapMetricFields := make(map[string]string) + + for _, metricField := range aggregation.MetricFields { + resp, err := e.esClient.GetFieldMapping().Index(aggregation.Index).Field(metricField).Do(ctx) + if err != nil { + return mapMetricFields, fmt.Errorf("error retrieving field mappings for %s: %s", aggregation.Index, err.Error()) + } + + for _, index := range resp { + var ok bool + var mappings interface{} + if mappings, ok = index.(map[string]interface{})["mappings"]; !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", index) + } + + var types map[string]interface{} + if types, ok = mappings.(map[string]interface{}); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", mappings) + } + + var fields map[string]interface{} + for _, _type := range types { + if fields, ok = _type.(map[string]interface{}); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", _type) + } + + var field map[string]interface{} + for _, _field := range fields { + if field, ok = _field.(map[string]interface{}); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", _field) + } + + fullname := field["full_name"] + mapping := field["mapping"] + + var fname string + if fname, ok = fullname.(string); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected string, got %T)", fullname) + } + + var fieldTypes map[string]interface{} + if fieldTypes, ok = mapping.(map[string]interface{}); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", mapping) + } + + var fieldType interface{} + for _, _fieldType := range fieldTypes { + if fieldType, ok = _fieldType.(map[string]interface{})["type"]; !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", _fieldType) + } + + var ftype string + if ftype, ok = fieldType.(string); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected string, got %T)", fieldType) + } + mapMetricFields[fname] = ftype + } + } + } + } + } + + return mapMetricFields, nil +} + +func (aggregation *esAggregation) buildAggregationQuery() error { + // create one aggregation per metric field found & function defined for numeric fields + for k, v := range aggregation.mapMetricFields { + switch v { + case "long": + case "float": + case "integer": + case "short": + case "double": + case "scaled_float": + default: + continue + } + + agg, err := getFunctionAggregation(aggregation.MetricFunction, k) + if err != nil { + return err + } + + aggregationQuery := aggregationQueryData{ + aggKey: aggKey{ + measurement: aggregation.MeasurementName, + function: aggregation.MetricFunction, + field: k, + name: strings.Replace(k, ".", "_", -1) + "_" + aggregation.MetricFunction, + }, + isParent: true, + aggregation: agg, + } + + aggregation.aggregationQueryList = append(aggregation.aggregationQueryList, aggregationQuery) + } + + // create a terms aggregation per tag + for _, term := range aggregation.Tags { + agg := elastic5.NewTermsAggregation() + if aggregation.IncludeMissingTag && aggregation.MissingTagValue != "" { + agg.Missing(aggregation.MissingTagValue) + } + + agg.Field(term).Size(1000) + + // add each previous parent aggregations as subaggregations of this terms aggregation + for key, aggMap := range aggregation.aggregationQueryList { + if aggMap.isParent { + agg.Field(term).SubAggregation(aggMap.name, aggMap.aggregation).Size(1000) + // update subaggregation map with parent information + aggregation.aggregationQueryList[key].isParent = false + } + } + + aggregationQuery := aggregationQueryData{ + aggKey: aggKey{ + measurement: aggregation.MeasurementName, + function: "terms", + field: term, + name: strings.Replace(term, ".", "_", -1), + }, + isParent: true, + aggregation: agg, + } + + aggregation.aggregationQueryList = append(aggregation.aggregationQueryList, aggregationQuery) + } + + return nil +} + +func getFunctionAggregation(function string, aggfield string) (elastic5.Aggregation, error) { + var agg elastic5.Aggregation + + switch function { + case "avg": + agg = elastic5.NewAvgAggregation().Field(aggfield) + case "sum": + agg = elastic5.NewSumAggregation().Field(aggfield) + case "min": + agg = elastic5.NewMinAggregation().Field(aggfield) + case "max": + agg = elastic5.NewMaxAggregation().Field(aggfield) + default: + return nil, fmt.Errorf("aggregation function '%s' not supported", function) + } + + return agg, nil +} diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query.go b/plugins/inputs/elasticsearch_query/elasticsearch_query.go new file mode 100644 index 0000000000000..3c04f952b5bee --- /dev/null +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query.go @@ -0,0 +1,314 @@ +package elasticsearch_query + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + "sync" + "time" + + elastic5 "gopkg.in/olivere/elastic.v5" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const sampleConfig = ` + ## The full HTTP endpoint URL for your Elasticsearch instance + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be written to each interval. + urls = [ "http://node1.es.example.com:9200" ] # required. + + ## Elasticsearch client timeout, defaults to "5s". + # timeout = "5s" + + ## Set to true to ask Elasticsearch a list of all cluster nodes, + ## thus it is not necessary to list all nodes in the urls config option + # enable_sniffer = false + + ## Set the interval to check if the Elasticsearch nodes are available + ## This option is only used if enable_sniffer is also set (0s to disable it) + # health_check_interval = "10s" + + ## HTTP basic authentication details (eg. when using x-pack) + # username = "telegraf" + # password = "mypassword" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + [[inputs.elasticsearch_query.aggregation]] + ## measurement name for the results of the aggregation query + measurement_name = "measurement" + + ## Elasticsearch indexes to query (accept wildcards). + index = "index-*" + + ## The date/time field in the Elasticsearch index (mandatory). + date_field = "@timestamp" + + ## Time window to query (eg. "1m" to query documents from last minute). + ## Normally should be set to same as collection interval + query_period = "1m" + + ## Lucene query to filter results + # filter_query = "*" + + ## Fields to aggregate values (must be numeric fields) + # metric_fields = ["metric"] + + ## Aggregation function to use on the metric fields + ## Must be set if 'metric_fields' is set + ## Valid values are: avg, sum, min, max, sum + # metric_function = "avg" + + ## Fields to be used as tags + ## Must be text, non-analyzed fields. Metric aggregations are performed per tag + # tags = ["field.keyword", "field2.keyword"] + + ## Set to true to not ignore documents when the tag(s) above are missing + # include_missing_tag = false + + ## String value of the tag when the tag does not exist + ## Used when include_missing_tag is true + # missing_tag_value = "null" +` + +// ElasticsearchQuery struct +type ElasticsearchQuery struct { + URLs []string `toml:"urls"` + Username string `toml:"username"` + Password string `toml:"password"` + EnableSniffer bool `toml:"enable_sniffer"` + Timeout config.Duration `toml:"timeout"` + HealthCheckInterval config.Duration `toml:"health_check_interval"` + Aggregations []esAggregation `toml:"aggregation"` + + Log telegraf.Logger `toml:"-"` + + tls.ClientConfig + httpclient *http.Client + esClient *elastic5.Client +} + +// esAggregation struct +type esAggregation struct { + Index string `toml:"index"` + MeasurementName string `toml:"measurement_name"` + DateField string `toml:"date_field"` + QueryPeriod config.Duration `toml:"query_period"` + FilterQuery string `toml:"filter_query"` + MetricFields []string `toml:"metric_fields"` + MetricFunction string `toml:"metric_function"` + Tags []string `toml:"tags"` + IncludeMissingTag bool `toml:"include_missing_tag"` + MissingTagValue string `toml:"missing_tag_value"` + mapMetricFields map[string]string + aggregationQueryList []aggregationQueryData +} + +// SampleConfig returns sample configuration for this plugin. +func (e *ElasticsearchQuery) SampleConfig() string { + return sampleConfig +} + +// Description returns the plugin description. +func (e *ElasticsearchQuery) Description() string { + return `Derive metrics from aggregating Elasticsearch query results` +} + +// Init the plugin. +func (e *ElasticsearchQuery) Init() error { + if e.URLs == nil { + return fmt.Errorf("elasticsearch urls is not defined") + } + + err := e.connectToES() + if err != nil { + e.Log.Errorf("E! error connecting to elasticsearch: %s", err) + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout)) + defer cancel() + + for i, agg := range e.Aggregations { + if agg.MeasurementName == "" { + return fmt.Errorf("field 'measurement_name' is not set") + } + if agg.DateField == "" { + return fmt.Errorf("field 'date_field' is not set") + } + err = e.initAggregation(ctx, agg, i) + if err != nil { + e.Log.Errorf("%s", err) + return nil + } + } + return nil +} + +func (e *ElasticsearchQuery) initAggregation(ctx context.Context, agg esAggregation, i int) (err error) { + // retrieve field mapping and build queries only once + agg.mapMetricFields, err = e.getMetricFields(ctx, agg) + if err != nil { + return fmt.Errorf("not possible to retrieve fields: %v", err.Error()) + } + + for _, metricField := range agg.MetricFields { + if _, ok := agg.mapMetricFields[metricField]; !ok { + return fmt.Errorf("metric field '%s' not found on index '%s'", metricField, agg.Index) + } + } + + err = agg.buildAggregationQuery() + if err != nil { + return err + } + + e.Aggregations[i] = agg + return nil +} + +func (e *ElasticsearchQuery) connectToES() error { + var clientOptions []elastic5.ClientOptionFunc + + if e.esClient != nil { + if e.esClient.IsRunning() { + return nil + } + } + + if e.httpclient == nil { + httpclient, err := e.createHTTPClient() + if err != nil { + return err + } + e.httpclient = httpclient + } + + clientOptions = append(clientOptions, + elastic5.SetHttpClient(e.httpclient), + elastic5.SetSniff(e.EnableSniffer), + elastic5.SetURL(e.URLs...), + elastic5.SetHealthcheckInterval(time.Duration(e.HealthCheckInterval)), + ) + + if e.Username != "" { + clientOptions = append(clientOptions, elastic5.SetBasicAuth(e.Username, e.Password)) + } + + if time.Duration(e.HealthCheckInterval) == 0 { + clientOptions = append(clientOptions, elastic5.SetHealthcheck(false)) + } + + client, err := elastic5.NewClient(clientOptions...) + if err != nil { + return err + } + + // check for ES version on first node + esVersion, err := client.ElasticsearchVersion(e.URLs[0]) + if err != nil { + return fmt.Errorf("elasticsearch version check failed: %s", err) + } + + esVersionSplit := strings.Split(esVersion, ".") + + // quit if ES version is not supported + if len(esVersionSplit) == 0 { + return fmt.Errorf("elasticsearch version check failed") + } + + i, err := strconv.Atoi(esVersionSplit[0]) + if err != nil || i < 5 || i > 6 { + return fmt.Errorf("elasticsearch version %s not supported (currently supported versions are 5.x and 6.x)", esVersion) + } + + e.esClient = client + return nil +} + +// Gather writes the results of the queries from Elasticsearch to the Accumulator. +func (e *ElasticsearchQuery) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + err := e.connectToES() + if err != nil { + return err + } + + for i, agg := range e.Aggregations { + wg.Add(1) + go func(agg esAggregation, i int) { + defer wg.Done() + err := e.esAggregationQuery(acc, agg, i) + if err != nil { + acc.AddError(fmt.Errorf("elasticsearch query aggregation %s: %s ", agg.MeasurementName, err.Error())) + } + }(agg, i) + } + + wg.Wait() + return nil +} + +func (e *ElasticsearchQuery) createHTTPClient() (*http.Client, error) { + tlsCfg, err := e.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + tr := &http.Transport{ + ResponseHeaderTimeout: time.Duration(e.Timeout), + TLSClientConfig: tlsCfg, + } + httpclient := &http.Client{ + Transport: tr, + Timeout: time.Duration(e.Timeout), + } + + return httpclient, nil +} + +func (e *ElasticsearchQuery) esAggregationQuery(acc telegraf.Accumulator, aggregation esAggregation, i int) error { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout)) + defer cancel() + + // try to init the aggregation query if it is not done already + if aggregation.aggregationQueryList == nil { + err := e.initAggregation(ctx, aggregation, i) + if err != nil { + return err + } + aggregation = e.Aggregations[i] + } + + searchResult, err := e.runAggregationQuery(ctx, aggregation) + if err != nil { + return err + } + + if searchResult.Aggregations == nil { + parseSimpleResult(acc, aggregation.MeasurementName, searchResult) + return nil + } + + return parseAggregationResult(acc, aggregation.aggregationQueryList, searchResult) +} + +func init() { + inputs.Add("elasticsearch_query", func() telegraf.Input { + return &ElasticsearchQuery{ + Timeout: config.Duration(time.Second * 5), + HealthCheckInterval: config.Duration(time.Second * 10), + } + }) +} diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go new file mode 100644 index 0000000000000..6a89dc8eea617 --- /dev/null +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go @@ -0,0 +1,713 @@ +package elasticsearch_query + +import ( + "bufio" + "context" + "os" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + elastic5 "gopkg.in/olivere/elastic.v5" +) + +var ( + testindex = "test-elasticsearch_query-" + strconv.Itoa(int(time.Now().Unix())) + setupOnce sync.Once +) + +type esAggregationQueryTest struct { + queryName string + testAggregationQueryInput esAggregation + testAggregationQueryData []aggregationQueryData + expectedMetrics []telegraf.Metric + wantBuildQueryErr bool + wantGetMetricFieldsErr bool + wantQueryResErr bool +} + +var queryPeriod = config.Duration(time.Second * 600) + +var testEsAggregationData = []esAggregationQueryTest{ + { + "query 1", + esAggregation{ + Index: testindex, + MeasurementName: "measurement1", + MetricFields: []string{"size"}, + FilterQuery: "product_1", + MetricFunction: "avg", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"URI.keyword"}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement1", name: "size_avg", function: "avg", field: "size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement1", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement1", + map[string]string{"URI_keyword": "/downloads/product_1"}, + map[string]interface{}{"size_avg": float64(202.30038022813687), "doc_count": int64(263)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 2", + esAggregation{ + Index: testindex, + MeasurementName: "measurement2", + MetricFields: []string{"size"}, + FilterQuery: "downloads", + MetricFunction: "max", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"URI.keyword"}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement2", name: "size_max", function: "max", field: "size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement2", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement2", + map[string]string{"URI_keyword": "/downloads/product_1"}, + map[string]interface{}{"size_max": float64(3301), "doc_count": int64(263)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement2", + map[string]string{"URI_keyword": "/downloads/product_2"}, + map[string]interface{}{"size_max": float64(3318), "doc_count": int64(237)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 3", + esAggregation{ + Index: testindex, + MeasurementName: "measurement3", + MetricFields: []string{"size"}, + FilterQuery: "downloads", + MetricFunction: "sum", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"response.keyword"}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement3", name: "size_sum", function: "sum", field: "size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement3", name: "response_keyword", function: "terms", field: "response.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement3", + map[string]string{"response_keyword": "200"}, + map[string]interface{}{"size_sum": float64(22790), "doc_count": int64(22)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement3", + map[string]string{"response_keyword": "304"}, + map[string]interface{}{"size_sum": float64(0), "doc_count": int64(219)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement3", + map[string]string{"response_keyword": "404"}, + map[string]interface{}{"size_sum": float64(86932), "doc_count": int64(259)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 4", + esAggregation{ + Index: testindex, + MeasurementName: "measurement4", + MetricFields: []string{"size", "response_time"}, + FilterQuery: "downloads", + MetricFunction: "min", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + IncludeMissingTag: true, + MissingTagValue: "missing", + Tags: []string{"response.keyword", "URI.keyword", "method.keyword"}, + mapMetricFields: map[string]string{"size": "long", "response_time": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement4", name: "size_min", function: "min", field: "size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement4", name: "response_time_min", function: "min", field: "response_time"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement4", name: "response_keyword", function: "terms", field: "response.keyword"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement4", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement4", name: "method_keyword", function: "terms", field: "method.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "404", "URI_keyword": "/downloads/product_1", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(318), "response_time_min": float64(126), "doc_count": int64(146)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "304", "URI_keyword": "/downloads/product_1", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(0), "response_time_min": float64(71), "doc_count": int64(113)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_1", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(490), "response_time_min": float64(1514), "doc_count": int64(3)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "404", "URI_keyword": "/downloads/product_2", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(318), "response_time_min": float64(237), "doc_count": int64(113)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "304", "URI_keyword": "/downloads/product_2", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(0), "response_time_min": float64(134), "doc_count": int64(106)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_2", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(490), "response_time_min": float64(2), "doc_count": int64(13)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_1", "method_keyword": "HEAD"}, + map[string]interface{}{"size_min": float64(0), "response_time_min": float64(8479), "doc_count": int64(1)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_2", "method_keyword": "HEAD"}, + map[string]interface{}{"size_min": float64(0), "response_time_min": float64(1059), "doc_count": int64(5)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 5", + esAggregation{ + Index: testindex, + MeasurementName: "measurement5", + FilterQuery: "product_2", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"URI.keyword"}, + mapMetricFields: map[string]string{}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement5", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement5", + map[string]string{"URI_keyword": "/downloads/product_2"}, + map[string]interface{}{"doc_count": int64(237)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 6", + esAggregation{ + Index: testindex, + MeasurementName: "measurement6", + FilterQuery: "response: 200", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"URI.keyword", "response.keyword"}, + mapMetricFields: map[string]string{}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement6", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement6", name: "response_keyword", function: "terms", field: "response.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement6", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_1"}, + map[string]interface{}{"doc_count": int64(4)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement6", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_2"}, + map[string]interface{}{"doc_count": int64(18)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 7 - simple query", + esAggregation{ + Index: testindex, + MeasurementName: "measurement7", + FilterQuery: "response: 200", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + []telegraf.Metric{ + testutil.MustMetric( + "measurement7", + map[string]string{}, + map[string]interface{}{"doc_count": int64(22)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 8", + esAggregation{ + Index: testindex, + MeasurementName: "measurement8", + MetricFields: []string{"size"}, + FilterQuery: "downloads", + MetricFunction: "max", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement8", name: "size_max", function: "max", field: "size"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement8", + map[string]string{}, + map[string]interface{}{"size_max": float64(3318)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 9 - invalid function", + esAggregation{ + Index: testindex, + MeasurementName: "measurement9", + MetricFields: []string{"size"}, + FilterQuery: "downloads", + MetricFunction: "average", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{"size": "long"}, + }, + nil, + nil, + true, + false, + true, + }, + { + "query 10 - non-existing metric field", + esAggregation{ + Index: testindex, + MeasurementName: "measurement10", + MetricFields: []string{"none"}, + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + nil, + false, + false, + true, + }, + { + "query 11 - non-existing index field", + esAggregation{ + Index: "notanindex", + MeasurementName: "measurement11", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + nil, + false, + false, + true, + }, + { + "query 12 - non-existing timestamp field", + esAggregation{ + Index: testindex, + MeasurementName: "measurement12", + MetricFields: []string{"size"}, + MetricFunction: "avg", + DateField: "@notatimestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement12", name: "size_avg", function: "avg", field: "size"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement12", + map[string]string{}, + map[string]interface{}{"size_avg": float64(0)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 13 - non-existing tag field", + esAggregation{ + Index: testindex, + MeasurementName: "measurement13", + MetricFields: []string{"size"}, + MetricFunction: "avg", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + IncludeMissingTag: false, + Tags: []string{"nothere"}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement13", name: "size_avg", function: "avg", field: "size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement13", name: "nothere", function: "terms", field: "nothere"}, + isParent: true, + }, + }, + nil, + false, + false, + false, + }, +} + +func setupIntegrationTest() error { + type nginxlog struct { + IPaddress string `json:"IP"` + Timestamp time.Time `json:"@timestamp"` + Method string `json:"method"` + URI string `json:"URI"` + Httpversion string `json:"http_version"` + Response string `json:"response"` + Size float64 `json:"size"` + ResponseTime float64 `json:"response_time"` + } + + e := &ElasticsearchQuery{ + URLs: []string{"http://" + testutil.GetLocalHost() + ":9200"}, + Timeout: config.Duration(time.Second * 30), + Log: testutil.Logger{}, + } + + err := e.connectToES() + if err != nil { + return err + } + + bulkRequest := e.esClient.Bulk() + + // populate elasticsearch with nginx_logs test data file + file, err := os.Open("testdata/nginx_logs") + if err != nil { + return err + } + + defer file.Close() + + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + parts := strings.Split(scanner.Text(), " ") + size, _ := strconv.Atoi(parts[9]) + responseTime, _ := strconv.Atoi(parts[len(parts)-1]) + + logline := nginxlog{ + IPaddress: parts[0], + Timestamp: time.Now().UTC(), + Method: strings.Replace(parts[5], `"`, "", -1), + URI: parts[6], + Httpversion: strings.Replace(parts[7], `"`, "", -1), + Response: parts[8], + Size: float64(size), + ResponseTime: float64(responseTime), + } + + bulkRequest.Add(elastic5.NewBulkIndexRequest(). + Index(testindex). + Type("testquery_data"). + Doc(logline)) + } + if scanner.Err() != nil { + return err + } + + _, err = bulkRequest.Do(context.Background()) + if err != nil { + return err + } + + // wait 5s (default) for Elasticsearch to index, so results are consistent + time.Sleep(time.Second * 5) + return nil +} + +func TestElasticsearchQuery(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + setupOnce.Do(func() { + err := setupIntegrationTest() + require.NoError(t, err) + }) + + var acc testutil.Accumulator + e := &ElasticsearchQuery{ + URLs: []string{"http://" + testutil.GetLocalHost() + ":9200"}, + Timeout: config.Duration(time.Second * 30), + Log: testutil.Logger{}, + } + + err := e.connectToES() + require.NoError(t, err) + + var aggs []esAggregation + var aggsErr []esAggregation + + for _, agg := range testEsAggregationData { + if !agg.wantQueryResErr { + aggs = append(aggs, agg.testAggregationQueryInput) + } + } + e.Aggregations = aggs + + require.NoError(t, e.Init()) + require.NoError(t, e.Gather(&acc)) + + if len(acc.Errors) > 0 { + t.Errorf("%s", acc.Errors) + } + + var expectedMetrics []telegraf.Metric + for _, result := range testEsAggregationData { + expectedMetrics = append(expectedMetrics, result.expectedMetrics...) + } + testutil.RequireMetricsEqual(t, expectedMetrics, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) + + // aggregations that should return an error + for _, agg := range testEsAggregationData { + if agg.wantQueryResErr { + aggsErr = append(aggsErr, agg.testAggregationQueryInput) + } + } + e.Aggregations = aggsErr + require.NoError(t, e.Init()) + require.NoError(t, e.Gather(&acc)) + + if len(acc.Errors) != len(aggsErr) { + t.Errorf("expecting %v query result errors, got %v: %s", len(aggsErr), len(acc.Errors), acc.Errors) + } +} + +func TestElasticsearchQuery_getMetricFields(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + setupOnce.Do(func() { + err := setupIntegrationTest() + require.NoError(t, err) + }) + + type args struct { + ctx context.Context + aggregation esAggregation + } + + e := &ElasticsearchQuery{ + URLs: []string{"http://" + testutil.GetLocalHost() + ":9200"}, + Timeout: config.Duration(time.Second * 30), + Log: testutil.Logger{}, + } + + err := e.connectToES() + require.NoError(t, err) + + type test struct { + name string + e *ElasticsearchQuery + args args + want map[string]string + wantErr bool + } + + var tests []test + + for _, d := range testEsAggregationData { + tests = append(tests, test{ + "getMetricFields " + d.queryName, + e, + args{context.Background(), d.testAggregationQueryInput}, + d.testAggregationQueryInput.mapMetricFields, + d.wantGetMetricFieldsErr, + }) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.e.getMetricFields(tt.args.ctx, tt.args.aggregation) + if (err != nil) != tt.wantErr { + t.Errorf("ElasticsearchQuery.buildAggregationQuery() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if !cmp.Equal(got, tt.want) { + t.Errorf("ElasticsearchQuery.getMetricFields() = error = %s", cmp.Diff(got, tt.want)) + } + }) + } +} + +func TestElasticsearchQuery_buildAggregationQuery(t *testing.T) { + type test struct { + name string + aggregation esAggregation + want []aggregationQueryData + wantErr bool + } + var tests []test + + for _, d := range testEsAggregationData { + tests = append(tests, test{ + "build " + d.queryName, + d.testAggregationQueryInput, + d.testAggregationQueryData, + d.wantBuildQueryErr, + }) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.aggregation.buildAggregationQuery() + if (err != nil) != tt.wantErr { + t.Errorf("ElasticsearchQuery.buildAggregationQuery() error = %v, wantErr %v", err, tt.wantErr) + return + } + + opts := []cmp.Option{ + cmp.AllowUnexported(aggKey{}, aggregationQueryData{}), + cmpopts.IgnoreFields(aggregationQueryData{}, "aggregation"), + cmpopts.SortSlices(func(x, y aggregationQueryData) bool { return x.aggKey.name > y.aggKey.name }), + } + + if !cmp.Equal(tt.aggregation.aggregationQueryList, tt.want, opts...) { + t.Errorf("ElasticsearchQuery.buildAggregationQuery(): %s error = %s ", tt.name, cmp.Diff(tt.aggregation.aggregationQueryList, tt.want, opts...)) + } + }) + } +} diff --git a/plugins/inputs/elasticsearch_query/testdata/nginx_logs b/plugins/inputs/elasticsearch_query/testdata/nginx_logs new file mode 100644 index 0000000000000..f6e9c8a110226 --- /dev/null +++ b/plugins/inputs/elasticsearch_query/testdata/nginx_logs @@ -0,0 +1,500 @@ +93.180.71.3 - - [17/May/2015:08:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 12060 +93.180.71.3 - - [17/May/2015:08:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 12355 +80.91.33.133 - - [17/May/2015:08:05:24 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 26272 +217.168.17.5 - - [17/May/2015:08:05:34 +0000] "GET /downloads/product_1 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 1514 +217.168.17.5 - - [17/May/2015:08:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 2204 +93.180.71.3 - - [17/May/2015:08:05:57 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 6012 +217.168.17.5 - - [17/May/2015:08:05:02 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 11220 +217.168.17.5 - - [17/May/2015:08:05:42 +0000] "GET /downloads/product_1 HTTP/1.1" 404 332 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 17843 +80.91.33.133 - - [17/May/2015:08:05:01 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 22599 +93.180.71.3 - - [17/May/2015:08:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 24828 +217.168.17.5 - - [17/May/2015:08:05:12 +0000] "GET /downloads/product_2 HTTP/1.1" 200 3316 "-" "-" 6947 +188.138.60.101 - - [17/May/2015:08:05:49 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28288 +80.91.33.133 - - [17/May/2015:08:05:14 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 23182 +46.4.66.76 - - [17/May/2015:08:05:45 +0000] "GET /downloads/product_1 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 16302 +93.180.71.3 - - [17/May/2015:08:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 16102 +91.234.194.89 - - [17/May/2015:08:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20268 +80.91.33.133 - - [17/May/2015:08:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 2794 +37.26.93.214 - - [17/May/2015:08:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 319 "-" "Go 1.1 package http" 22809 +188.138.60.101 - - [17/May/2015:08:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8807 +93.180.71.3 - - [17/May/2015:08:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 30172 +46.4.66.76 - - [17/May/2015:08:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 1973 +62.75.198.179 - - [17/May/2015:08:05:06 +0000] "GET /downloads/product_2 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10182 +80.91.33.133 - - [17/May/2015:08:05:55 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 14307 +173.203.139.108 - - [17/May/2015:08:05:53 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10828 +210.245.80.75 - - [17/May/2015:08:05:32 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 21956 +46.4.83.163 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5726 +91.234.194.89 - - [17/May/2015:08:05:18 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10841 +31.22.86.126 - - [17/May/2015:08:05:24 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 18132 +217.168.17.5 - - [17/May/2015:08:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 200 3301 "-" "-" 10094 +80.91.33.133 - - [17/May/2015:08:05:50 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 12355 +173.203.139.108 - - [17/May/2015:08:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27325 +80.91.33.133 - - [17/May/2015:08:05:35 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 14101 +5.83.131.103 - - [17/May/2015:08:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20175 +80.91.33.133 - - [17/May/2015:08:05:59 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 21384 +200.6.73.40 - - [17/May/2015:08:05:42 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6570 +80.91.33.133 - - [17/May/2015:08:05:48 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 26145 +93.180.71.3 - - [17/May/2015:08:05:58 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 32705 +62.75.198.179 - - [17/May/2015:08:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18865 +50.57.209.92 - - [17/May/2015:08:05:41 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21639 +188.138.60.101 - - [17/May/2015:08:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31242 +46.4.66.76 - - [17/May/2015:08:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 5910 +50.57.209.92 - - [17/May/2015:08:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22900 +91.239.186.133 - - [17/May/2015:08:05:04 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23919 +173.203.139.108 - - [17/May/2015:08:05:08 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 25169 +80.91.33.133 - - [17/May/2015:08:05:04 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24395 +93.190.71.150 - - [17/May/2015:08:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 25750 +91.234.194.89 - - [17/May/2015:08:05:57 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 26673 +46.4.83.163 - - [17/May/2015:08:05:20 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32509 +173.203.139.108 - - [17/May/2015:08:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 404 335 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32714 +54.187.216.43 - - [17/May/2015:08:05:07 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 5016 +50.57.209.92 - - [17/May/2015:08:05:59 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14449 +80.91.33.133 - - [17/May/2015:08:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13183 +173.203.139.108 - - [17/May/2015:08:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 332 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 7791 +5.83.131.103 - - [17/May/2015:08:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 586 +173.203.139.108 - - [17/May/2015:08:05:14 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5036 +80.91.33.133 - - [17/May/2015:08:05:46 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20358 +50.57.209.92 - - [17/May/2015:08:05:01 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2106 +80.91.33.133 - - [17/May/2015:08:05:41 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9757 +37.26.93.214 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_2 HTTP/1.1" 200 3318 "-" "Go 1.1 package http" 6222 +23.23.226.37 - - [17/May/2015:08:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 200 2578 "-" "urlgrabber/3.9.1 yum/3.4.3" 9523 +93.180.71.3 - - [17/May/2015:08:05:20 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 7228 +173.203.139.108 - - [17/May/2015:08:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31464 +62.75.198.179 - - [17/May/2015:08:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 404 346 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22462 +31.22.86.126 - - [17/May/2015:08:05:10 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 29906 +50.57.209.92 - - [17/May/2015:08:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16217 +91.239.186.133 - - [17/May/2015:08:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18335 +46.4.66.76 - - [17/May/2015:08:05:00 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 27375 +200.6.73.40 - - [17/May/2015:08:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32073 +173.203.139.108 - - [17/May/2015:08:05:13 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31071 +93.190.71.150 - - [17/May/2015:08:05:35 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1200 +91.234.194.89 - - [17/May/2015:08:05:26 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13143 +173.203.139.108 - - [17/May/2015:08:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16138 +80.91.33.133 - - [17/May/2015:08:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 21432 +217.168.17.5 - - [17/May/2015:08:05:27 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 1419 +46.4.83.163 - - [17/May/2015:08:05:54 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28449 +80.91.33.133 - - [17/May/2015:08:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25906 +50.57.209.92 - - [17/May/2015:08:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27099 +173.203.139.108 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32238 +188.138.60.101 - - [17/May/2015:08:05:04 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 237 +80.91.33.133 - - [17/May/2015:08:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7103 +134.119.20.172 - - [17/May/2015:08:05:26 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5423 +173.203.139.108 - - [17/May/2015:08:05:29 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6373 +80.91.33.133 - - [17/May/2015:08:05:44 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 22230 +91.121.161.213 - - [17/May/2015:08:05:14 +0000] "GET /downloads/product_2 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14196 +80.91.33.133 - - [17/May/2015:08:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 17820 +80.91.33.133 - - [17/May/2015:08:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9097 +37.26.93.214 - - [17/May/2015:08:05:03 +0000] "GET /downloads/product_2 HTTP/1.1" 200 490 "-" "Go 1.1 package http" 27632 +5.83.131.103 - - [17/May/2015:08:05:57 +0000] "GET /downloads/product_1 HTTP/1.1" 404 346 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14609 +50.57.209.92 - - [17/May/2015:08:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21926 +173.203.139.108 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4915 +54.64.16.235 - - [17/May/2015:08:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 12816 +93.180.71.3 - - [17/May/2015:08:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 30742 +202.143.95.26 - - [17/May/2015:08:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24544 +202.143.95.26 - - [17/May/2015:08:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25819 +202.143.95.26 - - [17/May/2015:08:05:01 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 26831 +80.91.33.133 - - [17/May/2015:08:05:14 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 1344 +91.239.186.133 - - [17/May/2015:08:05:03 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4987 +173.203.139.108 - - [17/May/2015:08:05:35 +0000] "GET /downloads/product_1 HTTP/1.1" 404 328 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13419 +80.91.33.133 - - [17/May/2015:08:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12879 +87.233.156.242 - - [17/May/2015:08:05:37 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 20611 +62.75.198.179 - - [17/May/2015:08:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1387 +50.57.209.92 - - [17/May/2015:08:05:16 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31286 +80.91.33.133 - - [17/May/2015:08:05:53 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 15247 +93.190.71.150 - - [17/May/2015:08:05:34 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 134 +46.4.66.76 - - [17/May/2015:08:05:38 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 23909 +80.91.33.133 - - [17/May/2015:08:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 15771 +91.234.194.89 - - [17/May/2015:08:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4641 +217.168.17.5 - - [17/May/2015:08:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 6382 +46.4.83.163 - - [17/May/2015:08:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14599 +50.57.209.92 - - [17/May/2015:08:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 404 335 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8263 +200.6.73.40 - - [17/May/2015:08:05:46 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23514 +91.121.161.213 - - [17/May/2015:08:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29473 +80.91.33.133 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 26659 +188.138.60.101 - - [17/May/2015:08:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5147 +144.76.151.58 - - [17/May/2015:08:05:54 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21698 +134.119.20.172 - - [17/May/2015:09:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21077 +80.91.33.133 - - [17/May/2015:09:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 7173 +80.91.33.133 - - [17/May/2015:09:05:55 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 1878 +5.83.131.103 - - [17/May/2015:09:05:08 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 24451 +93.180.71.3 - - [17/May/2015:09:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 30170 +80.91.33.133 - - [17/May/2015:09:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13156 +50.57.209.92 - - [17/May/2015:09:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 404 332 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 306 +5.83.131.103 - - [17/May/2015:09:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 345 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 24862 +62.75.167.106 - - [17/May/2015:09:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10227 +37.26.93.214 - - [17/May/2015:09:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Go 1.1 package http" 28504 +93.64.134.186 - - [17/May/2015:09:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27681 +87.233.156.242 - - [17/May/2015:09:05:36 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 1502 +80.91.33.133 - - [17/May/2015:09:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 18177 +80.91.33.133 - - [17/May/2015:09:05:15 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7934 +54.193.30.212 - - [17/May/2015:09:05:23 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 2 +62.75.198.179 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23920 +91.239.186.133 - - [17/May/2015:09:05:46 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9333 +83.161.14.106 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 19640 +80.91.33.133 - - [17/May/2015:09:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 11061 +80.91.33.133 - - [17/May/2015:09:05:46 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 24501 +93.190.71.150 - - [17/May/2015:09:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 15895 +50.57.209.92 - - [17/May/2015:09:05:40 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20558 +80.91.33.133 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 2338 +80.91.33.133 - - [17/May/2015:09:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 12192 +217.168.17.5 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 9824 +80.91.33.133 - - [17/May/2015:09:05:59 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 2246 +54.191.136.177 - - [17/May/2015:09:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 7239 +80.91.33.133 - - [17/May/2015:09:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 21154 +91.234.194.89 - - [17/May/2015:09:05:57 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2966 +80.91.33.133 - - [17/May/2015:09:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 10715 +80.91.33.133 - - [17/May/2015:09:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 14856 +46.4.83.163 - - [17/May/2015:09:05:12 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17717 +91.121.161.213 - - [17/May/2015:09:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 404 346 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9951 +188.138.60.101 - - [17/May/2015:09:05:57 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 25787 +144.76.151.58 - - [17/May/2015:09:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4930 +195.154.77.170 - - [17/May/2015:09:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21921 +50.57.209.92 - - [17/May/2015:09:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29773 +31.22.86.126 - - [17/May/2015:09:05:41 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7593 +54.64.16.235 - - [17/May/2015:09:05:51 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 26867 +202.143.95.26 - - [17/May/2015:09:05:20 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 31361 +202.143.95.26 - - [17/May/2015:09:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13167 +87.233.156.242 - - [17/May/2015:09:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 22554 +62.75.167.106 - - [17/May/2015:09:05:37 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29795 +152.90.220.17 - - [17/May/2015:09:05:01 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18753 +80.91.33.133 - - [17/May/2015:09:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 27083 +93.180.71.3 - - [17/May/2015:09:05:38 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 28187 +80.91.33.133 - - [17/May/2015:09:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25595 +5.83.131.103 - - [17/May/2015:09:05:15 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 26070 +5.83.131.103 - - [17/May/2015:09:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 27724 +200.6.73.40 - - [17/May/2015:09:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8086 +46.4.88.134 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 4853 +50.57.209.92 - - [17/May/2015:09:05:34 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9464 +93.64.134.186 - - [17/May/2015:09:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 12194 +80.91.33.133 - - [17/May/2015:09:05:50 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 26621 +62.75.198.180 - - [17/May/2015:09:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29857 +80.91.33.133 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 20514 +80.91.33.133 - - [17/May/2015:09:05:36 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 5526 +62.75.198.179 - - [17/May/2015:09:05:46 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14143 +80.91.33.133 - - [17/May/2015:09:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 20873 +91.239.186.133 - - [17/May/2015:09:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23230 +80.91.33.133 - - [17/May/2015:09:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25246 +83.161.14.106 - - [17/May/2015:09:05:45 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 19052 +80.91.33.133 - - [17/May/2015:09:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12362 +195.154.77.170 - - [17/May/2015:09:05:35 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10153 +93.190.71.150 - - [17/May/2015:09:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22418 +80.91.33.133 - - [17/May/2015:09:05:43 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 6565 +80.91.33.133 - - [17/May/2015:09:05:44 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 9883 +144.76.160.62 - - [17/May/2015:09:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 2564 +91.121.161.213 - - [17/May/2015:09:05:34 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17140 +46.4.83.163 - - [17/May/2015:09:05:10 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22794 +91.234.194.89 - - [17/May/2015:09:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17718 +50.57.209.92 - - [17/May/2015:09:05:40 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5434 +188.138.60.101 - - [17/May/2015:09:05:41 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 573 +210.245.80.75 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 28482 +144.76.151.58 - - [17/May/2015:09:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31161 +80.91.33.133 - - [17/May/2015:09:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24151 +144.76.117.56 - - [17/May/2015:09:05:59 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 6185 +80.91.33.133 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 6276 +31.22.86.126 - - [17/May/2015:09:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 27127 +80.91.33.133 - - [17/May/2015:09:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 9549 +62.75.167.106 - - [17/May/2015:09:05:03 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21397 +87.233.156.242 - - [17/May/2015:09:05:17 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 10781 +152.90.220.18 - - [17/May/2015:09:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 19773 +93.180.71.3 - - [17/May/2015:09:05:01 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 11889 +80.91.33.133 - - [17/May/2015:09:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14111 +31.22.86.126 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 319 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 17787 +50.57.209.92 - - [17/May/2015:09:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18330 +5.83.131.103 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 8993 +46.4.88.134 - - [17/May/2015:09:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 17460 +80.91.33.133 - - [17/May/2015:09:05:06 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 32412 +80.91.33.133 - - [17/May/2015:09:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12639 +62.75.198.180 - - [17/May/2015:09:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32511 +80.91.33.133 - - [17/May/2015:09:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 29012 +80.91.33.133 - - [17/May/2015:09:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9767 +5.83.131.103 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 12212 +5.83.131.103 - - [17/May/2015:09:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 2440 +5.83.131.103 - - [17/May/2015:09:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 8157 +195.154.77.170 - - [17/May/2015:09:05:23 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16242 +202.143.95.26 - - [17/May/2015:09:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 22261 +93.64.134.186 - - [17/May/2015:09:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 15048 +85.214.47.178 - - [17/May/2015:09:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27105 +83.161.14.106 - - [17/May/2015:09:05:15 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 32234 +80.70.214.71 - - [17/May/2015:09:05:20 +0000] "HEAD /downloads/product_1 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 8479 +87.233.156.242 - - [17/May/2015:09:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 20831 +54.64.16.235 - - [17/May/2015:09:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 18289 +50.57.209.92 - - [17/May/2015:09:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9858 +91.239.186.133 - - [17/May/2015:09:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20442 +91.121.161.213 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9004 +200.6.73.40 - - [17/May/2015:09:05:30 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13221 +62.75.198.179 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 954 +93.190.71.150 - - [17/May/2015:09:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 26398 +80.91.33.133 - - [17/May/2015:09:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 22775 +80.91.33.133 - - [17/May/2015:09:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13886 +80.91.33.133 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 19340 +144.76.160.62 - - [17/May/2015:09:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 17157 +80.91.33.133 - - [17/May/2015:09:05:59 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9971 +217.168.17.5 - - [17/May/2015:09:05:12 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 26268 +80.91.33.133 - - [17/May/2015:09:05:47 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 5983 +80.91.33.133 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 15296 +144.76.117.56 - - [17/May/2015:09:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 13922 +144.76.151.58 - - [17/May/2015:09:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10692 +80.91.33.133 - - [17/May/2015:10:05:40 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 22550 +62.75.167.106 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20757 +80.91.33.133 - - [17/May/2015:10:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25956 +37.187.238.39 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 16674 +80.70.214.71 - - [17/May/2015:10:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 404 327 "-" "Wget/1.13.4 (linux-gnu)" 15327 +91.234.194.89 - - [17/May/2015:10:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21807 +80.91.33.133 - - [17/May/2015:10:05:10 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 20469 +188.138.60.101 - - [17/May/2015:10:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10122 +80.91.33.133 - - [17/May/2015:10:05:01 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 1971 +80.91.33.133 - - [17/May/2015:10:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7263 +93.180.71.3 - - [17/May/2015:10:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 953 +46.4.88.134 - - [17/May/2015:10:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 23703 +80.91.33.133 - - [17/May/2015:10:05:53 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 126 +62.210.138.59 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 19171 +31.22.86.126 - - [17/May/2015:10:05:38 +0000] "GET /downloads/product_1 HTTP/1.1" 404 335 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 31107 +80.91.33.133 - - [17/May/2015:10:05:16 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 8252 +54.86.157.236 - - [17/May/2015:10:05:24 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 25651 +195.154.233.202 - - [17/May/2015:10:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 3446 +54.86.157.236 - - [17/May/2015:10:05:43 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 20770 +80.91.33.133 - - [17/May/2015:10:05:14 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 27979 +94.23.21.169 - - [17/May/2015:10:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28723 +54.86.157.236 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 13439 +195.154.77.170 - - [17/May/2015:10:05:17 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22432 +54.86.157.236 - - [17/May/2015:10:05:36 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 1572 +85.214.47.178 - - [17/May/2015:10:05:57 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27196 +5.83.131.103 - - [17/May/2015:10:05:55 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 9637 +5.83.131.103 - - [17/May/2015:10:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 18830 +5.83.131.103 - - [17/May/2015:10:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 844 +5.83.131.103 - - [17/May/2015:10:05:08 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20882 +80.91.33.133 - - [17/May/2015:10:05:40 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 1325 +80.91.33.133 - - [17/May/2015:10:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 11125 +84.53.65.28 - - [17/May/2015:10:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10771 +80.91.33.133 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24891 +54.86.157.236 - - [17/May/2015:10:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 23541 +217.168.17.5 - - [17/May/2015:10:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 22323 +91.121.161.213 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29114 +80.70.214.71 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 404 329 "-" "Wget/1.13.4 (linux-gnu)" 13629 +144.76.160.62 - - [17/May/2015:10:05:10 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 32440 +54.86.157.236 - - [17/May/2015:10:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 20402 +93.64.134.186 - - [17/May/2015:10:05:54 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5113 +93.190.71.150 - - [17/May/2015:10:05:41 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31729 +87.233.156.242 - - [17/May/2015:10:05:02 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 28958 +80.91.33.133 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 15630 +91.239.186.133 - - [17/May/2015:10:05:50 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 7488 +62.75.198.179 - - [17/May/2015:10:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9316 +144.76.117.56 - - [17/May/2015:10:05:46 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 9965 +178.32.54.253 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2881 +37.187.238.39 - - [17/May/2015:10:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 17544 +83.161.14.106 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 11419 +54.86.157.236 - - [17/May/2015:10:05:48 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 16406 +91.194.188.90 - - [17/May/2015:10:05:51 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 28324 +83.161.14.106 - - [17/May/2015:10:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 1893 +80.91.33.133 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14697 +93.180.71.3 - - [17/May/2015:10:05:34 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 16168 +62.210.138.59 - - [17/May/2015:10:05:40 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 663 +46.4.88.134 - - [17/May/2015:10:05:16 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 27962 +202.143.95.26 - - [17/May/2015:10:05:50 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 18539 +202.143.95.26 - - [17/May/2015:10:05:02 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13495 +202.143.95.26 - - [17/May/2015:10:05:10 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 3192 +62.75.198.180 - - [17/May/2015:10:05:36 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4349 +144.76.137.134 - - [17/May/2015:10:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1395 +80.91.33.133 - - [17/May/2015:10:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12898 +54.86.157.236 - - [17/May/2015:10:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 26930 +80.70.214.71 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 326 "-" "Wget/1.13.4 (linux-gnu)" 16662 +91.234.194.89 - - [17/May/2015:10:05:06 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9445 +188.138.60.101 - - [17/May/2015:10:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18804 +80.91.33.133 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 22429 +195.154.233.202 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 8456 +94.23.21.169 - - [17/May/2015:10:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32187 +144.76.151.58 - - [17/May/2015:10:05:10 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29276 +80.91.33.133 - - [17/May/2015:10:05:42 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 9700 +62.75.167.106 - - [17/May/2015:10:05:31 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10078 +80.91.33.133 - - [17/May/2015:10:05:41 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7600 +50.57.209.92 - - [17/May/2015:10:05:16 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8540 +202.143.95.26 - - [17/May/2015:10:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24400 +200.6.73.40 - - [17/May/2015:10:05:38 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29363 +195.154.77.170 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17025 +54.187.216.43 - - [17/May/2015:10:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 27997 +80.91.33.133 - - [17/May/2015:10:05:04 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 1806 +80.91.33.133 - - [17/May/2015:10:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 28234 +54.86.157.236 - - [17/May/2015:10:05:06 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 19286 +202.143.95.26 - - [17/May/2015:10:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 404 325 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 19522 +202.143.95.26 - - [17/May/2015:10:05:40 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 23841 +54.86.157.236 - - [17/May/2015:10:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 31135 +80.91.33.133 - - [17/May/2015:10:05:50 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 21510 +80.91.33.133 - - [17/May/2015:10:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 26977 +80.91.33.133 - - [17/May/2015:10:05:55 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 1078 +80.91.33.133 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7473 +84.53.65.28 - - [17/May/2015:10:05:30 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28347 +92.50.100.22 - - [17/May/2015:10:05:15 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8699 +85.214.47.178 - - [17/May/2015:10:05:30 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2078 +80.91.33.133 - - [17/May/2015:10:05:08 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7013 +54.86.157.236 - - [17/May/2015:10:05:36 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 29440 +5.83.131.103 - - [17/May/2015:10:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 24206 +37.187.238.39 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 5674 +80.91.33.133 - - [17/May/2015:10:05:04 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 15781 +195.210.47.239 - - [17/May/2015:10:05:49 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 1462 +80.91.33.133 - - [17/May/2015:10:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9446 +54.64.16.235 - - [17/May/2015:10:05:12 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 23687 +178.32.54.253 - - [17/May/2015:10:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17314 +144.92.16.161 - - [17/May/2015:10:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 4021 +54.86.157.236 - - [17/May/2015:10:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 13168 +87.233.156.242 - - [17/May/2015:10:05:49 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 8142 +31.22.86.126 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 332 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 28923 +80.91.33.133 - - [17/May/2015:10:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 17021 +91.121.161.213 - - [17/May/2015:10:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 711 +80.91.33.133 - - [17/May/2015:10:05:06 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 15815 +50.57.209.92 - - [17/May/2015:10:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 12290 +91.239.186.133 - - [17/May/2015:10:05:15 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9172 +144.76.117.56 - - [17/May/2015:10:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 27106 +144.76.160.62 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 2607 +62.210.138.59 - - [17/May/2015:10:05:45 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 26922 +54.86.157.236 - - [17/May/2015:10:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 2045 +62.75.198.179 - - [17/May/2015:10:05:14 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14090 +93.190.71.150 - - [17/May/2015:10:05:07 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2233 +144.76.117.56 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14988 +94.23.21.169 - - [17/May/2015:10:05:23 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 11645 +91.194.188.90 - - [17/May/2015:10:05:05 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 28064 +93.64.134.186 - - [17/May/2015:10:05:51 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16583 +54.86.157.236 - - [17/May/2015:10:05:48 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 23208 +80.70.214.71 - - [17/May/2015:10:05:23 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 1059 +93.180.71.3 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 16367 +195.154.233.202 - - [17/May/2015:10:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 26788 +193.192.58.163 - - [17/May/2015:11:05:31 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6753 +144.76.137.134 - - [17/May/2015:11:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18307 +54.86.157.236 - - [17/May/2015:11:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 10520 +83.161.14.106 - - [17/May/2015:11:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 5640 +144.76.151.58 - - [17/May/2015:11:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9992 +144.92.16.161 - - [17/May/2015:11:05:06 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 3262 +195.154.77.170 - - [17/May/2015:11:05:20 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17687 +62.75.198.180 - - [17/May/2015:11:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18911 +91.234.194.89 - - [17/May/2015:11:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22038 +80.91.33.133 - - [17/May/2015:11:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 2238 +188.138.60.101 - - [17/May/2015:11:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10581 +62.75.167.106 - - [17/May/2015:11:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14869 +46.4.88.134 - - [17/May/2015:11:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 6669 +80.91.33.133 - - [17/May/2015:11:05:35 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12780 +80.91.33.133 - - [17/May/2015:11:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24133 +84.53.65.28 - - [17/May/2015:11:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14350 +152.90.220.17 - - [17/May/2015:11:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23513 +80.91.33.133 - - [17/May/2015:11:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 31695 +80.91.33.133 - - [17/May/2015:11:05:21 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12243 +178.32.54.253 - - [17/May/2015:11:05:44 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2641 +54.72.39.202 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 27639 +91.120.61.154 - - [17/May/2015:11:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21180 +37.187.238.39 - - [17/May/2015:11:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 30661 +85.214.47.178 - - [17/May/2015:11:05:12 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20380 +80.91.33.133 - - [17/May/2015:11:05:47 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 11957 +5.83.131.103 - - [17/May/2015:11:05:10 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 19230 +200.6.73.40 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4087 +5.83.131.103 - - [17/May/2015:11:05:45 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 16383 +91.121.161.213 - - [17/May/2015:11:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 11487 +91.239.186.133 - - [17/May/2015:11:05:40 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 11774 +50.57.209.92 - - [17/May/2015:11:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28472 +80.91.33.133 - - [17/May/2015:11:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24011 +144.92.16.161 - - [17/May/2015:11:05:44 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 26633 +87.233.156.242 - - [17/May/2015:11:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 16170 +94.23.21.169 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 15992 +5.83.131.103 - - [17/May/2015:11:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20999 +80.91.33.133 - - [17/May/2015:11:05:40 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 23097 +202.143.95.26 - - [17/May/2015:11:05:30 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 3282 +202.143.95.26 - - [17/May/2015:11:05:44 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 4869 +80.91.33.133 - - [17/May/2015:11:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 9310 +80.91.33.133 - - [17/May/2015:11:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 23547 +80.91.33.133 - - [17/May/2015:11:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 5516 +80.91.33.133 - - [17/May/2015:11:05:13 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 26601 +62.210.138.59 - - [17/May/2015:11:05:23 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 26830 +144.76.160.62 - - [17/May/2015:11:05:06 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 15405 +93.190.71.150 - - [17/May/2015:11:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16982 +80.91.33.133 - - [17/May/2015:11:05:00 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 6019 +202.143.95.26 - - [17/May/2015:11:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 3822 +193.192.58.163 - - [17/May/2015:11:05:54 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13461 +195.154.233.202 - - [17/May/2015:11:05:46 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 32439 +80.70.214.71 - - [17/May/2015:11:05:59 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 31402 +62.75.198.179 - - [17/May/2015:11:05:17 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 452 +80.91.33.133 - - [17/May/2015:11:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25508 +144.92.16.161 - - [17/May/2015:11:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 29252 +195.154.77.170 - - [17/May/2015:11:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 19649 +50.57.209.92 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 24457 +144.76.117.56 - - [17/May/2015:11:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 10519 +80.91.33.133 - - [17/May/2015:11:05:36 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 6815 +144.76.137.134 - - [17/May/2015:11:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 798 +188.138.60.101 - - [17/May/2015:11:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 19441 +54.172.198.124 - - [17/May/2015:11:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 200 2582 "-" "urlgrabber/3.9.1 yum/3.4.3" 17903 +37.187.238.39 - - [17/May/2015:11:05:27 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 3443 +178.32.54.253 - - [17/May/2015:11:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9634 +62.75.198.180 - - [17/May/2015:11:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5417 +62.75.167.106 - - [17/May/2015:11:05:26 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1055 +195.210.47.239 - - [17/May/2015:11:05:36 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 4218 +91.234.194.89 - - [17/May/2015:11:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23355 +31.22.86.126 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 29547 +91.194.188.90 - - [17/May/2015:11:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Wget/1.13.4 (linux-gnu)" 26988 +92.50.100.22 - - [17/May/2015:11:05:35 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13600 +144.76.151.58 - - [17/May/2015:11:05:45 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18988 +93.64.134.186 - - [17/May/2015:11:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2281 +85.214.47.178 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16054 +94.23.21.169 - - [17/May/2015:11:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21647 +80.91.33.133 - - [17/May/2015:11:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 31277 +80.91.33.133 - - [17/May/2015:11:05:20 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 19500 +91.121.161.213 - - [17/May/2015:11:05:03 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29579 +83.161.14.106 - - [17/May/2015:11:05:52 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 1080 +54.64.16.235 - - [17/May/2015:11:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 15057 +84.53.65.28 - - [17/May/2015:11:05:31 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5805 +80.91.33.133 - - [17/May/2015:11:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 32764 +50.57.209.92 - - [17/May/2015:11:05:15 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28248 +91.239.186.133 - - [17/May/2015:11:05:17 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32046 +144.92.16.161 - - [17/May/2015:11:05:30 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 31342 +62.210.138.59 - - [17/May/2015:11:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 22861 +210.245.80.75 - - [17/May/2015:11:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 32649 +80.91.33.133 - - [17/May/2015:11:05:12 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 11268 +83.161.14.106 - - [17/May/2015:11:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 8233 +87.233.156.242 - - [17/May/2015:11:05:02 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 10052 +5.83.131.103 - - [17/May/2015:11:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20084 +80.91.33.133 - - [17/May/2015:11:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9007 +91.120.61.154 - - [17/May/2015:11:05:48 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8410 +195.154.233.202 - - [17/May/2015:11:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 20582 +80.91.33.133 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 8327 +193.192.58.163 - - [17/May/2015:11:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4041 +93.190.71.150 - - [17/May/2015:11:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 26973 +144.76.160.62 - - [17/May/2015:11:05:20 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 24342 +50.57.209.92 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27744 +62.75.198.179 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2455 +193.192.59.41 - - [17/May/2015:11:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 19596 +195.154.77.170 - - [17/May/2015:11:05:35 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23424 +80.91.33.133 - - [17/May/2015:11:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 4171 +200.6.73.40 - - [17/May/2015:11:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8274 +188.138.60.101 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2949 +80.91.33.133 - - [17/May/2015:11:05:53 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 5641 +80.91.33.133 - - [17/May/2015:11:05:42 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 28746 +80.91.33.133 - - [17/May/2015:11:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 18396 +80.91.33.133 - - [17/May/2015:11:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 17638 +80.91.33.133 - - [17/May/2015:11:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 7865 +144.76.137.134 - - [17/May/2015:11:05:57 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4280 +80.70.214.71 - - [17/May/2015:11:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Wget/1.13.4 (linux-gnu)" 32436 +144.76.117.56 - - [17/May/2015:11:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 30048 +94.23.21.169 - - [17/May/2015:11:05:21 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6186 +198.61.216.151 - - [17/May/2015:11:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 21567 +80.91.33.133 - - [17/May/2015:11:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 674 +91.194.188.90 - - [17/May/2015:11:05:32 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 5354 +62.75.198.180 - - [17/May/2015:11:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5345 +80.91.33.133 - - [17/May/2015:11:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 2326 +31.22.86.126 - - [17/May/2015:12:05:15 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 3114 +84.53.65.28 - - [17/May/2015:12:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9036 +144.92.16.161 - - [17/May/2015:12:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 9410 +50.57.209.92 - - [17/May/2015:12:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2039 +5.83.131.103 - - [17/May/2015:12:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14852 +5.83.131.103 - - [17/May/2015:12:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 71 +62.75.167.106 - - [17/May/2015:12:05:01 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6439 +178.32.54.253 - - [17/May/2015:12:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8721 +91.121.161.213 - - [17/May/2015:12:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1795 +91.234.194.89 - - [17/May/2015:12:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8556 +37.187.238.39 - - [17/May/2015:12:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 17627 +91.239.186.133 - - [17/May/2015:12:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10970 +87.233.156.242 - - [17/May/2015:12:05:34 +0000] "GET /downloads/product_2 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 409 +202.143.95.26 - - [17/May/2015:12:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 10283 +144.76.151.58 - - [17/May/2015:12:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22461 +62.210.138.59 - - [17/May/2015:12:05:12 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 22736 +80.91.33.133 - - [17/May/2015:12:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 21014 +83.161.14.106 - - [17/May/2015:12:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 18047 +80.91.33.133 - - [17/May/2015:12:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25206 +5.83.131.103 - - [17/May/2015:12:05:21 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 15330 +80.91.33.133 - - [17/May/2015:12:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 8763 +198.61.216.151 - - [17/May/2015:12:05:59 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 11132 +195.154.77.170 - - [17/May/2015:12:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23768 From ac9bf5a0ec9d1039bc6f6ffb21906f1fabaffce4 Mon Sep 17 00:00:00 2001 From: Jarno Huuskonen Date: Tue, 22 Jun 2021 19:41:45 +0300 Subject: [PATCH 481/761] Fix x509_cert input plugin SNI support (#9289) --- plugins/inputs/x509_cert/x509_cert.go | 15 ++++++++++++--- plugins/inputs/x509_cert/x509_cert_test.go | 10 ++++++++++ 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 7c1b0657c7e80..fc81ebb717be1 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -66,8 +66,7 @@ func (c *X509Cert) SampleConfig() string { func (c *X509Cert) sourcesToURLs() error { for _, source := range c.Sources { if strings.HasPrefix(source, "file://") || - strings.HasPrefix(source, "/") || - strings.Index(source, ":\\") != 1 { + strings.HasPrefix(source, "/") { source = filepath.ToSlash(strings.TrimPrefix(source, "file://")) g, err := globpath.Compile(source) if err != nil { @@ -82,7 +81,6 @@ func (c *X509Cert) sourcesToURLs() error { if err != nil { return fmt.Errorf("failed to parse cert location - %s", err.Error()) } - c.locations = append(c.locations, u) } } @@ -127,6 +125,9 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica conn := tls.Client(ipConn, c.tlsCfg) defer conn.Close() + // reset SNI between requests + defer func() { c.tlsCfg.ServerName = "" }() + hsErr := conn.Handshake() if hsErr != nil { return nil, hsErr @@ -313,6 +314,14 @@ func (c *X509Cert) Init() error { tlsCfg = &tls.Config{} } + if tlsCfg.ServerName != "" && c.ServerName == "" { + // Save SNI from tlsCfg.ServerName to c.ServerName and reset tlsCfg.ServerName. + // We need to reset c.tlsCfg.ServerName for each certificate when there's + // no explicit SNI (c.tlsCfg.ServerName or c.ServerName) otherwise we'll always (re)use + // first uri HostName for all certs (see issue 8914) + c.ServerName = tlsCfg.ServerName + tlsCfg.ServerName = "" + } c.tlsCfg = tlsCfg return nil diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 3253c9ac9c7ae..4f09b903b4c24 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -316,6 +316,16 @@ func TestGatherCertMustNotTimeout(t *testing.T) { assert.True(t, acc.HasMeasurement("x509_cert")) } +func TestSourcesToURLs(t *testing.T) { + m := &X509Cert{ + Sources: []string{"https://www.influxdata.com:443", "tcp://influxdata.com:443", "file:///dummy_test_path_file.pem", "/tmp/dummy_test_path_glob*.pem"}, + } + require.NoError(t, m.Init()) + + assert.Equal(t, len(m.globpaths), 2) + assert.Equal(t, len(m.locations), 2) +} + func TestServerName(t *testing.T) { tests := []struct { name string From 8638a417246b0ae34f84ad5ae714ccb813a3a691 Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 22 Jun 2021 10:48:29 -0600 Subject: [PATCH 482/761] tags no longer required in included_keys (#9406) --- plugins/parsers/json_v2/parser.go | 2 ++ .../json_v2/testdata/nested_and_nonnested_tags/telegraf.conf | 2 -- plugins/parsers/json_v2/testdata/object/telegraf.conf | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index e586b35ebddc2..d013f6b35e24f 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -484,6 +484,8 @@ func (p *Parser) isIncluded(key string, val gjson.Result) bool { if len(p.currentSettings.IncludedKeys) == 0 { return true } + // automatically adds tags to included_keys so it does NOT have to be repeated in the config + p.currentSettings.IncludedKeys = append(p.currentSettings.IncludedKeys, p.currentSettings.Tags...) for _, i := range p.currentSettings.IncludedKeys { if i == key { return true diff --git a/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/telegraf.conf b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/telegraf.conf index 45692dc5df0e2..e1748b463690b 100644 --- a/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/telegraf.conf +++ b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/telegraf.conf @@ -9,10 +9,8 @@ data_format = "json_v2" disable_prepend_keys = true path = "@this" included_keys = [ - "hostname", "systemVoltage", "systemCurrent", "tables", - "tables_outputname", ] tags = ["hostname", "tables_outputname"] diff --git a/plugins/parsers/json_v2/testdata/object/telegraf.conf b/plugins/parsers/json_v2/testdata/object/telegraf.conf index 6ad244fd71418..50ed245a3cf00 100644 --- a/plugins/parsers/json_v2/testdata/object/telegraf.conf +++ b/plugins/parsers/json_v2/testdata/object/telegraf.conf @@ -6,7 +6,7 @@ [[inputs.file.json_v2.object]] path = "root.station" disable_prepend_keys = true - included_keys = ["name", "etd_destination", "etd_estimate_minutes"] + included_keys = ["etd_estimate_minutes"] tags = ["name", "etd_destination"] [inputs.file.json_v2.object.fields] etd_estimate_minutes = "int" From 812fbd6791c76cbc57ee9c266ef50be47bf450d6 Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 22 Jun 2021 10:54:33 -0600 Subject: [PATCH 483/761] Update signalfx to v3.3.0->v3.3.34 (#9375) --- go.mod | 2 +- go.sum | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 676bebd58e7a3..500e465417f29 100644 --- a/go.mod +++ b/go.mod @@ -110,7 +110,7 @@ require ( github.com/sensu/sensu-go/api/core/v2 v2.6.0 github.com/shirou/gopsutil v3.21.3+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect - github.com/signalfx/golib/v3 v3.3.0 + github.com/signalfx/golib/v3 v3.3.34 github.com/sirupsen/logrus v1.7.0 github.com/sleepinggenius2/gosmi v0.4.3 github.com/snowflakedb/gosnowflake v1.5.0 diff --git a/go.sum b/go.sum index 6470644bf5567..58314d8eba7d1 100644 --- a/go.sum +++ b/go.sum @@ -1348,12 +1348,12 @@ github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/signalfx/com_signalfx_metrics_protobuf v0.0.0-20190222193949-1fb69526e884 h1:KgLGEw137KEUtQnWBGzneCetphBj4+kKHRnhpAkXJC0= -github.com/signalfx/com_signalfx_metrics_protobuf v0.0.0-20190222193949-1fb69526e884/go.mod h1:muYA2clvwCdj7nzAJ5vJIXYpJsUumhAl4Uu1wUNpWzA= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 h1:X886QgwZH5qr9HIQkk3mWcNEhUxx6D8rUZumzLV4Wiw= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2/go.mod h1:tCQQqyJAVF1+mxNdqOi18sS/zaSrE6EMyWwRA2QTl70= github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 h1:WsShHmu12ZztYPfh9b+I+VjYD1o8iOHhB67WZCMEEE8= github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083/go.mod h1:adPDS6s7WaajdFBV9mQ7i0dKfQ8xiDnF9ZNETVPpp7c= -github.com/signalfx/golib/v3 v3.3.0 h1:vSXsAb73bdrlnjk5rnZ7y3t09Qzu9qfBEbXdcyBHsmE= -github.com/signalfx/golib/v3 v3.3.0/go.mod h1:GzjWpV0skAXZn7+u9LnkOkiXAx9KKd5XZcd5r+RoF5o= +github.com/signalfx/golib/v3 v3.3.34 h1:s78S24+exS0jH21oeSB1qPeiekIKkeXGv0hg7f67HvU= +github.com/signalfx/golib/v3 v3.3.34/go.mod h1:PB7OovVijH7OGhzMewarEcIZG3eG6akWMDucIb5Jnb4= github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8KfCSyuwy/VILnROdgCvbQLA5ch0nkbG7lKT0BXw= github.com/signalfx/sapm-proto v0.4.0 h1:5lQX++6FeIjUZEIcnSgBqhOpmSjMkRBW3y/4ZiKMo5E= github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbPU959ZNhM0Fk= @@ -1504,6 +1504,7 @@ github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPS github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= From d63a7010d965b0ecf16424b4f4b67beb7df167ac Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 22 Jun 2021 20:45:03 +0200 Subject: [PATCH 484/761] Fix messing up the 'source' tag for https sources. (#9400) --- plugins/inputs/x509_cert/x509_cert.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index fc81ebb717be1..4ac115931a26a 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -102,14 +102,15 @@ func (c *X509Cert) serverName(u *url.URL) (string, error) { } func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certificate, error) { + protocol := u.Scheme switch u.Scheme { case "https": - u.Scheme = "tcp" + protocol = "tcp" fallthrough case "udp", "udp4", "udp6": fallthrough case "tcp", "tcp4", "tcp6": - ipConn, err := net.DialTimeout(u.Scheme, u.Host, timeout) + ipConn, err := net.DialTimeout(protocol, u.Host, timeout) if err != nil { return nil, err } From 84a37642d53edd9c598ef326655070da5a25b160 Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 22 Jun 2021 14:56:29 -0600 Subject: [PATCH 485/761] fixing insecure_skip_verify (#9413) --- plugins/outputs/http/http.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 5da273f2d40a6..76d97aa9040bc 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -13,7 +13,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" httpconfig "github.com/influxdata/telegraf/plugins/common/http" - "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) @@ -83,7 +82,6 @@ type HTTP struct { Password string `toml:"password"` Headers map[string]string `toml:"headers"` ContentEncoding string `toml:"content_encoding"` - tls.ClientConfig httpconfig.HTTPClientConfig client *http.Client From b846c5069dc284104c80791058aa6240117be5bd Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 22 Jun 2021 14:59:02 -0600 Subject: [PATCH 486/761] added a check for oid and name to prevent empty metrics (#9366) --- plugins/inputs/snmp/snmp.go | 6 ++++++ plugins/inputs/snmp/snmp_test.go | 26 +++++++++++++++++++++----- 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index ec881205c6f68..7f2df6b689eac 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -173,6 +173,12 @@ type Table struct { // Init() builds & initializes the nested fields. func (t *Table) Init() error { + //makes sure oid or name is set in config file + //otherwise snmp will produce metrics with an empty name + if t.Oid == "" && t.Name == "" { + return fmt.Errorf("SNMP table in config file is not named. One or both of the oid and name settings must be set") + } + if t.initialized { return nil } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index ef849f07b138c..f447f13c54e67 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -199,11 +199,12 @@ func TestSnmpInit_noTranslate(t *testing.T) { {Oid: ".1.1.1.3"}, }, Tables: []Table{ - {Fields: []Field{ - {Oid: ".1.1.1.4", Name: "four", IsTag: true}, - {Oid: ".1.1.1.5", Name: "five"}, - {Oid: ".1.1.1.6"}, - }}, + {Name: "testing", + Fields: []Field{ + {Oid: ".1.1.1.4", Name: "four", IsTag: true}, + {Oid: ".1.1.1.5", Name: "five"}, + {Oid: ".1.1.1.6"}, + }}, }, } @@ -235,6 +236,21 @@ func TestSnmpInit_noTranslate(t *testing.T) { assert.Equal(t, false, s.Tables[0].Fields[2].IsTag) } +func TestSnmpInit_noName_noOid(t *testing.T) { + s := &Snmp{ + Tables: []Table{ + {Fields: []Field{ + {Oid: ".1.1.1.4", Name: "four", IsTag: true}, + {Oid: ".1.1.1.5", Name: "five"}, + {Oid: ".1.1.1.6"}, + }}, + }, + } + + err := s.init() + require.Error(t, err) +} + func TestGetSNMPConnection_v2(t *testing.T) { s := &Snmp{ Agents: []string{"1.2.3.4:567", "1.2.3.4", "udp://127.0.0.1"}, From 5be079c2723a44d19e9862f8491e5a497ff67d72 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 22 Jun 2021 16:40:40 -0500 Subject: [PATCH 487/761] Update couchbase dependencies to v0.1.0 (#9412) --- docs/LICENSE_OF_DEPENDENCIES.md | 2 +- go.mod | 6 +++--- go.sum | 12 ++++++------ 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 92965418ebe32..fad0787c02428 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -54,7 +54,7 @@ following works: - github.com/containerd/containerd [Apache License 2.0](https://github.com/containerd/containerd/blob/master/LICENSE) - github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) - github.com/couchbase/gomemcached [MIT License](https://github.com/couchbase/gomemcached/blob/master/LICENSE) -- github.com/couchbase/goutils [COUCHBASE INC. COMMUNITY EDITION LICENSE](https://github.com/couchbase/goutils/blob/master/LICENSE.md) +- github.com/couchbase/goutils [Apache License 2.0](https://github.com/couchbase/goutils/blob/master/LICENSE.md) - github.com/davecgh/go-spew [ISC License](https://github.com/davecgh/go-spew/blob/master/LICENSE) - github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt) - github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 500e465417f29..9ba768dba6281 100644 --- a/go.mod +++ b/go.mod @@ -38,9 +38,9 @@ require ( github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 github.com/caio/go-tdigest v3.1.0+incompatible github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 - github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 - github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect - github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect + github.com/couchbase/go-couchbase v0.1.0 + github.com/couchbase/gomemcached v0.1.3 // indirect + github.com/couchbase/goutils v0.1.0 // indirect github.com/denisenkom/go-mssqldb v0.10.0 github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 diff --git a/go.sum b/go.sum index 58314d8eba7d1..3ac23ce93db20 100644 --- a/go.sum +++ b/go.sum @@ -399,12 +399,12 @@ github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+ github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ= -github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= -github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 h1:F8nmbiuX+gCz9xvWMi6Ak8HQntB4ATFXP46gaxifbp4= -github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= -github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8= -github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= +github.com/couchbase/go-couchbase v0.1.0 h1:g4bCvDwRL+ZL6HLhYeRlXxEYP31Wpy0VFxnFw6efEp8= +github.com/couchbase/go-couchbase v0.1.0/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A= +github.com/couchbase/gomemcached v0.1.3 h1:HIc5qMYNbuhB7zNaiEtj61DCYkquAwrQlf64q7JzdEY= +github.com/couchbase/gomemcached v0.1.3/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo= +github.com/couchbase/goutils v0.1.0 h1:0WLlKJilu7IBm98T8nS9+J36lBFVLRUSIUtyD/uWpAE= +github.com/couchbase/goutils v0.1.0/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= From 3c0f152a7d4d8e96e46aa4309a1d5433836cc607 Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 22 Jun 2021 15:45:43 -0600 Subject: [PATCH 488/761] updated jwt to v1.2.2 and updated jwt-go to v3.2.3 (#9373) --- go.mod | 2 ++ go.sum | 9 ++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9ba768dba6281..d729390765dc3 100644 --- a/go.mod +++ b/go.mod @@ -47,6 +47,7 @@ require ( github.com/docker/docker v20.10.6+incompatible github.com/dynatrace-oss/dynatrace-metric-utils-go v0.1.0 github.com/eclipse/paho.mqtt.golang v1.3.0 + github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.5.0 github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c @@ -93,6 +94,7 @@ require ( github.com/moby/ipvs v1.0.1 github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect + github.com/nats-io/jwt v1.2.2 // indirect github.com/nats-io/nats-server/v2 v2.1.4 github.com/nats-io/nats.go v1.10.0 github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 diff --git a/go.sum b/go.sum index 3ac23ce93db20..34f6eec3c0f2d 100644 --- a/go.sum +++ b/go.sum @@ -506,8 +506,9 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -1124,8 +1125,9 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+ github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU= +github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= github.com/nats-io/nats-server/v2 v2.1.4 h1:BILRnsJ2Yb/fefiFbBWADpViGF69uh4sxe8poVDQ06g= github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg= @@ -1134,8 +1136,9 @@ github.com/nats-io/nats.go v1.10.0 h1:L8qnKaofSfNFbXg0C5F71LdjPRnmQwSsA4ukmkt1Tv github.com/nats-io/nats.go v1.10.0/go.mod h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.4 h1:aEsHIssIk6ETN5m2/MD8Y4B2X7FfXrBAUdkyRvbVYzA= github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.2.0 h1:WXKF7diOaPU9cJdLD7nuzwasQy9vT1tBqzXZZf3AMJM= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= From 314bc0ff7f9d59a93b1f048662ac3400769fcb85 Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Wed, 23 Jun 2021 09:26:13 -0700 Subject: [PATCH 489/761] chore: readme updates (#9367) --- plugins/inputs/aliyuncms/README.md | 2 +- plugins/inputs/cisco_telemetry_mdt/README.md | 2 +- plugins/inputs/dpdk/README.md | 2 +- plugins/inputs/knx_listener/README.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/aliyuncms/README.md b/plugins/inputs/aliyuncms/README.md index c239baa63e05c..4e351ea6d8b37 100644 --- a/plugins/inputs/aliyuncms/README.md +++ b/plugins/inputs/aliyuncms/README.md @@ -1,4 +1,4 @@ -# Alibaba (aka Aliyun) CloudMonitor Service Statistics Input +# Alibaba (Aliyun) CloudMonitor Service Statistics Input Plugin Here and after we use `Aliyun` instead `Alibaba` as it is default naming across web console and docs. This plugin will pull Metric Statistics from Aliyun CMS. diff --git a/plugins/inputs/cisco_telemetry_mdt/README.md b/plugins/inputs/cisco_telemetry_mdt/README.md index d15f122081d05..f4ca7243b8cde 100644 --- a/plugins/inputs/cisco_telemetry_mdt/README.md +++ b/plugins/inputs/cisco_telemetry_mdt/README.md @@ -1,4 +1,4 @@ -# Cisco model-driven telemetry (MDT) +# Cisco Model-Driven Telemetry (MDT) Input Plugin Cisco model-driven telemetry (MDT) is an input plugin that consumes telemetry data from Cisco IOS XR, IOS XE and NX-OS platforms. It supports TCP & GRPC dialout transports. diff --git a/plugins/inputs/dpdk/README.md b/plugins/inputs/dpdk/README.md index bd98af050427d..00398760d2e9d 100644 --- a/plugins/inputs/dpdk/README.md +++ b/plugins/inputs/dpdk/README.md @@ -1,4 +1,4 @@ -# DPDK Input Plugin +# Data Plane Development Kit (DPDK) Input Plugin The `dpdk` plugin collects metrics exposed by applications built with [Data Plane Development Kit](https://www.dpdk.org/) which is an extensive set of open source libraries designed for accelerating packet processing workloads. diff --git a/plugins/inputs/knx_listener/README.md b/plugins/inputs/knx_listener/README.md index de015ddc2793b..7a06462ffbb3e 100644 --- a/plugins/inputs/knx_listener/README.md +++ b/plugins/inputs/knx_listener/README.md @@ -1,4 +1,4 @@ -# KNX input plugin +# KNX Input Plugin The KNX input plugin that listens for messages on the KNX home-automation bus. This plugin connects to the KNX bus via a KNX-IP interface. From f9fc64efd61e0e72c9c0742af8ede5e2db7cf84d Mon Sep 17 00:00:00 2001 From: Mya Date: Wed, 23 Jun 2021 16:08:28 -0600 Subject: [PATCH 490/761] apimachinary updated to v0.21.1 (#9370) --- go.mod | 4 ++-- go.sum | 19 ++++++++++++++----- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index d729390765dc3..0d21185c3d966 100644 --- a/go.mod +++ b/go.mod @@ -136,7 +136,7 @@ require ( golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - golang.org/x/sys v0.0.0-20210324051608-47abb6519492 + golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 golang.org/x/text v0.3.4 golang.org/x/tools v0.1.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 @@ -152,7 +152,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 gotest.tools v2.2.0+incompatible k8s.io/api v0.20.4 - k8s.io/apimachinery v0.20.4 + k8s.io/apimachinery v0.21.1 k8s.io/client-go v0.20.4 modernc.org/sqlite v1.10.8 ) diff --git a/go.sum b/go.sum index 34f6eec3c0f2d..cef47cfccb2c2 100644 --- a/go.sum +++ b/go.sum @@ -541,8 +541,9 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= @@ -1089,6 +1090,7 @@ github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= @@ -1662,6 +1664,7 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1779,10 +1782,12 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2065,8 +2070,9 @@ k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.4 h1:vhxQ0PPUUU2Ns1b9r4/UFp13UPs8cw2iOoTjnY9faa0= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.21.1 h1:Q6XuHGlj2xc+hlMCvqyYfbv3H7SRGn2c8NycxJquDVs= +k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= @@ -2082,10 +2088,12 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -2127,8 +2135,9 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= From c6c3efdb9716ced6174b0d08cbd161d1b56d5794 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 24 Jun 2021 16:43:23 -0400 Subject: [PATCH 491/761] Don't stop parsing after statsd parsing error (#9423) --- plugins/inputs/statsd/statsd.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 1aded7f9f1894..4416a19f4624e 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -3,7 +3,6 @@ package statsd import ( "bufio" "bytes" - "errors" "fmt" "net" "sort" @@ -18,6 +17,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/selfstat" + "github.com/pkg/errors" ) const ( @@ -35,6 +35,8 @@ const ( parserGoRoutines = 5 ) +var errParsing = errors.New("error parsing statsd line") + // Statsd allows the importing of statsd and dogstatsd data. type Statsd struct { // Protocol used on listener - udp or tcp @@ -568,6 +570,10 @@ func (s *Statsd) parser() error { } default: if err := s.parseStatsdLine(line); err != nil { + if errors.Cause(err) == errParsing { + // parsing errors log when the error occurs + continue + } return err } } @@ -605,7 +611,7 @@ func (s *Statsd) parseStatsdLine(line string) error { bits := strings.Split(line, ":") if len(bits) < 2 { s.Log.Errorf("Splitting ':', unable to parse metric: %s", line) - return errors.New("error Parsing statsd line") + return errParsing } // Extract bucket name from individual metric bits @@ -621,7 +627,7 @@ func (s *Statsd) parseStatsdLine(line string) error { pipesplit := strings.Split(bit, "|") if len(pipesplit) < 2 { s.Log.Errorf("Splitting '|', unable to parse metric: %s", line) - return errors.New("error parsing statsd line") + return errParsing } else if len(pipesplit) > 2 { sr := pipesplit[2] @@ -645,14 +651,14 @@ func (s *Statsd) parseStatsdLine(line string) error { m.mtype = pipesplit[1] default: s.Log.Errorf("Metric type %q unsupported", pipesplit[1]) - return errors.New("error parsing statsd line") + return errParsing } // Parse the value if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") { if m.mtype != "g" && m.mtype != "c" { s.Log.Errorf("+- values are only supported for gauges & counters, unable to parse metric: %s", line) - return errors.New("error parsing statsd line") + return errParsing } m.additive = true } @@ -662,7 +668,7 @@ func (s *Statsd) parseStatsdLine(line string) error { v, err := strconv.ParseFloat(pipesplit[0], 64) if err != nil { s.Log.Errorf("Parsing value to float64, unable to parse metric: %s", line) - return errors.New("error parsing statsd line") + return errParsing } m.floatvalue = v case "c": @@ -672,7 +678,7 @@ func (s *Statsd) parseStatsdLine(line string) error { v2, err2 := strconv.ParseFloat(pipesplit[0], 64) if err2 != nil { s.Log.Errorf("Parsing value to int64, unable to parse metric: %s", line) - return errors.New("error parsing statsd line") + return errParsing } v = int64(v2) } From e4bd01e0c706fedca7e2584322d3e458569f40a4 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 24 Jun 2021 22:50:24 +0200 Subject: [PATCH 492/761] Exclude read-timeout from being an error (#9429) --- plugins/inputs/dovecot/dovecot.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go index ab5067534dea0..fbc3b79058187 100644 --- a/plugins/inputs/dovecot/dovecot.go +++ b/plugins/inputs/dovecot/dovecot.go @@ -93,7 +93,7 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype stri c, err := net.DialTimeout(proto, addr, defaultTimeout) if err != nil { - return fmt.Errorf("enable to connect to dovecot server '%s': %s", addr, err) + return fmt.Errorf("unable to connect to dovecot server '%s': %s", addr, err) } defer c.Close() @@ -113,7 +113,12 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype stri } var buf bytes.Buffer if _, err := io.Copy(&buf, c); err != nil { - return fmt.Errorf("copying message failed for dovecot server '%s': %s", addr, err) + // We need to accept the timeout here as reading from the connection will only terminate on EOF + // or on a timeout to happen. As EOF for TCP connections will only be sent on connection closing, + // the only way to get the whole message is to wait for the timeout to happen. + if nerr, ok := err.(net.Error); !ok || !nerr.Timeout() { + return fmt.Errorf("copying message failed for dovecot server '%s': %s", addr, err) + } } var host string From eba6191239626f50e7876a3b9c2432751589aeb1 Mon Sep 17 00:00:00 2001 From: Mya Date: Mon, 28 Jun 2021 12:50:37 -0600 Subject: [PATCH 493/761] nat-server upgrade to v2.2.6 (#9369) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 5 ++-- go.sum | 47 +++++++++++++-------------------- 3 files changed, 21 insertions(+), 32 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index fad0787c02428..b88ec2acfe79a 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -149,6 +149,7 @@ following works: - github.com/mdlayher/netlink [MIT License](https://github.com/mdlayher/netlink/blob/master/LICENSE.md) - github.com/microsoft/ApplicationInsights-Go [MIT License](https://github.com/microsoft/ApplicationInsights-Go/blob/master/LICENSE) - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) +- github.com/minio/highwayhash [Apache License 2.0](https://github.com/minio/highwayhash/blob/master/LICENSE) - github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE) - github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) - github.com/moby/ipvs [Apache License 2.0](https://github.com/moby/ipvs/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 0d21185c3d966..b90ad93e180b0 100644 --- a/go.mod +++ b/go.mod @@ -94,9 +94,8 @@ require ( github.com/moby/ipvs v1.0.1 github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect - github.com/nats-io/jwt v1.2.2 // indirect - github.com/nats-io/nats-server/v2 v2.1.4 - github.com/nats-io/nats.go v1.10.0 + github.com/nats-io/nats-server/v2 v2.2.6 + github.com/nats-io/nats.go v1.11.0 github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 github.com/nsqio/go-nsq v1.0.8 github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 diff --git a/go.sum b/go.sum index cef47cfccb2c2..73461ad6ff6b4 100644 --- a/go.sum +++ b/go.sum @@ -263,8 +263,6 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= @@ -272,6 +270,8 @@ github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= @@ -981,8 +981,9 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.3 h1:dB4Bn0tN3wdCzQxnS8r06kV74qN/TAfaIS0bVE8h3jc= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -1068,6 +1069,8 @@ github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7 github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= +github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -1085,10 +1088,6 @@ github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= -github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= -github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= -github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= -github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= @@ -1130,17 +1129,20 @@ github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5Vgl github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU= github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.2 h1:ejVCLO8gu6/4bOKIHQpmB5UhhUJfAQw55yvLWpfmKjI= +github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats-server/v2 v2.1.4 h1:BILRnsJ2Yb/fefiFbBWADpViGF69uh4sxe8poVDQ06g= -github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg= +github.com/nats-io/nats-server/v2 v2.2.6 h1:FPK9wWx9pagxcw14s8W9rlfzfyHm61uNLnJyybZbn48= +github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nats.go v1.10.0 h1:L8qnKaofSfNFbXg0C5F71LdjPRnmQwSsA4ukmkt1TvY= -github.com/nats-io/nats.go v1.10.0/go.mod h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE= +github.com/nats-io/nats.go v1.11.0 h1:L263PZkrmkRJRJT2YHU8GwWWvEvmr9/LUKuJTXsF32k= +github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= -github.com/nats-io/nkeys v0.2.0 h1:WXKF7diOaPU9cJdLD7nuzwasQy9vT1tBqzXZZf3AMJM= github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= @@ -1369,13 +1371,6 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -1399,8 +1394,6 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1466,10 +1459,6 @@ github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYM github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= -github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= -github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= @@ -1573,6 +1562,7 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1694,6 +1684,7 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1774,8 +1765,6 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1803,6 +1792,7 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1960,7 +1950,6 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= From e2ab2188db035617150885ffc860393ea59703e8 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 29 Jun 2021 23:01:46 +0200 Subject: [PATCH 494/761] Fix RabbitMQ regression in #9383 (#9443) --- plugins/inputs/rabbitmq/README.md | 6 + plugins/inputs/rabbitmq/rabbitmq.go | 198 +++-- plugins/inputs/rabbitmq/rabbitmq_test.go | 768 ++++++++++++++---- .../testdata/{ => set1}/exchanges.json | 0 .../testdata/{ => set1}/federation-links.json | 0 .../rabbitmq/testdata/{ => set1}/memory.json | 0 .../rabbitmq/testdata/{ => set1}/nodes.json | 0 .../testdata/{ => set1}/overview.json | 0 .../rabbitmq/testdata/{ => set1}/queues.json | 0 .../rabbitmq/testdata/set2/exchanges.json | 104 +++ .../testdata/set2/federation-links.json | 1 + .../inputs/rabbitmq/testdata/set2/memory.json | 31 + .../inputs/rabbitmq/testdata/set2/nodes.json | 417 ++++++++++ .../rabbitmq/testdata/set2/overview.json | 1 + .../inputs/rabbitmq/testdata/set2/queues.json | 356 ++++++++ 15 files changed, 1675 insertions(+), 207 deletions(-) rename plugins/inputs/rabbitmq/testdata/{ => set1}/exchanges.json (100%) rename plugins/inputs/rabbitmq/testdata/{ => set1}/federation-links.json (100%) rename plugins/inputs/rabbitmq/testdata/{ => set1}/memory.json (100%) rename plugins/inputs/rabbitmq/testdata/{ => set1}/nodes.json (100%) rename plugins/inputs/rabbitmq/testdata/{ => set1}/overview.json (100%) rename plugins/inputs/rabbitmq/testdata/{ => set1}/queues.json (100%) create mode 100644 plugins/inputs/rabbitmq/testdata/set2/exchanges.json create mode 100644 plugins/inputs/rabbitmq/testdata/set2/federation-links.json create mode 100644 plugins/inputs/rabbitmq/testdata/set2/memory.json create mode 100644 plugins/inputs/rabbitmq/testdata/set2/nodes.json create mode 100644 plugins/inputs/rabbitmq/testdata/set2/overview.json create mode 100644 plugins/inputs/rabbitmq/testdata/set2/queues.json diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md index 1274b4ee230f8..5f106642adeb6 100644 --- a/plugins/inputs/rabbitmq/README.md +++ b/plugins/inputs/rabbitmq/README.md @@ -48,6 +48,12 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management ## specified, metrics for all exchanges are gathered. # exchanges = ["telegraf"] + ## Metrics to include and exclude. Globs accepted. + ## Note that an empty array for both will include all metrics + ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" + # metric_include = [] + # metric_exclude = [] + ## Queues to include and exclude. Globs accepted. ## Note that an empty array for both will include all queues # queue_name_include = [] diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index fd39bd090dbc5..13be5f63b1619 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -3,6 +3,7 @@ package rabbitmq import ( "encoding/json" "fmt" + "io/ioutil" "net/http" "strconv" "sync" @@ -47,15 +48,18 @@ type RabbitMQ struct { Queues []string `toml:"queues"` Exchanges []string `toml:"exchanges"` + MetricInclude []string `toml:"metric_include"` + MetricExclude []string `toml:"metric_exclude"` QueueInclude []string `toml:"queue_name_include"` QueueExclude []string `toml:"queue_name_exclude"` FederationUpstreamInclude []string `toml:"federation_upstream_include"` FederationUpstreamExclude []string `toml:"federation_upstream_exclude"` - Client *http.Client `toml:"-"` + Log telegraf.Logger `toml:"-"` - filterCreated bool + client *http.Client excludeEveryQueue bool + metricFilter filter.Filter queueFilter filter.Filter upstreamFilter filter.Filter } @@ -163,11 +167,11 @@ type Node struct { GcNumDetails Details `json:"gc_num_details"` GcBytesReclaimed int64 `json:"gc_bytes_reclaimed"` GcBytesReclaimedDetails Details `json:"gc_bytes_reclaimed_details"` - IoReadAvgTime int64 `json:"io_read_avg_time"` + IoReadAvgTime float64 `json:"io_read_avg_time"` IoReadAvgTimeDetails Details `json:"io_read_avg_time_details"` IoReadBytes int64 `json:"io_read_bytes"` IoReadBytesDetails Details `json:"io_read_bytes_details"` - IoWriteAvgTime int64 `json:"io_write_avg_time"` + IoWriteAvgTime float64 `json:"io_write_avg_time"` IoWriteAvgTimeDetails Details `json:"io_write_avg_time_details"` IoWriteBytes int64 `json:"io_write_bytes"` IoWriteBytesDetails Details `json:"io_write_bytes_details"` @@ -226,32 +230,44 @@ type MemoryResponse struct { // Memory details type Memory struct { - ConnectionReaders int64 `json:"connection_readers"` - ConnectionWriters int64 `json:"connection_writers"` - ConnectionChannels int64 `json:"connection_channels"` - ConnectionOther int64 `json:"connection_other"` - QueueProcs int64 `json:"queue_procs"` - QueueSlaveProcs int64 `json:"queue_slave_procs"` - Plugins int64 `json:"plugins"` - OtherProc int64 `json:"other_proc"` - Metrics int64 `json:"metrics"` - MgmtDb int64 `json:"mgmt_db"` - Mnesia int64 `json:"mnesia"` - OtherEts int64 `json:"other_ets"` - Binary int64 `json:"binary"` - MsgIndex int64 `json:"msg_index"` - Code int64 `json:"code"` - Atom int64 `json:"atom"` - OtherSystem int64 `json:"other_system"` - AllocatedUnused int64 `json:"allocated_unused"` - ReservedUnallocated int64 `json:"reserved_unallocated"` - Total int64 `json:"total"` + ConnectionReaders int64 `json:"connection_readers"` + ConnectionWriters int64 `json:"connection_writers"` + ConnectionChannels int64 `json:"connection_channels"` + ConnectionOther int64 `json:"connection_other"` + QueueProcs int64 `json:"queue_procs"` + QueueSlaveProcs int64 `json:"queue_slave_procs"` + Plugins int64 `json:"plugins"` + OtherProc int64 `json:"other_proc"` + Metrics int64 `json:"metrics"` + MgmtDb int64 `json:"mgmt_db"` + Mnesia int64 `json:"mnesia"` + OtherEts int64 `json:"other_ets"` + Binary int64 `json:"binary"` + MsgIndex int64 `json:"msg_index"` + Code int64 `json:"code"` + Atom int64 `json:"atom"` + OtherSystem int64 `json:"other_system"` + AllocatedUnused int64 `json:"allocated_unused"` + ReservedUnallocated int64 `json:"reserved_unallocated"` + Total interface{} `json:"total"` +} + +// Error response +type ErrorResponse struct { + Error string `json:"error"` + Reason string `json:"reason"` } // gatherFunc ... type gatherFunc func(r *RabbitMQ, acc telegraf.Accumulator) -var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues, gatherExchanges, gatherFederationLinks} +var gatherFunctions = map[string]gatherFunc{ + "exchange": gatherExchanges, + "federation": gatherFederationLinks, + "node": gatherNodes, + "overview": gatherOverview, + "queue": gatherQueues, +} var sampleConfig = ` ## Management Plugin url. (default: http://localhost:15672) @@ -291,6 +307,12 @@ var sampleConfig = ` ## specified, metrics for all exchanges are gathered. # exchanges = ["telegraf"] + ## Metrics to include and exclude. Globs accepted. + ## Note that an empty array for both will include all metrics + ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" + # metric_include = [] + # metric_exclude = [] + ## Queues to include and exclude. Globs accepted. ## Note that an empty array for both will include all queues queue_name_include = [] @@ -323,39 +345,47 @@ func (r *RabbitMQ) Description() string { return "Reads metrics from RabbitMQ servers via the Management Plugin" } -// Gather ... -func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { - if r.Client == nil { - tlsCfg, err := r.ClientConfig.TLSConfig() - if err != nil { - return err - } - tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(r.ResponseHeaderTimeout), - TLSClientConfig: tlsCfg, - } - r.Client = &http.Client{ - Transport: tr, - Timeout: time.Duration(r.ClientTimeout), - } +func (r *RabbitMQ) Init() error { + var err error + + // Create gather filters + if err := r.createQueueFilter(); err != nil { + return err + } + if err := r.createUpstreamFilter(); err != nil { + return err } - // Create gather filters if not already created - if !r.filterCreated { - err := r.createQueueFilter() - if err != nil { - return err - } - err = r.createUpstreamFilter() - if err != nil { - return err - } - r.filterCreated = true + // Create a filter for the metrics + if r.metricFilter, err = filter.NewIncludeExcludeFilter(r.MetricInclude, r.MetricExclude); err != nil { + return err } + tlsCfg, err := r.ClientConfig.TLSConfig() + if err != nil { + return err + } + tr := &http.Transport{ + ResponseHeaderTimeout: time.Duration(r.ResponseHeaderTimeout), + TLSClientConfig: tlsCfg, + } + r.client = &http.Client{ + Transport: tr, + Timeout: time.Duration(r.ClientTimeout), + } + + return nil +} + +// Gather ... +func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup - wg.Add(len(gatherFunctions)) - for _, f := range gatherFunctions { + for name, f := range gatherFunctions { + // Query only metrics that are supported + if !r.metricFilter.Match(name) { + continue + } + wg.Add(1) go func(gf gatherFunc) { defer wg.Done() gf(r, acc) @@ -366,15 +396,16 @@ func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { return nil } -func (r *RabbitMQ) requestJSON(u string, target interface{}) error { +func (r *RabbitMQ) requestEndpoint(u string) ([]byte, error) { if r.URL == "" { r.URL = DefaultURL } - u = fmt.Sprintf("%s%s", r.URL, u) + endpoint := r.URL + u + r.Log.Debugf("Requesting %q...", endpoint) - req, err := http.NewRequest("GET", u, nil) + req, err := http.NewRequest("GET", endpoint, nil) if err != nil { - return err + return nil, err } username := r.Username @@ -389,14 +420,39 @@ func (r *RabbitMQ) requestJSON(u string, target interface{}) error { req.SetBasicAuth(username, password) - resp, err := r.Client.Do(req) + resp, err := r.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + r.Log.Debugf("HTTP status code: %v %v", resp.StatusCode, http.StatusText(resp.StatusCode)) + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return nil, fmt.Errorf("getting %q failed: %v %v", u, resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + return ioutil.ReadAll(resp.Body) +} + +func (r *RabbitMQ) requestJSON(u string, target interface{}) error { + buf, err := r.requestEndpoint(u) if err != nil { return err } + if err := json.Unmarshal(buf, target); err != nil { + if _, ok := err.(*json.UnmarshalTypeError); ok { + // Try to get the error reason from the response + var errResponse ErrorResponse + if json.Unmarshal(buf, &errResponse) == nil && errResponse.Error != "" { + // Return the error reason in the response + return fmt.Errorf("error response trying to get %q: %q (reason: %q)", u, errResponse.Error, errResponse.Reason) + } + } - defer resp.Body.Close() + return fmt.Errorf("decoding answer from %q failed: %v", u, err) + } - return json.NewDecoder(resp.Body).Decode(target) + return nil } func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator) { @@ -533,7 +589,27 @@ func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) { fields["mem_other_system"] = memory.Memory.OtherSystem fields["mem_allocated_unused"] = memory.Memory.AllocatedUnused fields["mem_reserved_unallocated"] = memory.Memory.ReservedUnallocated - fields["mem_total"] = memory.Memory.Total + switch v := memory.Memory.Total.(type) { + case float64: + fields["mem_total"] = int64(v) + case map[string]interface{}: + var foundEstimator bool + for _, estimator := range []string{"rss", "allocated", "erlang"} { + if x, found := v[estimator]; found { + if total, ok := x.(float64); ok { + fields["mem_total"] = int64(total) + foundEstimator = true + break + } + acc.AddError(fmt.Errorf("unknown type %T for %q total memory", x, estimator)) + } + } + if !foundEstimator { + acc.AddError(fmt.Errorf("no known memory estimation in %v", v)) + } + default: + acc.AddError(fmt.Errorf("unknown type %T for total memory", memory.Memory.Total)) + } } acc.AddFields("rabbitmq_node", fields, tags) diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index b65585b8f0a57..830819b0528e4 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -1,36 +1,40 @@ package rabbitmq import ( + "fmt" + "io/ioutil" "net/http" "net/http/httptest" - "testing" + "time" - "io/ioutil" + "testing" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestRabbitMQGeneratesMetrics(t *testing.T) { +func TestRabbitMQGeneratesMetricsSet1(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var jsonFilePath string switch r.URL.Path { case "/api/overview": - jsonFilePath = "testdata/overview.json" + jsonFilePath = "testdata/set1/overview.json" case "/api/nodes": - jsonFilePath = "testdata/nodes.json" + jsonFilePath = "testdata/set1/nodes.json" case "/api/queues": - jsonFilePath = "testdata/queues.json" + jsonFilePath = "testdata/set1/queues.json" case "/api/exchanges": - jsonFilePath = "testdata/exchanges.json" + jsonFilePath = "testdata/set1/exchanges.json" case "/api/federation-links": - jsonFilePath = "testdata/federation-links.json" + jsonFilePath = "testdata/set1/federation-links.json" case "/api/nodes/rabbit@vagrant-ubuntu-trusty-64/memory": - jsonFilePath = "testdata/memory.json" + jsonFilePath = "testdata/set1/memory.json" default: - require.Fail(t, "Cannot handle request") + http.Error(w, fmt.Sprintf("unknown path %q", r.URL.Path), http.StatusNotFound) + return } data, err := ioutil.ReadFile(jsonFilePath) @@ -41,155 +45,627 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { })) defer ts.Close() - r := &RabbitMQ{ + // Define test cases + expected := []telegraf.Metric{ + testutil.MustMetric("rabbitmq_overview", + map[string]string{ + "url": ts.URL, + }, + map[string]interface{}{ + "messages": int64(5), + "messages_ready": int64(32), + "messages_unacked": int64(27), + "messages_acked": int64(5246), + "messages_delivered": int64(5234), + "messages_delivered_get": int64(3333), + "messages_published": int64(5258), + "channels": int64(44), + "connections": int64(44), + "consumers": int64(65), + "exchanges": int64(43), + "queues": int64(62), + "clustering_listeners": int64(2), + "amqp_listeners": int64(2), + "return_unroutable": int64(10), + "return_unroutable_rate": float64(3.3), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_queue", + map[string]string{ + "auto_delete": "false", + "durable": "false", + "node": "rabbit@rmqlocal-0.rmqlocal.ankorabbitstatefulset3.svc.cluster.local", + "queue": "reply_a716f0523cd44941ad2ea6ce4a3869c3", + "url": ts.URL, + "vhost": "sorandomsorandom", + }, + map[string]interface{}{ + "consumers": int64(3), + "consumer_utilisation": float64(1.0), + "memory": int64(143776), + "message_bytes": int64(3), + "message_bytes_ready": int64(4), + "message_bytes_unacked": int64(5), + "message_bytes_ram": int64(6), + "message_bytes_persist": int64(7), + "messages": int64(44), + "messages_ready": int64(32), + "messages_unack": int64(44), + "messages_ack": int64(3457), + "messages_ack_rate": float64(9.9), + "messages_deliver": int64(22222), + "messages_deliver_rate": float64(333.4), + "messages_deliver_get": int64(3457), + "messages_deliver_get_rate": float64(0.2), + "messages_publish": int64(3457), + "messages_publish_rate": float64(11.2), + "messages_redeliver": int64(33), + "messages_redeliver_rate": float64(2.5), + "idle_since": "2015-11-01 8:22:14", + "slave_nodes": int64(1), + "synchronised_slave_nodes": int64(1), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_node", + map[string]string{ + "node": "rabbit@vagrant-ubuntu-trusty-64", + "url": ts.URL, + }, + map[string]interface{}{ + "disk_free": int64(3776), + "disk_free_limit": int64(50000000), + "disk_free_alarm": int64(0), + "fd_total": int64(1024), + "fd_used": int64(63), + "mem_limit": int64(2503), + "mem_used": int64(159707080), + "mem_alarm": int64(1), + "proc_total": int64(1048576), + "proc_used": int64(783), + "run_queue": int64(0), + "sockets_total": int64(829), + "sockets_used": int64(45), + "uptime": int64(7464827), + "running": int64(1), + "mnesia_disk_tx_count": int64(16), + "mnesia_ram_tx_count": int64(296), + "mnesia_disk_tx_count_rate": float64(1.1), + "mnesia_ram_tx_count_rate": float64(2.2), + "gc_num": int64(57280132), + "gc_bytes_reclaimed": int64(2533), + "gc_num_rate": float64(274.2), + "gc_bytes_reclaimed_rate": float64(16490856.3), + "io_read_avg_time": float64(983.0), + "io_read_avg_time_rate": float64(88.77), + "io_read_bytes": int64(1111), + "io_read_bytes_rate": float64(99.99), + "io_write_avg_time": float64(134.0), + "io_write_avg_time_rate": float64(4.32), + "io_write_bytes": int64(823), + "io_write_bytes_rate": float64(32.8), + "mem_connection_readers": int64(1234), + "mem_connection_writers": int64(5678), + "mem_connection_channels": int64(1133), + "mem_connection_other": int64(2840), + "mem_queue_procs": int64(2840), + "mem_queue_slave_procs": int64(0), + "mem_plugins": int64(1755976), + "mem_other_proc": int64(23056584), + "mem_metrics": int64(196536), + "mem_mgmt_db": int64(491272), + "mem_mnesia": int64(115600), + "mem_other_ets": int64(2121872), + "mem_binary": int64(418848), + "mem_msg_index": int64(42848), + "mem_code": int64(25179322), + "mem_atom": int64(1041593), + "mem_other_system": int64(14741981), + "mem_allocated_unused": int64(38208528), + "mem_reserved_unallocated": int64(0), + "mem_total": int64(83025920), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "true", + "durable": "false", + "exchange": "reply_a716f0523cd44941ad2ea6ce4a3869c3", + "internal": "false", + "type": "direct", + "url": ts.URL, + "vhost": "sorandomsorandom", + }, + map[string]interface{}{ + "messages_publish_in": int64(3678), + "messages_publish_in_rate": float64(3.2), + "messages_publish_out": int64(3677), + "messages_publish_out_rate": float64(5.1), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_federation", + map[string]string{ + "queue": "exampleLocalQueue", + "type": "queue", + "upstream": "ExampleFederationUpstream", + "upstream_queue": "exampleUpstreamQueue", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "acks_uncommitted": int64(1), + "consumers": int64(2), + "messages_unacknowledged": int64(3), + "messages_uncommitted": int64(4), + "messages_unconfirmed": int64(5), + "messages_confirm": int64(67), + "messages_publish": int64(890), + "messages_return_unroutable": int64(1), + }, + time.Unix(0, 0), + ), + } + + // Run the test + plugin := &RabbitMQ{ URL: ts.URL, + Log: testutil.Logger{}, } + require.NoError(t, plugin.Init()) acc := &testutil.Accumulator{} + require.NoError(t, plugin.Gather(acc)) - err := acc.GatherError(r.Gather) - require.NoError(t, err) - - overviewMetrics := map[string]interface{}{ - "messages": 5, - "messages_ready": 32, - "messages_unacked": 27, - "messages_acked": 5246, - "messages_delivered": 5234, - "messages_delivered_get": 3333, - "messages_published": 5258, - "channels": 44, - "connections": 44, - "consumers": 65, - "exchanges": 43, - "queues": 62, - "clustering_listeners": 2, - "amqp_listeners": 2, - "return_unroutable": 10, - "return_unroutable_rate": 3.3, - } - compareMetrics(t, overviewMetrics, acc, "rabbitmq_overview") - - queuesMetrics := map[string]interface{}{ - "consumers": 3, - "consumer_utilisation": 1.0, - "memory": 143776, - "message_bytes": 3, - "message_bytes_ready": 4, - "message_bytes_unacked": 5, - "message_bytes_ram": 6, - "message_bytes_persist": 7, - "messages": 44, - "messages_ready": 32, - "messages_unack": 44, - "messages_ack": 3457, - "messages_ack_rate": 9.9, - "messages_deliver": 22222, - "messages_deliver_rate": 333.4, - "messages_deliver_get": 3457, - "messages_deliver_get_rate": 0.2, - "messages_publish": 3457, - "messages_publish_rate": 11.2, - "messages_redeliver": 33, - "messages_redeliver_rate": 2.5, - "idle_since": "2015-11-01 8:22:14", - "slave_nodes": 1, - "synchronised_slave_nodes": 1, + acc.Wait(len(expected)) + require.Len(t, acc.Errors, 0) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func TestRabbitMQGeneratesMetricsSet2(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var jsonFilePath string + + switch r.URL.Path { + case "/api/overview": + jsonFilePath = "testdata/set2/overview.json" + case "/api/nodes": + jsonFilePath = "testdata/set2/nodes.json" + case "/api/queues": + jsonFilePath = "testdata/set2/queues.json" + case "/api/exchanges": + jsonFilePath = "testdata/set2/exchanges.json" + case "/api/federation-links": + jsonFilePath = "testdata/set2/federation-links.json" + case "/api/nodes/rabbit@rmqserver/memory": + jsonFilePath = "testdata/set2/memory.json" + default: + http.Error(w, fmt.Sprintf("unknown path %q", r.URL.Path), http.StatusNotFound) + return + } + + data, err := ioutil.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) + + _, err = w.Write(data) + require.NoError(t, err) + })) + defer ts.Close() + + // Define test cases + expected := []telegraf.Metric{ + testutil.MustMetric("rabbitmq_overview", + map[string]string{ + "url": ts.URL, + }, + map[string]interface{}{ + "messages": int64(30), + "messages_ready": int64(30), + "messages_unacked": int64(0), + "messages_acked": int64(3736443), + "messages_delivered": int64(3736446), + "messages_delivered_get": int64(3736446), + "messages_published": int64(770025), + "channels": int64(43), + "connections": int64(43), + "consumers": int64(37), + "exchanges": int64(8), + "queues": int64(34), + "clustering_listeners": int64(1), + "amqp_listeners": int64(2), + "return_unroutable": int64(0), + "return_unroutable_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_queue", + map[string]string{ + "auto_delete": "false", + "durable": "false", + "node": "rabbit@rmqserver", + "queue": "39fd2caf-63e5-41e3-c15a-ba8fa11434b2", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "consumers": int64(1), + "consumer_utilisation": float64(1.0), + "memory": int64(15840), + "message_bytes": int64(0), + "message_bytes_ready": int64(0), + "message_bytes_unacked": int64(0), + "message_bytes_ram": int64(0), + "message_bytes_persist": int64(0), + "messages": int64(0), + "messages_ready": int64(0), + "messages_unack": int64(0), + "messages_ack": int64(180), + "messages_ack_rate": float64(0.0), + "messages_deliver": int64(180), + "messages_deliver_rate": float64(0.0), + "messages_deliver_get": int64(180), + "messages_deliver_get_rate": float64(0.0), + "messages_publish": int64(180), + "messages_publish_rate": float64(0.0), + "messages_redeliver": int64(0), + "messages_redeliver_rate": float64(0.0), + "idle_since": "2021-06-28 15:54:14", + "slave_nodes": int64(0), + "synchronised_slave_nodes": int64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_queue", + map[string]string{ + "auto_delete": "false", + "durable": "false", + "node": "rabbit@rmqserver", + "queue": "39fd2cb4-aa2d-c08b-457a-62d0893523a1", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "consumers": int64(1), + "consumer_utilisation": float64(1.0), + "memory": int64(15600), + "message_bytes": int64(0), + "message_bytes_ready": int64(0), + "message_bytes_unacked": int64(0), + "message_bytes_ram": int64(0), + "message_bytes_persist": int64(0), + "messages": int64(0), + "messages_ready": int64(0), + "messages_unack": int64(0), + "messages_ack": int64(177), + "messages_ack_rate": float64(0.0), + "messages_deliver": int64(177), + "messages_deliver_rate": float64(0.0), + "messages_deliver_get": int64(177), + "messages_deliver_get_rate": float64(0.0), + "messages_publish": int64(177), + "messages_publish_rate": float64(0.0), + "messages_redeliver": int64(0), + "messages_redeliver_rate": float64(0.0), + "idle_since": "2021-06-28 15:54:14", + "slave_nodes": int64(0), + "synchronised_slave_nodes": int64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_queue", + map[string]string{ + "auto_delete": "false", + "durable": "false", + "node": "rabbit@rmqserver", + "queue": "39fd2cb5-3820-e01b-6e20-ba29d5553fc3", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "consumers": int64(1), + "consumer_utilisation": float64(1.0), + "memory": int64(15584), + "message_bytes": int64(0), + "message_bytes_ready": int64(0), + "message_bytes_unacked": int64(0), + "message_bytes_ram": int64(0), + "message_bytes_persist": int64(0), + "messages": int64(0), + "messages_ready": int64(0), + "messages_unack": int64(0), + "messages_ack": int64(175), + "messages_ack_rate": float64(0.0), + "messages_deliver": int64(175), + "messages_deliver_rate": float64(0.0), + "messages_deliver_get": int64(175), + "messages_deliver_get_rate": float64(0.0), + "messages_publish": int64(175), + "messages_publish_rate": float64(0.0), + "messages_redeliver": int64(0), + "messages_redeliver_rate": float64(0.0), + "idle_since": "2021-06-28 15:54:15", + "slave_nodes": int64(0), + "synchronised_slave_nodes": int64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_node", + map[string]string{ + "node": "rabbit@rmqserver", + "url": ts.URL, + }, + map[string]interface{}{ + "disk_free": int64(25086496768), + "disk_free_limit": int64(50000000), + "disk_free_alarm": int64(0), + "fd_total": int64(65536), + "fd_used": int64(78), + "mem_limit": int64(1717546188), + "mem_used": int64(387645440), + "mem_alarm": int64(0), + "proc_total": int64(1048576), + "proc_used": int64(1128), + "run_queue": int64(1), + "sockets_total": int64(58893), + "sockets_used": int64(43), + "uptime": int64(4150152129), + "running": int64(1), + "mnesia_disk_tx_count": int64(103), + "mnesia_ram_tx_count": int64(2257), + "mnesia_disk_tx_count_rate": float64(0.0), + "mnesia_ram_tx_count_rate": float64(0.0), + "gc_num": int64(329526389), + "gc_bytes_reclaimed": int64(13660012170840), + "gc_num_rate": float64(125.2), + "gc_bytes_reclaimed_rate": float64(6583379.2), + "io_read_avg_time": float64(0.0), + "io_read_avg_time_rate": float64(0.0), + "io_read_bytes": int64(1), + "io_read_bytes_rate": float64(0.0), + "io_write_avg_time": float64(0.0), + "io_write_avg_time_rate": float64(0.0), + "io_write_bytes": int64(193066), + "io_write_bytes_rate": float64(0.0), + "mem_connection_readers": int64(1246768), + "mem_connection_writers": int64(72108), + "mem_connection_channels": int64(308588), + "mem_connection_other": int64(4883596), + "mem_queue_procs": int64(780996), + "mem_queue_slave_procs": int64(0), + "mem_plugins": int64(11932828), + "mem_other_proc": int64(39203520), + "mem_metrics": int64(626932), + "mem_mgmt_db": int64(3341264), + "mem_mnesia": int64(396016), + "mem_other_ets": int64(3771384), + "mem_binary": int64(209324208), + "mem_msg_index": int64(32648), + "mem_code": int64(32810827), + "mem_atom": int64(1458513), + "mem_other_system": int64(14284124), + "mem_allocated_unused": int64(61026048), + "mem_reserved_unallocated": int64(0), + "mem_total": int64(385548288), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "", + "internal": "false", + "type": "direct", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(284725), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(284572), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.direct", + "internal": "false", + "type": "direct", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.fanout", + "internal": "false", + "type": "fanout", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.headers", + "internal": "false", + "type": "headers", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.match", + "internal": "false", + "type": "headers", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.rabbitmq.trace", + "internal": "true", + "type": "topic", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.topic", + "internal": "false", + "type": "topic", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "true", + "durable": "false", + "exchange": "Exchange", + "internal": "false", + "type": "topic", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(18006), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(60798), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), } - compareMetrics(t, queuesMetrics, acc, "rabbitmq_queue") - - nodeMetrics := map[string]interface{}{ - "disk_free": 3776, - "disk_free_limit": 50000000, - "disk_free_alarm": 0, - "fd_total": 1024, - "fd_used": 63, - "mem_limit": 2503, - "mem_used": 159707080, - "mem_alarm": 1, - "proc_total": 1048576, - "proc_used": 783, - "run_queue": 0, - "sockets_total": 829, - "sockets_used": 45, - "uptime": 7464827, - "running": 1, - "mnesia_disk_tx_count": 16, - "mnesia_ram_tx_count": 296, - "mnesia_disk_tx_count_rate": 1.1, - "mnesia_ram_tx_count_rate": 2.2, - "gc_num": 57280132, - "gc_bytes_reclaimed": 2533, - "gc_num_rate": 274.2, - "gc_bytes_reclaimed_rate": 16490856.3, - "io_read_avg_time": 983, - "io_read_avg_time_rate": 88.77, - "io_read_bytes": 1111, - "io_read_bytes_rate": 99.99, - "io_write_avg_time": 134, - "io_write_avg_time_rate": 4.32, - "io_write_bytes": 823, - "io_write_bytes_rate": 32.8, - "mem_connection_readers": 1234, - "mem_connection_writers": 5678, - "mem_connection_channels": 1133, - "mem_connection_other": 2840, - "mem_queue_procs": 2840, - "mem_queue_slave_procs": 0, - "mem_plugins": 1755976, - "mem_other_proc": 23056584, - "mem_metrics": 196536, - "mem_mgmt_db": 491272, - "mem_mnesia": 115600, - "mem_other_ets": 2121872, - "mem_binary": 418848, - "mem_msg_index": 42848, - "mem_code": 25179322, - "mem_atom": 1041593, - "mem_other_system": 14741981, - "mem_allocated_unused": 38208528, - "mem_reserved_unallocated": 0, - "mem_total": 83025920, + expectedErrors := []error{ + fmt.Errorf("error response trying to get \"/api/federation-links\": \"Object Not Found\" (reason: \"Not Found\")"), } - compareMetrics(t, nodeMetrics, acc, "rabbitmq_node") - exchangeMetrics := map[string]interface{}{ - "messages_publish_in": 3678, - "messages_publish_in_rate": 3.2, - "messages_publish_out": 3677, - "messages_publish_out_rate": 5.1, - } - compareMetrics(t, exchangeMetrics, acc, "rabbitmq_exchange") - - federationLinkMetrics := map[string]interface{}{ - "acks_uncommitted": 1, - "consumers": 2, - "messages_unacknowledged": 3, - "messages_uncommitted": 4, - "messages_unconfirmed": 5, - "messages_confirm": 67, - "messages_publish": 890, - "messages_return_unroutable": 1, + // Run the test + plugin := &RabbitMQ{ + URL: ts.URL, + Log: testutil.Logger{}, } - compareMetrics(t, federationLinkMetrics, acc, "rabbitmq_federation") + require.NoError(t, plugin.Init()) + + acc := &testutil.Accumulator{} + require.NoError(t, plugin.Gather(acc)) + + acc.Wait(len(expected)) + require.Len(t, acc.Errors, len(expectedErrors)) + require.ElementsMatch(t, expectedErrors, acc.Errors) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), testutil.SortMetrics()) } -func compareMetrics(t *testing.T, expectedMetrics map[string]interface{}, - accumulator *testutil.Accumulator, measurementKey string) { - measurement, exist := accumulator.Get(measurementKey) +func TestRabbitMQMetricFilerts(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, fmt.Sprintf("unknown path %q", r.URL.Path), http.StatusNotFound) + })) + defer ts.Close() - assert.True(t, exist, "There is measurement %s", measurementKey) - assert.Equal(t, len(expectedMetrics), len(measurement.Fields)) + metricErrors := map[string]error{ + "exchange": fmt.Errorf("getting \"/api/exchanges\" failed: 404 Not Found"), + "federation": fmt.Errorf("getting \"/api/federation-links\" failed: 404 Not Found"), + "node": fmt.Errorf("getting \"/api/nodes\" failed: 404 Not Found"), + "overview": fmt.Errorf("getting \"/api/overview\" failed: 404 Not Found"), + "queue": fmt.Errorf("getting \"/api/queues\" failed: 404 Not Found"), + } - for metricName, metricValue := range expectedMetrics { - actualMetricValue := measurement.Fields[metricName] + // Include test + for name, expected := range metricErrors { + plugin := &RabbitMQ{ + URL: ts.URL, + Log: testutil.Logger{}, + MetricInclude: []string{name}, + } + require.NoError(t, plugin.Init()) + + acc := &testutil.Accumulator{} + require.NoError(t, plugin.Gather(acc)) + require.Len(t, acc.Errors, 1) + require.ElementsMatch(t, []error{expected}, acc.Errors) + } - if accumulator.HasStringField(measurementKey, metricName) { - assert.Equal(t, metricValue, actualMetricValue, - "Metric name: %s", metricName) - } else { - assert.InDelta(t, metricValue, actualMetricValue, 0e5, - "Metric name: %s", metricName) + // Exclude test + for name := range metricErrors { + // Exclude the current metric error from the list of expected errors + var expected []error + for n, e := range metricErrors { + if n != name { + expected = append(expected, e) + } } + plugin := &RabbitMQ{ + URL: ts.URL, + Log: testutil.Logger{}, + MetricExclude: []string{name}, + } + require.NoError(t, plugin.Init()) + + acc := &testutil.Accumulator{} + require.NoError(t, plugin.Gather(acc)) + require.Len(t, acc.Errors, len(expected)) + require.ElementsMatch(t, expected, acc.Errors) } } diff --git a/plugins/inputs/rabbitmq/testdata/exchanges.json b/plugins/inputs/rabbitmq/testdata/set1/exchanges.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/exchanges.json rename to plugins/inputs/rabbitmq/testdata/set1/exchanges.json diff --git a/plugins/inputs/rabbitmq/testdata/federation-links.json b/plugins/inputs/rabbitmq/testdata/set1/federation-links.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/federation-links.json rename to plugins/inputs/rabbitmq/testdata/set1/federation-links.json diff --git a/plugins/inputs/rabbitmq/testdata/memory.json b/plugins/inputs/rabbitmq/testdata/set1/memory.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/memory.json rename to plugins/inputs/rabbitmq/testdata/set1/memory.json diff --git a/plugins/inputs/rabbitmq/testdata/nodes.json b/plugins/inputs/rabbitmq/testdata/set1/nodes.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/nodes.json rename to plugins/inputs/rabbitmq/testdata/set1/nodes.json diff --git a/plugins/inputs/rabbitmq/testdata/overview.json b/plugins/inputs/rabbitmq/testdata/set1/overview.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/overview.json rename to plugins/inputs/rabbitmq/testdata/set1/overview.json diff --git a/plugins/inputs/rabbitmq/testdata/queues.json b/plugins/inputs/rabbitmq/testdata/set1/queues.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/queues.json rename to plugins/inputs/rabbitmq/testdata/set1/queues.json diff --git a/plugins/inputs/rabbitmq/testdata/set2/exchanges.json b/plugins/inputs/rabbitmq/testdata/set2/exchanges.json new file mode 100644 index 0000000000000..df47fe44bbd7f --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/exchanges.json @@ -0,0 +1,104 @@ +[ + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "message_stats": { + "publish_in": 284725, + "publish_in_details": { + "rate": 0 + }, + "publish_out": 284572, + "publish_out_details": { + "rate": 0 + } + }, + "name": "", + "type": "direct", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": { + "x-expires": 300000 + }, + "auto_delete": true, + "durable": false, + "internal": false, + "message_stats": { + "publish_in": 18006, + "publish_in_details": { + "rate": 0 + }, + "publish_out": 60798, + "publish_out_details": { + "rate": 0 + } + }, + "name": "Exchange", + "type": "topic", + "user_who_performed_action": "user", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.direct", + "type": "direct", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.fanout", + "type": "fanout", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.headers", + "type": "headers", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.match", + "type": "headers", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": true, + "name": "amq.rabbitmq.trace", + "type": "topic", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.topic", + "type": "topic", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + } +] diff --git a/plugins/inputs/rabbitmq/testdata/set2/federation-links.json b/plugins/inputs/rabbitmq/testdata/set2/federation-links.json new file mode 100644 index 0000000000000..0d121cb2f3e64 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/federation-links.json @@ -0,0 +1 @@ +{"error":"Object Not Found","reason":"Not Found"} diff --git a/plugins/inputs/rabbitmq/testdata/set2/memory.json b/plugins/inputs/rabbitmq/testdata/set2/memory.json new file mode 100644 index 0000000000000..d18558ae21e5a --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/memory.json @@ -0,0 +1,31 @@ +{ + "memory": { + "connection_readers": 1246768, + "connection_writers": 72108, + "connection_channels": 308588, + "connection_other": 4883596, + "queue_procs": 780996, + "queue_slave_procs": 0, + "quorum_queue_procs": 0, + "plugins": 11932828, + "other_proc": 39203520, + "metrics": 626932, + "mgmt_db": 3341264, + "mnesia": 396016, + "quorum_ets": 47920, + "other_ets": 3771384, + "binary": 209324208, + "msg_index": 32648, + "code": 32810827, + "atom": 1458513, + "other_system": 14284124, + "allocated_unused": 61026048, + "reserved_unallocated": 0, + "strategy": "rss", + "total": { + "erlang": 324522240, + "rss": 385548288, + "allocated": 385548288 + } + } +} diff --git a/plugins/inputs/rabbitmq/testdata/set2/nodes.json b/plugins/inputs/rabbitmq/testdata/set2/nodes.json new file mode 100644 index 0000000000000..6dcfb0d514efd --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/nodes.json @@ -0,0 +1,417 @@ +[ + { + "partitions": [], + "os_pid": "8268", + "fd_total": 65536, + "sockets_total": 58893, + "mem_limit": 1717546188, + "mem_alarm": false, + "disk_free_limit": 50000000, + "disk_free_alarm": false, + "proc_total": 1048576, + "rates_mode": "basic", + "uptime": 4150152129, + "run_queue": 1, + "processors": 4, + "exchange_types": [ + { + "name": "topic", + "description": "AMQP topic exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "fanout", + "description": "AMQP fanout exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "direct", + "description": "AMQP direct exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "headers", + "description": "AMQP headers exchange, as per the AMQP specification", + "enabled": true + } + ], + "auth_mechanisms": [ + { + "name": "PLAIN", + "description": "SASL PLAIN authentication mechanism", + "enabled": true + }, + { + "name": "AMQPLAIN", + "description": "QPid AMQPLAIN mechanism", + "enabled": true + }, + { + "name": "RABBIT-CR-DEMO", + "description": "RabbitMQ Demo challenge-response authentication mechanism", + "enabled": false + } + ], + "applications": [ + { + "name": "amqp_client", + "description": "RabbitMQ AMQP Client", + "version": "3.8.14" + }, + { + "name": "asn1", + "description": "The Erlang ASN1 compiler version 5.0.14", + "version": "5.0.14" + }, + { + "name": "aten", + "description": "Erlang node failure detector", + "version": "0.5.5" + }, + { + "name": "compiler", + "description": "ERTS CXC 138 10", + "version": "7.6.6" + }, + { + "name": "cowboy", + "description": "Small, fast, modern HTTP server.", + "version": "2.8.0" + }, + { + "name": "cowlib", + "description": "Support library for manipulating Web protocols.", + "version": "2.9.1" + }, + { + "name": "credentials_obfuscation", + "description": "Helper library that obfuscates sensitive values in process state", + "version": "2.4.0" + }, + { + "name": "crypto", + "description": "CRYPTO", + "version": "4.8.3" + }, + { + "name": "cuttlefish", + "description": "cuttlefish configuration abstraction", + "version": "2.6.0" + }, + { + "name": "gen_batch_server", + "description": "Generic batching server", + "version": "0.8.4" + }, + { + "name": "goldrush", + "description": "Erlang event stream processor", + "version": "0.1.9" + }, + { + "name": "inets", + "description": "INETS CXC 138 49", + "version": "7.3.2" + }, + { + "name": "jsx", + "description": "a streaming, evented json parsing toolkit", + "version": "2.11.0" + }, + { + "name": "kernel", + "description": "ERTS CXC 138 10", + "version": "7.2.1" + }, + { + "name": "lager", + "description": "Erlang logging framework", + "version": "3.8.2" + }, + { + "name": "mnesia", + "description": "MNESIA CXC 138 12", + "version": "4.18.1" + }, + { + "name": "observer_cli", + "description": "Visualize Erlang Nodes On The Command Line", + "version": "1.6.1" + }, + { + "name": "os_mon", + "description": "CPO CXC 138 46", + "version": "2.6.1" + }, + { + "name": "public_key", + "description": "Public key infrastructure", + "version": "1.9.2" + }, + { + "name": "ra", + "description": "Raft library", + "version": "1.1.8" + }, + { + "name": "rabbit", + "description": "RabbitMQ", + "version": "3.8.14" + }, + { + "name": "rabbit_common", + "description": "Modules shared by rabbitmq-server and rabbitmq-erlang-client", + "version": "3.8.14" + }, + { + "name": "rabbitmq_management", + "description": "RabbitMQ Management Console", + "version": "3.8.14" + }, + { + "name": "rabbitmq_management_agent", + "description": "RabbitMQ Management Agent", + "version": "3.8.14" + }, + { + "name": "rabbitmq_prelaunch", + "description": "RabbitMQ prelaunch setup", + "version": "3.8.14" + }, + { + "name": "rabbitmq_web_dispatch", + "description": "RabbitMQ Web Dispatcher", + "version": "3.8.14" + }, + { + "name": "ranch", + "description": "Socket acceptor pool for TCP protocols.", + "version": "1.7.1" + }, + { + "name": "recon", + "description": "Diagnostic tools for production use", + "version": "2.5.1" + }, + { + "name": "sasl", + "description": "SASL CXC 138 11", + "version": "4.0.1" + }, + { + "name": "ssl", + "description": "Erlang/OTP SSL application", + "version": "10.2.4" + }, + { + "name": "stdlib", + "description": "ERTS CXC 138 10", + "version": "3.14" + }, + { + "name": "stdout_formatter", + "description": "Tools to format paragraphs, lists and tables as plain text", + "version": "0.2.4" + }, + { + "name": "syntax_tools", + "description": "Syntax tools", + "version": "2.4" + }, + { + "name": "sysmon_handler", + "description": "Rate-limiting system_monitor event handler", + "version": "1.3.0" + }, + { + "name": "tools", + "description": "DEVTOOLS CXC 138 16", + "version": "3.4.3" + }, + { + "name": "xmerl", + "description": "XML parser", + "version": "1.3.26" + } + ], + "contexts": [ + { + "description": "RabbitMQ Management", + "path": "/", + "cowboy_opts": "[{sendfile,false}]", + "port": "15672" + } + ], + "log_files": [ + "c:/Users/user/AppData/Roaming/RabbitMQ/log/rabbit@rmqserver.log", + "c:/Users/user/AppData/Roaming/RabbitMQ/log/rabbit@rmqserver_upgrade.log" + ], + "db_dir": "c:/Users/user/AppData/Roaming/RabbitMQ/db/rabbit@rmqserver-mnesia", + "config_files": [ + "c:/Users/user/AppData/Roaming/RabbitMQ/advanced.config" + ], + "net_ticktime": 60, + "enabled_plugins": [ + "rabbitmq_management" + ], + "mem_calculation_strategy": "rss", + "ra_open_file_metrics": { + "ra_log_wal": 1, + "ra_log_segment_writer": 0 + }, + "name": "rabbit@rmqserver", + "type": "disc", + "running": true, + "mem_used": 387645440, + "mem_used_details": { + "rate": 419430.4 + }, + "fd_used": 78, + "fd_used_details": { + "rate": 0 + }, + "sockets_used": 43, + "sockets_used_details": { + "rate": 0 + }, + "proc_used": 1128, + "proc_used_details": { + "rate": 0 + }, + "disk_free": 25086496768, + "disk_free_details": { + "rate": -118784 + }, + "gc_num": 329526389, + "gc_num_details": { + "rate": 125.2 + }, + "gc_bytes_reclaimed": 13660012170840, + "gc_bytes_reclaimed_details": { + "rate": 6583379.2 + }, + "context_switches": 974149754, + "context_switches_details": { + "rate": 270 + }, + "io_read_count": 1, + "io_read_count_details": { + "rate": 0 + }, + "io_read_bytes": 1, + "io_read_bytes_details": { + "rate": 0 + }, + "io_read_avg_time": 0, + "io_read_avg_time_details": { + "rate": 0 + }, + "io_write_count": 45, + "io_write_count_details": { + "rate": 0 + }, + "io_write_bytes": 193066, + "io_write_bytes_details": { + "rate": 0 + }, + "io_write_avg_time": 0, + "io_write_avg_time_details": { + "rate": 0 + }, + "io_sync_count": 45, + "io_sync_count_details": { + "rate": 0 + }, + "io_sync_avg_time": 0, + "io_sync_avg_time_details": { + "rate": 0 + }, + "io_seek_count": 31, + "io_seek_count_details": { + "rate": 0 + }, + "io_seek_avg_time": 0, + "io_seek_avg_time_details": { + "rate": 0 + }, + "io_reopen_count": 0, + "io_reopen_count_details": { + "rate": 0 + }, + "mnesia_ram_tx_count": 2257, + "mnesia_ram_tx_count_details": { + "rate": 0 + }, + "mnesia_disk_tx_count": 103, + "mnesia_disk_tx_count_details": { + "rate": 0 + }, + "msg_store_read_count": 0, + "msg_store_read_count_details": { + "rate": 0 + }, + "msg_store_write_count": 1, + "msg_store_write_count_details": { + "rate": 0 + }, + "queue_index_journal_write_count": 165, + "queue_index_journal_write_count_details": { + "rate": 0 + }, + "queue_index_write_count": 0, + "queue_index_write_count_details": { + "rate": 0 + }, + "queue_index_read_count": 0, + "queue_index_read_count_details": { + "rate": 0 + }, + "io_file_handle_open_attempt_count": 882, + "io_file_handle_open_attempt_count_details": { + "rate": 0 + }, + "io_file_handle_open_attempt_avg_time": 0.05442176870748299, + "io_file_handle_open_attempt_avg_time_details": { + "rate": 0 + }, + "connection_created": 2310, + "connection_created_details": { + "rate": 0 + }, + "connection_closed": 2268, + "connection_closed_details": { + "rate": 0 + }, + "channel_created": 2310, + "channel_created_details": { + "rate": 0 + }, + "channel_closed": 2267, + "channel_closed_details": { + "rate": 0 + }, + "queue_declared": 144281, + "queue_declared_details": { + "rate": 0 + }, + "queue_created": 663, + "queue_created_details": { + "rate": 0 + }, + "queue_deleted": 629, + "queue_deleted_details": { + "rate": 0 + }, + "cluster_links": [], + "metrics_gc_queue_length": { + "connection_closed": 0, + "channel_closed": 0, + "consumer_deleted": 0, + "exchange_deleted": 0, + "queue_deleted": 0, + "vhost_deleted": 0, + "node_node_deleted": 0, + "channel_consumer_deleted": 0 + } + } +] diff --git a/plugins/inputs/rabbitmq/testdata/set2/overview.json b/plugins/inputs/rabbitmq/testdata/set2/overview.json new file mode 100644 index 0000000000000..51977d61cbcae --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/overview.json @@ -0,0 +1 @@ +{"management_version":"3.8.14","rates_mode":"basic","sample_retention_policies":{"global":[600,3600,28800,86400],"basic":[600,3600],"detailed":[600]},"exchange_types":[{"name":"direct","description":"AMQP direct exchange, as per the AMQP specification","enabled":true},{"name":"fanout","description":"AMQP fanout exchange, as per the AMQP specification","enabled":true},{"name":"headers","description":"AMQP headers exchange, as per the AMQP specification","enabled":true},{"name":"topic","description":"AMQP topic exchange, as per the AMQP specification","enabled":true}],"product_version":"3.8.14","product_name":"RabbitMQ","rabbitmq_version":"3.8.14","cluster_name":"rabbit@rmqserver","erlang_version":"23.2.7","erlang_full_version":"Erlang/OTP 23 [erts-11.1.8] [source] [64-bit] [smp:4:4] [ds:4:4:10] [async-threads:1]","disable_stats":false,"enable_queue_totals":false,"message_stats":{"ack":3736443,"ack_details":{"rate":0.0},"confirm":0,"confirm_details":{"rate":0.0},"deliver":3736446,"deliver_details":{"rate":0.0},"deliver_get":3736446,"deliver_get_details":{"rate":0.0},"deliver_no_ack":0,"deliver_no_ack_details":{"rate":0.0},"disk_reads":0,"disk_reads_details":{"rate":0.0},"disk_writes":55,"disk_writes_details":{"rate":0.0},"drop_unroutable":0,"drop_unroutable_details":{"rate":0.0},"get":0,"get_details":{"rate":0.0},"get_empty":0,"get_empty_details":{"rate":0.0},"get_no_ack":0,"get_no_ack_details":{"rate":0.0},"publish":770025,"publish_details":{"rate":0.0},"redeliver":1,"redeliver_details":{"rate":0.0},"return_unroutable":0,"return_unroutable_details":{"rate":0.0}},"churn_rates":{"channel_closed":2267,"channel_closed_details":{"rate":0.0},"channel_created":2310,"channel_created_details":{"rate":0.0},"connection_closed":2268,"connection_closed_details":{"rate":0.0},"connection_created":2310,"connection_created_details":{"rate":0.0},"queue_created":663,"queue_created_details":{"rate":0.0},"queue_declared":144281,"queue_declared_details":{"rate":0.0},"queue_deleted":629,"queue_deleted_details":{"rate":0.0}},"queue_totals":{"messages":30,"messages_details":{"rate":0.0},"messages_ready":30,"messages_ready_details":{"rate":0.0},"messages_unacknowledged":0,"messages_unacknowledged_details":{"rate":0.0}},"object_totals":{"channels":43,"connections":43,"consumers":37,"exchanges":8,"queues":34},"statistics_db_event_queue":0,"node":"rabbit@rmqserver","listeners":[{"node":"rabbit@rmqserver","protocol":"amqp","ip_address":"0.0.0.0","port":5672,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false}},{"node":"rabbit@rmqserver","protocol":"amqp","ip_address":"::","port":5672,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false}},{"node":"rabbit@rmqserver","protocol":"amqp/ssl","ip_address":"0.0.0.0","port":5671,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false,"versions":["tlsv1.3","tlsv1.2","tlsv1.1","tlsv1"],"cacertfile":"C:\\ProgramData\\Chain.pem","certfile":"C:\\ProgramData\\server.crt","keyfile":"C:\\ProgramData\\server.key","verify":"verify_peer","depth":3,"fail_if_no_peer_cert":false}},{"node":"rabbit@rmqserver","protocol":"amqp/ssl","ip_address":"::","port":5671,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false,"versions":["tlsv1.3","tlsv1.2","tlsv1.1","tlsv1"],"cacertfile":"C:\\ProgramData\\Chain.pem","certfile":"C:\\ProgramData\\server.crt","keyfile":"C:\\ProgramData\\server.key","verify":"verify_peer","depth":3,"fail_if_no_peer_cert":false}},{"node":"rabbit@rmqserver","protocol":"clustering","ip_address":"::","port":25672,"socket_opts":[]},{"node":"rabbit@rmqserver","protocol":"http","ip_address":"0.0.0.0","port":15672,"socket_opts":{"cowboy_opts":{"sendfile":false},"port":15672}},{"node":"rabbit@rmqserver","protocol":"http","ip_address":"::","port":15672,"socket_opts":{"cowboy_opts":{"sendfile":false},"port":15672}}],"contexts":[{"ssl_opts":[],"node":"rabbit@rmqserver","description":"RabbitMQ Management","path":"/","cowboy_opts":"[{sendfile,false}]","port":"15672"}]} diff --git a/plugins/inputs/rabbitmq/testdata/set2/queues.json b/plugins/inputs/rabbitmq/testdata/set2/queues.json new file mode 100644 index 0000000000000..6d8c2a831158a --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/queues.json @@ -0,0 +1,356 @@ +[ + { + "arguments": { + "x-expires": 300000 + }, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_seq_id": 180, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity" + }, + "consumer_capacity": 1, + "consumer_utilisation": 1, + "consumers": 1, + "durable": false, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 16174 + }, + "head_message_timestamp": null, + "idle_since": "2021-06-28 15:54:14", + "memory": 15840, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "message_stats": { + "ack": 180, + "ack_details": { + "rate": 0 + }, + "deliver": 180, + "deliver_details": { + "rate": 0 + }, + "deliver_get": 180, + "deliver_get_details": { + "rate": 0 + }, + "deliver_no_ack": 0, + "deliver_no_ack_details": { + "rate": 0 + }, + "get": 0, + "get_details": { + "rate": 0 + }, + "get_empty": 0, + "get_empty_details": { + "rate": 0 + }, + "get_no_ack": 0, + "get_no_ack_details": { + "rate": 0 + }, + "publish": 180, + "publish_details": { + "rate": 0 + }, + "redeliver": 0, + "redeliver_details": { + "rate": 0 + } + }, + "messages": 0, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 0, + "messages_persistent": 0, + "messages_ram": 0, + "messages_ready": 0, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 0, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "39fd2caf-63e5-41e3-c15a-ba8fa11434b2", + "node": "rabbit@rmqserver", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 11766294, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": "running", + "type": "classic", + "vhost": "/" + }, + { + "arguments": { + "x-expires": 300000 + }, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_seq_id": 177, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity" + }, + "consumer_capacity": 1, + "consumer_utilisation": 1, + "consumers": 1, + "durable": false, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 16205 + }, + "head_message_timestamp": null, + "idle_since": "2021-06-28 15:54:14", + "memory": 15600, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "message_stats": { + "ack": 177, + "ack_details": { + "rate": 0 + }, + "deliver": 177, + "deliver_details": { + "rate": 0 + }, + "deliver_get": 177, + "deliver_get_details": { + "rate": 0 + }, + "deliver_no_ack": 0, + "deliver_no_ack_details": { + "rate": 0 + }, + "get": 0, + "get_details": { + "rate": 0 + }, + "get_empty": 0, + "get_empty_details": { + "rate": 0 + }, + "get_no_ack": 0, + "get_no_ack_details": { + "rate": 0 + }, + "publish": 177, + "publish_details": { + "rate": 0 + }, + "redeliver": 0, + "redeliver_details": { + "rate": 0 + } + }, + "messages": 0, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 0, + "messages_persistent": 0, + "messages_ram": 0, + "messages_ready": 0, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 0, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "39fd2cb4-aa2d-c08b-457a-62d0893523a1", + "node": "rabbit@rmqserver", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 11706656, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": "running", + "type": "classic", + "vhost": "/" + }, + { + "arguments": { + "x-expires": 300000 + }, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_seq_id": 175, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity" + }, + "consumer_capacity": 1, + "consumer_utilisation": 1, + "consumers": 1, + "durable": false, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 16183 + }, + "head_message_timestamp": null, + "idle_since": "2021-06-28 15:54:15", + "memory": 15584, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "message_stats": { + "ack": 175, + "ack_details": { + "rate": 0 + }, + "deliver": 175, + "deliver_details": { + "rate": 0 + }, + "deliver_get": 175, + "deliver_get_details": { + "rate": 0 + }, + "deliver_no_ack": 0, + "deliver_no_ack_details": { + "rate": 0 + }, + "get": 0, + "get_details": { + "rate": 0 + }, + "get_empty": 0, + "get_empty_details": { + "rate": 0 + }, + "get_no_ack": 0, + "get_no_ack_details": { + "rate": 0 + }, + "publish": 175, + "publish_details": { + "rate": 0 + }, + "redeliver": 0, + "redeliver_details": { + "rate": 0 + } + }, + "messages": 0, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 0, + "messages_persistent": 0, + "messages_ram": 0, + "messages_ready": 0, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 0, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "39fd2cb5-3820-e01b-6e20-ba29d5553fc3", + "node": "rabbit@rmqserver", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 11649471, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": "running", + "type": "classic", + "vhost": "/" + } +] From 1a0e937d8a39fd093a50cea88daee7c88839b6c1 Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 29 Jun 2021 15:07:05 -0600 Subject: [PATCH 495/761] updated gopsutil to use a specific commit (#9446) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b90ad93e180b0..512506673e5dc 100644 --- a/go.mod +++ b/go.mod @@ -109,7 +109,7 @@ require ( github.com/riemann/riemann-go-client v0.5.0 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/sensu/sensu-go/api/core/v2 v2.6.0 - github.com/shirou/gopsutil v3.21.3+incompatible + github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/signalfx/golib/v3 v3.3.34 github.com/sirupsen/logrus v1.7.0 diff --git a/go.sum b/go.sum index 73461ad6ff6b4..3b55fe11f8a33 100644 --- a/go.sum +++ b/go.sum @@ -1345,8 +1345,8 @@ github.com/sensu/sensu-go/api/core/v2 v2.6.0 h1:hEKPHFZZNDuWTlKr7Kgm2yog65ZdkBUq github.com/sensu/sensu-go/api/core/v2 v2.6.0/go.mod h1:97IK4ZQuvVjWvvoLkp+NgrD6ot30WDRz3LEbFUc/N34= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v3.21.3+incompatible h1:uenXGGa8ESCQq+dbgtl916dmg6PSAz2cXov0uORQ9v8= -github.com/shirou/gopsutil v3.21.3+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible h1:Rucj22V2P6ktUBqN5auqjyxRHLXqNX6CteXBXifRrgY= +github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= From 138c204388819c47c031511012ced2b9624c5e8f Mon Sep 17 00:00:00 2001 From: Gabi Davar Date: Thu, 1 Jul 2021 17:59:44 +0300 Subject: [PATCH 496/761] add OpenTelemetry entry (#9464) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index b579cdd811cf8..5180d0d822817 100644 --- a/README.md +++ b/README.md @@ -291,6 +291,7 @@ For documentation on the latest development code see the [documentation index][d * [openldap](./plugins/inputs/openldap) * [openntpd](./plugins/inputs/openntpd) * [opensmtpd](./plugins/inputs/opensmtpd) +* [opentelemetry](./plugins/inputs/opentelemetry) * [openweathermap](./plugins/inputs/openweathermap) * [pf](./plugins/inputs/pf) * [pgbouncer](./plugins/inputs/pgbouncer) From 9b22161d922940af03be95d7b16ff51490d3320b Mon Sep 17 00:00:00 2001 From: Niek Bruins Date: Thu, 1 Jul 2021 22:42:48 +0200 Subject: [PATCH 497/761] Fix nil pointer error in knx_listener (#9444) --- plugins/inputs/knx_listener/knx_listener.go | 8 +- .../inputs/knx_listener/knx_listener_test.go | 91 +++++++++++++++---- 2 files changed, 78 insertions(+), 21 deletions(-) diff --git a/plugins/inputs/knx_listener/knx_listener.go b/plugins/inputs/knx_listener/knx_listener.go index 3bb93fbb2dde3..98f19e922f7ad 100644 --- a/plugins/inputs/knx_listener/knx_listener.go +++ b/plugins/inputs/knx_listener/knx_listener.go @@ -148,9 +148,11 @@ func (kl *KNXListener) listen() { // Match GA to DataPointType and measurement name ga := msg.Destination.String() target, ok := kl.gaTargetMap[ga] - if !ok && !kl.gaLogbook[ga] { - kl.Log.Infof("Ignoring message %+v for unknown GA %q", msg, ga) - kl.gaLogbook[ga] = true + if !ok { + if !kl.gaLogbook[ga] { + kl.Log.Infof("Ignoring message %+v for unknown GA %q", msg, ga) + kl.gaLogbook[ga] = true + } continue } diff --git a/plugins/inputs/knx_listener/knx_listener_test.go b/plugins/inputs/knx_listener/knx_listener_test.go index 973605886e3b6..b0502fbbc8e95 100644 --- a/plugins/inputs/knx_listener/knx_listener_test.go +++ b/plugins/inputs/knx_listener/knx_listener_test.go @@ -38,13 +38,31 @@ func setValue(data dpt.DatapointValue, value interface{}) error { return nil } +type TestMessage struct { + address string + dpt string + value interface{} +} + +func ProduceKnxEvent(t *testing.T, address string, datapoint string, value interface{}) *knx.GroupEvent { + addr, err := cemi.NewGroupAddrString(address) + require.NoError(t, err) + + data, ok := dpt.Produce(datapoint) + require.True(t, ok) + err = setValue(data, value) + require.NoError(t, err) + + return &knx.GroupEvent{ + Command: knx.GroupWrite, + Destination: addr, + Data: data.Pack(), + } +} + func TestRegularReceives_DPT(t *testing.T) { // Define the test-cases - var testcases = []struct { - address string - dpt string - value interface{} - }{ + var testcases = []TestMessage{ {"1/0/1", "1.001", true}, {"1/0/2", "1.002", false}, {"1/0/3", "1.003", true}, @@ -95,19 +113,8 @@ func TestRegularReceives_DPT(t *testing.T) { // Send the defined test data for _, testcase := range testcases { - addr, err := cemi.NewGroupAddrString(testcase.address) - require.NoError(t, err) - - data, ok := dpt.Produce(testcase.dpt) - require.True(t, ok) - err = setValue(data, testcase.value) - require.NoError(t, err) - - client.Send(knx.GroupEvent{ - Command: knx.GroupWrite, - Destination: addr, - Data: data.Pack(), - }) + event := ProduceKnxEvent(t, testcase.address, testcase.dpt, testcase.value) + client.Send(*event) } // Give the accumulator some time to collect the data @@ -133,3 +140,51 @@ func TestRegularReceives_DPT(t *testing.T) { assert.True(t, !tstart.After(m.Time)) } } + +func TestRegularReceives_MultipleMessages(t *testing.T) { + listener := KNXListener{ + ServiceType: "dummy", + Measurements: []Measurement{ + {"temperature", "1.001", []string{"1/1/1"}}, + }, + Log: testutil.Logger{Name: "knx_listener"}, + } + + acc := &testutil.Accumulator{} + + // Setup the listener to test + err := listener.Start(acc) + require.NoError(t, err) + client := listener.client.(*KNXDummyInterface) + + testMessages := []TestMessage{ + {"1/1/1", "1.001", true}, + {"1/1/1", "1.001", false}, + {"1/1/2", "1.001", false}, + {"1/1/2", "1.001", true}, + } + + for _, testcase := range testMessages { + event := ProduceKnxEvent(t, testcase.address, testcase.dpt, testcase.value) + client.Send(*event) + } + + // Give the accumulator some time to collect the data + acc.Wait(2) + + // Stop the listener + listener.Stop() + + // Check if we got what we expected + require.Len(t, acc.Metrics, 2) + + assert.Equal(t, "temperature", acc.Metrics[0].Measurement) + assert.Equal(t, "1/1/1", acc.Metrics[0].Tags["groupaddress"]) + assert.Len(t, acc.Metrics[0].Fields, 1) + assert.Equal(t, true, acc.Metrics[0].Fields["value"]) + + assert.Equal(t, "temperature", acc.Metrics[1].Measurement) + assert.Equal(t, "1/1/1", acc.Metrics[1].Tags["groupaddress"]) + assert.Len(t, acc.Metrics[1].Fields, 1) + assert.Equal(t, false, acc.Metrics[1].Fields["value"]) +} From 25413b2b6dc28e535d9a38771dbdd324b2a03551 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 1 Jul 2021 22:48:16 +0200 Subject: [PATCH 498/761] XPath parser extension to allow parsing of JSON, MessagePack and Protocol-buffers (#9277) --- config/config.go | 60 ++++--- docs/LICENSE_OF_DEPENDENCIES.md | 3 + go.mod | 4 + go.sum | 15 +- plugins/parsers/registry.go | 53 +++--- plugins/parsers/{xml => xpath}/README.md | 110 ++++++++---- plugins/parsers/xpath/json_document.go | 65 +++++++ plugins/parsers/xpath/msgpack_document.go | 39 +++++ plugins/parsers/{xml => xpath}/parser.go | 118 +++++++------ plugins/parsers/{xml => xpath}/parser_test.go | 70 ++++++-- .../parsers/xpath/protocolbuffer_document.go | 161 ++++++++++++++++++ .../parsers/xpath/testcases/addressbook.conf | 28 +++ .../parsers/xpath/testcases/addressbook.dat | 17 ++ .../parsers/xpath/testcases/addressbook.proto | 28 +++ .../{xml => xpath}/testcases/earthquakes.conf | 0 .../testcases/earthquakes.quakeml | 0 .../{xml => xpath}/testcases/multisensor.xml | 0 .../testcases/multisensor_explicit_basic.conf | 0 .../testcases/multisensor_explicit_batch.conf | 0 .../multisensor_selection_batch.conf | 0 .../xpath/testcases/openweathermap_5d.json | 127 ++++++++++++++ .../testcases/openweathermap_5d.xml | 0 .../xpath/testcases/openweathermap_json.conf | 29 ++++ .../testcases/openweathermap_xml.conf} | 2 +- plugins/parsers/xpath/testcases/tracker.msg | 1 + .../xpath/testcases/tracker_msgpack.conf | 24 +++ plugins/parsers/xpath/xml_document.go | 65 +++++++ 27 files changed, 869 insertions(+), 150 deletions(-) rename plugins/parsers/{xml => xpath}/README.md (78%) create mode 100644 plugins/parsers/xpath/json_document.go create mode 100644 plugins/parsers/xpath/msgpack_document.go rename plugins/parsers/{xml => xpath}/parser.go (80%) rename plugins/parsers/{xml => xpath}/parser_test.go (93%) create mode 100644 plugins/parsers/xpath/protocolbuffer_document.go create mode 100644 plugins/parsers/xpath/testcases/addressbook.conf create mode 100644 plugins/parsers/xpath/testcases/addressbook.dat create mode 100644 plugins/parsers/xpath/testcases/addressbook.proto rename plugins/parsers/{xml => xpath}/testcases/earthquakes.conf (100%) rename plugins/parsers/{xml => xpath}/testcases/earthquakes.quakeml (100%) rename plugins/parsers/{xml => xpath}/testcases/multisensor.xml (100%) rename plugins/parsers/{xml => xpath}/testcases/multisensor_explicit_basic.conf (100%) rename plugins/parsers/{xml => xpath}/testcases/multisensor_explicit_batch.conf (100%) rename plugins/parsers/{xml => xpath}/testcases/multisensor_selection_batch.conf (100%) create mode 100644 plugins/parsers/xpath/testcases/openweathermap_5d.json rename plugins/parsers/{xml => xpath}/testcases/openweathermap_5d.xml (100%) create mode 100644 plugins/parsers/xpath/testcases/openweathermap_json.conf rename plugins/parsers/{xml/testcases/openweathermap.conf => xpath/testcases/openweathermap_xml.conf} (95%) create mode 100644 plugins/parsers/xpath/testcases/tracker.msg create mode 100644 plugins/parsers/xpath/testcases/tracker_msgpack.conf create mode 100644 plugins/parsers/xpath/xml_document.go diff --git a/config/config.go b/config/config.go index 0c990078ed0a6..56beed8ee4910 100644 --- a/config/config.go +++ b/config/config.go @@ -19,6 +19,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/aggregators" "github.com/influxdata/telegraf/plugins/inputs" @@ -1296,6 +1297,11 @@ func (c *Config) buildParser(name string, tbl *ast.Table) (parsers.Parser, error } logger := models.NewLogger("parsers", config.DataFormat, name) models.SetLoggerOnPlugin(parser, logger) + if initializer, ok := parser.(telegraf.Initializer); ok { + if err := initializer.Init(); err != nil { + return nil, err + } + } return parser, nil } @@ -1366,24 +1372,36 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, c.getFieldString(tbl, "value_field_name", &pc.ValueFieldName) - //for XML parser - if node, ok := tbl.Fields["xml"]; ok { - if subtbls, ok := node.([]*ast.Table); ok { - pc.XMLConfig = make([]parsers.XMLConfig, len(subtbls)) - for i, subtbl := range subtbls { - subcfg := pc.XMLConfig[i] - c.getFieldString(subtbl, "metric_name", &subcfg.MetricQuery) - c.getFieldString(subtbl, "metric_selection", &subcfg.Selection) - c.getFieldString(subtbl, "timestamp", &subcfg.Timestamp) - c.getFieldString(subtbl, "timestamp_format", &subcfg.TimestampFmt) - c.getFieldStringMap(subtbl, "tags", &subcfg.Tags) - c.getFieldStringMap(subtbl, "fields", &subcfg.Fields) - c.getFieldStringMap(subtbl, "fields_int", &subcfg.FieldsInt) - c.getFieldString(subtbl, "field_selection", &subcfg.FieldSelection) - c.getFieldBool(subtbl, "field_name_expansion", &subcfg.FieldNameExpand) - c.getFieldString(subtbl, "field_name", &subcfg.FieldNameQuery) - c.getFieldString(subtbl, "field_value", &subcfg.FieldValueQuery) - pc.XMLConfig[i] = subcfg + //for XPath parser family + if choice.Contains(pc.DataFormat, []string{"xml", "xpath_json", "xpath_msgpack", "xpath_protobuf"}) { + c.getFieldString(tbl, "xpath_protobuf_file", &pc.XPathProtobufFile) + c.getFieldString(tbl, "xpath_protobuf_type", &pc.XPathProtobufType) + c.getFieldBool(tbl, "xpath_print_document", &pc.XPathPrintDocument) + + // Determine the actual xpath configuration tables + node, xpathOK := tbl.Fields["xpath"] + if !xpathOK { + // Add this for backward compatibility + node, xpathOK = tbl.Fields[pc.DataFormat] + } + if xpathOK { + if subtbls, ok := node.([]*ast.Table); ok { + pc.XPathConfig = make([]parsers.XPathConfig, len(subtbls)) + for i, subtbl := range subtbls { + subcfg := pc.XPathConfig[i] + c.getFieldString(subtbl, "metric_name", &subcfg.MetricQuery) + c.getFieldString(subtbl, "metric_selection", &subcfg.Selection) + c.getFieldString(subtbl, "timestamp", &subcfg.Timestamp) + c.getFieldString(subtbl, "timestamp_format", &subcfg.TimestampFmt) + c.getFieldStringMap(subtbl, "tags", &subcfg.Tags) + c.getFieldStringMap(subtbl, "fields", &subcfg.Fields) + c.getFieldStringMap(subtbl, "fields_int", &subcfg.FieldsInt) + c.getFieldString(subtbl, "field_selection", &subcfg.FieldSelection) + c.getFieldBool(subtbl, "field_name_expansion", &subcfg.FieldNameExpand) + c.getFieldString(subtbl, "field_name", &subcfg.FieldNameQuery) + c.getFieldString(subtbl, "field_value", &subcfg.FieldValueQuery) + pc.XPathConfig[i] = subcfg + } } } } @@ -1551,13 +1569,15 @@ func (c *Config) missingTomlField(_ reflect.Type, key string) error { "grok_custom_pattern_files", "grok_custom_patterns", "grok_named_patterns", "grok_patterns", "grok_timezone", "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields", "influx_uint_support", "interval", "json_name_key", "json_query", "json_strict", - "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_units", "json_timezone", + "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_units", "json_timezone", "json_v2", "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", "tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates", - "value_field_name", "wavefront_source_override", "wavefront_use_strict", "xml", "json_v2": + "value_field_name", "wavefront_source_override", "wavefront_use_strict", + "xml", "xpath", "xpath_json", "xpath_msgpack", "xpath_protobuf", "xpath_print_document", + "xpath_protobuf_file", "xpath_protobuf_type": // ignore fields that are common to all plugins. default: diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index b88ec2acfe79a..a801b109b5b19 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -23,6 +23,7 @@ following works: - github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING) - github.com/aliyun/alibaba-cloud-sdk-go [Apache License 2.0](https://github.com/aliyun/alibaba-cloud-sdk-go/blob/master/LICENSE) - github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) +- github.com/antchfx/jsonquery [MIT License](https://github.com/antchfx/jsonquery/blob/master/LICENSE) - github.com/antchfx/xmlquery [MIT License](https://github.com/antchfx/xmlquery/blob/master/LICENSE) - github.com/antchfx/xpath [MIT License](https://github.com/antchfx/xpath/blob/master/LICENSE) - github.com/apache/arrow/go/arrow [Apache License 2.0](https://github.com/apache/arrow/blob/master/LICENSE.txt) @@ -64,6 +65,7 @@ following works: - github.com/docker/docker [Apache License 2.0](https://github.com/docker/docker/blob/master/LICENSE) - github.com/docker/go-connections [Apache License 2.0](https://github.com/docker/go-connections/blob/master/LICENSE) - github.com/docker/go-units [Apache License 2.0](https://github.com/docker/go-units/blob/master/LICENSE) +- github.com/doclambda/protobufquery [MIT License](https://github.com/doclambda/protobufquery/blob/master/LICENSE) - github.com/dynatrace-oss/dynatrace-metric-utils-go [Apache License 2.0](https://github.com/dynatrace-oss/dynatrace-metric-utils-go/blob/master/LICENSE) - github.com/eapache/go-resiliency [MIT License](https://github.com/eapache/go-resiliency/blob/master/LICENSE) - github.com/eapache/go-xerial-snappy [MIT License](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE) @@ -131,6 +133,7 @@ following works: - github.com/jaegertracing/jaeger [Apache License 2.0](https://github.com/jaegertracing/jaeger/blob/master/LICENSE) - github.com/james4k/rcon [MIT License](https://github.com/james4k/rcon/blob/master/LICENSE) - github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE) +- github.com/jhump/protoreflect [Apache License 2.0](https://github.com/jhump/protoreflect/blob/master/LICENSE) - github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) - github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE) - github.com/json-iterator/go [MIT License](https://github.com/json-iterator/go/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 512506673e5dc..7d9f66c0a315c 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 + github.com/antchfx/jsonquery v1.1.4 github.com/antchfx/xmlquery v1.3.5 github.com/antchfx/xpath v1.1.11 github.com/apache/thrift v0.13.0 @@ -45,6 +46,7 @@ require ( github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 github.com/docker/docker v20.10.6+incompatible + github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 github.com/dynatrace-oss/dynatrace-metric-utils-go v0.1.0 github.com/eclipse/paho.mqtt.golang v1.3.0 github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect @@ -82,6 +84,7 @@ require ( github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 github.com/jackc/pgx/v4 v4.6.0 github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a + github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca github.com/jmespath/go-jmespath v0.4.0 github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 @@ -142,6 +145,7 @@ require ( google.golang.org/api v0.29.0 google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a google.golang.org/grpc v1.37.0 + google.golang.org/protobuf v1.26.0 gopkg.in/djherbis/times.v1 v1.2.0 gopkg.in/fatih/pool.v2 v2.0.0 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 diff --git a/go.sum b/go.sum index 3b55fe11f8a33..592cf33db6bbb 100644 --- a/go.sum +++ b/go.sum @@ -182,8 +182,11 @@ github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004/go.mod h1:pUKYbK5JQ+1Dfxk80P0q github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/antchfx/jsonquery v1.1.4 h1:+OlFO3QS9wjU0MKx9MgHm5f6o6hdd4e9mUTp0wTjxlM= +github.com/antchfx/jsonquery v1.1.4/go.mod h1:cHs8r6Bymd8j6HI6Ej1IJbjahKvLBcIEh54dfmo+E9A= github.com/antchfx/xmlquery v1.3.5 h1:I7TuBRqsnfFuL11ruavGm911Awx9IqSdiU6W/ztSmVw= github.com/antchfx/xmlquery v1.3.5/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= +github.com/antchfx/xpath v1.1.7/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= github.com/antchfx/xpath v1.1.11 h1:WOFtK8TVAjLm3lbgqeP0arlHpvCEeTANeWZ/csPpJkQ= github.com/antchfx/xpath v1.1.11/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= @@ -462,6 +465,8 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 h1:27379cxrsKlr7hAnW/xrusefspUPjqHVRW1K/bZgfGw= +github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60/go.mod h1:8Ia4zp86glrUhC29AAdK9hwTYh8RB6v0WRCtpplYqEg= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ= @@ -760,6 +765,7 @@ github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEo github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -927,6 +933,8 @@ github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQD github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca h1:a0GZUdb+qnutF8shJxr2qs2qT3fnF+ptxTxPB8+oIvk= +github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -1150,6 +1158,7 @@ github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 h1:9YEHXplqlVkOltThchh+RxeO github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk= github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= @@ -1853,7 +1862,9 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1963,6 +1974,7 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= @@ -2051,8 +2063,9 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY= diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 4e01fb0a630fe..cc2102c9532d2 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -19,7 +19,7 @@ import ( "github.com/influxdata/telegraf/plugins/parsers/prometheusremotewrite" "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/plugins/parsers/wavefront" - "github.com/influxdata/telegraf/plugins/parsers/xml" + "github.com/influxdata/telegraf/plugins/parsers/xpath" ) type ParserFunc func() (Parser, error) @@ -159,16 +159,17 @@ type Config struct { // Value configuration ValueFieldName string `toml:"value_field_name"` - // XML configuration - XMLConfig []XMLConfig `toml:"xml"` + // XPath configuration + XPathPrintDocument bool `toml:"xpath_print_document"` + XPathProtobufFile string `toml:"xpath_protobuf_file"` + XPathProtobufType string `toml:"xpath_protobuf_type"` + XPathConfig []XPathConfig // JSONPath configuration JSONV2Config []JSONV2Config `toml:"json_v2"` } -type XMLConfig struct { - xml.Config -} +type XPathConfig xpath.Config type JSONV2Config struct { json_v2.Config @@ -261,8 +262,15 @@ func NewParser(config *Config) (Parser, error) { parser, err = NewPrometheusParser(config.DefaultTags) case "prometheusremotewrite": parser, err = NewPrometheusRemoteWriteParser(config.DefaultTags) - case "xml": - parser, err = NewXMLParser(config.MetricName, config.DefaultTags, config.XMLConfig) + case "xml", "xpath_json", "xpath_msgpack", "xpath_protobuf": + parser = &xpath.Parser{ + Format: config.DataFormat, + ProtobufMessageDef: config.XPathProtobufFile, + ProtobufMessageType: config.XPathProtobufType, + PrintDocument: config.XPathPrintDocument, + DefaultTags: config.DefaultTags, + Configs: NewXPathParserConfigs(config.MetricName, config.XPathConfig), + } case "json_v2": parser, err = NewJSONPathParser(config.JSONV2Config) default: @@ -382,30 +390,15 @@ func NewPrometheusRemoteWriteParser(defaultTags map[string]string) (Parser, erro }, nil } -func NewXMLParser(metricName string, defaultTags map[string]string, xmlConfigs []XMLConfig) (Parser, error) { +func NewXPathParserConfigs(metricName string, cfgs []XPathConfig) []xpath.Config { // Convert the config formats which is a one-to-one copy - configs := make([]xml.Config, len(xmlConfigs)) - for i, cfg := range xmlConfigs { - configs[i].MetricName = metricName - configs[i].MetricQuery = cfg.MetricQuery - configs[i].Selection = cfg.Selection - configs[i].Timestamp = cfg.Timestamp - configs[i].TimestampFmt = cfg.TimestampFmt - configs[i].Tags = cfg.Tags - configs[i].Fields = cfg.Fields - configs[i].FieldsInt = cfg.FieldsInt - - configs[i].FieldSelection = cfg.FieldSelection - configs[i].FieldNameQuery = cfg.FieldNameQuery - configs[i].FieldValueQuery = cfg.FieldValueQuery - - configs[i].FieldNameExpand = cfg.FieldNameExpand + configs := make([]xpath.Config, 0, len(cfgs)) + for _, cfg := range cfgs { + config := xpath.Config(cfg) + config.MetricName = metricName + configs = append(configs, config) } - - return &xml.Parser{ - Configs: configs, - DefaultTags: defaultTags, - }, nil + return configs } func NewJSONPathParser(jsonv2config []JSONV2Config) (Parser, error) { diff --git a/plugins/parsers/xml/README.md b/plugins/parsers/xpath/README.md similarity index 78% rename from plugins/parsers/xml/README.md rename to plugins/parsers/xpath/README.md index 02b3c4530ecb6..09823bbacf982 100644 --- a/plugins/parsers/xml/README.md +++ b/plugins/parsers/xpath/README.md @@ -1,13 +1,24 @@ -# XML +# XPath -The XML data format parser parses a [XML][xml] string into metric fields using [XPath][xpath] expressions. For supported -XPath functions check [the underlying XPath library][xpath lib]. +The XPath data format parser parses different formats into metric fields using [XPath][xpath] expressions. -**NOTE:** The type of fields are specified using [XPath functions][xpath lib]. The only exception are *integer* fields -that need to be specified in a `fields_int` section. +For supported XPath functions check [the underlying XPath library][xpath lib]. -### Configuration +**NOTE:** The type of fields are specified using [XPath functions][xpath lib]. The only exception are *integer* fields that need to be specified in a `fields_int` section. +### Supported data formats +| name | `data_format` setting | comment | +| --------------------------------------- | --------------------- | ------- | +| [Extensible Markup Language (XML)][xml] | `"xml"` | | +| [JSON][json] | `"xpath_json"` | | +| [MessagePack][msgpack] | `"xpath_msgpack"` | | +| [Protocol buffers][protobuf] | `"xpath_protobuf"` | [see additional parameters](protocol-buffers-additiona-settings)| + +#### Protocol buffers additional settings +For using the protocol-buffer format you need to specify a protocol buffer definition file (`.proto`) in `xpath_protobuf_file`, Furthermore, you need to specify which message type you want to use via `xpath_protobuf_type`. + +### Configuration (explicit) +In this configuration mode, you explicitly specify the field and tags you want to scrape out of your data. ```toml [[inputs.file]] files = ["example.xml"] @@ -18,44 +29,56 @@ that need to be specified in a `fields_int` section. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "xml" + ## PROTOCOL BUFFER definitions + ## Protocol buffer definition file + # xpath_protobuf_file = "sparkplug_b.proto" + ## Name of the protocol buffer message type to use in a fully qualified form. + # xpath_protobuf_type = ""org.eclipse.tahu.protobuf.Payload"" + + ## Print the internal XML document when in debug logging mode. + ## This is especially useful when using the parser with non-XML formats like protocol buffers + ## to get an idea on the expression necessary to derive fields etc. + # xpath_print_document = false + ## Multiple parsing sections are allowed - [[inputs.file.xml]] + [[inputs.file.xpath]] ## Optional: XPath-query to select a subset of nodes from the XML document. - #metric_selection = "/Bus/child::Sensor" + # metric_selection = "/Bus/child::Sensor" ## Optional: XPath-query to set the metric (measurement) name. - #metric_name = "string('example')" + # metric_name = "string('example')" ## Optional: Query to extract metric timestamp. ## If not specified the time of execution is used. - #timestamp = "/Gateway/Timestamp" + # timestamp = "/Gateway/Timestamp" ## Optional: Format of the timestamp determined by the query above. ## This can be any of "unix", "unix_ms", "unix_us", "unix_ns" or a valid Golang ## time format. If not specified, a "unix" timestamp (in seconds) is expected. - #timestamp_format = "2006-01-02T15:04:05Z" + # timestamp_format = "2006-01-02T15:04:05Z" ## Tag definitions using the given XPath queries. - [inputs.file.xml.tags] + [inputs.file.xpath.tags] name = "substring-after(Sensor/@name, ' ')" device = "string('the ultimate sensor')" ## Integer field definitions using XPath queries. - [inputs.file.xml.fields_int] + [inputs.file.xpath.fields_int] consumers = "Variable/@consumers" ## Non-integer field definitions using XPath queries. ## The field type is defined using XPath expressions such as number(), boolean() or string(). If no conversion is performed the field will be of type string. - [inputs.file.xml.fields] + [inputs.file.xpath.fields] temperature = "number(Variable/@temperature)" power = "number(Variable/@power)" frequency = "number(Variable/@frequency)" ok = "Mode != 'ok'" ``` -A configuration can contain muliple *xml* subsections for e.g. the file plugin to process the xml-string multiple times. -Consult the [XPath syntax][xpath] and the [underlying library's functions][xpath lib] for details and help regarding XPath queries. Consider using an XPath tester such as [xpather.com][xpather] or [Code Beautify's XPath Tester][xpath tester] for help developing and debugging +A configuration can contain muliple *xpath* subsections for e.g. the file plugin to process the xml-string multiple times. Consult the [XPath syntax][xpath] and the [underlying library's functions][xpath lib] for details and help regarding XPath queries. Consider using an XPath tester such as [xpather.com][xpather] or [Code Beautify's XPath Tester][xpath tester] for help developing and debugging your query. +## Configuration (batch) + Alternatively to the configuration above, fields can also be specified in a batch way. So contrary to specify the fields in a section, you can define a `name` and a `value` selector used to determine the name and value of the fields in the metric. @@ -69,21 +92,31 @@ metric. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "xml" + ## Name of the protocol buffer type to use. + ## This is only relevant when parsing protocol buffers and must contain the fully qualified + ## name of the type e.g. "org.eclipse.tahu.protobuf.Payload". + # xpath_protobuf_type = "" + + ## Print the internal XML document when in debug logging mode. + ## This is especially useful when using the parser with non-XML formats like protocol buffers + ## to get an idea on the expression necessary to derive fields etc. + # xpath_print_document = false + ## Multiple parsing sections are allowed - [[inputs.file.xml]] + [[inputs.file.xpath]] ## Optional: XPath-query to select a subset of nodes from the XML document. metric_selection = "/Bus/child::Sensor" ## Optional: XPath-query to set the metric (measurement) name. - #metric_name = "string('example')" + # metric_name = "string('example')" ## Optional: Query to extract metric timestamp. ## If not specified the time of execution is used. - #timestamp = "/Gateway/Timestamp" + # timestamp = "/Gateway/Timestamp" ## Optional: Format of the timestamp determined by the query above. ## This can be any of "unix", "unix_ms", "unix_us", "unix_ns" or a valid Golang ## time format. If not specified, a "unix" timestamp (in seconds) is expected. - #timestamp_format = "2006-01-02T15:04:05Z" + # timestamp_format = "2006-01-02T15:04:05Z" ## Field specifications using a selector. field_selection = "child::*" @@ -91,15 +124,15 @@ metric. ## These options are only to be used in combination with 'field_selection'! ## By default the node name and node content is used if a field-selection ## is specified. - #field_name = "name()" - #field_value = "." + # field_name = "name()" + # field_value = "." ## Optional: Expand field names relative to the selected node ## This allows to flatten out nodes with non-unique names in the subtree - #field_name_expansion = false + # field_name_expansion = false ## Tag definitions using the given XPath queries. - [inputs.file.xml.tags] + [inputs.file.xpath.tags] name = "substring-after(Sensor/@name, ' ')" device = "string('the ultimate sensor')" @@ -215,14 +248,14 @@ Config: files = ["example.xml"] data_format = "xml" - [[inputs.file.xml]] - [inputs.file.xml.tags] + [[inputs.file.xpath]] + [inputs.file.xpath.tags] gateway = "substring-before(/Gateway/Name, ' ')" - [inputs.file.xml.fields_int] + [inputs.file.xpath.fields_int] seqnr = "/Gateway/Sequence" - [inputs.file.xml.fields] + [inputs.file.xpath.fields] ok = "/Gateway/Status = 'ok'" ``` @@ -244,16 +277,16 @@ Config: files = ["example.xml"] data_format = "xml" - [[inputs.file.xml]] + [[inputs.file.xpath]] metric_name = "name(/Gateway/Status)" timestamp = "/Gateway/Timestamp" timestamp_format = "2006-01-02T15:04:05Z" - [inputs.file.xml.tags] + [inputs.file.xpath.tags] gateway = "substring-before(/Gateway/Name, ' ')" - [inputs.file.xml.fields] + [inputs.file.xpath.fields] ok = "/Gateway/Status = 'ok'" ``` @@ -273,7 +306,7 @@ Config: files = ["example.xml"] data_format = "xml" - [[inputs.file.xml]] + [[inputs.file.xpath]] metric_selection = "/Bus/child::Sensor" metric_name = "string('sensors')" @@ -281,13 +314,13 @@ Config: timestamp = "/Gateway/Timestamp" timestamp_format = "2006-01-02T15:04:05Z" - [inputs.file.xml.tags] + [inputs.file.xpath.tags] name = "substring-after(@name, ' ')" - [inputs.file.xml.fields_int] + [inputs.file.xpath.fields_int] consumers = "Variable/@consumers" - [inputs.file.xml.fields] + [inputs.file.xpath.fields] temperature = "number(Variable/@temperature)" power = "number(Variable/@power)" frequency = "number(Variable/@frequency)" @@ -314,7 +347,7 @@ Config: files = ["example.xml"] data_format = "xml" - [[inputs.file.xml]] + [[inputs.file.xpath]] metric_selection = "/Bus/child::Sensor" metric_name = "string('sensors')" @@ -325,7 +358,7 @@ Config: field_name = "name(@*[1])" field_value = "number(@*[1])" - [inputs.file.xml.tags] + [inputs.file.xpath.tags] name = "substring-after(@name, ' ')" ``` @@ -340,6 +373,9 @@ Using the `metric_selection` option we select all `Sensor` nodes in the XML docu For each selected *field-node* we use `field_name` and `field_value` to determining the field's name and value, respectively. The `field_name` derives the name of the first attribute of the node, while `field_value` derives the value of the first attribute and converts the result to a number. [xpath lib]: https://github.com/antchfx/xpath +[json]: https://www.json.org/ +[msgpack]: https://msgpack.org/ +[protobuf]: https://developers.google.com/protocol-buffers [xml]: https://www.w3.org/XML/ [xpath]: https://www.w3.org/TR/xpath/ [xpather]: http://xpather.com/ diff --git a/plugins/parsers/xpath/json_document.go b/plugins/parsers/xpath/json_document.go new file mode 100644 index 0000000000000..155ed6335bbfe --- /dev/null +++ b/plugins/parsers/xpath/json_document.go @@ -0,0 +1,65 @@ +package xpath + +import ( + "strings" + + "github.com/antchfx/jsonquery" + path "github.com/antchfx/xpath" +) + +type jsonDocument struct{} + +func (d *jsonDocument) Parse(buf []byte) (dataNode, error) { + return jsonquery.Parse(strings.NewReader(string(buf))) +} + +func (d *jsonDocument) QueryAll(node dataNode, expr string) ([]dataNode, error) { + // If this panics it's a programming error as we changed the document type while processing + native, err := jsonquery.QueryAll(node.(*jsonquery.Node), expr) + if err != nil { + return nil, err + } + + nodes := make([]dataNode, len(native)) + for i, n := range native { + nodes[i] = n + } + return nodes, nil +} + +func (d *jsonDocument) CreateXPathNavigator(node dataNode) path.NodeNavigator { + // If this panics it's a programming error as we changed the document type while processing + return jsonquery.CreateXPathNavigator(node.(*jsonquery.Node)) +} + +func (d *jsonDocument) GetNodePath(node, relativeTo dataNode, sep string) string { + names := make([]string, 0) + + // If these panic it's a programming error as we changed the document type while processing + nativeNode := node.(*jsonquery.Node) + nativeRelativeTo := relativeTo.(*jsonquery.Node) + + // Climb up the tree and collect the node names + n := nativeNode.Parent + for n != nil && n != nativeRelativeTo { + names = append(names, n.Data) + n = n.Parent + } + + if len(names) < 1 { + return "" + } + + // Construct the nodes + nodepath := "" + for _, name := range names { + nodepath = name + sep + nodepath + } + + return nodepath[:len(nodepath)-1] +} + +func (d *jsonDocument) OutputXML(node dataNode) string { + native := node.(*jsonquery.Node) + return native.OutputXML() +} diff --git a/plugins/parsers/xpath/msgpack_document.go b/plugins/parsers/xpath/msgpack_document.go new file mode 100644 index 0000000000000..6f5102deefdf4 --- /dev/null +++ b/plugins/parsers/xpath/msgpack_document.go @@ -0,0 +1,39 @@ +package xpath + +import ( + "bytes" + "fmt" + + "github.com/tinylib/msgp/msgp" + + "github.com/antchfx/jsonquery" + path "github.com/antchfx/xpath" +) + +type msgpackDocument jsonDocument + +func (d *msgpackDocument) Parse(buf []byte) (dataNode, error) { + var json bytes.Buffer + + // Unmarshal the message-pack binary message to JSON and proceed with the jsonquery class + if _, err := msgp.UnmarshalAsJSON(&json, buf); err != nil { + return nil, fmt.Errorf("unmarshalling to json failed: %v", err) + } + return jsonquery.Parse(&json) +} + +func (d *msgpackDocument) QueryAll(node dataNode, expr string) ([]dataNode, error) { + return (*jsonDocument)(d).QueryAll(node, expr) +} + +func (d *msgpackDocument) CreateXPathNavigator(node dataNode) path.NodeNavigator { + return (*jsonDocument)(d).CreateXPathNavigator(node) +} + +func (d *msgpackDocument) GetNodePath(node, relativeTo dataNode, sep string) string { + return (*jsonDocument)(d).GetNodePath(node, relativeTo, sep) +} + +func (d *msgpackDocument) OutputXML(node dataNode) string { + return (*jsonDocument)(d).OutputXML(node) +} diff --git a/plugins/parsers/xml/parser.go b/plugins/parsers/xpath/parser.go similarity index 80% rename from plugins/parsers/xml/parser.go rename to plugins/parsers/xpath/parser.go index 9282aab1f2e25..52224530a9250 100644 --- a/plugins/parsers/xml/parser.go +++ b/plugins/parsers/xpath/parser.go @@ -1,4 +1,4 @@ -package xml +package xpath import ( "fmt" @@ -6,17 +6,32 @@ import ( "strings" "time" - "github.com/antchfx/xmlquery" - "github.com/antchfx/xpath" + path "github.com/antchfx/xpath" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" ) +type dataNode interface{} + +type dataDocument interface { + Parse(buf []byte) (dataNode, error) + QueryAll(node dataNode, expr string) ([]dataNode, error) + CreateXPathNavigator(node dataNode) path.NodeNavigator + GetNodePath(node, relativeTo dataNode, sep string) string + OutputXML(node dataNode) string +} + type Parser struct { - Configs []Config - DefaultTags map[string]string - Log telegraf.Logger + Format string + ProtobufMessageDef string + ProtobufMessageType string + PrintDocument bool + Configs []Config + DefaultTags map[string]string + Log telegraf.Logger + + document dataDocument } type Config struct { @@ -35,14 +50,42 @@ type Config struct { FieldNameExpand bool `toml:"field_name_expansion"` } +func (p *Parser) Init() error { + switch p.Format { + case "", "xml": + p.document = &xmlDocument{} + case "xpath_json": + p.document = &jsonDocument{} + case "xpath_msgpack": + p.document = &msgpackDocument{} + case "xpath_protobuf": + pbdoc := protobufDocument{ + MessageDefinition: p.ProtobufMessageDef, + MessageType: p.ProtobufMessageType, + Log: p.Log, + } + if err := pbdoc.Init(); err != nil { + return err + } + p.document = &pbdoc + default: + return fmt.Errorf("unknown data-format %q for xpath parser", p.Format) + } + + return nil +} + func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { t := time.Now() // Parse the XML - doc, err := xmlquery.Parse(strings.NewReader(string(buf))) + doc, err := p.document.Parse(buf) if err != nil { return nil, err } + if p.PrintDocument { + p.Log.Debugf("XML document equivalent: %q", p.document.OutputXML(doc)) + } // Queries metrics := make([]telegraf.Metric, 0) @@ -50,7 +93,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { if len(config.Selection) == 0 { config.Selection = "/" } - selectedNodes, err := xmlquery.QueryAll(doc, config.Selection) + selectedNodes, err := p.document.QueryAll(doc, config.Selection) if err != nil { return nil, err } @@ -82,14 +125,14 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case 1: config := p.Configs[0] - doc, err := xmlquery.Parse(strings.NewReader(line)) + doc, err := p.document.Parse([]byte(line)) if err != nil { return nil, err } selected := doc if len(config.Selection) > 0 { - selectedNodes, err := xmlquery.QueryAll(doc, config.Selection) + selectedNodes, err := p.document.QueryAll(doc, config.Selection) if err != nil { return nil, err } @@ -111,7 +154,7 @@ func (p *Parser) SetDefaultTags(tags map[string]string) { p.DefaultTags = tags } -func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, config Config) (telegraf.Metric, error) { +func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config Config) (telegraf.Metric, error) { var timestamp time.Time var metricname string @@ -119,7 +162,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c // otherwise. metricname = config.MetricName if len(config.MetricQuery) > 0 { - v, err := executeQuery(doc, selected, config.MetricQuery) + v, err := p.executeQuery(doc, selected, config.MetricQuery) if err != nil { return nil, fmt.Errorf("failed to query metric name: %v", err) } @@ -130,7 +173,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c // with the queried timestamp if an expresion was specified. timestamp = starttime if len(config.Timestamp) > 0 { - v, err := executeQuery(doc, selected, config.Timestamp) + v, err := p.executeQuery(doc, selected, config.Timestamp) if err != nil { return nil, fmt.Errorf("failed to query timestamp: %v", err) } @@ -177,7 +220,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c tags := make(map[string]string) for name, query := range config.Tags { // Execute the query and cast the returned values into strings - v, err := executeQuery(doc, selected, query) + v, err := p.executeQuery(doc, selected, query) if err != nil { return nil, fmt.Errorf("failed to query tag '%s': %v", name, err) } @@ -202,7 +245,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c fields := make(map[string]interface{}) for name, query := range config.FieldsInt { // Execute the query and cast the returned values into integers - v, err := executeQuery(doc, selected, query) + v, err := p.executeQuery(doc, selected, query) if err != nil { return nil, fmt.Errorf("failed to query field (int) '%s': %v", name, err) } @@ -228,7 +271,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c for name, query := range config.Fields { // Execute the query and store the result in fields - v, err := executeQuery(doc, selected, query) + v, err := p.executeQuery(doc, selected, query) if err != nil { return nil, fmt.Errorf("failed to query field '%s': %v", name, err) } @@ -247,14 +290,14 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c } // Query all fields - selectedFieldNodes, err := xmlquery.QueryAll(selected, config.FieldSelection) + selectedFieldNodes, err := p.document.QueryAll(selected, config.FieldSelection) if err != nil { return nil, err } p.Log.Debugf("Number of selected field nodes: %d", len(selectedFieldNodes)) if len(selectedFieldNodes) > 0 && selectedFieldNodes[0] != nil { for _, selectedfield := range selectedFieldNodes { - n, err := executeQuery(doc, selectedfield, fieldnamequery) + n, err := p.executeQuery(doc, selectedfield, fieldnamequery) if err != nil { return nil, fmt.Errorf("failed to query field name with query '%s': %v", fieldnamequery, err) } @@ -262,13 +305,13 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c if !ok { return nil, fmt.Errorf("failed to query field name with query '%s': result is not a string (%v)", fieldnamequery, n) } - v, err := executeQuery(doc, selectedfield, fieldvaluequery) + v, err := p.executeQuery(doc, selectedfield, fieldvaluequery) if err != nil { return nil, fmt.Errorf("failed to query field value for '%s': %v", name, err) } path := name if config.FieldNameExpand { - p := getNodePath(selectedfield, selected, "_") + p := p.document.GetNodePath(selectedfield, selected, "_") if len(p) > 0 { path = p + "_" + name } @@ -295,30 +338,7 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected *xmlquery.Node, c return metric.New(metricname, tags, fields, timestamp), nil } -func getNodePath(node, relativeTo *xmlquery.Node, sep string) string { - names := make([]string, 0) - - // Climb up the tree and collect the node names - n := node.Parent - for n != nil && n != relativeTo { - names = append(names, n.Data) - n = n.Parent - } - - if len(names) < 1 { - return "" - } - - // Construct the nodes - path := "" - for _, name := range names { - path = name + sep + path - } - - return path[:len(path)-1] -} - -func executeQuery(doc, selected *xmlquery.Node, query string) (r interface{}, err error) { +func (p *Parser) executeQuery(doc, selected dataNode, query string) (r interface{}, err error) { // Check if the query is relative or absolute and set the root for the query root := selected if strings.HasPrefix(query, "/") { @@ -326,7 +346,7 @@ func executeQuery(doc, selected *xmlquery.Node, query string) (r interface{}, er } // Compile the query - expr, err := xpath.Compile(query) + expr, err := path.Compile(query) if err != nil { return nil, fmt.Errorf("failed to compile query '%s': %v", query, err) } @@ -334,8 +354,8 @@ func executeQuery(doc, selected *xmlquery.Node, query string) (r interface{}, er // Evaluate the compiled expression and handle returned node-iterators // separately. Those iterators will be returned for queries directly // referencing a node (value or attribute). - n := expr.Evaluate(xmlquery.CreateXPathNavigator(root)) - if iter, ok := n.(*xpath.NodeIterator); ok { + n := expr.Evaluate(p.document.CreateXPathNavigator(root)) + if iter, ok := n.(*path.NodeIterator); ok { // We got an iterator, so take the first match and get the referenced // property. This will always be a string. if iter.MoveNext() { @@ -399,7 +419,7 @@ func splitLastPathElement(query string) []string { return elements } -func (p *Parser) debugEmptyQuery(operation string, root *xmlquery.Node, initialquery string) { +func (p *Parser) debugEmptyQuery(operation string, root dataNode, initialquery string) { if p.Log == nil { return } @@ -415,7 +435,7 @@ func (p *Parser) debugEmptyQuery(operation string, root *xmlquery.Node, initialq } for i := len(parts) - 1; i >= 0; i-- { q := parts[i] - nodes, err := xmlquery.QueryAll(root, q) + nodes, err := p.document.QueryAll(root, q) if err != nil { p.Log.Debugf("executing query %q in %s failed: %v", q, operation, err) return diff --git a/plugins/parsers/xml/parser_test.go b/plugins/parsers/xpath/parser_test.go similarity index 93% rename from plugins/parsers/xml/parser_test.go rename to plugins/parsers/xpath/parser_test.go index 023e9d20a6090..46e4dba690102 100644 --- a/plugins/parsers/xml/parser_test.go +++ b/plugins/parsers/xpath/parser_test.go @@ -1,4 +1,4 @@ -package xml +package xpath import ( "io/ioutil" @@ -12,7 +12,6 @@ import ( "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -127,6 +126,7 @@ func TestParseInvalidXML(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) _, err := parser.ParseLine(tt.input) require.Error(t, err) @@ -163,6 +163,7 @@ func TestInvalidTypeQueriesFail(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) _, err := parser.ParseLine(tt.input) require.Error(t, err) @@ -228,6 +229,7 @@ func TestInvalidTypeQueries(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) actual, err := parser.ParseLine(tt.input) require.NoError(t, err) @@ -357,6 +359,7 @@ func TestParseTimestamps(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) actual, err := parser.ParseLine(tt.input) require.NoError(t, err) @@ -561,6 +564,7 @@ func TestParseSingleValues(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) actual, err := parser.ParseLine(tt.input) require.NoError(t, err) @@ -772,6 +776,7 @@ func TestParseSingleAttributes(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) actual, err := parser.ParseLine(tt.input) require.NoError(t, err) @@ -858,6 +863,7 @@ func TestParseMultiValues(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) actual, err := parser.ParseLine(tt.input) require.NoError(t, err) @@ -970,6 +976,7 @@ func TestParseMultiNodes(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) actual, err := parser.Parse([]byte(tt.input)) require.NoError(t, err) @@ -1015,6 +1022,7 @@ func TestParseMetricQuery(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := &Parser{Configs: tt.configs, DefaultTags: tt.defaultTags, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) actual, err := parser.ParseLine(tt.input) require.NoError(t, err) @@ -1080,11 +1088,10 @@ func TestEmptySelection(t *testing.T) { }, } - logger := testutil.Logger{Name: "parsers.xml"} - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - parser := &Parser{Configs: tt.configs, DefaultTags: map[string]string{}, Log: logger} + parser := &Parser{Configs: tt.configs, DefaultTags: map[string]string{}, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) _, err := parser.Parse([]byte(tt.input)) require.Error(t, err) @@ -1110,14 +1117,26 @@ func TestTestCases(t *testing.T) { name: "field selection batch", filename: "testcases/multisensor_selection_batch.conf", }, - { - name: "openweathermap forecast", - filename: "testcases/openweathermap.conf", - }, { name: "earthquakes quakeml", filename: "testcases/earthquakes.conf", }, + { + name: "openweathermap forecast (xml)", + filename: "testcases/openweathermap_xml.conf", + }, + { + name: "openweathermap forecast (json)", + filename: "testcases/openweathermap_json.conf", + }, + { + name: "addressbook tutorial (protobuf)", + filename: "testcases/addressbook.conf", + }, + { + name: "message-pack", + filename: "testcases/tracker_msgpack.conf", + }, } parser := influx.NewParser(influx.NewMetricHandler()) @@ -1132,9 +1151,29 @@ func TestTestCases(t *testing.T) { // Load the xml-content input, err := testutil.ParseRawLinesFrom(header, "File:") require.NoError(t, err) - assert.Len(t, input, 1) + require.Len(t, input, 1) + + filefields := strings.Fields(input[0]) + require.GreaterOrEqual(t, len(filefields), 1) + datafile := filepath.FromSlash(filefields[0]) + fileformat := "" + if len(filefields) > 1 { + fileformat = filefields[1] + } + + // Load the protocol buffer information if required + var pbmsgdef, pbmsgtype string + if fileformat == "xpath_protobuf" { + input, err := testutil.ParseRawLinesFrom(header, "Protobuf:") + require.NoError(t, err) + require.Len(t, input, 1) + + protofields := strings.Fields(input[0]) + require.Len(t, protofields, 2) + pbmsgdef = protofields[0] + pbmsgtype = protofields[1] + } - datafile := filepath.FromSlash(input[0]) content, err := ioutil.ReadFile(datafile) require.NoError(t, err) @@ -1145,7 +1184,14 @@ func TestTestCases(t *testing.T) { expectedErrors, _ := testutil.ParseRawLinesFrom(header, "Expected Error:") // Setup the parser and run it. - parser := &Parser{Configs: []Config{*cfg}, Log: testutil.Logger{Name: "parsers.xml"}} + parser := &Parser{ + Format: fileformat, + ProtobufMessageDef: pbmsgdef, + ProtobufMessageType: pbmsgtype, + Configs: []Config{*cfg}, + Log: testutil.Logger{Name: "parsers.xml"}, + } + require.NoError(t, parser.Init()) outputs, err := parser.Parse(content) if len(expectedErrors) == 0 { require.NoError(t, err) diff --git a/plugins/parsers/xpath/protocolbuffer_document.go b/plugins/parsers/xpath/protocolbuffer_document.go new file mode 100644 index 0000000000000..4ae88812d96bb --- /dev/null +++ b/plugins/parsers/xpath/protocolbuffer_document.go @@ -0,0 +1,161 @@ +package xpath + +import ( + "fmt" + "sort" + "strings" + + "github.com/influxdata/telegraf" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/dynamicpb" + + "github.com/jhump/protoreflect/desc/protoparse" + + path "github.com/antchfx/xpath" + "github.com/doclambda/protobufquery" +) + +type protobufDocument struct { + MessageDefinition string + MessageType string + Log telegraf.Logger + msg *dynamicpb.Message +} + +func (d *protobufDocument) Init() error { + // Check the message definition and type + if d.MessageDefinition == "" { + return fmt.Errorf("protocol-buffer message-definition not set") + } + if d.MessageType == "" { + return fmt.Errorf("protocol-buffer message-type not set") + } + + // Load the file descriptors from the given protocol-buffer definition + parser := protoparse.Parser{} + fds, err := parser.ParseFiles(d.MessageDefinition) + if err != nil { + return fmt.Errorf("parsing protocol-buffer definition in %q failed: %v", d.MessageDefinition, err) + } + if len(fds) < 1 { + return fmt.Errorf("file %q does not contain file descriptors", d.MessageDefinition) + } + + // Register all definitions in the file in the global registry + for _, fd := range fds { + if fd == nil { + continue + } + fileDescProto := fd.AsFileDescriptorProto() + fileDesc, err := protodesc.NewFile(fileDescProto, nil) + if err != nil { + return fmt.Errorf("creating file descriptor from proto failed: %v", err) + } + if err := protoregistry.GlobalFiles.RegisterFile(fileDesc); err != nil { + return fmt.Errorf("registering file descriptor %q failed: %v", fileDesc.Package(), err) + } + } + + // Lookup given type in the loaded file descriptors + msgFullName := protoreflect.FullName(d.MessageType) + desc, err := protoregistry.GlobalFiles.FindDescriptorByName(msgFullName) + if err != nil { + d.Log.Infof("Could not find %q... Known messages:", msgFullName) + + var known []string + protoregistry.GlobalFiles.RangeFiles(func(fd protoreflect.FileDescriptor) bool { + name := strings.TrimSpace(string(fd.FullName())) + if name != "" { + known = append(known, name) + } + return true + }) + sort.Strings(known) + for _, name := range known { + d.Log.Infof(" %s", name) + } + return err + } + + // Get a prototypical message for later use + msgDesc, ok := desc.(protoreflect.MessageDescriptor) + if !ok { + return fmt.Errorf("%q is not a message descriptor (%T)", msgFullName, desc) + } + + d.msg = dynamicpb.NewMessage(msgDesc) + if d.msg == nil { + return fmt.Errorf("creating message template for %q failed", msgDesc.FullName()) + } + + return nil +} + +func (d *protobufDocument) Parse(buf []byte) (dataNode, error) { + msg := d.msg.New() + + // Unmarshal the received buffer + if err := proto.Unmarshal(buf, msg.Interface()); err != nil { + return nil, err + } + + return protobufquery.Parse(msg) +} + +func (d *protobufDocument) QueryAll(node dataNode, expr string) ([]dataNode, error) { + // If this panics it's a programming error as we changed the document type while processing + native, err := protobufquery.QueryAll(node.(*protobufquery.Node), expr) + if err != nil { + return nil, err + } + + nodes := make([]dataNode, len(native)) + for i, n := range native { + nodes[i] = n + } + return nodes, nil +} + +func (d *protobufDocument) CreateXPathNavigator(node dataNode) path.NodeNavigator { + // If this panics it's a programming error as we changed the document type while processing + return protobufquery.CreateXPathNavigator(node.(*protobufquery.Node)) +} + +func (d *protobufDocument) GetNodePath(node, relativeTo dataNode, sep string) string { + names := make([]string, 0) + + // If these panic it's a programming error as we changed the document type while processing + nativeNode := node.(*protobufquery.Node) + nativeRelativeTo := relativeTo.(*protobufquery.Node) + + // Climb up the tree and collect the node names + n := nativeNode.Parent + for n != nil && n != nativeRelativeTo { + names = append(names, n.Name) + n = n.Parent + } + + if len(names) < 1 { + return "" + } + + // Construct the nodes + nodepath := "" + for _, name := range names { + nodepath = name + sep + nodepath + } + + return nodepath[:len(nodepath)-1] +} + +func (d *protobufDocument) OutputXML(node dataNode) string { + native := node.(*protobufquery.Node) + return native.OutputXML() +} + +func init() { +} diff --git a/plugins/parsers/xpath/testcases/addressbook.conf b/plugins/parsers/xpath/testcases/addressbook.conf new file mode 100644 index 0000000000000..eeca8921d7b16 --- /dev/null +++ b/plugins/parsers/xpath/testcases/addressbook.conf @@ -0,0 +1,28 @@ +# Example for parsing an example protocol buffer data. +# +# File: +# testcases/addressbook.dat xpath_protobuf +# +# Protobuf: +# testcases/addressbook.proto addressbook.AddressBook +# +# Expected Output: +# addresses,id=101,name=John\ Doe age=42i,email="john@example.com" 1621430181000000000 +# addresses,id=102,name=Jane\ Doe age=40i 1621430181000000000 +# addresses,id=201,name=Jack\ Doe age=12i,email="jack@example.com" 1621430181000000000 +# addresses,id=301,name=Jack\ Buck age=19i,email="buck@example.com" 1621430181000000000 +# addresses,id=1001,name=Janet\ Doe age=16i,email="janet@example.com" 1621430181000000000 +# + +metric_name = "'addresses'" +metric_selection = "//people" + +[tags] + id = "id" + name = "name" + +[fields_int] + age = "age" + +[fields] + email = "email" diff --git a/plugins/parsers/xpath/testcases/addressbook.dat b/plugins/parsers/xpath/testcases/addressbook.dat new file mode 100644 index 0000000000000..a5c1d8feefa70 --- /dev/null +++ b/plugins/parsers/xpath/testcases/addressbook.dat @@ -0,0 +1,17 @@ + + +John Doeejohn@example.com * + +Jane Doef ( +3 +Jack Doejack@example.com * + 555-555-5555 +V + Jack Buckbuck@example.com * + 555-555-0000* + 555-555-0001* + 555-555-0002 +E + Janet Doejanet@example.com * + 555-777-0000* + 555-777-0001homeprivatefriends \ No newline at end of file diff --git a/plugins/parsers/xpath/testcases/addressbook.proto b/plugins/parsers/xpath/testcases/addressbook.proto new file mode 100644 index 0000000000000..3ed0eb566a987 --- /dev/null +++ b/plugins/parsers/xpath/testcases/addressbook.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package addressbook; + +message Person { + string name = 1; + int32 id = 2; // Unique ID number for this person. + string email = 3; + uint32 age = 4; + + enum PhoneType { + MOBILE = 0; + HOME = 1; + WORK = 2; + } + + message PhoneNumber { + string number = 1; + PhoneType type = 2; + } + + repeated PhoneNumber phones = 5; +} + +message AddressBook { + repeated Person people = 1; + repeated string tags = 2; +} diff --git a/plugins/parsers/xml/testcases/earthquakes.conf b/plugins/parsers/xpath/testcases/earthquakes.conf similarity index 100% rename from plugins/parsers/xml/testcases/earthquakes.conf rename to plugins/parsers/xpath/testcases/earthquakes.conf diff --git a/plugins/parsers/xml/testcases/earthquakes.quakeml b/plugins/parsers/xpath/testcases/earthquakes.quakeml similarity index 100% rename from plugins/parsers/xml/testcases/earthquakes.quakeml rename to plugins/parsers/xpath/testcases/earthquakes.quakeml diff --git a/plugins/parsers/xml/testcases/multisensor.xml b/plugins/parsers/xpath/testcases/multisensor.xml similarity index 100% rename from plugins/parsers/xml/testcases/multisensor.xml rename to plugins/parsers/xpath/testcases/multisensor.xml diff --git a/plugins/parsers/xml/testcases/multisensor_explicit_basic.conf b/plugins/parsers/xpath/testcases/multisensor_explicit_basic.conf similarity index 100% rename from plugins/parsers/xml/testcases/multisensor_explicit_basic.conf rename to plugins/parsers/xpath/testcases/multisensor_explicit_basic.conf diff --git a/plugins/parsers/xml/testcases/multisensor_explicit_batch.conf b/plugins/parsers/xpath/testcases/multisensor_explicit_batch.conf similarity index 100% rename from plugins/parsers/xml/testcases/multisensor_explicit_batch.conf rename to plugins/parsers/xpath/testcases/multisensor_explicit_batch.conf diff --git a/plugins/parsers/xml/testcases/multisensor_selection_batch.conf b/plugins/parsers/xpath/testcases/multisensor_selection_batch.conf similarity index 100% rename from plugins/parsers/xml/testcases/multisensor_selection_batch.conf rename to plugins/parsers/xpath/testcases/multisensor_selection_batch.conf diff --git a/plugins/parsers/xpath/testcases/openweathermap_5d.json b/plugins/parsers/xpath/testcases/openweathermap_5d.json new file mode 100644 index 0000000000000..c8e4dccd45890 --- /dev/null +++ b/plugins/parsers/xpath/testcases/openweathermap_5d.json @@ -0,0 +1,127 @@ +{ + "cod": "200", + "message": 0.0179, + "cnt": 96, + "list": [ + { + "dt": 1596632400, + "main": { + "temp": 280.16, + "feels_like": 280.41, + "temp_min": 280.16, + "temp_max": 280.16, + "pressure": 1010, + "sea_level": 1010, + "grnd_level": 1010, + "humidity": 70, + "temp_kf": 0 + }, + "weather": [ + { + "id": 804, + "main": "Clouds", + "description": "overcast clouds", + "icon": "04n" + } + ], + "clouds": { + "all": 100 + }, + "wind": { + "speed": 2.03, + "deg": 252, + "gust":5.46 + }, + "visibility": 10000, + "pop": 0.04, + "sys": { + "pod": "n" + }, + "dt_txt": "2020-08-05 13:00:00" + }, + { + "dt": 159663600, + "main": { + "temp": 281.16, + "feels_like": 281.41, + "temp_min": 281.16, + "temp_max": 281.16, + "pressure": 1011, + "sea_level": 1011, + "grnd_level": 1011, + "humidity": 71, + "temp_kf": 0 + }, + "weather": [ + { + "id": 804, + "main": "Clouds", + "description": "overcast clouds", + "icon": "04n" + } + ], + "clouds": { + "all": 100 + }, + "wind": { + "speed": 2.03, + "deg": 252, + "gust":5.46 + }, + "visibility": 10000, + "pop": 0.04, + "sys": { + "pod": "n" + }, + "dt_txt": "2020-08-05 14:00:00" + }, + { + "dt": 159667200, + "main": { + "temp": 282.16, + "feels_like": 282.41, + "temp_min": 282.16, + "temp_max": 282.16, + "pressure": 1012, + "sea_level": 1012, + "grnd_level": 1012, + "humidity": 71, + "temp_kf": 0 + }, + "weather": [ + { + "id": 804, + "main": "Clouds", + "description": "overcast clouds", + "icon": "04n" + } + ], + "clouds": { + "all": 100 + }, + "wind": { + "speed": 2.03, + "deg": 252, + "gust":5.46 + }, + "visibility": 10000, + "pop": 0.04, + "sys": { + "pod": "n" + }, + "dt_txt": "2020-08-05 15:00:00" + } + ], + "city": { + "id": 2643743, + "name": "London", + "coord": { + "lat": 51.5085, + "lon": -0.1258 + }, + "country": "GB", + "timezone": 0, + "sunrise": 1568958164, + "sunset": 1569002733 + } +} diff --git a/plugins/parsers/xml/testcases/openweathermap_5d.xml b/plugins/parsers/xpath/testcases/openweathermap_5d.xml similarity index 100% rename from plugins/parsers/xml/testcases/openweathermap_5d.xml rename to plugins/parsers/xpath/testcases/openweathermap_5d.xml diff --git a/plugins/parsers/xpath/testcases/openweathermap_json.conf b/plugins/parsers/xpath/testcases/openweathermap_json.conf new file mode 100644 index 0000000000000..d9b3e04b692eb --- /dev/null +++ b/plugins/parsers/xpath/testcases/openweathermap_json.conf @@ -0,0 +1,29 @@ +# Example for parsing openweathermap five-day-forecast data. +# +# File: +# testcases/openweathermap_5d.json xpath_json +# +# Expected Output: +# weather,city=London,country=GB humidity=70i,clouds=100i,wind_direction=252,wind_speed=2.03,temperature=137.86666666666667,precipitation=0 1596632400000000000 +# weather,city=London,country=GB wind_direction=252,wind_speed=2.03,temperature=138.42222222222225,precipitation=0,clouds=100i,humidity=71i 159663600000000000 +# weather,city=London,country=GB humidity=71i,clouds=100i,wind_direction=252,wind_speed=2.03,temperature=138.9777777777778,precipitation=0 159667200000000000 +# + +metric_name = "'weather'" +metric_selection = "//list/*" +timestamp = "dt" +timestamp_format = "unix" + +[tags] + city = "/city/name" + country = "/city/country" + +[fields_int] + humidity = "main/humidity" + clouds = "clouds/all" + +[fields] + precipitation = "number(main/precipitation)" + wind_direction = "number(wind/deg)" + wind_speed = "number(wind/speed)" + temperature = "(number(main/temp) - 32.0)*(5.0 div 9.0)" diff --git a/plugins/parsers/xml/testcases/openweathermap.conf b/plugins/parsers/xpath/testcases/openweathermap_xml.conf similarity index 95% rename from plugins/parsers/xml/testcases/openweathermap.conf rename to plugins/parsers/xpath/testcases/openweathermap_xml.conf index 99798582c6cf2..57b63cebdc694 100644 --- a/plugins/parsers/xml/testcases/openweathermap.conf +++ b/plugins/parsers/xpath/testcases/openweathermap_xml.conf @@ -1,7 +1,7 @@ # Example for parsing openweathermap five-day-forecast data. # # File: -# testcases/openweathermap_5d.xml +# testcases/openweathermap_5d.xml xml # # Expected Output: # weather,city=London,country=GB clouds=64i,humidity=96i,precipitation=5,temperature=16.89,wind_direction=253.5,wind_speed=4.9 1435654800000000000 diff --git a/plugins/parsers/xpath/testcases/tracker.msg b/plugins/parsers/xpath/testcases/tracker.msg new file mode 100644 index 0000000000000..3120a4321ed15 --- /dev/null +++ b/plugins/parsers/xpath/testcases/tracker.msg @@ -0,0 +1 @@ +geo@BsE^MydeviceTrackerAinfoqualityserial_number123abc456deffixétimestamp`V \ No newline at end of file diff --git a/plugins/parsers/xpath/testcases/tracker_msgpack.conf b/plugins/parsers/xpath/testcases/tracker_msgpack.conf new file mode 100644 index 0000000000000..168ad2cc97e4f --- /dev/null +++ b/plugins/parsers/xpath/testcases/tracker_msgpack.conf @@ -0,0 +1,24 @@ +# Example for parsing openweathermap five-day-forecast data. +# +# File: +# testcases/tracker.msg xpath_msgpack +# +# Expected Output: +# tracker,device=TrackerA,fixation=true serial="123abc456def",lat=37.78980863758897,lon=-122.39931057256935,quality=2i 1624528552000000000 +# + +metric_name = "'tracker'" +timestamp = "timestamp" +timestamp_format = "unix" + +[tags] + device = "device" + fixation = "info/fix" + +[fields_int] + quality = "info/quality" + +[fields] + serial = "info/serial_number" + lat = "number(/geo/*[1])" + lon = "number(/geo/*[2])" diff --git a/plugins/parsers/xpath/xml_document.go b/plugins/parsers/xpath/xml_document.go new file mode 100644 index 0000000000000..f2059b4c8333c --- /dev/null +++ b/plugins/parsers/xpath/xml_document.go @@ -0,0 +1,65 @@ +package xpath + +import ( + "strings" + + "github.com/antchfx/xmlquery" + path "github.com/antchfx/xpath" +) + +type xmlDocument struct{} + +func (d *xmlDocument) Parse(buf []byte) (dataNode, error) { + return xmlquery.Parse(strings.NewReader(string(buf))) +} + +func (d *xmlDocument) QueryAll(node dataNode, expr string) ([]dataNode, error) { + // If this panics it's a programming error as we changed the document type while processing + native, err := xmlquery.QueryAll(node.(*xmlquery.Node), expr) + if err != nil { + return nil, err + } + + nodes := make([]dataNode, len(native)) + for i, n := range native { + nodes[i] = n + } + return nodes, nil +} + +func (d *xmlDocument) CreateXPathNavigator(node dataNode) path.NodeNavigator { + // If this panics it's a programming error as we changed the document type while processing + return xmlquery.CreateXPathNavigator(node.(*xmlquery.Node)) +} + +func (d *xmlDocument) GetNodePath(node, relativeTo dataNode, sep string) string { + names := make([]string, 0) + + // If these panic it's a programming error as we changed the document type while processing + nativeNode := node.(*xmlquery.Node) + nativeRelativeTo := relativeTo.(*xmlquery.Node) + + // Climb up the tree and collect the node names + n := nativeNode.Parent + for n != nil && n != nativeRelativeTo { + names = append(names, n.Data) + n = n.Parent + } + + if len(names) < 1 { + return "" + } + + // Construct the nodes + nodepath := "" + for _, name := range names { + nodepath = name + sep + nodepath + } + + return nodepath[:len(nodepath)-1] +} + +func (d *xmlDocument) OutputXML(node dataNode) string { + native := node.(*xmlquery.Node) + return native.OutputXML(false) +} From e0ac5078bba56e5d451039f95914c94fa8d1ab9d Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Thu, 1 Jul 2021 13:50:35 -0700 Subject: [PATCH 499/761] Fix Couchbase regression (#9448) --- plugins/inputs/couchbase/couchbase.go | 436 +++++++++++---------- plugins/inputs/couchbase/couchbase_test.go | 71 ++-- 2 files changed, 267 insertions(+), 240 deletions(-) diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index e89393ee82316..b62a7e970305d 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -128,220 +128,220 @@ func (cb *Couchbase) gatherDetailedBucketStats(server, bucket string, fields map // Use length of any set of metrics, they will all be the same length. lastEntry := len(extendedBucketStats.Op.Samples.CouchTotalDiskSize) - 1 - cb.addBucketField(fields, "couch_total_disk_size", extendedBucketStats.Op.Samples.CouchTotalDiskSize[lastEntry]) - cb.addBucketField(fields, "couch_docs_fragmentation", extendedBucketStats.Op.Samples.CouchDocsFragmentation[lastEntry]) - cb.addBucketField(fields, "couch_views_fragmentation", extendedBucketStats.Op.Samples.CouchViewsFragmentation[lastEntry]) - cb.addBucketField(fields, "hit_ratio", extendedBucketStats.Op.Samples.HitRatio[lastEntry]) - cb.addBucketField(fields, "ep_cache_miss_rate", extendedBucketStats.Op.Samples.EpCacheMissRate[lastEntry]) - cb.addBucketField(fields, "ep_resident_items_rate", extendedBucketStats.Op.Samples.EpResidentItemsRate[lastEntry]) - cb.addBucketField(fields, "vb_avg_active_queue_age", extendedBucketStats.Op.Samples.VbAvgActiveQueueAge[lastEntry]) - cb.addBucketField(fields, "vb_avg_replica_queue_age", extendedBucketStats.Op.Samples.VbAvgReplicaQueueAge[lastEntry]) - cb.addBucketField(fields, "vb_avg_pending_queue_age", extendedBucketStats.Op.Samples.VbAvgPendingQueueAge[lastEntry]) - cb.addBucketField(fields, "vb_avg_total_queue_age", extendedBucketStats.Op.Samples.VbAvgTotalQueueAge[lastEntry]) - cb.addBucketField(fields, "vb_active_resident_items_ratio", extendedBucketStats.Op.Samples.VbActiveResidentItemsRatio[lastEntry]) - cb.addBucketField(fields, "vb_replica_resident_items_ratio", extendedBucketStats.Op.Samples.VbReplicaResidentItemsRatio[lastEntry]) - cb.addBucketField(fields, "vb_pending_resident_items_ratio", extendedBucketStats.Op.Samples.VbPendingResidentItemsRatio[lastEntry]) - cb.addBucketField(fields, "avg_disk_update_time", extendedBucketStats.Op.Samples.AvgDiskUpdateTime[lastEntry]) - cb.addBucketField(fields, "avg_disk_commit_time", extendedBucketStats.Op.Samples.AvgDiskCommitTime[lastEntry]) - cb.addBucketField(fields, "avg_bg_wait_time", extendedBucketStats.Op.Samples.AvgBgWaitTime[lastEntry]) - cb.addBucketField(fields, "avg_active_timestamp_drift", extendedBucketStats.Op.Samples.AvgActiveTimestampDrift[lastEntry]) - cb.addBucketField(fields, "avg_replica_timestamp_drift", extendedBucketStats.Op.Samples.AvgReplicaTimestampDrift[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views+indexes_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views+indexes_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsRemaining[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views+indexes_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesProducerCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views+indexes_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBacklogSize[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views+indexes_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsSent[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views+indexes_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBytes[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views+indexes_backoff", extendedBucketStats.Op.Samples.EpDcpViewsIndexesBackoff[lastEntry]) - cb.addBucketField(fields, "bg_wait_count", extendedBucketStats.Op.Samples.BgWaitCount[lastEntry]) - cb.addBucketField(fields, "bg_wait_total", extendedBucketStats.Op.Samples.BgWaitTotal[lastEntry]) - cb.addBucketField(fields, "bytes_read", extendedBucketStats.Op.Samples.BytesRead[lastEntry]) - cb.addBucketField(fields, "bytes_written", extendedBucketStats.Op.Samples.BytesWritten[lastEntry]) - cb.addBucketField(fields, "cas_badval", extendedBucketStats.Op.Samples.CasBadval[lastEntry]) - cb.addBucketField(fields, "cas_hits", extendedBucketStats.Op.Samples.CasHits[lastEntry]) - cb.addBucketField(fields, "cas_misses", extendedBucketStats.Op.Samples.CasMisses[lastEntry]) - cb.addBucketField(fields, "cmd_get", extendedBucketStats.Op.Samples.CmdGet[lastEntry]) - cb.addBucketField(fields, "cmd_lookup", extendedBucketStats.Op.Samples.CmdLookup[lastEntry]) - cb.addBucketField(fields, "cmd_set", extendedBucketStats.Op.Samples.CmdSet[lastEntry]) - cb.addBucketField(fields, "couch_docs_actual_disk_size", extendedBucketStats.Op.Samples.CouchDocsActualDiskSize[lastEntry]) - cb.addBucketField(fields, "couch_docs_data_size", extendedBucketStats.Op.Samples.CouchDocsDataSize[lastEntry]) - cb.addBucketField(fields, "couch_docs_disk_size", extendedBucketStats.Op.Samples.CouchDocsDiskSize[lastEntry]) - cb.addBucketField(fields, "couch_spatial_data_size", extendedBucketStats.Op.Samples.CouchSpatialDataSize[lastEntry]) - cb.addBucketField(fields, "couch_spatial_disk_size", extendedBucketStats.Op.Samples.CouchSpatialDiskSize[lastEntry]) - cb.addBucketField(fields, "couch_spatial_ops", extendedBucketStats.Op.Samples.CouchSpatialOps[lastEntry]) - cb.addBucketField(fields, "couch_views_actual_disk_size", extendedBucketStats.Op.Samples.CouchViewsActualDiskSize[lastEntry]) - cb.addBucketField(fields, "couch_views_data_size", extendedBucketStats.Op.Samples.CouchViewsDataSize[lastEntry]) - cb.addBucketField(fields, "couch_views_disk_size", extendedBucketStats.Op.Samples.CouchViewsDiskSize[lastEntry]) - cb.addBucketField(fields, "couch_views_ops", extendedBucketStats.Op.Samples.CouchViewsOps[lastEntry]) - cb.addBucketField(fields, "curr_connections", extendedBucketStats.Op.Samples.CurrConnections[lastEntry]) - cb.addBucketField(fields, "curr_items", extendedBucketStats.Op.Samples.CurrItems[lastEntry]) - cb.addBucketField(fields, "curr_items_tot", extendedBucketStats.Op.Samples.CurrItemsTot[lastEntry]) - cb.addBucketField(fields, "decr_hits", extendedBucketStats.Op.Samples.DecrHits[lastEntry]) - cb.addBucketField(fields, "decr_misses", extendedBucketStats.Op.Samples.DecrMisses[lastEntry]) - cb.addBucketField(fields, "delete_hits", extendedBucketStats.Op.Samples.DeleteHits[lastEntry]) - cb.addBucketField(fields, "delete_misses", extendedBucketStats.Op.Samples.DeleteMisses[lastEntry]) - cb.addBucketField(fields, "disk_commit_count", extendedBucketStats.Op.Samples.DiskCommitCount[lastEntry]) - cb.addBucketField(fields, "disk_commit_total", extendedBucketStats.Op.Samples.DiskCommitTotal[lastEntry]) - cb.addBucketField(fields, "disk_update_count", extendedBucketStats.Op.Samples.DiskUpdateCount[lastEntry]) - cb.addBucketField(fields, "disk_update_total", extendedBucketStats.Op.Samples.DiskUpdateTotal[lastEntry]) - cb.addBucketField(fields, "disk_write_queue", extendedBucketStats.Op.Samples.DiskWriteQueue[lastEntry]) - cb.addBucketField(fields, "ep_active_ahead_exceptions", extendedBucketStats.Op.Samples.EpActiveAheadExceptions[lastEntry]) - cb.addBucketField(fields, "ep_active_hlc_drift", extendedBucketStats.Op.Samples.EpActiveHlcDrift[lastEntry]) - cb.addBucketField(fields, "ep_active_hlc_drift_count", extendedBucketStats.Op.Samples.EpActiveHlcDriftCount[lastEntry]) - cb.addBucketField(fields, "ep_bg_fetched", extendedBucketStats.Op.Samples.EpBgFetched[lastEntry]) - cb.addBucketField(fields, "ep_clock_cas_drift_threshold_exceeded", extendedBucketStats.Op.Samples.EpClockCasDriftThresholdExceeded[lastEntry]) - cb.addBucketField(fields, "ep_data_read_failed", extendedBucketStats.Op.Samples.EpDataReadFailed[lastEntry]) - cb.addBucketField(fields, "ep_data_write_failed", extendedBucketStats.Op.Samples.EpDataWriteFailed[lastEntry]) - cb.addBucketField(fields, "ep_dcp_2i_backoff", extendedBucketStats.Op.Samples.EpDcp2IBackoff[lastEntry]) - cb.addBucketField(fields, "ep_dcp_2i_count", extendedBucketStats.Op.Samples.EpDcp2ICount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_2i_items_remaining", extendedBucketStats.Op.Samples.EpDcp2IItemsRemaining[lastEntry]) - cb.addBucketField(fields, "ep_dcp_2i_items_sent", extendedBucketStats.Op.Samples.EpDcp2IItemsSent[lastEntry]) - cb.addBucketField(fields, "ep_dcp_2i_producer_count", extendedBucketStats.Op.Samples.EpDcp2IProducerCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_2i_total_backlog_size", extendedBucketStats.Op.Samples.EpDcp2ITotalBacklogSize[lastEntry]) - cb.addBucketField(fields, "ep_dcp_2i_total_bytes", extendedBucketStats.Op.Samples.EpDcp2ITotalBytes[lastEntry]) - cb.addBucketField(fields, "ep_dcp_cbas_backoff", extendedBucketStats.Op.Samples.EpDcpCbasBackoff[lastEntry]) - cb.addBucketField(fields, "ep_dcp_cbas_count", extendedBucketStats.Op.Samples.EpDcpCbasCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_cbas_items_remaining", extendedBucketStats.Op.Samples.EpDcpCbasItemsRemaining[lastEntry]) - cb.addBucketField(fields, "ep_dcp_cbas_items_sent", extendedBucketStats.Op.Samples.EpDcpCbasItemsSent[lastEntry]) - cb.addBucketField(fields, "ep_dcp_cbas_producer_count", extendedBucketStats.Op.Samples.EpDcpCbasProducerCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_cbas_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpCbasTotalBacklogSize[lastEntry]) - cb.addBucketField(fields, "ep_dcp_cbas_total_bytes", extendedBucketStats.Op.Samples.EpDcpCbasTotalBytes[lastEntry]) - cb.addBucketField(fields, "ep_dcp_eventing_backoff", extendedBucketStats.Op.Samples.EpDcpEventingBackoff[lastEntry]) - cb.addBucketField(fields, "ep_dcp_eventing_count", extendedBucketStats.Op.Samples.EpDcpEventingCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_eventing_items_remaining", extendedBucketStats.Op.Samples.EpDcpEventingItemsRemaining[lastEntry]) - cb.addBucketField(fields, "ep_dcp_eventing_items_sent", extendedBucketStats.Op.Samples.EpDcpEventingItemsSent[lastEntry]) - cb.addBucketField(fields, "ep_dcp_eventing_producer_count", extendedBucketStats.Op.Samples.EpDcpEventingProducerCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_eventing_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpEventingTotalBacklogSize[lastEntry]) - cb.addBucketField(fields, "ep_dcp_eventing_total_bytes", extendedBucketStats.Op.Samples.EpDcpEventingTotalBytes[lastEntry]) - cb.addBucketField(fields, "ep_dcp_fts_backoff", extendedBucketStats.Op.Samples.EpDcpFtsBackoff[lastEntry]) - cb.addBucketField(fields, "ep_dcp_fts_count", extendedBucketStats.Op.Samples.EpDcpFtsCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_fts_items_remaining", extendedBucketStats.Op.Samples.EpDcpFtsItemsRemaining[lastEntry]) - cb.addBucketField(fields, "ep_dcp_fts_items_sent", extendedBucketStats.Op.Samples.EpDcpFtsItemsSent[lastEntry]) - cb.addBucketField(fields, "ep_dcp_fts_producer_count", extendedBucketStats.Op.Samples.EpDcpFtsProducerCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_fts_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpFtsTotalBacklogSize[lastEntry]) - cb.addBucketField(fields, "ep_dcp_fts_total_bytes", extendedBucketStats.Op.Samples.EpDcpFtsTotalBytes[lastEntry]) - cb.addBucketField(fields, "ep_dcp_other_backoff", extendedBucketStats.Op.Samples.EpDcpOtherBackoff[lastEntry]) - cb.addBucketField(fields, "ep_dcp_other_count", extendedBucketStats.Op.Samples.EpDcpOtherCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_other_items_remaining", extendedBucketStats.Op.Samples.EpDcpOtherItemsRemaining[lastEntry]) - cb.addBucketField(fields, "ep_dcp_other_items_sent", extendedBucketStats.Op.Samples.EpDcpOtherItemsSent[lastEntry]) - cb.addBucketField(fields, "ep_dcp_other_producer_count", extendedBucketStats.Op.Samples.EpDcpOtherProducerCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_other_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpOtherTotalBacklogSize[lastEntry]) - cb.addBucketField(fields, "ep_dcp_other_total_bytes", extendedBucketStats.Op.Samples.EpDcpOtherTotalBytes[lastEntry]) - cb.addBucketField(fields, "ep_dcp_replica_backoff", extendedBucketStats.Op.Samples.EpDcpReplicaBackoff[lastEntry]) - cb.addBucketField(fields, "ep_dcp_replica_count", extendedBucketStats.Op.Samples.EpDcpReplicaCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_replica_items_remaining", extendedBucketStats.Op.Samples.EpDcpReplicaItemsRemaining[lastEntry]) - cb.addBucketField(fields, "ep_dcp_replica_items_sent", extendedBucketStats.Op.Samples.EpDcpReplicaItemsSent[lastEntry]) - cb.addBucketField(fields, "ep_dcp_replica_producer_count", extendedBucketStats.Op.Samples.EpDcpReplicaProducerCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_replica_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBacklogSize[lastEntry]) - cb.addBucketField(fields, "ep_dcp_replica_total_bytes", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBytes[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views_backoff", extendedBucketStats.Op.Samples.EpDcpViewsBackoff[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views_count", extendedBucketStats.Op.Samples.EpDcpViewsCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsItemsRemaining[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsItemsSent[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsProducerCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsTotalBacklogSize[lastEntry]) - cb.addBucketField(fields, "ep_dcp_views_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsTotalBytes[lastEntry]) - cb.addBucketField(fields, "ep_dcp_xdcr_backoff", extendedBucketStats.Op.Samples.EpDcpXdcrBackoff[lastEntry]) - cb.addBucketField(fields, "ep_dcp_xdcr_count", extendedBucketStats.Op.Samples.EpDcpXdcrCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_xdcr_items_remaining", extendedBucketStats.Op.Samples.EpDcpXdcrItemsRemaining[lastEntry]) - cb.addBucketField(fields, "ep_dcp_xdcr_items_sent", extendedBucketStats.Op.Samples.EpDcpXdcrItemsSent[lastEntry]) - cb.addBucketField(fields, "ep_dcp_xdcr_producer_count", extendedBucketStats.Op.Samples.EpDcpXdcrProducerCount[lastEntry]) - cb.addBucketField(fields, "ep_dcp_xdcr_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBacklogSize[lastEntry]) - cb.addBucketField(fields, "ep_dcp_xdcr_total_bytes", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBytes[lastEntry]) - cb.addBucketField(fields, "ep_diskqueue_drain", extendedBucketStats.Op.Samples.EpDiskqueueDrain[lastEntry]) - cb.addBucketField(fields, "ep_diskqueue_fill", extendedBucketStats.Op.Samples.EpDiskqueueFill[lastEntry]) - cb.addBucketField(fields, "ep_diskqueue_items", extendedBucketStats.Op.Samples.EpDiskqueueItems[lastEntry]) - cb.addBucketField(fields, "ep_flusher_todo", extendedBucketStats.Op.Samples.EpFlusherTodo[lastEntry]) - cb.addBucketField(fields, "ep_item_commit_failed", extendedBucketStats.Op.Samples.EpItemCommitFailed[lastEntry]) - cb.addBucketField(fields, "ep_kv_size", extendedBucketStats.Op.Samples.EpKvSize[lastEntry]) - cb.addBucketField(fields, "ep_max_size", extendedBucketStats.Op.Samples.EpMaxSize[lastEntry]) - cb.addBucketField(fields, "ep_mem_high_wat", extendedBucketStats.Op.Samples.EpMemHighWat[lastEntry]) - cb.addBucketField(fields, "ep_mem_low_wat", extendedBucketStats.Op.Samples.EpMemLowWat[lastEntry]) - cb.addBucketField(fields, "ep_meta_data_memory", extendedBucketStats.Op.Samples.EpMetaDataMemory[lastEntry]) - cb.addBucketField(fields, "ep_num_non_resident", extendedBucketStats.Op.Samples.EpNumNonResident[lastEntry]) - cb.addBucketField(fields, "ep_num_ops_del_meta", extendedBucketStats.Op.Samples.EpNumOpsDelMeta[lastEntry]) - cb.addBucketField(fields, "ep_num_ops_del_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsDelRetMeta[lastEntry]) - cb.addBucketField(fields, "ep_num_ops_get_meta", extendedBucketStats.Op.Samples.EpNumOpsGetMeta[lastEntry]) - cb.addBucketField(fields, "ep_num_ops_set_meta", extendedBucketStats.Op.Samples.EpNumOpsSetMeta[lastEntry]) - cb.addBucketField(fields, "ep_num_ops_set_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsSetRetMeta[lastEntry]) - cb.addBucketField(fields, "ep_num_value_ejects", extendedBucketStats.Op.Samples.EpNumValueEjects[lastEntry]) - cb.addBucketField(fields, "ep_oom_errors", extendedBucketStats.Op.Samples.EpOomErrors[lastEntry]) - cb.addBucketField(fields, "ep_ops_create", extendedBucketStats.Op.Samples.EpOpsCreate[lastEntry]) - cb.addBucketField(fields, "ep_ops_update", extendedBucketStats.Op.Samples.EpOpsUpdate[lastEntry]) - cb.addBucketField(fields, "ep_overhead", extendedBucketStats.Op.Samples.EpOverhead[lastEntry]) - cb.addBucketField(fields, "ep_queue_size", extendedBucketStats.Op.Samples.EpQueueSize[lastEntry]) - cb.addBucketField(fields, "ep_replica_ahead_exceptions", extendedBucketStats.Op.Samples.EpReplicaAheadExceptions[lastEntry]) - cb.addBucketField(fields, "ep_replica_hlc_drift", extendedBucketStats.Op.Samples.EpReplicaHlcDrift[lastEntry]) - cb.addBucketField(fields, "ep_replica_hlc_drift_count", extendedBucketStats.Op.Samples.EpReplicaHlcDriftCount[lastEntry]) - cb.addBucketField(fields, "ep_tmp_oom_errors", extendedBucketStats.Op.Samples.EpTmpOomErrors[lastEntry]) - cb.addBucketField(fields, "ep_vb_total", extendedBucketStats.Op.Samples.EpVbTotal[lastEntry]) - cb.addBucketField(fields, "evictions", extendedBucketStats.Op.Samples.Evictions[lastEntry]) - cb.addBucketField(fields, "get_hits", extendedBucketStats.Op.Samples.GetHits[lastEntry]) - cb.addBucketField(fields, "get_misses", extendedBucketStats.Op.Samples.GetMisses[lastEntry]) - cb.addBucketField(fields, "incr_hits", extendedBucketStats.Op.Samples.IncrHits[lastEntry]) - cb.addBucketField(fields, "incr_misses", extendedBucketStats.Op.Samples.IncrMisses[lastEntry]) - cb.addBucketField(fields, "misses", extendedBucketStats.Op.Samples.Misses[lastEntry]) - cb.addBucketField(fields, "ops", extendedBucketStats.Op.Samples.Ops[lastEntry]) - cb.addBucketField(fields, "timestamp", extendedBucketStats.Op.Samples.Timestamp[lastEntry]) - cb.addBucketField(fields, "vb_active_eject", extendedBucketStats.Op.Samples.VbActiveEject[lastEntry]) - cb.addBucketField(fields, "vb_active_itm_memory", extendedBucketStats.Op.Samples.VbActiveItmMemory[lastEntry]) - cb.addBucketField(fields, "vb_active_meta_data_memory", extendedBucketStats.Op.Samples.VbActiveMetaDataMemory[lastEntry]) - cb.addBucketField(fields, "vb_active_num", extendedBucketStats.Op.Samples.VbActiveNum[lastEntry]) - cb.addBucketField(fields, "vb_active_num_non_resident", extendedBucketStats.Op.Samples.VbActiveNumNonResident[lastEntry]) - cb.addBucketField(fields, "vb_active_ops_create", extendedBucketStats.Op.Samples.VbActiveOpsCreate[lastEntry]) - cb.addBucketField(fields, "vb_active_ops_update", extendedBucketStats.Op.Samples.VbActiveOpsUpdate[lastEntry]) - cb.addBucketField(fields, "vb_active_queue_age", extendedBucketStats.Op.Samples.VbActiveQueueAge[lastEntry]) - cb.addBucketField(fields, "vb_active_queue_drain", extendedBucketStats.Op.Samples.VbActiveQueueDrain[lastEntry]) - cb.addBucketField(fields, "vb_active_queue_fill", extendedBucketStats.Op.Samples.VbActiveQueueFill[lastEntry]) - cb.addBucketField(fields, "vb_active_queue_size", extendedBucketStats.Op.Samples.VbActiveQueueSize[lastEntry]) - cb.addBucketField(fields, "vb_active_sync_write_aborted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAbortedCount[lastEntry]) - cb.addBucketField(fields, "vb_active_sync_write_accepted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAcceptedCount[lastEntry]) - cb.addBucketField(fields, "vb_active_sync_write_committed_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteCommittedCount[lastEntry]) - cb.addBucketField(fields, "vb_pending_curr_items", extendedBucketStats.Op.Samples.VbPendingCurrItems[lastEntry]) - cb.addBucketField(fields, "vb_pending_eject", extendedBucketStats.Op.Samples.VbPendingEject[lastEntry]) - cb.addBucketField(fields, "vb_pending_itm_memory", extendedBucketStats.Op.Samples.VbPendingItmMemory[lastEntry]) - cb.addBucketField(fields, "vb_pending_meta_data_memory", extendedBucketStats.Op.Samples.VbPendingMetaDataMemory[lastEntry]) - cb.addBucketField(fields, "vb_pending_num", extendedBucketStats.Op.Samples.VbPendingNum[lastEntry]) - cb.addBucketField(fields, "vb_pending_num_non_resident", extendedBucketStats.Op.Samples.VbPendingNumNonResident[lastEntry]) - cb.addBucketField(fields, "vb_pending_ops_create", extendedBucketStats.Op.Samples.VbPendingOpsCreate[lastEntry]) - cb.addBucketField(fields, "vb_pending_ops_update", extendedBucketStats.Op.Samples.VbPendingOpsUpdate[lastEntry]) - cb.addBucketField(fields, "vb_pending_queue_age", extendedBucketStats.Op.Samples.VbPendingQueueAge[lastEntry]) - cb.addBucketField(fields, "vb_pending_queue_drain", extendedBucketStats.Op.Samples.VbPendingQueueDrain[lastEntry]) - cb.addBucketField(fields, "vb_pending_queue_fill", extendedBucketStats.Op.Samples.VbPendingQueueFill[lastEntry]) - cb.addBucketField(fields, "vb_pending_queue_size", extendedBucketStats.Op.Samples.VbPendingQueueSize[lastEntry]) - cb.addBucketField(fields, "vb_replica_curr_items", extendedBucketStats.Op.Samples.VbReplicaCurrItems[lastEntry]) - cb.addBucketField(fields, "vb_replica_eject", extendedBucketStats.Op.Samples.VbReplicaEject[lastEntry]) - cb.addBucketField(fields, "vb_replica_itm_memory", extendedBucketStats.Op.Samples.VbReplicaItmMemory[lastEntry]) - cb.addBucketField(fields, "vb_replica_meta_data_memory", extendedBucketStats.Op.Samples.VbReplicaMetaDataMemory[lastEntry]) - cb.addBucketField(fields, "vb_replica_num", extendedBucketStats.Op.Samples.VbReplicaNum[lastEntry]) - cb.addBucketField(fields, "vb_replica_num_non_resident", extendedBucketStats.Op.Samples.VbReplicaNumNonResident[lastEntry]) - cb.addBucketField(fields, "vb_replica_ops_create", extendedBucketStats.Op.Samples.VbReplicaOpsCreate[lastEntry]) - cb.addBucketField(fields, "vb_replica_ops_update", extendedBucketStats.Op.Samples.VbReplicaOpsUpdate[lastEntry]) - cb.addBucketField(fields, "vb_replica_queue_age", extendedBucketStats.Op.Samples.VbReplicaQueueAge[lastEntry]) - cb.addBucketField(fields, "vb_replica_queue_drain", extendedBucketStats.Op.Samples.VbReplicaQueueDrain[lastEntry]) - cb.addBucketField(fields, "vb_replica_queue_fill", extendedBucketStats.Op.Samples.VbReplicaQueueFill[lastEntry]) - cb.addBucketField(fields, "vb_replica_queue_size", extendedBucketStats.Op.Samples.VbReplicaQueueSize[lastEntry]) - cb.addBucketField(fields, "vb_total_queue_age", extendedBucketStats.Op.Samples.VbTotalQueueAge[lastEntry]) - cb.addBucketField(fields, "xdc_ops", extendedBucketStats.Op.Samples.XdcOps[lastEntry]) - cb.addBucketField(fields, "allocstall", extendedBucketStats.Op.Samples.Allocstall[lastEntry]) - cb.addBucketField(fields, "cpu_cores_available", extendedBucketStats.Op.Samples.CPUCoresAvailable[lastEntry]) - cb.addBucketField(fields, "cpu_irq_rate", extendedBucketStats.Op.Samples.CPUIrqRate[lastEntry]) - cb.addBucketField(fields, "cpu_stolen_rate", extendedBucketStats.Op.Samples.CPUStolenRate[lastEntry]) - cb.addBucketField(fields, "cpu_sys_rate", extendedBucketStats.Op.Samples.CPUSysRate[lastEntry]) - cb.addBucketField(fields, "cpu_user_rate", extendedBucketStats.Op.Samples.CPUUserRate[lastEntry]) - cb.addBucketField(fields, "cpu_utilization_rate", extendedBucketStats.Op.Samples.CPUUtilizationRate[lastEntry]) - cb.addBucketField(fields, "hibernated_requests", extendedBucketStats.Op.Samples.HibernatedRequests[lastEntry]) - cb.addBucketField(fields, "hibernated_waked", extendedBucketStats.Op.Samples.HibernatedWaked[lastEntry]) - cb.addBucketField(fields, "mem_actual_free", extendedBucketStats.Op.Samples.MemActualFree[lastEntry]) - cb.addBucketField(fields, "mem_actual_used", extendedBucketStats.Op.Samples.MemActualUsed[lastEntry]) - cb.addBucketField(fields, "mem_free", extendedBucketStats.Op.Samples.MemFree[lastEntry]) - cb.addBucketField(fields, "mem_limit", extendedBucketStats.Op.Samples.MemLimit[lastEntry]) - cb.addBucketField(fields, "mem_total", extendedBucketStats.Op.Samples.MemTotal[lastEntry]) - cb.addBucketField(fields, "mem_used_sys", extendedBucketStats.Op.Samples.MemUsedSys[lastEntry]) - cb.addBucketField(fields, "odp_report_failed", extendedBucketStats.Op.Samples.OdpReportFailed[lastEntry]) - cb.addBucketField(fields, "rest_requests", extendedBucketStats.Op.Samples.RestRequests[lastEntry]) - cb.addBucketField(fields, "swap_total", extendedBucketStats.Op.Samples.SwapTotal[lastEntry]) - cb.addBucketField(fields, "swap_used", extendedBucketStats.Op.Samples.SwapUsed[lastEntry]) + cb.addBucketFieldChecked(fields, "couch_total_disk_size", extendedBucketStats.Op.Samples.CouchTotalDiskSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_docs_fragmentation", extendedBucketStats.Op.Samples.CouchDocsFragmentation, lastEntry) + cb.addBucketFieldChecked(fields, "couch_views_fragmentation", extendedBucketStats.Op.Samples.CouchViewsFragmentation, lastEntry) + cb.addBucketFieldChecked(fields, "hit_ratio", extendedBucketStats.Op.Samples.HitRatio, lastEntry) + cb.addBucketFieldChecked(fields, "ep_cache_miss_rate", extendedBucketStats.Op.Samples.EpCacheMissRate, lastEntry) + cb.addBucketFieldChecked(fields, "ep_resident_items_rate", extendedBucketStats.Op.Samples.EpResidentItemsRate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_avg_active_queue_age", extendedBucketStats.Op.Samples.VbAvgActiveQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_avg_replica_queue_age", extendedBucketStats.Op.Samples.VbAvgReplicaQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_avg_pending_queue_age", extendedBucketStats.Op.Samples.VbAvgPendingQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_avg_total_queue_age", extendedBucketStats.Op.Samples.VbAvgTotalQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_resident_items_ratio", extendedBucketStats.Op.Samples.VbActiveResidentItemsRatio, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_resident_items_ratio", extendedBucketStats.Op.Samples.VbReplicaResidentItemsRatio, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_resident_items_ratio", extendedBucketStats.Op.Samples.VbPendingResidentItemsRatio, lastEntry) + cb.addBucketFieldChecked(fields, "avg_disk_update_time", extendedBucketStats.Op.Samples.AvgDiskUpdateTime, lastEntry) + cb.addBucketFieldChecked(fields, "avg_disk_commit_time", extendedBucketStats.Op.Samples.AvgDiskCommitTime, lastEntry) + cb.addBucketFieldChecked(fields, "avg_bg_wait_time", extendedBucketStats.Op.Samples.AvgBgWaitTime, lastEntry) + cb.addBucketFieldChecked(fields, "avg_active_timestamp_drift", extendedBucketStats.Op.Samples.AvgActiveTimestampDrift, lastEntry) + cb.addBucketFieldChecked(fields, "avg_replica_timestamp_drift", extendedBucketStats.Op.Samples.AvgReplicaTimestampDrift, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_backoff", extendedBucketStats.Op.Samples.EpDcpViewsIndexesBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "bg_wait_count", extendedBucketStats.Op.Samples.BgWaitCount, lastEntry) + cb.addBucketFieldChecked(fields, "bg_wait_total", extendedBucketStats.Op.Samples.BgWaitTotal, lastEntry) + cb.addBucketFieldChecked(fields, "bytes_read", extendedBucketStats.Op.Samples.BytesRead, lastEntry) + cb.addBucketFieldChecked(fields, "bytes_written", extendedBucketStats.Op.Samples.BytesWritten, lastEntry) + cb.addBucketFieldChecked(fields, "cas_badval", extendedBucketStats.Op.Samples.CasBadval, lastEntry) + cb.addBucketFieldChecked(fields, "cas_hits", extendedBucketStats.Op.Samples.CasHits, lastEntry) + cb.addBucketFieldChecked(fields, "cas_misses", extendedBucketStats.Op.Samples.CasMisses, lastEntry) + cb.addBucketFieldChecked(fields, "cmd_get", extendedBucketStats.Op.Samples.CmdGet, lastEntry) + cb.addBucketFieldChecked(fields, "cmd_lookup", extendedBucketStats.Op.Samples.CmdLookup, lastEntry) + cb.addBucketFieldChecked(fields, "cmd_set", extendedBucketStats.Op.Samples.CmdSet, lastEntry) + cb.addBucketFieldChecked(fields, "couch_docs_actual_disk_size", extendedBucketStats.Op.Samples.CouchDocsActualDiskSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_docs_data_size", extendedBucketStats.Op.Samples.CouchDocsDataSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_docs_disk_size", extendedBucketStats.Op.Samples.CouchDocsDiskSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_spatial_data_size", extendedBucketStats.Op.Samples.CouchSpatialDataSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_spatial_disk_size", extendedBucketStats.Op.Samples.CouchSpatialDiskSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_spatial_ops", extendedBucketStats.Op.Samples.CouchSpatialOps, lastEntry) + cb.addBucketFieldChecked(fields, "couch_views_actual_disk_size", extendedBucketStats.Op.Samples.CouchViewsActualDiskSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_views_data_size", extendedBucketStats.Op.Samples.CouchViewsDataSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_views_disk_size", extendedBucketStats.Op.Samples.CouchViewsDiskSize, lastEntry) + cb.addBucketFieldChecked(fields, "couch_views_ops", extendedBucketStats.Op.Samples.CouchViewsOps, lastEntry) + cb.addBucketFieldChecked(fields, "curr_connections", extendedBucketStats.Op.Samples.CurrConnections, lastEntry) + cb.addBucketFieldChecked(fields, "curr_items", extendedBucketStats.Op.Samples.CurrItems, lastEntry) + cb.addBucketFieldChecked(fields, "curr_items_tot", extendedBucketStats.Op.Samples.CurrItemsTot, lastEntry) + cb.addBucketFieldChecked(fields, "decr_hits", extendedBucketStats.Op.Samples.DecrHits, lastEntry) + cb.addBucketFieldChecked(fields, "decr_misses", extendedBucketStats.Op.Samples.DecrMisses, lastEntry) + cb.addBucketFieldChecked(fields, "delete_hits", extendedBucketStats.Op.Samples.DeleteHits, lastEntry) + cb.addBucketFieldChecked(fields, "delete_misses", extendedBucketStats.Op.Samples.DeleteMisses, lastEntry) + cb.addBucketFieldChecked(fields, "disk_commit_count", extendedBucketStats.Op.Samples.DiskCommitCount, lastEntry) + cb.addBucketFieldChecked(fields, "disk_commit_total", extendedBucketStats.Op.Samples.DiskCommitTotal, lastEntry) + cb.addBucketFieldChecked(fields, "disk_update_count", extendedBucketStats.Op.Samples.DiskUpdateCount, lastEntry) + cb.addBucketFieldChecked(fields, "disk_update_total", extendedBucketStats.Op.Samples.DiskUpdateTotal, lastEntry) + cb.addBucketFieldChecked(fields, "disk_write_queue", extendedBucketStats.Op.Samples.DiskWriteQueue, lastEntry) + cb.addBucketFieldChecked(fields, "ep_active_ahead_exceptions", extendedBucketStats.Op.Samples.EpActiveAheadExceptions, lastEntry) + cb.addBucketFieldChecked(fields, "ep_active_hlc_drift", extendedBucketStats.Op.Samples.EpActiveHlcDrift, lastEntry) + cb.addBucketFieldChecked(fields, "ep_active_hlc_drift_count", extendedBucketStats.Op.Samples.EpActiveHlcDriftCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_bg_fetched", extendedBucketStats.Op.Samples.EpBgFetched, lastEntry) + cb.addBucketFieldChecked(fields, "ep_clock_cas_drift_threshold_exceeded", extendedBucketStats.Op.Samples.EpClockCasDriftThresholdExceeded, lastEntry) + cb.addBucketFieldChecked(fields, "ep_data_read_failed", extendedBucketStats.Op.Samples.EpDataReadFailed, lastEntry) + cb.addBucketFieldChecked(fields, "ep_data_write_failed", extendedBucketStats.Op.Samples.EpDataWriteFailed, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_backoff", extendedBucketStats.Op.Samples.EpDcp2IBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_count", extendedBucketStats.Op.Samples.EpDcp2ICount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_items_remaining", extendedBucketStats.Op.Samples.EpDcp2IItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_items_sent", extendedBucketStats.Op.Samples.EpDcp2IItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_producer_count", extendedBucketStats.Op.Samples.EpDcp2IProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_total_backlog_size", extendedBucketStats.Op.Samples.EpDcp2ITotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_total_bytes", extendedBucketStats.Op.Samples.EpDcp2ITotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_backoff", extendedBucketStats.Op.Samples.EpDcpCbasBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_count", extendedBucketStats.Op.Samples.EpDcpCbasCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_items_remaining", extendedBucketStats.Op.Samples.EpDcpCbasItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_items_sent", extendedBucketStats.Op.Samples.EpDcpCbasItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_producer_count", extendedBucketStats.Op.Samples.EpDcpCbasProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpCbasTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_total_bytes", extendedBucketStats.Op.Samples.EpDcpCbasTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_backoff", extendedBucketStats.Op.Samples.EpDcpEventingBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_count", extendedBucketStats.Op.Samples.EpDcpEventingCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_items_remaining", extendedBucketStats.Op.Samples.EpDcpEventingItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_items_sent", extendedBucketStats.Op.Samples.EpDcpEventingItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_producer_count", extendedBucketStats.Op.Samples.EpDcpEventingProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpEventingTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_total_bytes", extendedBucketStats.Op.Samples.EpDcpEventingTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_backoff", extendedBucketStats.Op.Samples.EpDcpFtsBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_count", extendedBucketStats.Op.Samples.EpDcpFtsCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_items_remaining", extendedBucketStats.Op.Samples.EpDcpFtsItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_items_sent", extendedBucketStats.Op.Samples.EpDcpFtsItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_producer_count", extendedBucketStats.Op.Samples.EpDcpFtsProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpFtsTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_total_bytes", extendedBucketStats.Op.Samples.EpDcpFtsTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_backoff", extendedBucketStats.Op.Samples.EpDcpOtherBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_count", extendedBucketStats.Op.Samples.EpDcpOtherCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_items_remaining", extendedBucketStats.Op.Samples.EpDcpOtherItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_items_sent", extendedBucketStats.Op.Samples.EpDcpOtherItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_producer_count", extendedBucketStats.Op.Samples.EpDcpOtherProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpOtherTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_other_total_bytes", extendedBucketStats.Op.Samples.EpDcpOtherTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_backoff", extendedBucketStats.Op.Samples.EpDcpReplicaBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_count", extendedBucketStats.Op.Samples.EpDcpReplicaCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_items_remaining", extendedBucketStats.Op.Samples.EpDcpReplicaItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_items_sent", extendedBucketStats.Op.Samples.EpDcpReplicaItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_producer_count", extendedBucketStats.Op.Samples.EpDcpReplicaProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_total_bytes", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_backoff", extendedBucketStats.Op.Samples.EpDcpViewsBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_count", extendedBucketStats.Op.Samples.EpDcpViewsCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_views_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_backoff", extendedBucketStats.Op.Samples.EpDcpXdcrBackoff, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_count", extendedBucketStats.Op.Samples.EpDcpXdcrCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_items_remaining", extendedBucketStats.Op.Samples.EpDcpXdcrItemsRemaining, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_items_sent", extendedBucketStats.Op.Samples.EpDcpXdcrItemsSent, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_producer_count", extendedBucketStats.Op.Samples.EpDcpXdcrProducerCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBacklogSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_total_bytes", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBytes, lastEntry) + cb.addBucketFieldChecked(fields, "ep_diskqueue_drain", extendedBucketStats.Op.Samples.EpDiskqueueDrain, lastEntry) + cb.addBucketFieldChecked(fields, "ep_diskqueue_fill", extendedBucketStats.Op.Samples.EpDiskqueueFill, lastEntry) + cb.addBucketFieldChecked(fields, "ep_diskqueue_items", extendedBucketStats.Op.Samples.EpDiskqueueItems, lastEntry) + cb.addBucketFieldChecked(fields, "ep_flusher_todo", extendedBucketStats.Op.Samples.EpFlusherTodo, lastEntry) + cb.addBucketFieldChecked(fields, "ep_item_commit_failed", extendedBucketStats.Op.Samples.EpItemCommitFailed, lastEntry) + cb.addBucketFieldChecked(fields, "ep_kv_size", extendedBucketStats.Op.Samples.EpKvSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_max_size", extendedBucketStats.Op.Samples.EpMaxSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_mem_high_wat", extendedBucketStats.Op.Samples.EpMemHighWat, lastEntry) + cb.addBucketFieldChecked(fields, "ep_mem_low_wat", extendedBucketStats.Op.Samples.EpMemLowWat, lastEntry) + cb.addBucketFieldChecked(fields, "ep_meta_data_memory", extendedBucketStats.Op.Samples.EpMetaDataMemory, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_non_resident", extendedBucketStats.Op.Samples.EpNumNonResident, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_ops_del_meta", extendedBucketStats.Op.Samples.EpNumOpsDelMeta, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_ops_del_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsDelRetMeta, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_ops_get_meta", extendedBucketStats.Op.Samples.EpNumOpsGetMeta, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_ops_set_meta", extendedBucketStats.Op.Samples.EpNumOpsSetMeta, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_ops_set_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsSetRetMeta, lastEntry) + cb.addBucketFieldChecked(fields, "ep_num_value_ejects", extendedBucketStats.Op.Samples.EpNumValueEjects, lastEntry) + cb.addBucketFieldChecked(fields, "ep_oom_errors", extendedBucketStats.Op.Samples.EpOomErrors, lastEntry) + cb.addBucketFieldChecked(fields, "ep_ops_create", extendedBucketStats.Op.Samples.EpOpsCreate, lastEntry) + cb.addBucketFieldChecked(fields, "ep_ops_update", extendedBucketStats.Op.Samples.EpOpsUpdate, lastEntry) + cb.addBucketFieldChecked(fields, "ep_overhead", extendedBucketStats.Op.Samples.EpOverhead, lastEntry) + cb.addBucketFieldChecked(fields, "ep_queue_size", extendedBucketStats.Op.Samples.EpQueueSize, lastEntry) + cb.addBucketFieldChecked(fields, "ep_replica_ahead_exceptions", extendedBucketStats.Op.Samples.EpReplicaAheadExceptions, lastEntry) + cb.addBucketFieldChecked(fields, "ep_replica_hlc_drift", extendedBucketStats.Op.Samples.EpReplicaHlcDrift, lastEntry) + cb.addBucketFieldChecked(fields, "ep_replica_hlc_drift_count", extendedBucketStats.Op.Samples.EpReplicaHlcDriftCount, lastEntry) + cb.addBucketFieldChecked(fields, "ep_tmp_oom_errors", extendedBucketStats.Op.Samples.EpTmpOomErrors, lastEntry) + cb.addBucketFieldChecked(fields, "ep_vb_total", extendedBucketStats.Op.Samples.EpVbTotal, lastEntry) + cb.addBucketFieldChecked(fields, "evictions", extendedBucketStats.Op.Samples.Evictions, lastEntry) + cb.addBucketFieldChecked(fields, "get_hits", extendedBucketStats.Op.Samples.GetHits, lastEntry) + cb.addBucketFieldChecked(fields, "get_misses", extendedBucketStats.Op.Samples.GetMisses, lastEntry) + cb.addBucketFieldChecked(fields, "incr_hits", extendedBucketStats.Op.Samples.IncrHits, lastEntry) + cb.addBucketFieldChecked(fields, "incr_misses", extendedBucketStats.Op.Samples.IncrMisses, lastEntry) + cb.addBucketFieldChecked(fields, "misses", extendedBucketStats.Op.Samples.Misses, lastEntry) + cb.addBucketFieldChecked(fields, "ops", extendedBucketStats.Op.Samples.Ops, lastEntry) + cb.addBucketFieldChecked(fields, "timestamp", extendedBucketStats.Op.Samples.Timestamp, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_eject", extendedBucketStats.Op.Samples.VbActiveEject, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_itm_memory", extendedBucketStats.Op.Samples.VbActiveItmMemory, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_meta_data_memory", extendedBucketStats.Op.Samples.VbActiveMetaDataMemory, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_num", extendedBucketStats.Op.Samples.VbActiveNum, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_num_non_resident", extendedBucketStats.Op.Samples.VbActiveNumNonResident, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_ops_create", extendedBucketStats.Op.Samples.VbActiveOpsCreate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_ops_update", extendedBucketStats.Op.Samples.VbActiveOpsUpdate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_queue_age", extendedBucketStats.Op.Samples.VbActiveQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_queue_drain", extendedBucketStats.Op.Samples.VbActiveQueueDrain, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_queue_fill", extendedBucketStats.Op.Samples.VbActiveQueueFill, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_queue_size", extendedBucketStats.Op.Samples.VbActiveQueueSize, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_sync_write_aborted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAbortedCount, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_sync_write_accepted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAcceptedCount, lastEntry) + cb.addBucketFieldChecked(fields, "vb_active_sync_write_committed_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteCommittedCount, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_curr_items", extendedBucketStats.Op.Samples.VbPendingCurrItems, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_eject", extendedBucketStats.Op.Samples.VbPendingEject, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_itm_memory", extendedBucketStats.Op.Samples.VbPendingItmMemory, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_meta_data_memory", extendedBucketStats.Op.Samples.VbPendingMetaDataMemory, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_num", extendedBucketStats.Op.Samples.VbPendingNum, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_num_non_resident", extendedBucketStats.Op.Samples.VbPendingNumNonResident, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_ops_create", extendedBucketStats.Op.Samples.VbPendingOpsCreate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_ops_update", extendedBucketStats.Op.Samples.VbPendingOpsUpdate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_queue_age", extendedBucketStats.Op.Samples.VbPendingQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_queue_drain", extendedBucketStats.Op.Samples.VbPendingQueueDrain, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_queue_fill", extendedBucketStats.Op.Samples.VbPendingQueueFill, lastEntry) + cb.addBucketFieldChecked(fields, "vb_pending_queue_size", extendedBucketStats.Op.Samples.VbPendingQueueSize, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_curr_items", extendedBucketStats.Op.Samples.VbReplicaCurrItems, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_eject", extendedBucketStats.Op.Samples.VbReplicaEject, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_itm_memory", extendedBucketStats.Op.Samples.VbReplicaItmMemory, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_meta_data_memory", extendedBucketStats.Op.Samples.VbReplicaMetaDataMemory, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_num", extendedBucketStats.Op.Samples.VbReplicaNum, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_num_non_resident", extendedBucketStats.Op.Samples.VbReplicaNumNonResident, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_ops_create", extendedBucketStats.Op.Samples.VbReplicaOpsCreate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_ops_update", extendedBucketStats.Op.Samples.VbReplicaOpsUpdate, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_queue_age", extendedBucketStats.Op.Samples.VbReplicaQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_queue_drain", extendedBucketStats.Op.Samples.VbReplicaQueueDrain, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_queue_fill", extendedBucketStats.Op.Samples.VbReplicaQueueFill, lastEntry) + cb.addBucketFieldChecked(fields, "vb_replica_queue_size", extendedBucketStats.Op.Samples.VbReplicaQueueSize, lastEntry) + cb.addBucketFieldChecked(fields, "vb_total_queue_age", extendedBucketStats.Op.Samples.VbTotalQueueAge, lastEntry) + cb.addBucketFieldChecked(fields, "xdc_ops", extendedBucketStats.Op.Samples.XdcOps, lastEntry) + cb.addBucketFieldChecked(fields, "allocstall", extendedBucketStats.Op.Samples.Allocstall, lastEntry) + cb.addBucketFieldChecked(fields, "cpu_cores_available", extendedBucketStats.Op.Samples.CPUCoresAvailable, lastEntry) + cb.addBucketFieldChecked(fields, "cpu_irq_rate", extendedBucketStats.Op.Samples.CPUIrqRate, lastEntry) + cb.addBucketFieldChecked(fields, "cpu_stolen_rate", extendedBucketStats.Op.Samples.CPUStolenRate, lastEntry) + cb.addBucketFieldChecked(fields, "cpu_sys_rate", extendedBucketStats.Op.Samples.CPUSysRate, lastEntry) + cb.addBucketFieldChecked(fields, "cpu_user_rate", extendedBucketStats.Op.Samples.CPUUserRate, lastEntry) + cb.addBucketFieldChecked(fields, "cpu_utilization_rate", extendedBucketStats.Op.Samples.CPUUtilizationRate, lastEntry) + cb.addBucketFieldChecked(fields, "hibernated_requests", extendedBucketStats.Op.Samples.HibernatedRequests, lastEntry) + cb.addBucketFieldChecked(fields, "hibernated_waked", extendedBucketStats.Op.Samples.HibernatedWaked, lastEntry) + cb.addBucketFieldChecked(fields, "mem_actual_free", extendedBucketStats.Op.Samples.MemActualFree, lastEntry) + cb.addBucketFieldChecked(fields, "mem_actual_used", extendedBucketStats.Op.Samples.MemActualUsed, lastEntry) + cb.addBucketFieldChecked(fields, "mem_free", extendedBucketStats.Op.Samples.MemFree, lastEntry) + cb.addBucketFieldChecked(fields, "mem_limit", extendedBucketStats.Op.Samples.MemLimit, lastEntry) + cb.addBucketFieldChecked(fields, "mem_total", extendedBucketStats.Op.Samples.MemTotal, lastEntry) + cb.addBucketFieldChecked(fields, "mem_used_sys", extendedBucketStats.Op.Samples.MemUsedSys, lastEntry) + cb.addBucketFieldChecked(fields, "odp_report_failed", extendedBucketStats.Op.Samples.OdpReportFailed, lastEntry) + cb.addBucketFieldChecked(fields, "rest_requests", extendedBucketStats.Op.Samples.RestRequests, lastEntry) + cb.addBucketFieldChecked(fields, "swap_total", extendedBucketStats.Op.Samples.SwapTotal, lastEntry) + cb.addBucketFieldChecked(fields, "swap_used", extendedBucketStats.Op.Samples.SwapUsed, lastEntry) return nil } @@ -354,6 +354,14 @@ func (cb *Couchbase) addBucketField(fields map[string]interface{}, fieldKey stri fields[fieldKey] = value } +func (cb *Couchbase) addBucketFieldChecked(fields map[string]interface{}, fieldKey string, values []float64, index int) { + if values == nil { + return + } + + cb.addBucketField(fields, fieldKey, values[index]) +} + func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, bucketStats *BucketStats) error { // Set up an HTTP request to get the complete set of bucket stats. req, err := http.NewRequest("GET", server+"/pools/default/buckets/"+bucket+"/stats?", nil) diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index 3b927e8c4f8e9..d8f6aa3ac3ad1 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -89,34 +89,52 @@ func TestSanitizeURI(t *testing.T) { func TestGatherDetailedBucketMetrics(t *testing.T) { bucket := "Ducks" - fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" { - _, _ = w.Write([]byte(bucketStatsResponse)) - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - - var err error - var cb Couchbase - cb.BucketStatsIncluded = []string{"couch_total_disk_size"} - err = cb.Init() - require.NoError(t, err) - var acc testutil.Accumulator - bucketStats := &BucketStats{} - if err := json.Unmarshal([]byte(bucketStatsResponse), bucketStats); err != nil { - t.Fatal("parse bucketResponse", err) + tests := []struct { + name string + response string + }{ + { + name: "with all fields", + response: bucketStatsResponse, + }, + { + name: "missing fields", + response: bucketStatsResponseWithMissing, + }, } - fields := make(map[string]interface{}) - err = cb.gatherDetailedBucketStats(fakeServer.URL, bucket, fields) - require.NoError(t, err) - - acc.AddFields("couchbase_bucket", fields, nil) - - // Ensure we gathered only one metric (the one that we configured). - require.Equal(t, len(acc.Metrics), 1) - require.Equal(t, len(acc.Metrics[0].Fields), 1) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" { + _, _ = w.Write([]byte(test.response)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + + var err error + var cb Couchbase + cb.BucketStatsIncluded = []string{"couch_total_disk_size"} + err = cb.Init() + require.NoError(t, err) + var acc testutil.Accumulator + bucketStats := &BucketStats{} + if err := json.Unmarshal([]byte(test.response), bucketStats); err != nil { + t.Fatal("parse bucketResponse", err) + } + + fields := make(map[string]interface{}) + err = cb.gatherDetailedBucketStats(fakeServer.URL, bucket, fields) + require.NoError(t, err) + + acc.AddFields("couchbase_bucket", fields, nil) + + // Ensure we gathered only one metric (the one that we configured). + require.Equal(t, len(acc.Metrics), 1) + require.Equal(t, len(acc.Metrics[0].Fields), 1) + }) + } } // From `/pools/default` on a real cluster @@ -126,3 +144,4 @@ const poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":4509725982 const bucketResponse string = `{"blastro-df": {"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}}` const bucketStatsResponse string = `{"op":{"samples":{"couch_total_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341],"couch_docs_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"hit_ratio":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_cache_miss_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_resident_items_rate":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_avg_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_replica_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_pending_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"avg_disk_update_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_disk_commit_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_bg_wait_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_active_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_replica_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bytes_read":[118.1818181818182,142.2805247225025,180.8080808080808,197.7800201816347,141.9939577039275,118.5410334346505,142.4242424242424,148.4848484848485,197.3816717019134,202.4291497975709,118.0625630676085,142.4242424242424,179.6165489404642,197.979797979798,142.4242424242424,118.1818181818182,142.2805247225025,148.4848484848485,197.979797979798,201.816347124117,118.1818181818182,142.4242424242424,148.4848484848485,197.7800201816347,142.4242424242424,118.1818181818182,142.2805247225025,179.7979797979798,197.1830985915493,202.6342451874367,118.1818181818182,142.2805247225025,180.4435483870968,198.3805668016194,142.2805247225025,118.1818181818182,142.2805247225025,148.4848484848485,197.979797979798,202.020202020202,118.0625630676085,118.1818181818182,204.040404040404,197.7800201816347,142.1370967741935,118.4210526315789,118.1818181818182,172.5529767911201,197.5806451612903,202.4291497975709,118.0625630676085,118.1818181818182,172.7272727272727,197.7800201816347,142.4242424242424,118.0625630676085,118.1818181818182,204.040404040404,197.979797979798,201.816347124117],"bytes_written":[36420.20202020202,37762.86579212916,37225.25252525252,50460.14127144299,37686.80765357502,36530.90172239109,37801.0101010101,37111.11111111111,50358.50956696878,60511.13360323886,36383.45105953582,37801.0101010101,37393.54187689203,50511.11111111111,37801.0101010101,36420.20202020202,37762.86579212916,37111.11111111111,50511.11111111111,60327.95156407669,36420.20202020202,37801.0101010101,37111.11111111111,50460.14127144299,37801.0101010101,36420.20202020202,37762.86579212916,37431.31313131313,50307.84708249497,60572.44174265451,36420.20202020202,37762.86579212916,37150.20161290323,50613.36032388664,37762.86579212916,36420.20202020202,37762.86579212916,37111.11111111111,50511.11111111111,60388.88888888889,36383.45105953582,36420.20202020202,38812.12121212122,50460.14127144299,37724.79838709677,36493.92712550607,36420.20202020202,38453.07769929364,50409.27419354839,60511.13360323886,36383.45105953582,36420.20202020202,38491.91919191919,50460.14127144299,37801.0101010101,36383.45105953582,36420.20202020202,38812.12121212122,50511.11111111111,60327.95156407669],"cas_badval":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_get":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_lookup":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_set":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_docs_actual_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341],"couch_docs_data_size":[531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373],"couch_docs_disk_size":[531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373],"couch_spatial_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_actual_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"curr_connections":[14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14],"curr_items":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"curr_items_tot":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"decr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"decr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_write_queue":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_bg_fetched":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_clock_cas_drift_threshold_exceeded":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_read_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_write_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_flusher_todo":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_item_commit_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_kv_size":[10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340],"ep_max_size":[8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032],"ep_mem_high_wat":[7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627],"ep_mem_low_wat":[6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024],"ep_meta_data_memory":[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68],"ep_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_get_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_value_ejects":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_overhead":[403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824],"ep_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_tmp_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_vb_total":[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64],"evictions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_used":[4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016],"misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"timestamp":[1615918120012,1615918121003,1615918121993,1615918122984,1615918123977,1615918124964,1615918125954,1615918126944,1615918127937,1615918128925,1615918129916,1615918130906,1615918131897,1615918132887,1615918133877,1615918134867,1615918135858,1615918136848,1615918137838,1615918138829,1615918139819,1615918140809,1615918141799,1615918142790,1615918143780,1615918144770,1615918145761,1615918146751,1615918147745,1615918148732,1615918149722,1615918150713,1615918151705,1615918152693,1615918153684,1615918154674,1615918155665,1615918156655,1615918157645,1615918158635,1615918159626,1615918160616,1615918161606,1615918162597,1615918163589,1615918164577,1615918165567,1615918166558,1615918167550,1615918168538,1615918169529,1615918170519,1615918171509,1615918172500,1615918173490,1615918174481,1615918175471,1615918176461,1615918177451,1615918178442],"vb_active_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_itm_memory":[88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88],"vb_active_meta_data_memory":[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68],"vb_active_num":[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64],"vb_active_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_aborted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_accepted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_committed_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"xdc_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"allocstall":[18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615],"cpu_cores_available":[12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12],"cpu_irq_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_stolen_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_sys_rate":[4.942965779467681,5.243268776570619,6.823027718550106,4.815073272854153,4.853128991060026,5.068836045056321,4.983108108108108,4.110738255033557,3.201347935973041,3.959561920808762,3.610411418975651,3.459915611814346,3.691275167785235,4.553119730185498,6.470588235294118,4.545454545454546,5.046257359125315,5.976430976430977,5.564924114671164,3.703703703703704,3.529411764705882,3.544303797468354,3.826787512588117,5.118961788031723,7.166947723440135,5.87248322147651,4.289318755256518,5.485232067510548,4.765886287625418,4.672897196261682,4.184100418410042,4.560810810810811,7.02928870292887,6.081081081081081,5.378151260504202,6.239460370994941,8.984047019311502,6.896551724137931,9.636517328825022,9.335576114381833,7.64063811922754,8.684654300168635,6.543624161073826,6.465155331654072,5.961376994122586,3.807106598984772,3.36417157275021,3.700588730025231,3.775167785234899,9.45945945945946,3.114478114478115,3.451178451178451,4.465037910699242,3.852596314907873,3.462837837837838,5.205709487825357,5.218855218855219,6.532663316582915,5.885057471264368,4.030226700251889],"cpu_user_rate":[15.20912547528517,9.58904109589041,10.76759061833689,8.443824145150035,8.301404853128991,10.95118898623279,9.797297297297296,6.879194630872483,6.823925863521483,6.908171861836562,6.54911838790932,6.835443037974684,7.382550335570469,10.28667790893761,16.97478991596639,11.53198653198653,9.75609756097561,11.11111111111111,12.05733558178752,7.154882154882155,6.890756302521009,6.666666666666667,7.150050352467271,10.23792357606345,12.7318718381113,9.479865771812081,7.905803195962994,8.016877637130802,9.19732441471572,9.600679694137638,7.364016736401673,8.108108108108109,15.31380753138075,13.85135135135135,10.58823529411765,12.64755480607083,18.47187237615449,13.28847771236333,19.8647506339814,21.86711522287637,23.5936188077246,22.17537942664418,12.08053691275168,16.96053736356003,32.49370277078086,8.20642978003384,10.17661900756939,7.653490328006728,10.82214765100671,14.27364864864865,6.986531986531986,7.407407407407407,10.02527379949452,11.55778894472362,8.192567567567568,12.34256926952141,14.05723905723906,28.64321608040201,13.14942528735632,7.388748950461797],"cpu_utilization_rate":[20.15209125475285,14.83230987246103,17.59061833688699,13.25889741800419,13.15453384418902,16.02002503128911,14.78040540540541,10.98993288590604,10.02527379949452,10.86773378264532,10.15952980688497,10.29535864978903,11.0738255033557,14.8397976391231,23.4453781512605,16.07744107744108,14.80235492010092,17.08754208754209,17.62225969645868,10.85858585858586,10.42016806722689,10.21097046413502,10.97683786505539,15.35688536409517,19.89881956155143,15.35234899328859,12.19512195121951,13.50210970464135,13.96321070234114,14.27357689039932,11.54811715481171,12.66891891891892,22.34309623430962,19.93243243243243,15.96638655462185,18.88701517706577,27.45591939546599,20.18502943650126,29.50126796280642,31.2026913372582,31.23425692695214,30.86003372681282,18.6241610738255,23.42569269521411,38.45507976490345,12.01353637901861,13.5407905803196,11.35407905803196,14.59731543624161,23.73310810810811,10.1010101010101,10.85858585858586,14.49031171019377,15.41038525963149,11.65540540540541,17.54827875734677,19.27609427609428,35.17587939698493,19.03448275862069,11.41897565071369],"hibernated_requests":[7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7],"hibernated_waked":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_actual_free":[7004864512,6998364160,7056683008,7055605760,7059243008,7078457344,7079067648,7079514112,7078977536,7088099328,7091081216,7091773440,7091589120,7080108032,7073554432,7073914880,7080144896,7065124864,7063183360,7072677888,7073767424,7073542144,7073542144,7074902016,7053836288,7050895360,7055720448,7056822272,7057281024,7053025280,7052763136,7051984896,7049113600,7040618496,7045636096,7050907648,7021027328,7001329664,6985895936,6985895936,6955642880,7059750912,7058616320,7050067968,7047163904,7045873664,7050272768,7068528640,7073677312,7079116800,7078252544,7075880960,7065079808,7066251264,7065726976,7063486464,7064797184,7066206208,7068819456,7071809536],"mem_actual_used":[10175004672,10181505024,10123186176,10124263424,10120626176,10101411840,10100801536,10100355072,10100891648,10091769856,10088787968,10088095744,10088280064,10099761152,10106314752,10105954304,10099724288,10114744320,10116685824,10107191296,10106101760,10106327040,10106327040,10104967168,10126032896,10128973824,10124148736,10123046912,10122588160,10126843904,10127106048,10127884288,10130755584,10139250688,10134233088,10128961536,10158841856,10178539520,10193973248,10193973248,10224226304,10120118272,10121252864,10129801216,10132705280,10133995520,10129596416,10111340544,10106191872,10100752384,10101616640,10103988224,10114789376,10113617920,10114142208,10116382720,10115072000,10113662976,10111049728,10108059648],"mem_free":[7004864512,6998364160,7056683008,7055605760,7059243008,7078457344,7079067648,7079514112,7078977536,7088099328,7091081216,7091773440,7091589120,7080108032,7073554432,7073914880,7080144896,7065124864,7063183360,7072677888,7073767424,7073542144,7073542144,7074902016,7053836288,7050895360,7055720448,7056822272,7057281024,7053025280,7052763136,7051984896,7049113600,7040618496,7045636096,7050907648,7021027328,7001329664,6985895936,6985895936,6955642880,7059750912,7058616320,7050067968,7047163904,7045873664,7050272768,7068528640,7073677312,7079116800,7078252544,7075880960,7065079808,7066251264,7065726976,7063486464,7064797184,7066206208,7068819456,7071809536],"mem_limit":[17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184],"mem_total":[17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184],"mem_used_sys":[16694517760,16707862528,16608030720,16610041856,16604663808,16553811968,16553463808,16553369600,16553861120,16539238400,16536092672,16535760896,16535707648,16553418752,16559439872,16558895104,16554569728,16580468736,16582680576,16565084160,16564649984,16565272576,16565272576,16562823168,16599863296,16602157056,16597528576,16596774912,16595107840,16593002496,16593485824,16596668416,16598691840,16607469568,16599904256,16590753792,16644947968,16684613632,16714768384,16714768384,16781234176,16573353984,16575979520,16593072128,16603037696,16605077504,16599199744,16581554176,16570187776,16560140288,16561221632,16565153792,16577990656,16577200128,16578031616,16582909952,16569671680,16565702656,16560218112,16554315776],"odp_report_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"rest_requests":[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,8,2,2,2,2,2,2,2,2,3,2,2,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,2,2,2,2,2],"swap_total":[1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824],"swap_used":[122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392]},"samplesCount":60,"isPersistent":true,"lastTStamp":1615918178442,"interval":1000},"hot_keys":[{"name":"first-duck","ops":6.003482019571351e-05}]}` +const bucketStatsResponseWithMissing string = `{"op":{"samples":{"couch_total_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341]},"samplesCount":60,"isPersistent":true,"lastTStamp":1615918178442,"interval":1000},"hot_keys":[{"name":"first-duck","ops":6.003482019571351e-05}]}` From 17e86ab4ca97af454e19d34d6d0e815c248e3a08 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Thu, 1 Jul 2021 13:54:53 -0700 Subject: [PATCH 500/761] Add include_nodes filter for jenkins (#9351) --- plugins/inputs/jenkins/README.md | 4 ++- plugins/inputs/jenkins/jenkins.go | 39 ++++++++++---------------- plugins/inputs/jenkins/jenkins_test.go | 32 ++++++++++++++++++++- 3 files changed, 49 insertions(+), 26 deletions(-) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index 891e2fc0587d7..4d82f4e90ba31 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -45,7 +45,9 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API # job_include = [ "*" ] # job_exclude = [ ] - ## Nodes to exclude from gathering + ## Nodes to include or exclude from gathering + ## When using both lists, node_exclude has priority. + # node_include = [ "*" ] # node_exclude = [ ] ## Worker pool for jenkins plugin only diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index 6d764d175ce58..9543c3ab17b87 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -38,10 +38,10 @@ type Jenkins struct { MaxSubJobPerLayer int `toml:"max_subjob_per_layer"` JobExclude []string `toml:"job_exclude"` JobInclude []string `toml:"job_include"` - jobFilterExclude filter.Filter - jobFilterInclude filter.Filter + jobFilter filter.Filter NodeExclude []string `toml:"node_exclude"` + NodeInclude []string `toml:"node_include"` nodeFilter filter.Filter semaphore chan struct{} @@ -85,7 +85,9 @@ const sampleConfig = ` # job_include = [ "*" ] # job_exclude = [ ] - ## Nodes to exclude from gathering + ## Nodes to include or exclude from gathering + ## When using both lists, node_exclude has priority. + # node_include = [ "*" ] # node_exclude = [ ] ## Worker pool for jenkins plugin only @@ -162,21 +164,14 @@ func (j *Jenkins) initialize(client *http.Client) error { } j.Source = u.Hostname() - // init job filters - j.jobFilterExclude, err = filter.Compile(j.JobExclude) + // init filters + j.jobFilter, err = filter.NewIncludeExcludeFilter(j.JobInclude, j.JobExclude) if err != nil { - return fmt.Errorf("error compile job filters[%s]: %v", j.URL, err) + return fmt.Errorf("error compiling job filters[%s]: %v", j.URL, err) } - - j.jobFilterInclude, err = filter.Compile(j.JobInclude) - if err != nil { - return fmt.Errorf("error compile job filters[%s]: %v", j.URL, err) - } - - // init node filter - j.nodeFilter, err = filter.Compile(j.NodeExclude) + j.nodeFilter, err = filter.NewIncludeExcludeFilter(j.NodeInclude, j.NodeExclude) if err != nil { - return fmt.Errorf("error compile node filters[%s]: %v", j.URL, err) + return fmt.Errorf("error compiling node filters[%s]: %v", j.URL, err) } // init tcp pool with default value @@ -203,8 +198,9 @@ func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { } tags["node_name"] = n.DisplayName - // filter out excluded node_name - if j.nodeFilter != nil && j.nodeFilter.Match(tags["node_name"]) { + + // filter out excluded or not included node_name + if !j.nodeFilter.Match(tags["node_name"]) { return nil } @@ -300,13 +296,8 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { return nil } - // filter out not included job. - if j.jobFilterInclude != nil && !j.jobFilterInclude.Match(jr.hierarchyName()) { - return nil - } - - // filter out excluded job. - if j.jobFilterExclude != nil && j.jobFilterExclude.Match(jr.hierarchyName()) { + // filter out excluded or not included jobs + if !j.jobFilter.Match(jr.hierarchyName()) { return nil } diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index b97da5a0f00a4..2b74d654a6d2d 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -156,7 +156,7 @@ func TestGatherNodeData(t *testing.T) { }, }, { - name: "filtered nodes", + name: "filtered nodes (excluded)", input: mockHandler{ responseMap: map[string]interface{}{ "/api/json": struct{}{}, @@ -184,6 +184,35 @@ func TestGatherNodeData(t *testing.T) { }, }, }, + { + name: "filtered nodes (included)", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + BusyExecutors: 4, + TotalExecutors: 8, + Computers: []node{ + {DisplayName: "filtered-1"}, + {DisplayName: "filtered-1"}, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "source": "127.0.0.1", + }, + Fields: map[string]interface{}{ + "busy_executors": 4, + "total_executors": 8, + }, + }, + }, + }, + }, { name: "normal data collection", input: mockHandler{ @@ -306,6 +335,7 @@ func TestGatherNodeData(t *testing.T) { URL: ts.URL, ResponseTimeout: config.Duration(time.Microsecond), NodeExclude: []string{"ignore-1", "ignore-2"}, + NodeInclude: []string{"master", "slave"}, } te := j.initialize(&http.Client{Transport: &http.Transport{}}) acc := new(testutil.Accumulator) From a0ec75a62b3c32b3868b8bb26c1fa6ecad167840 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Tue, 6 Jul 2021 12:57:52 -0700 Subject: [PATCH 501/761] Fix segfault in kube_inventory (#9456) --- plugins/inputs/kube_inventory/endpoint.go | 8 +- .../inputs/kube_inventory/endpoint_test.go | 96 +++++++++++++++++++ 2 files changed, 102 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/kube_inventory/endpoint.go b/plugins/inputs/kube_inventory/endpoint.go index 4b3cffa59fad3..89cbf6587bf0d 100644 --- a/plugins/inputs/kube_inventory/endpoint.go +++ b/plugins/inputs/kube_inventory/endpoint.go @@ -39,7 +39,9 @@ func (ki *KubernetesInventory) gatherEndpoint(e corev1.Endpoints, acc telegraf.A fields["ready"] = true tags["hostname"] = readyAddr.Hostname - tags["node_name"] = *readyAddr.NodeName + if readyAddr.NodeName != nil { + tags["node_name"] = *readyAddr.NodeName + } if readyAddr.TargetRef != nil { tags[strings.ToLower(readyAddr.TargetRef.Kind)] = readyAddr.TargetRef.Name } @@ -57,7 +59,9 @@ func (ki *KubernetesInventory) gatherEndpoint(e corev1.Endpoints, acc telegraf.A fields["ready"] = false tags["hostname"] = notReadyAddr.Hostname - tags["node_name"] = *notReadyAddr.NodeName + if notReadyAddr.NodeName != nil { + tags["node_name"] = *notReadyAddr.NodeName + } if notReadyAddr.TargetRef != nil { tags[strings.ToLower(notReadyAddr.TargetRef.Kind)] = notReadyAddr.TargetRef.Name } diff --git a/plugins/inputs/kube_inventory/endpoint_test.go b/plugins/inputs/kube_inventory/endpoint_test.go index 6feb262cbcee7..936a64b72544b 100644 --- a/plugins/inputs/kube_inventory/endpoint_test.go +++ b/plugins/inputs/kube_inventory/endpoint_test.go @@ -157,6 +157,102 @@ func TestEndpoint(t *testing.T) { }, hasError: false, }, + { + name: "endpoints missing node_name", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/endpoints/": &v1.EndpointsList{ + Items: []v1.Endpoints{ + { + Subsets: []v1.EndpointSubset{ + { + NotReadyAddresses: []v1.EndpointAddress{ + { + Hostname: "storage-6", + TargetRef: &v1.ObjectReference{ + Kind: "pod", + Name: "storage-6", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "server", + Protocol: "TCP", + Port: 8080, + }, + }, + }, + { + Addresses: []v1.EndpointAddress{ + { + Hostname: "storage-12", + TargetRef: &v1.ObjectReference{ + Kind: "pod", + Name: "storage-12", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "server", + Protocol: "TCP", + Port: 8080, + }, + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "storage", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, + }, + }, + }, + }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_endpoint", + map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", + }, + map[string]interface{}{ + "ready": false, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "kubernetes_endpoint", + map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-12", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-12", + }, + map[string]interface{}{ + "ready": true, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, } for _, v := range tests { From 285cae2b64d24bfcf254fe3a7837828ad16a3cfa Mon Sep 17 00:00:00 2001 From: Dmitry Alimov Date: Wed, 7 Jul 2021 00:04:06 +0300 Subject: [PATCH 502/761] Fix typo in perDeviceIncludeDeprecationWarning (#9442) --- plugins/inputs/docker/docker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 4e6dc5ad4d221..5320e77b27ce8 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -75,7 +75,7 @@ const ( defaultEndpoint = "unix:///var/run/docker.sock" - perDeviceIncludeDeprecationWarning = "'perdevice' setting is set to 'true' so 'blkio' and 'network' metrics will" + + perDeviceIncludeDeprecationWarning = "'perdevice' setting is set to 'true' so 'blkio' and 'network' metrics will " + "be collected. Please set it to 'false' and use 'perdevice_include' instead to control this behaviour as " + "'perdevice' will be deprecated" From c56a652b4d295b52c3583d191e41b8cf552db42b Mon Sep 17 00:00:00 2001 From: Thomas Casteleyn Date: Tue, 6 Jul 2021 23:20:53 +0200 Subject: [PATCH 503/761] Improve documentation (#9457) --- CONTRIBUTING.md | 6 +++--- README.md | 4 +++- config/README.md | 1 + docs/developers/README.md | 1 + 4 files changed, 8 insertions(+), 4 deletions(-) create mode 120000 config/README.md create mode 120000 docs/developers/README.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2ada24a762335..ea06a968ce1c6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,11 +14,11 @@ 1. Open a new [pull request][]. #### Contributing an External Plugin *(new)* -Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](plugins/inputs/execd), [Execd Output](/plugins/outputs/execd), and [Execd Processor](plugins/processors/execd) Plugins without having to change the plugin code. -Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. -Check out our [guidelines](docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`. +Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](/plugins/inputs/execd), [Execd Output](/plugins/outputs/execd), and [Execd Processor](/plugins/processors/execd) Plugins without having to change the plugin code. +Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. +Check out our [guidelines](/docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`. #### Security Vulnerability Reporting InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our diff --git a/README.md b/README.md index 5180d0d822817..d0d67cb1932dd 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,8 @@ Telegraf is plugin-driven and has the concept of 4 distinct plugin types: New plugins are designed to be easy to contribute, pull requests are welcomed and we work to incorporate as many pull requests as possible. +If none of the internal plugins fit your needs, you could have a look at the +[list of external plugins](EXTERNAL_PLUGINS.md). ## Try in Browser :rocket: @@ -29,7 +31,7 @@ There are many ways to contribute: - [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) - Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) - [Contribute plugins](CONTRIBUTING.md) -- [Contribute external plugins](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) *(experimental)* +- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) ## Minimum Requirements diff --git a/config/README.md b/config/README.md new file mode 120000 index 0000000000000..5455122d9fbb5 --- /dev/null +++ b/config/README.md @@ -0,0 +1 @@ +../docs/CONFIGURATION.md \ No newline at end of file diff --git a/docs/developers/README.md b/docs/developers/README.md new file mode 120000 index 0000000000000..f939e75f21a8b --- /dev/null +++ b/docs/developers/README.md @@ -0,0 +1 @@ +../../CONTRIBUTING.md \ No newline at end of file From 537ac63c6894f61301d73593d4c781489610708c Mon Sep 17 00:00:00 2001 From: reimda Date: Wed, 7 Jul 2021 13:09:52 -0600 Subject: [PATCH 504/761] Sqlserver input: require authentication method to be specified (#9388) --- plugins/inputs/sqlserver/README.md | 7 +++++- plugins/inputs/sqlserver/sqlserver.go | 28 +++++++++++++++------- plugins/inputs/sqlserver/sqlserver_test.go | 2 ++ 3 files changed, 27 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index d5ad22ee7a204..10f6064581dfb 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -52,6 +52,10 @@ GO "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", ] + ## Authentication method + ## valid methods: "connection_string", "AAD" + # auth_method = "connection_string" + ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" @@ -197,11 +201,12 @@ EXECUTE ('GRANT VIEW DATABASE STATE TO []') - On the SQL Server resource of the database(s) being monitored, go to "Firewalls and Virtual Networks" tab and allowlist the monitoring VM IP address. - On the Monitoring VM, update the telegraf config file with the database connection string in the following format. Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). - On the Monitoring VM, update the telegraf config file with the database connection string in the following format. -- On the Monitoring VM, update the telegraf config file with the database connection string in the following format. The connection string only provides the server and database name, but no password (since the VM's system-assigned managed identity would be used for authentication). +- On the Monitoring VM, update the telegraf config file with the database connection string in the following format. The connection string only provides the server and database name, but no password (since the VM's system-assigned managed identity would be used for authentication). The auth method must be set to "AAD" ```toml servers = [ "Server=.database.windows.net;Port=1433;Database=;app name=telegraf;log=1;", ] + auth_method = "AAD" ``` - Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 7da1218c084ae..95f6f9b9a1989 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" "log" - "regexp" + "strings" "sync" "time" @@ -19,6 +19,7 @@ import ( // SQLServer struct type SQLServer struct { Servers []string `toml:"servers"` + AuthMethod string `toml:"auth_method"` QueryVersion int `toml:"query_version"` AzureDB bool `toml:"azuredb"` DatabaseType string `toml:"database_type"` @@ -80,6 +81,10 @@ servers = [ "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", ] +## Authentication method +## valid methods: "connection_string", "AAD" +# auth_method = "connection_string" + ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" @@ -286,11 +291,11 @@ func (s *SQLServer) Start(acc telegraf.Accumulator) error { for _, serv := range s.Servers { var pool *sql.DB - // setup connection based on authentication - rx := regexp.MustCompile(`\b(?:(Password=((?:&(?:[a-z]+|#[0-9]+);|[^;]){0,})))\b`) - - // when password is provided in connection string, use SQL auth - if rx.MatchString(serv) { + switch strings.ToLower(s.AuthMethod) { + case "connection_string": + // Use the DSN (connection string) directly. In this case, + // empty username/password causes use of Windows + // integrated authentication. var err error pool, err = sql.Open("mssql", serv) @@ -298,8 +303,8 @@ func (s *SQLServer) Start(acc telegraf.Accumulator) error { acc.AddError(err) continue } - } else { - // otherwise assume AAD Auth with system-assigned managed identity (MSI) + case "aad": + // AAD Auth with system-assigned managed identity (MSI) // AAD Auth is only supported for Azure SQL Database or Azure SQL Managed Instance if s.DatabaseType == "SQLServer" { @@ -322,6 +327,8 @@ func (s *SQLServer) Start(acc telegraf.Accumulator) error { } pool = sql.OpenDB(connector) + default: + return fmt.Errorf("unknown auth method: %v", s.AuthMethod) } s.pools = append(s.pools, pool) @@ -553,6 +560,9 @@ func (s *SQLServer) refreshToken() (*adal.Token, error) { func init() { inputs.Add("sqlserver", func() telegraf.Input { - return &SQLServer{Servers: []string{defaultServer}} + return &SQLServer{ + Servers: []string{defaultServer}, + AuthMethod: "connection_string", + } }) } diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index 3d1ddd3094025..a9a022bd23fa7 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -191,11 +191,13 @@ func TestSqlServer_HealthMetric(t *testing.T) { Servers: []string{fakeServer1, fakeServer2}, IncludeQuery: []string{"DatabaseSize", "MemoryClerk"}, HealthMetric: true, + AuthMethod: "connection_string", } s2 := &SQLServer{ Servers: []string{fakeServer1}, IncludeQuery: []string{"DatabaseSize"}, + AuthMethod: "connection_string", } // acc1 should have the health metric because it is specified in the config From b3492fcfa06d75711f08429af5525ca2a8bcb8a4 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 7 Jul 2021 14:32:45 -0600 Subject: [PATCH 505/761] Update changelog (cherry picked from commit 7f90f97853ffc1d6cc8ea6d6d4324e68249e0735) --- CHANGELOG.md | 25 +++++++++++++++++++++++++ etc/telegraf.conf | 10 ++++++++++ 2 files changed, 35 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f0a59529a9ad..6dcf1617400e1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,28 @@ +## v1.19.1 [2021-07-07] + +#### Bugfixes + + - [#9388](https://github.com/influxdata/telegraf/pull/9388) `inputs.sqlserver` Require authentication method to be specified + - [#9456](https://github.com/influxdata/telegraf/pull/9456) `inputs.kube_inventory` Fix segfault in kube_inventory + - [#9448](https://github.com/influxdata/telegraf/pull/9448) `inputs.couchbase` Fix panic + - [#9444](https://github.com/influxdata/telegraf/pull/9444) `inputs.knx_listener` Fix nil pointer panic + - [#9446](https://github.com/influxdata/telegraf/pull/9446) `inputs.procstat` Update gopsutil module to fix panic + - [#9443](https://github.com/influxdata/telegraf/pull/9443) `inputs.rabbitmq` Fix JSON unmarshall regression + - [#9369](https://github.com/influxdata/telegraf/pull/9369) Update nat-server module to v2.2.6 + - [#9429](https://github.com/influxdata/telegraf/pull/9429) `inputs.dovecot` Exclude read-timeout from being an error + - [#9423](https://github.com/influxdata/telegraf/pull/9423) `inputs.statsd` Don't stop parsing after parsing error + - [#9370](https://github.com/influxdata/telegraf/pull/9370) Update apimachinary module to v0.21.1 + - [#9373](https://github.com/influxdata/telegraf/pull/9373) Update jwt module to v1.2.2 and jwt-go module to v3.2.3 + - [#9412](https://github.com/influxdata/telegraf/pull/9412) Update couchbase Module to v0.1.0 + - [#9366](https://github.com/influxdata/telegraf/pull/9366) `inputs.snmp` Add a check for oid and name to prevent empty metrics + - [#9413](https://github.com/influxdata/telegraf/pull/9413) `outputs.http` Fix toml error when parsing insecure_skip_verify + - [#9400](https://github.com/influxdata/telegraf/pull/9400) `inputs.x509_cert` Fix 'source' tag for https + - [#9375](https://github.com/influxdata/telegraf/pull/9375) Update signalfx module to v3.3.34 + - [#9406](https://github.com/influxdata/telegraf/pull/9406) `parsers.json_v2` Don't require tags to be added to included_keys + - [#9289](https://github.com/influxdata/telegraf/pull/9289) `inputs.x509_cert` Fix SNI support + - [#9372](https://github.com/influxdata/telegraf/pull/9372) Update gjson module to v1.8.0 + - [#9379](https://github.com/influxdata/telegraf/pull/9379) Linter fixes for plugins/inputs/[de]* + ## v1.19.0 [2021-06-17] #### Release Notes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 492bf704087db..6d11fa692706d 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -5637,6 +5637,12 @@ # ## specified, metrics for all exchanges are gathered. # # exchanges = ["telegraf"] # +# ## Metrics to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all metrics +# ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" +# # metric_include = [] +# # metric_exclude = [] +# # ## Queues to include and exclude. Globs accepted. # ## Note that an empty array for both will include all queues # queue_name_include = [] @@ -8137,6 +8143,10 @@ # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", # ] # +# ## Authentication method +# ## valid methods: "connection_string", "AAD" +# # auth_method = "connection_string" +# # ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 # ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. # ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" From 1b20680e3728367987c96111d48870478064ca34 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 8 Jul 2021 13:05:41 -0500 Subject: [PATCH 506/761] Fix json_v2 parser to handle nested objects in arrays properly (#9479) --- plugins/parsers/json_v2/parser.go | 9 ++++++++- plugins/parsers/json_v2/parser_test.go | 4 ++++ .../json_v2/testdata/array_of_objects/expected.out | 2 ++ .../json_v2/testdata/array_of_objects/input.json | 14 ++++++++++++++ .../testdata/array_of_objects/telegraf.conf | 9 +++++++++ 5 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 plugins/parsers/json_v2/testdata/array_of_objects/expected.out create mode 100644 plugins/parsers/json_v2/testdata/array_of_objects/input.json create mode 100644 plugins/parsers/json_v2/testdata/array_of_objects/telegraf.conf diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index d013f6b35e24f..da128880d1d01 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -381,6 +381,7 @@ func (p *Parser) processObjects(objects []JSONObject, input []byte) ([]telegraf. // If the object has multiple array's as elements it won't comine those, they will remain separate metrics func (p *Parser) combineObject(result MetricNode) ([]MetricNode, error) { var results []MetricNode + var combineObjectResult []MetricNode if result.IsArray() || result.IsObject() { var err error var prevArray bool @@ -437,7 +438,7 @@ func (p *Parser) combineObject(result MetricNode) ([]MetricNode, error) { arrayNode.Tag = tag if val.IsObject() { prevArray = false - _, err = p.combineObject(arrayNode) + combineObjectResult, err = p.combineObject(arrayNode) if err != nil { return false } @@ -477,6 +478,12 @@ func (p *Parser) combineObject(result MetricNode) ([]MetricNode, error) { } } + if len(results) == 0 { + // If the results are empty, use the results of the call to combine object + // This happens with nested objects in array's, see the test array_of_objects + results = combineObjectResult + } + return results, nil } diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index b53eac0fe0ee8..50c981c4d51f9 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -21,6 +21,10 @@ func TestData(t *testing.T) { name string test string }{ + { + name: "Test having an array of objects", + test: "array_of_objects", + }, { name: "Test using just fields and tags", test: "fields_and_tags", diff --git a/plugins/parsers/json_v2/testdata/array_of_objects/expected.out b/plugins/parsers/json_v2/testdata/array_of_objects/expected.out new file mode 100644 index 0000000000000..75f9e5e407f21 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/array_of_objects/expected.out @@ -0,0 +1,2 @@ +file properties_mag=5.17 +file properties_mag=6.2 diff --git a/plugins/parsers/json_v2/testdata/array_of_objects/input.json b/plugins/parsers/json_v2/testdata/array_of_objects/input.json new file mode 100644 index 0000000000000..6b43061bcba43 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/array_of_objects/input.json @@ -0,0 +1,14 @@ +{ + "features": [ + { + "properties": { + "mag": 5.17 + } + }, + { + "properties": { + "mag": 6.2 + } + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/array_of_objects/telegraf.conf b/plugins/parsers/json_v2/testdata/array_of_objects/telegraf.conf new file mode 100644 index 0000000000000..9a93a1d05a3be --- /dev/null +++ b/plugins/parsers/json_v2/testdata/array_of_objects/telegraf.conf @@ -0,0 +1,9 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/5940 + +[[inputs.file]] + files = ["./testdata/array_of_objects/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "features" + From f69b37b7599ea376b34cb7a023632d700863da6f Mon Sep 17 00:00:00 2001 From: Mat Wood Date: Thu, 8 Jul 2021 13:39:46 -0700 Subject: [PATCH 507/761] Adding RFC3164 support to inputs.syslog (#8454) --- go.mod | 2 +- go.sum | 4 +- plugins/inputs/syslog/README.md | 18 +++- plugins/inputs/syslog/commons_test.go | 16 +-- plugins/inputs/syslog/rfc3164_test.go | 123 ++++++++++++++++++++++ plugins/inputs/syslog/rfc5426_test.go | 11 +- plugins/inputs/syslog/syslog.go | 130 ++++++++++++++---------- plugins/outputs/syslog/syslog.go | 4 +- plugins/outputs/syslog/syslog_mapper.go | 2 +- 9 files changed, 236 insertions(+), 74 deletions(-) create mode 100644 plugins/inputs/syslog/rfc3164_test.go diff --git a/go.mod b/go.mod index 7d9f66c0a315c..55c38e7992ceb 100644 --- a/go.mod +++ b/go.mod @@ -75,7 +75,7 @@ require ( github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 github.com/hashicorp/consul/api v1.8.1 github.com/hashicorp/go-msgpack v0.5.5 // indirect - github.com/influxdata/go-syslog/v2 v2.0.1 + github.com/influxdata/go-syslog/v3 v3.0.0 github.com/influxdata/influxdb-observability/common v0.0.0-20210429174543-86ae73cafd31 github.com/influxdata/influxdb-observability/otel2influx v0.0.0-20210429174543-86ae73cafd31 github.com/influxdata/influxdb-observability/otlp v0.0.0-20210429174543-86ae73cafd31 diff --git a/go.sum b/go.sum index 592cf33db6bbb..cd46be1eb1009 100644 --- a/go.sum +++ b/go.sum @@ -860,8 +860,8 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e h1:3J1OB4RDKwXs5l8uEV6BP/tucOJOPDQysiT7/9cuXzA= github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e/go.mod h1:WYK/Z/aXq9cbMFIL5ihcA4sX/r/3/WCas/Qvs/2fXcA= github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= -github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s= -github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo= +github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I= +github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q= github.com/influxdata/influxdb v1.8.2/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= github.com/influxdata/influxdb-observability/common v0.0.0-20210428231528-a010f53e3e02/go.mod h1:PMngVYsW4uwtzIVmj0ZfLL9UIOwo7Vs+09QHkoYMZv8= github.com/influxdata/influxdb-observability/common v0.0.0-20210429174543-86ae73cafd31 h1:pfWcpiOrWLJvicIpCiFR8vqrkVbAuKUttWvQDmSlfUM= diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index 32c5f2717b630..a821a642b0ec8 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -55,6 +55,11 @@ Syslog messages should be formatted according to ## By default best effort parsing is off. # best_effort = false + ## The RFC standard to use for message parsing + ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support) + ## Must be one of "RFC5424", or "RFC3164". + # syslog_standard = "RFC5424" + ## Character to prepend to SD-PARAMs (default = "_"). ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] @@ -155,9 +160,12 @@ echo "<13>1 2018-10-01T12:00:00.0Z example.org root - - - test" | nc -u 127.0.0. #### RFC3164 -RFC3164 encoded messages are not currently supported. You may see the following error if a message encoded in this format: -``` -E! Error in plugin [inputs.syslog]: expecting a version value in the range 1-999 [col 5] -``` +RFC3164 encoded messages are supported for UDP only, but not all vendors output valid RFC3164 messages by default + +- E.g. Cisco IOS -You can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. +If you see the following error, it is due to a message encoded in this format: + ``` + E! Error in plugin [inputs.syslog]: expecting a version value in the range 1-999 [col 5] + ``` + You can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. \ No newline at end of file diff --git a/plugins/inputs/syslog/commons_test.go b/plugins/inputs/syslog/commons_test.go index 5b30b3778ec8e..1764c891ad7b4 100644 --- a/plugins/inputs/syslog/commons_test.go +++ b/plugins/inputs/syslog/commons_test.go @@ -29,14 +29,15 @@ type testCaseStream struct { werr int // how many errors we expect in the strict mode? } -func newUDPSyslogReceiver(address string, bestEffort bool) *Syslog { +func newUDPSyslogReceiver(address string, bestEffort bool, rfc syslogRFC) *Syslog { return &Syslog{ Address: address, now: func() time.Time { return defaultTime }, - BestEffort: bestEffort, - Separator: "_", + BestEffort: bestEffort, + SyslogStandard: rfc, + Separator: "_", } } @@ -47,10 +48,11 @@ func newTCPSyslogReceiver(address string, keepAlive *config.Duration, maxConn in now: func() time.Time { return defaultTime }, - Framing: f, - ReadTimeout: &d, - BestEffort: bestEffort, - Separator: "_", + Framing: f, + ReadTimeout: &d, + BestEffort: bestEffort, + SyslogStandard: syslogRFC5424, + Separator: "_", } if keepAlive != nil { s.KeepAlivePeriod = keepAlive diff --git a/plugins/inputs/syslog/rfc3164_test.go b/plugins/inputs/syslog/rfc3164_test.go new file mode 100644 index 0000000000000..bd192a6d92a39 --- /dev/null +++ b/plugins/inputs/syslog/rfc3164_test.go @@ -0,0 +1,123 @@ +package syslog + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func timeMustParse(value string) time.Time { + format := "Jan 2 15:04:05 2006" + t, err := time.Parse(format, value) + if err != nil { + panic(fmt.Sprintf("couldn't parse time: %v", value)) + } + return t +} + +func getTestCasesForRFC3164() []testCasePacket { + currentYear := time.Now().Year() + ts := timeMustParse(fmt.Sprintf("Dec 2 16:31:03 %d", currentYear)).UnixNano() + testCases := []testCasePacket{ + { + name: "complete", + data: []byte("<13>Dec 2 16:31:03 host app: Test"), + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "appname": "app", + "severity": "notice", + "hostname": "host", + "facility": "user", + }, + map[string]interface{}{ + "timestamp": ts, + "message": "Test", + "facility_code": 1, + "severity_code": 5, + }, + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + map[string]string{ + "appname": "app", + "severity": "notice", + "hostname": "host", + "facility": "user", + }, + map[string]interface{}{ + "timestamp": ts, + "message": "Test", + "facility_code": 1, + "severity_code": 5, + }, + defaultTime, + ), + }, + } + + return testCases +} + +func testRFC3164(t *testing.T, protocol string, address string, bestEffort bool) { + for _, tc := range getTestCasesForRFC3164() { + t.Run(tc.name, func(t *testing.T) { + // Create receiver + receiver := newUDPSyslogReceiver(protocol+"://"+address, bestEffort, syslogRFC3164) + acc := &testutil.Accumulator{} + require.NoError(t, receiver.Start(acc)) + defer receiver.Stop() + + // Connect + conn, err := net.Dial(protocol, address) + require.NotNil(t, conn) + require.NoError(t, err) + + // Write + _, err = conn.Write(tc.data) + conn.Close() + if err != nil { + if err, ok := err.(*net.OpError); ok { + if err.Err.Error() == "write: message too long" { + return + } + } + } + + // Waiting ... + if tc.wantStrict == nil && tc.werr || bestEffort && tc.werr { + acc.WaitError(1) + } + if tc.wantBestEffort != nil && bestEffort || tc.wantStrict != nil && !bestEffort { + acc.Wait(1) // RFC3164 mandates a syslog message per UDP packet + } + + // Compare + var got telegraf.Metric + var want telegraf.Metric + if len(acc.Metrics) > 0 { + got = acc.GetTelegrafMetrics()[0] + } + if bestEffort { + want = tc.wantBestEffort + } else { + want = tc.wantStrict + } + testutil.RequireMetricEqual(t, want, got) + }) + } +} + +func TestRFC3164BestEffort_udp(t *testing.T) { + testRFC3164(t, "udp", address, true) +} + +func TestRFC3164Strict_udp(t *testing.T) { + testRFC3164(t, "udp", address, false) +} diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index 4e4a5a2528834..ab3fe2ceaf60f 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -232,7 +232,7 @@ func testRFC5426(t *testing.T, protocol string, address string, bestEffort bool) for _, tc := range getTestCasesForRFC5426() { t.Run(tc.name, func(t *testing.T) { // Create receiver - receiver := newUDPSyslogReceiver(protocol+"://"+address, bestEffort) + receiver := newUDPSyslogReceiver(protocol+"://"+address, bestEffort, syslogRFC5424) acc := &testutil.Accumulator{} require.NoError(t, receiver.Start(acc)) defer receiver.Stop() @@ -325,10 +325,11 @@ func TestTimeIncrement_udp(t *testing.T) { // Create receiver receiver := &Syslog{ - Address: "udp://" + address, - now: getNow, - BestEffort: false, - Separator: "_", + Address: "udp://" + address, + now: getNow, + BestEffort: false, + SyslogStandard: syslogRFC5424, + Separator: "_", } acc := &testutil.Accumulator{} require.NoError(t, receiver.Start(acc)) diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 19e07913b72c4..fc7eab1fa0828 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -13,11 +13,11 @@ import ( "time" "unicode" - "github.com/influxdata/go-syslog/v2" - "github.com/influxdata/go-syslog/v2/nontransparent" - "github.com/influxdata/go-syslog/v2/octetcounting" - "github.com/influxdata/go-syslog/v2/rfc5424" - + syslog "github.com/influxdata/go-syslog/v3" + "github.com/influxdata/go-syslog/v3/nontransparent" + "github.com/influxdata/go-syslog/v3/octetcounting" + "github.com/influxdata/go-syslog/v3/rfc3164" + "github.com/influxdata/go-syslog/v3/rfc5424" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" @@ -25,8 +25,12 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +type syslogRFC string + const defaultReadTimeout = time.Second * 5 const ipMaxPacketSize = 64 * 1024 +const syslogRFC3164 = "RFC3164" +const syslogRFC5424 = "RFC5424" // Syslog is a syslog plugin type Syslog struct { @@ -36,6 +40,7 @@ type Syslog struct { MaxConnections int ReadTimeout *config.Duration Framing framing.Framing + SyslogStandard syslogRFC Trailer nontransparent.TrailerType BestEffort bool Separator string `toml:"sdparam_separator"` @@ -97,6 +102,11 @@ var sampleConfig = ` ## By default best effort parsing is off. # best_effort = false + ## The RFC standard to use for message parsing + ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support) + ## Must be one of "RFC5424", or "RFC3164". + # syslog_standard = "RFC5424" + ## Character to prepend to SD-PARAMs (default = "_"). ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] @@ -228,10 +238,15 @@ func (s *Syslog) listenPacket(acc telegraf.Accumulator) { defer s.wg.Done() b := make([]byte, ipMaxPacketSize) var p syslog.Machine - if s.BestEffort { - p = rfc5424.NewParser(rfc5424.WithBestEffort()) - } else { + switch { + case !s.BestEffort && s.SyslogStandard == syslogRFC5424: p = rfc5424.NewParser() + case s.BestEffort && s.SyslogStandard == syslogRFC5424: + p = rfc5424.NewParser(rfc5424.WithBestEffort()) + case !s.BestEffort && s.SyslogStandard == syslogRFC3164: + p = rfc3164.NewParser(rfc3164.WithYear(rfc3164.CurrentYear{})) + case s.BestEffort && s.SyslogStandard == syslogRFC3164: + p = rfc3164.NewParser(rfc3164.WithYear(rfc3164.CurrentYear{}), rfc3164.WithBestEffort()) } for { n, _, err := s.udpListener.ReadFrom(b) @@ -379,58 +394,70 @@ func tags(msg syslog.Message) map[string]string { ts["severity"] = *msg.SeverityShortLevel() ts["facility"] = *msg.FacilityLevel() - if msg.Hostname() != nil { - ts["hostname"] = *msg.Hostname() + switch m := msg.(type) { + case *rfc5424.SyslogMessage: + populateCommonTags(&m.Base, ts) + case *rfc3164.SyslogMessage: + populateCommonTags(&m.Base, ts) } - - if msg.Appname() != nil { - ts["appname"] = *msg.Appname() - } - return ts } func fields(msg syslog.Message, s *Syslog) map[string]interface{} { - // Not checking assuming a minimally valid message - flds := map[string]interface{}{ - "version": msg.Version(), + flds := map[string]interface{}{} + + switch m := msg.(type) { + case *rfc5424.SyslogMessage: + populateCommonFields(&m.Base, flds) + // Not checking assuming a minimally valid message + flds["version"] = m.Version + + if m.StructuredData != nil { + for sdid, sdparams := range *m.StructuredData { + if len(sdparams) == 0 { + // When SD-ID does not have params we indicate its presence with a bool + flds[sdid] = true + continue + } + for name, value := range sdparams { + // Using whitespace as separator since it is not allowed by the grammar within SDID + flds[sdid+s.Separator+name] = value + } + } + } + case *rfc3164.SyslogMessage: + populateCommonFields(&m.Base, flds) } - flds["severity_code"] = int(*msg.Severity()) - flds["facility_code"] = int(*msg.Facility()) - if msg.Timestamp() != nil { - flds["timestamp"] = (*msg.Timestamp()).UnixNano() - } + return flds +} - if msg.ProcID() != nil { - flds["procid"] = *msg.ProcID() +func populateCommonFields(msg *syslog.Base, flds map[string]interface{}) { + flds["facility_code"] = int(*msg.Facility) + flds["severity_code"] = int(*msg.Severity) + if msg.Timestamp != nil { + flds["timestamp"] = (*msg.Timestamp).UnixNano() } - - if msg.MsgID() != nil { - flds["msgid"] = *msg.MsgID() + if msg.ProcID != nil { + flds["procid"] = *msg.ProcID } - - if msg.Message() != nil { - flds["message"] = strings.TrimRightFunc(*msg.Message(), func(r rune) bool { + if msg.MsgID != nil { + flds["msgid"] = *msg.MsgID + } + if msg.Message != nil { + flds["message"] = strings.TrimRightFunc(*msg.Message, func(r rune) bool { return unicode.IsSpace(r) }) } +} - if msg.StructuredData() != nil { - for sdid, sdparams := range *msg.StructuredData() { - if len(sdparams) == 0 { - // When SD-ID does not have params we indicate its presence with a bool - flds[sdid] = true - continue - } - for name, value := range sdparams { - // Using whitespace as separator since it is not allowed by the grammar within SDID - flds[sdid+s.Separator+name] = value - } - } +func populateCommonTags(msg *syslog.Base, ts map[string]string) { + if msg.Hostname != nil { + ts["hostname"] = *msg.Hostname + } + if msg.Appname != nil { + ts["appname"] = *msg.Appname } - - return flds } type unixCloser struct { @@ -463,12 +490,13 @@ func init() { defaultTimeout := config.Duration(defaultReadTimeout) inputs.Add("syslog", func() telegraf.Input { return &Syslog{ - Address: ":6514", - now: getNanoNow, - ReadTimeout: &defaultTimeout, - Framing: framing.OctetCounting, - Trailer: nontransparent.LF, - Separator: "_", + Address: ":6514", + now: getNanoNow, + ReadTimeout: &defaultTimeout, + Framing: framing.OctetCounting, + SyslogStandard: syslogRFC5424, + Trailer: nontransparent.LF, + Separator: "_", } }) } diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go index 39f1f6ec5817d..570ed15a79e6b 100644 --- a/plugins/outputs/syslog/syslog.go +++ b/plugins/outputs/syslog/syslog.go @@ -9,8 +9,8 @@ import ( "strings" "time" - "github.com/influxdata/go-syslog/v2/nontransparent" - "github.com/influxdata/go-syslog/v2/rfc5424" + "github.com/influxdata/go-syslog/v3/nontransparent" + "github.com/influxdata/go-syslog/v3/rfc5424" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" diff --git a/plugins/outputs/syslog/syslog_mapper.go b/plugins/outputs/syslog/syslog_mapper.go index 4e4848205ca28..28c74f3f97a6d 100644 --- a/plugins/outputs/syslog/syslog_mapper.go +++ b/plugins/outputs/syslog/syslog_mapper.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/influxdata/go-syslog/v2/rfc5424" + "github.com/influxdata/go-syslog/v3/rfc5424" "github.com/influxdata/telegraf" ) From 59e79fa8d4703bfcfef1cee0d81582f3de16fa5d Mon Sep 17 00:00:00 2001 From: Andre Nathan Date: Thu, 8 Jul 2021 17:54:22 -0300 Subject: [PATCH 508/761] Allow multiple keys when parsing cgroups (#8108) --- plugins/inputs/cgroup/README.md | 6 +- plugins/inputs/cgroup/cgroup_linux.go | 16 +-- plugins/inputs/cgroup/cgroup_test.go | 152 ++++++++++++++++++++++++++ 3 files changed, 164 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/cgroup/README.md b/plugins/inputs/cgroup/README.md index 3b755bbd8790d..7d0eede0f7f10 100644 --- a/plugins/inputs/cgroup/README.md +++ b/plugins/inputs/cgroup/README.md @@ -27,11 +27,11 @@ VAL1\n VAL0 VAL1 ...\n ``` -* New line separated key-space-value's +* Space separated keys and value, separated by new line ``` -KEY0 VAL0\n -KEY1 VAL1\n +KEY0 ... VAL0\n +KEY1 ... VAL1\n ``` diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index 6ecfd255a06b7..43aa68f233fc1 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -10,6 +10,7 @@ import ( "path/filepath" "regexp" "strconv" + "strings" "github.com/influxdata/telegraf" ) @@ -168,7 +169,7 @@ type fileFormat struct { parser func(measurement string, fields map[string]interface{}, b []byte) } -const keyPattern = "[[:alpha:]_]+" +const keyPattern = "[[:alnum:]:_]+" const valuePattern = "[\\d-]+" var fileFormats = [...]fileFormat{ @@ -208,17 +209,18 @@ var fileFormats = [...]fileFormat{ } }, }, - // KEY0 VAL0\n - // KEY1 VAL1\n + // KEY0 ... VAL0\n + // KEY1 ... VAL1\n // ... { - name: "New line separated key-space-value's", - pattern: "^(" + keyPattern + " " + valuePattern + "\n)+$", + name: "Space separated keys and value, separated by new line", + pattern: "^((" + keyPattern + " )+" + valuePattern + "\n)+$", parser: func(measurement string, fields map[string]interface{}, b []byte) { - re := regexp.MustCompile("(" + keyPattern + ") (" + valuePattern + ")\n") + re := regexp.MustCompile("((?:" + keyPattern + " ?)+) (" + valuePattern + ")\n") matches := re.FindAllStringSubmatch(string(b), -1) for _, v := range matches { - fields[measurement+"."+v[1]] = numberOrString(v[2]) + k := strings.ReplaceAll(v[1], " ", ".") + fields[measurement+"."+k] = numberOrString(v[2]) } }, }, diff --git a/plugins/inputs/cgroup/cgroup_test.go b/plugins/inputs/cgroup/cgroup_test.go index b3094baef31ae..bd7a191b31df7 100644 --- a/plugins/inputs/cgroup/cgroup_test.go +++ b/plugins/inputs/cgroup/cgroup_test.go @@ -180,3 +180,155 @@ func TestCgroupStatistics_6(t *testing.T) { } acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) } + +// ====================================================================== + +var cg7 = &CGroup{ + Paths: []string{"testdata/blkio"}, + Files: []string{"blkio.throttle.io_serviced"}, +} + +func TestCgroupStatistics_7(t *testing.T) { + var acc testutil.Accumulator + + err := acc.GatherError(cg7.Gather) + require.NoError(t, err) + + tags := map[string]string{ + "path": "testdata/blkio", + } + fields := map[string]interface{}{ + "blkio.throttle.io_serviced.11:0.Read": int64(0), + "blkio.throttle.io_serviced.11:0.Write": int64(0), + "blkio.throttle.io_serviced.11:0.Sync": int64(0), + "blkio.throttle.io_serviced.11:0.Async": int64(0), + "blkio.throttle.io_serviced.11:0.Total": int64(0), + "blkio.throttle.io_serviced.8:0.Read": int64(49134), + "blkio.throttle.io_serviced.8:0.Write": int64(216703), + "blkio.throttle.io_serviced.8:0.Sync": int64(177906), + "blkio.throttle.io_serviced.8:0.Async": int64(87931), + "blkio.throttle.io_serviced.8:0.Total": int64(265837), + "blkio.throttle.io_serviced.7:7.Read": int64(0), + "blkio.throttle.io_serviced.7:7.Write": int64(0), + "blkio.throttle.io_serviced.7:7.Sync": int64(0), + "blkio.throttle.io_serviced.7:7.Async": int64(0), + "blkio.throttle.io_serviced.7:7.Total": int64(0), + "blkio.throttle.io_serviced.7:6.Read": int64(0), + "blkio.throttle.io_serviced.7:6.Write": int64(0), + "blkio.throttle.io_serviced.7:6.Sync": int64(0), + "blkio.throttle.io_serviced.7:6.Async": int64(0), + "blkio.throttle.io_serviced.7:6.Total": int64(0), + "blkio.throttle.io_serviced.7:5.Read": int64(0), + "blkio.throttle.io_serviced.7:5.Write": int64(0), + "blkio.throttle.io_serviced.7:5.Sync": int64(0), + "blkio.throttle.io_serviced.7:5.Async": int64(0), + "blkio.throttle.io_serviced.7:5.Total": int64(0), + "blkio.throttle.io_serviced.7:4.Read": int64(0), + "blkio.throttle.io_serviced.7:4.Write": int64(0), + "blkio.throttle.io_serviced.7:4.Sync": int64(0), + "blkio.throttle.io_serviced.7:4.Async": int64(0), + "blkio.throttle.io_serviced.7:4.Total": int64(0), + "blkio.throttle.io_serviced.7:3.Read": int64(0), + "blkio.throttle.io_serviced.7:3.Write": int64(0), + "blkio.throttle.io_serviced.7:3.Sync": int64(0), + "blkio.throttle.io_serviced.7:3.Async": int64(0), + "blkio.throttle.io_serviced.7:3.Total": int64(0), + "blkio.throttle.io_serviced.7:2.Read": int64(0), + "blkio.throttle.io_serviced.7:2.Write": int64(0), + "blkio.throttle.io_serviced.7:2.Sync": int64(0), + "blkio.throttle.io_serviced.7:2.Async": int64(0), + "blkio.throttle.io_serviced.7:2.Total": int64(0), + "blkio.throttle.io_serviced.7:1.Read": int64(0), + "blkio.throttle.io_serviced.7:1.Write": int64(0), + "blkio.throttle.io_serviced.7:1.Sync": int64(0), + "blkio.throttle.io_serviced.7:1.Async": int64(0), + "blkio.throttle.io_serviced.7:1.Total": int64(0), + "blkio.throttle.io_serviced.7:0.Read": int64(0), + "blkio.throttle.io_serviced.7:0.Write": int64(0), + "blkio.throttle.io_serviced.7:0.Sync": int64(0), + "blkio.throttle.io_serviced.7:0.Async": int64(0), + "blkio.throttle.io_serviced.7:0.Total": int64(0), + "blkio.throttle.io_serviced.1:15.Read": int64(3), + "blkio.throttle.io_serviced.1:15.Write": int64(0), + "blkio.throttle.io_serviced.1:15.Sync": int64(0), + "blkio.throttle.io_serviced.1:15.Async": int64(3), + "blkio.throttle.io_serviced.1:15.Total": int64(3), + "blkio.throttle.io_serviced.1:14.Read": int64(3), + "blkio.throttle.io_serviced.1:14.Write": int64(0), + "blkio.throttle.io_serviced.1:14.Sync": int64(0), + "blkio.throttle.io_serviced.1:14.Async": int64(3), + "blkio.throttle.io_serviced.1:14.Total": int64(3), + "blkio.throttle.io_serviced.1:13.Read": int64(3), + "blkio.throttle.io_serviced.1:13.Write": int64(0), + "blkio.throttle.io_serviced.1:13.Sync": int64(0), + "blkio.throttle.io_serviced.1:13.Async": int64(3), + "blkio.throttle.io_serviced.1:13.Total": int64(3), + "blkio.throttle.io_serviced.1:12.Read": int64(3), + "blkio.throttle.io_serviced.1:12.Write": int64(0), + "blkio.throttle.io_serviced.1:12.Sync": int64(0), + "blkio.throttle.io_serviced.1:12.Async": int64(3), + "blkio.throttle.io_serviced.1:12.Total": int64(3), + "blkio.throttle.io_serviced.1:11.Read": int64(3), + "blkio.throttle.io_serviced.1:11.Write": int64(0), + "blkio.throttle.io_serviced.1:11.Sync": int64(0), + "blkio.throttle.io_serviced.1:11.Async": int64(3), + "blkio.throttle.io_serviced.1:11.Total": int64(3), + "blkio.throttle.io_serviced.1:10.Read": int64(3), + "blkio.throttle.io_serviced.1:10.Write": int64(0), + "blkio.throttle.io_serviced.1:10.Sync": int64(0), + "blkio.throttle.io_serviced.1:10.Async": int64(3), + "blkio.throttle.io_serviced.1:10.Total": int64(3), + "blkio.throttle.io_serviced.1:9.Read": int64(3), + "blkio.throttle.io_serviced.1:9.Write": int64(0), + "blkio.throttle.io_serviced.1:9.Sync": int64(0), + "blkio.throttle.io_serviced.1:9.Async": int64(3), + "blkio.throttle.io_serviced.1:9.Total": int64(3), + "blkio.throttle.io_serviced.1:8.Read": int64(3), + "blkio.throttle.io_serviced.1:8.Write": int64(0), + "blkio.throttle.io_serviced.1:8.Sync": int64(0), + "blkio.throttle.io_serviced.1:8.Async": int64(3), + "blkio.throttle.io_serviced.1:8.Total": int64(3), + "blkio.throttle.io_serviced.1:7.Read": int64(3), + "blkio.throttle.io_serviced.1:7.Write": int64(0), + "blkio.throttle.io_serviced.1:7.Sync": int64(0), + "blkio.throttle.io_serviced.1:7.Async": int64(3), + "blkio.throttle.io_serviced.1:7.Total": int64(3), + "blkio.throttle.io_serviced.1:6.Read": int64(3), + "blkio.throttle.io_serviced.1:6.Write": int64(0), + "blkio.throttle.io_serviced.1:6.Sync": int64(0), + "blkio.throttle.io_serviced.1:6.Async": int64(3), + "blkio.throttle.io_serviced.1:6.Total": int64(3), + "blkio.throttle.io_serviced.1:5.Read": int64(3), + "blkio.throttle.io_serviced.1:5.Write": int64(0), + "blkio.throttle.io_serviced.1:5.Sync": int64(0), + "blkio.throttle.io_serviced.1:5.Async": int64(3), + "blkio.throttle.io_serviced.1:5.Total": int64(3), + "blkio.throttle.io_serviced.1:4.Read": int64(3), + "blkio.throttle.io_serviced.1:4.Write": int64(0), + "blkio.throttle.io_serviced.1:4.Sync": int64(0), + "blkio.throttle.io_serviced.1:4.Async": int64(3), + "blkio.throttle.io_serviced.1:4.Total": int64(3), + "blkio.throttle.io_serviced.1:3.Read": int64(3), + "blkio.throttle.io_serviced.1:3.Write": int64(0), + "blkio.throttle.io_serviced.1:3.Sync": int64(0), + "blkio.throttle.io_serviced.1:3.Async": int64(3), + "blkio.throttle.io_serviced.1:3.Total": int64(3), + "blkio.throttle.io_serviced.1:2.Read": int64(3), + "blkio.throttle.io_serviced.1:2.Write": int64(0), + "blkio.throttle.io_serviced.1:2.Sync": int64(0), + "blkio.throttle.io_serviced.1:2.Async": int64(3), + "blkio.throttle.io_serviced.1:2.Total": int64(3), + "blkio.throttle.io_serviced.1:1.Read": int64(3), + "blkio.throttle.io_serviced.1:1.Write": int64(0), + "blkio.throttle.io_serviced.1:1.Sync": int64(0), + "blkio.throttle.io_serviced.1:1.Async": int64(3), + "blkio.throttle.io_serviced.1:1.Total": int64(3), + "blkio.throttle.io_serviced.1:0.Read": int64(3), + "blkio.throttle.io_serviced.1:0.Write": int64(0), + "blkio.throttle.io_serviced.1:0.Sync": int64(0), + "blkio.throttle.io_serviced.1:0.Async": int64(3), + "blkio.throttle.io_serviced.1:0.Total": int64(3), + "blkio.throttle.io_serviced.Total": int64(265885), + } + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) +} From 55a27bb62d7c0e55f0569d16b6bb1834613faa0e Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Fri, 9 Jul 2021 19:24:44 -0700 Subject: [PATCH 509/761] chore: fixing link in influxdb_listener plugin (#9431) --- plugins/inputs/influxdb_listener/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/influxdb_listener/README.md b/plugins/inputs/influxdb_listener/README.md index aae77fb965f7a..0912c36087b75 100644 --- a/plugins/inputs/influxdb_listener/README.md +++ b/plugins/inputs/influxdb_listener/README.md @@ -75,5 +75,5 @@ Metrics are created from InfluxDB Line Protocol in the request body. curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` -[influxdb_http_api]: https://docs.influxdata.com/influxdb/latest/guides/writing_data/ +[influxdb_http_api]: https://docs.influxdata.com/influxdb/v1.8/guides/write_data/ [http_listener_v2]: /plugins/inputs/http_listener_v2/README.md From f2d9dbe8cc74cc580d73a910497bba5e6843f725 Mon Sep 17 00:00:00 2001 From: Daniel Dyla Date: Sat, 10 Jul 2021 01:58:51 -0400 Subject: [PATCH 510/761] Update the dynatrace metric utils v0.1->v0.2 (#9399) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 55c38e7992ceb..131c635eae52a 100644 --- a/go.mod +++ b/go.mod @@ -47,7 +47,7 @@ require ( github.com/dimchansky/utfbom v1.1.1 github.com/docker/docker v20.10.6+incompatible github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 - github.com/dynatrace-oss/dynatrace-metric-utils-go v0.1.0 + github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0 github.com/eclipse/paho.mqtt.golang v1.3.0 github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 diff --git a/go.sum b/go.sum index cd46be1eb1009..47af7e375b3ad 100644 --- a/go.sum +++ b/go.sum @@ -474,8 +474,8 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dynatrace-oss/dynatrace-metric-utils-go v0.1.0 h1:ldKn47mFgWCoiJRXA32psdEACPKffX9O1Msh1K8M+f0= -github.com/dynatrace-oss/dynatrace-metric-utils-go v0.1.0/go.mod h1:qw0E9EJ0PnSlhWawDNuqE0zhc1hqOBUCFIAj3dd9DNw= +github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0 h1:TEG5Jj7RYM2JBCUH3nLqCmSZy6srnaefvXxjUTCuHyA= +github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0/go.mod h1:qw0E9EJ0PnSlhWawDNuqE0zhc1hqOBUCFIAj3dd9DNw= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= From 2267733a04ca86554cf8fb182dc7e6c150c62fa2 Mon Sep 17 00:00:00 2001 From: Madhushree Sreenivasa Date: Tue, 13 Jul 2021 14:08:41 -0700 Subject: [PATCH 511/761] Provide detailed error message in telegraf log (#9466) --- plugins/inputs/sqlserver/sqlserver.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 95f6f9b9a1989..4a965bec15afd 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -256,11 +256,12 @@ func (s *SQLServer) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(pool *sql.DB, query Query, serverIndex int) { defer wg.Done() - queryError := s.gatherServer(pool, query, acc) + connectionString := s.Servers[serverIndex] + queryError := s.gatherServer(pool, query, acc, connectionString) if s.HealthMetric { mutex.Lock() - s.gatherHealth(healthMetrics, s.Servers[serverIndex], queryError) + s.gatherHealth(healthMetrics, connectionString, queryError) mutex.Unlock() } @@ -344,12 +345,21 @@ func (s *SQLServer) Stop() { } } -func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumulator) error { +func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumulator, connectionString string) error { // execute query rows, err := pool.Query(query.Script) if err != nil { - return fmt.Errorf("script %s failed: %w", query.ScriptName, err) + serverName, databaseName := getConnectionIdentifiers(connectionString) + + // Error msg based on the format in SSMS. SQLErrorClass() is another term for severity/level: http://msdn.microsoft.com/en-us/library/dd304156.aspx + if sqlerr, ok := err.(mssql.Error); ok { + return fmt.Errorf("Query %s failed for server: %s and database: %s with Msg %d, Level %d, State %d:, Line %d, Error: %w", query.ScriptName, + serverName, databaseName, sqlerr.SQLErrorNumber(), sqlerr.SQLErrorClass(), sqlerr.SQLErrorState(), sqlerr.SQLErrorLineNo(), err) + } + + return fmt.Errorf("Query %s failed for server: %s and database: %s with Error: %w", query.ScriptName, serverName, databaseName, err) } + defer rows.Close() // grab the column information from the result From f57ffa2a9b77504a97f61009b6adc5e227c9c5d8 Mon Sep 17 00:00:00 2001 From: Minni Walia Date: Tue, 13 Jul 2021 21:25:24 +0000 Subject: [PATCH 512/761] Add new output plugin for Azure Data Explorer(ADX) (#9426) --- docs/LICENSE_OF_DEPENDENCIES.md | 2 + go.mod | 3 +- go.sum | 16 +- plugins/outputs/all/all.go | 1 + plugins/outputs/azure_data_explorer/README.md | 175 ++++++++++++ .../azure_data_explorer.go | 255 ++++++++++++++++++ .../azure_data_explorer_test.go | 200 ++++++++++++++ 7 files changed, 649 insertions(+), 3 deletions(-) create mode 100644 plugins/outputs/azure_data_explorer/README.md create mode 100644 plugins/outputs/azure_data_explorer/azure_data_explorer.go create mode 100644 plugins/outputs/azure_data_explorer/azure_data_explorer_test.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index a801b109b5b19..9fb1221cfe0a5 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -8,6 +8,7 @@ following works: - collectd.org [MIT License](https://git.octo.it/?p=collectd.git;a=blob;f=COPYING;hb=HEAD) - github.com/Azure/azure-amqp-common-go [MIT License](https://github.com/Azure/azure-amqp-common-go/blob/master/LICENSE) - github.com/Azure/azure-event-hubs-go [MIT License](https://github.com/Azure/azure-event-hubs-go/blob/master/LICENSE) +- github.com/Azure/azure-kusto-go [MIT](https://github.com/Azure/azure-kusto-go/blob/master/LICENSE) - github.com/Azure/azure-pipeline-go [MIT License](https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE) - github.com/Azure/azure-sdk-for-go [Apache License 2.0](https://github.com/Azure/azure-sdk-for-go/blob/master/LICENSE) - github.com/Azure/azure-storage-blob-go [MIT License](https://github.com/Azure/azure-storage-blob-go/blob/master/LICENSE) @@ -141,6 +142,7 @@ following works: - github.com/karrick/godirwalk [BSD 2-Clause "Simplified" License](https://github.com/karrick/godirwalk/blob/master/LICENSE) - github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) - github.com/klauspost/compress [BSD 3-Clause Clear License](https://github.com/klauspost/compress/blob/master/LICENSE) +- github.com/kylelemons/godebug [Apache License](https://github.com/kylelemons/godebug/blob/master/LICENSE) - github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) - github.com/mattn/go-colorable [MIT License](https://github.com/mattn/go-colorable/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 131c635eae52a..23ce741745a8c 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,8 @@ require ( code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.5.0 github.com/Azure/azure-event-hubs-go/v3 v3.2.0 - github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 + github.com/Azure/azure-kusto-go v0.3.2 + github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd github.com/Azure/go-autorest/autorest v0.11.17 github.com/Azure/go-autorest/autorest/adal v0.9.10 github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 diff --git a/go.sum b/go.sum index 47af7e375b3ad..99a3ff0d30668 100644 --- a/go.sum +++ b/go.sum @@ -41,19 +41,24 @@ github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQY github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= github.com/Azure/azure-event-hubs-go/v3 v3.2.0 h1:CQlxKH5a4NX1ZmbdqXUPRwuNGh2XvtgmhkZvkEuWzhs= github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc= +github.com/Azure/azure-kusto-go v0.3.2 h1:XpS9co6GvEDl2oICF9HsjEsQVwEpRK6wbNWb9Z+uqsY= +github.com/Azure/azure-kusto-go v0.3.2/go.mod h1:wd50n4qlsSxh+G4f80t+Fnl2ShK9AcXD+lMOstiKuYo= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v44.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v45.1.0+incompatible h1:kxtaPD8n2z5Za+9e3sKsYG2IX6PG2R6VXtgS7gAbh3A= github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= +github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= -github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 h1:7MiZ6Th+YTmwUdrKmFg5OMsGYz7IdQwjqL0RPxkhhOQ= -github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= +github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo= +github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY= github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= @@ -63,6 +68,7 @@ github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.17 h1:2zCdHwNgRH+St1J+ZMf66xI8aLr/5KMy+wWLH97zwYM= @@ -70,6 +76,7 @@ github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqe github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= @@ -396,6 +403,7 @@ github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmf github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= @@ -439,6 +447,7 @@ github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2x github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -654,6 +663,7 @@ github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= @@ -1040,6 +1050,7 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -1056,6 +1067,7 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 8fc5f8b75ed90..1bda2a78caaa5 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -5,6 +5,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/amon" _ "github.com/influxdata/telegraf/plugins/outputs/amqp" _ "github.com/influxdata/telegraf/plugins/outputs/application_insights" + _ "github.com/influxdata/telegraf/plugins/outputs/azure_data_explorer" _ "github.com/influxdata/telegraf/plugins/outputs/azure_monitor" _ "github.com/influxdata/telegraf/plugins/outputs/cloud_pubsub" _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch" diff --git a/plugins/outputs/azure_data_explorer/README.md b/plugins/outputs/azure_data_explorer/README.md new file mode 100644 index 0000000000000..bb6d0d039b0d2 --- /dev/null +++ b/plugins/outputs/azure_data_explorer/README.md @@ -0,0 +1,175 @@ +# Azure Data Explorer output plugin + +This plugin writes metrics collected by any of the input plugins of Telegraf to [Azure Data Explorer](https://azure.microsoft.com/en-au/services/data-explorer/). + +## Pre-requisites: +- [Create Azure Data Explorer cluster and database](https://docs.microsoft.com/en-us/azure/data-explorer/create-cluster-database-portal) +- VM/compute or container to host Telegraf - it could be hosted locally where an app/services to be monitored are deployed or remotely on a dedicated monitoring compute/container. + + +## Configuration: + +```toml +[[outputs.azure_data_explorer]] + ## The URI property of the Azure Data Explorer resource on Azure + ## ex: https://myadxresource.australiasoutheast.kusto.windows.net + # endpoint_url = "" + + ## The Azure Data Explorer database that the metrics will be ingested into. + ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. + ## ex: "exampledatabase" + # database = "" + + ## Timeout for Azure Data Explorer operations + # timeout = "15s" + + ## Type of metrics grouping used when pushing to Azure Data Explorer. + ## Default is "TablePerMetric" for one table per different metric. + ## For more information, please check the plugin README. + # metrics_grouping_type = "TablePerMetric" + + ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). + # table_name = "" + + # timeout = "20s" + +``` + +## Metrics Grouping + +Metrics can be grouped in two ways to be sent to Azure Data Explorer. To specify which metric grouping type the plugin should use, the respective value should be given to the `metrics_grouping_type` in the config file. If no value is given to `metrics_grouping_type`, by default, the metrics will be grouped using `TablePerMetric`. + +### TablePerMetric + +The plugin will group the metrics by the metric name, and will send each group of metrics to an Azure Data Explorer table. If the table doesn't exist the plugin will create the table, if the table exists then the plugin will try to merge the Telegraf metric schema to the existing table. For more information about the merge process check the [`.create-merge` documentation](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command). + +The table name will match the `name` property of the metric, this means that the name of the metric should comply with the Azure Data Explorer table naming constraints in case you plan to add a prefix to the metric name. + + +### SingleTable + +The plugin will send all the metrics received to a single Azure Data Explorer table. The name of the table must be supplied via `table_name` the config file. If the table doesn't exist the plugin will create the table, if the table exists then the plugin will try to merge the Telegraf metric schema to the existing table. For more information about the merge process check the [`.create-merge` documentation](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command). + + +## Tables Schema + +The schema of the Azure Data Explorer table will match the structure of the Telegraf `Metric` object. The corresponding Azure Data Explorer command would be like the following: +``` +.create-merge table ['table-name'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime) +``` + +The corresponding table mapping would be like the following: +``` +.create-or-alter table ['table-name'] ingestion json mapping 'table-name_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]' +``` + +**Note**: This plugin will automatically create Azure Data Explorer tables and corresponding table mapping as per the above mentioned commands. Since the `Metric` object is a complex type, the only output format supported is JSON. + +## Authentiation + +### Supported Authentication Methods +This plugin provides several types of authentication. The plugin will check the existence of several specific environment variables, and consequently will choose the right method. + +These methods are: + + +1. AAD Application Tokens (Service Principals with secrets or certificates). + + For guidance on how to create and register an App in Azure Active Directory check [this article](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app#register-an-application), and for more information on the Service Principals check [this article](https://docs.microsoft.com/en-us/azure/active-directory/develop/app-objects-and-service-principals). + + +2. AAD User Tokens + - Allows Telegraf to authenticate like a user. This method is mainly used + for development purposes only. + +3. Managed Service Identity (MSI) token + - If you are running Telegraf from Azure VM or infrastructure, then this is the prefered authentication method. + +[principal]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects + +Whichever method, the designated Principal needs to be assigned the `Database User` role on the Database level in the Azure Data Explorer. This role will allow the plugin to create the required tables and ingest data into it. + +### Configurations of the chosen Authentication Method + +The plugin will authenticate using the first available of the +following configurations, **it's important to understand that the assessment, and consequently choosing the authentication method, will happen in order as below**: + +1. **Client Credentials**: Azure AD Application ID and Secret. + + Set the following environment variables: + + - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + - `AZURE_CLIENT_ID`: Specifies the app client ID to use. + - `AZURE_CLIENT_SECRET`: Specifies the app secret to use. + +2. **Client Certificate**: Azure AD Application ID and X.509 Certificate. + + - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + - `AZURE_CLIENT_ID`: Specifies the app client ID to use. + - `AZURE_CERTIFICATE_PATH`: Specifies the certificate Path to use. + - `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use. + +3. **Resource Owner Password**: Azure AD User and Password. This grant type is + *not recommended*, use device login instead if you need interactive login. + + - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + - `AZURE_CLIENT_ID`: Specifies the app client ID to use. + - `AZURE_USERNAME`: Specifies the username to use. + - `AZURE_PASSWORD`: Specifies the password to use. + +4. **Azure Managed Service Identity**: Delegate credential management to the + platform. Requires that code is running in Azure, e.g. on a VM. All + configuration is handled by Azure. See [Azure Managed Service Identity][msi] + for more details. Only available when using the [Azure Resource Manager][arm]. + +[msi]: https://docs.microsoft.com/en-us/azure/active-directory/msi-overview +[arm]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview + + +## Querying collected metrics data in Azure Data Explorer +With all above configurations, you will have data stored in following standard format for each metric type stored as an Azure Data Explorer table - +ColumnName | ColumnType +---------- | ---------- +fields | dynamic +name | string +tags | dynamic +timestamp | datetime + +As "fields" and "tags" are of dynamic data type so following multiple ways to query this data - +1. **Query JSON attributes directly**: This is one of the coolest feature of Azure Data Explorer so you can run query like this - + ``` + Tablename + | where fields.size_kb == 9120 + ``` +2. **Use [Update policy](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/updatepolicy)**: to transform data, in this case, to flatten dynamic data type columns. This is the recommended performant way for querying over large data volumes compared to querying directly over JSON attributes. + ``` + // Function to transform data + .create-or-alter function Transform_TargetTableName() { + SourceTableName + | extend clerk_type = tags.clerk_type + | extend host = tags.host + } + + // Create the destination table (if it doesn't exist already) + .set-or-append TargetTableName <| Transform_TargetTableName() | limit 0 + + // Apply update policy on destination table + .alter table TargetTableName policy update + @'[{"IsEnabled": true, "Source": "SourceTableName", "Query": "Transform_TargetTableName()", "IsTransactional": false, "PropagateIngestionProperties": false}]' + + ``` + There are two ways to flatten dynamic columns as explained below. You can use either of these ways in above mentioned update policy function - 'Transform_TargetTableName()' + - Use [bag_unpack plugin](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/bag-unpackplugin) to unpack the dynamic columns as shown below. This method will unpack all columns, it could lead to issues in case source schema changes. + ``` + Tablename + | evaluate bag_unpack(tags) + | evaluate bag_unpack(fields) + ``` + + - Use [extend](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extendoperator) operator as shown below. This is the best way provided you know what columns are needed in the final destination table. Another benefit of this method is even if schema changes, it will not break your queries or dashboards. + ``` + Tablename + | extend clerk_type = tags.clerk_type + | extend host = tags.host + ``` + diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer.go b/plugins/outputs/azure_data_explorer/azure_data_explorer.go new file mode 100644 index 0000000000000..6d411fd05c3b9 --- /dev/null +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer.go @@ -0,0 +1,255 @@ +package azure_data_explorer + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "strings" + "time" + + "github.com/Azure/azure-kusto-go/kusto" + "github.com/Azure/azure-kusto-go/kusto/ingest" + "github.com/Azure/azure-kusto-go/kusto/unsafe" + "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" + "github.com/influxdata/telegraf/plugins/serializers/json" +) + +type AzureDataExplorer struct { + Endpoint string `toml:"endpoint_url"` + Database string `toml:"database"` + Log telegraf.Logger `toml:"-"` + Timeout config.Duration `toml:"timeout"` + MetricsGrouping string `toml:"metrics_grouping_type"` + TableName string `toml:"table_name"` + client localClient + ingesters map[string]localIngestor + serializer serializers.Serializer + createIngestor ingestorFactory +} + +const ( + tablePerMetric = "tablepermetric" + singleTable = "singletable" +) + +type localIngestor interface { + FromReader(ctx context.Context, reader io.Reader, options ...ingest.FileOption) (*ingest.Result, error) +} + +type localClient interface { + Mgmt(ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) +} + +type ingestorFactory func(localClient, string, string) (localIngestor, error) + +const createTableCommand = `.create-merge table ['%s'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime);` +const createTableMappingCommand = `.create-or-alter table ['%s'] ingestion json mapping '%s_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]'` + +func (adx *AzureDataExplorer) Description() string { + return "Sends metrics to Azure Data Explorer" +} + +func (adx *AzureDataExplorer) SampleConfig() string { + return ` + ## Azure Data Exlorer cluster endpoint + ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" + endpoint_url = "" + + ## The Azure Data Explorer database that the metrics will be ingested into. + ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. + ## ex: "exampledatabase" + database = "" + + ## Timeout for Azure Data Explorer operations + # timeout = "20s" + + ## Type of metrics grouping used when pushing to Azure Data Explorer. + ## Default is "TablePerMetric" for one table per different metric. + ## For more information, please check the plugin README. + # metrics_grouping_type = "TablePerMetric" + + ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). + # table_name = "" + +` +} + +func (adx *AzureDataExplorer) Connect() error { + authorizer, err := auth.NewAuthorizerFromEnvironmentWithResource(adx.Endpoint) + if err != nil { + return err + } + authorization := kusto.Authorization{ + Authorizer: authorizer, + } + client, err := kusto.New(adx.Endpoint, authorization) + + if err != nil { + return err + } + adx.client = client + adx.ingesters = make(map[string]localIngestor) + adx.createIngestor = createRealIngestor + + return nil +} + +func (adx *AzureDataExplorer) Close() error { + adx.client = nil + adx.ingesters = nil + + return nil +} + +func (adx *AzureDataExplorer) Write(metrics []telegraf.Metric) error { + if adx.MetricsGrouping == tablePerMetric { + return adx.writeTablePerMetric(metrics) + } + return adx.writeSingleTable(metrics) +} + +func (adx *AzureDataExplorer) writeTablePerMetric(metrics []telegraf.Metric) error { + tableMetricGroups := make(map[string][]byte) + // Group metrics by name and serialize them + for _, m := range metrics { + tableName := m.Name() + metricInBytes, err := adx.serializer.Serialize(m) + if err != nil { + return err + } + if existingBytes, ok := tableMetricGroups[tableName]; ok { + tableMetricGroups[tableName] = append(existingBytes, metricInBytes...) + } else { + tableMetricGroups[tableName] = metricInBytes + } + } + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, time.Duration(adx.Timeout)) + defer cancel() + + // Push the metrics for each table + format := ingest.FileFormat(ingest.JSON) + for tableName, tableMetrics := range tableMetricGroups { + if err := adx.pushMetrics(ctx, format, tableName, tableMetrics); err != nil { + return err + } + } + + return nil +} + +func (adx *AzureDataExplorer) writeSingleTable(metrics []telegraf.Metric) error { + //serialise each metric in metrics - store in byte[] + metricsArray := make([]byte, 0) + for _, m := range metrics { + metricsInBytes, err := adx.serializer.Serialize(m) + if err != nil { + return err + } + metricsArray = append(metricsArray, metricsInBytes...) + } + + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, time.Duration(adx.Timeout)) + defer cancel() + + //push metrics to a single table + format := ingest.FileFormat(ingest.JSON) + err := adx.pushMetrics(ctx, format, adx.TableName, metricsArray) + return err +} + +func (adx *AzureDataExplorer) pushMetrics(ctx context.Context, format ingest.FileOption, tableName string, metricsArray []byte) error { + ingestor, err := adx.getIngestor(ctx, tableName) + if err != nil { + return err + } + + reader := bytes.NewReader(metricsArray) + mapping := ingest.IngestionMappingRef(fmt.Sprintf("%s_mapping", tableName), ingest.JSON) + if _, err := ingestor.FromReader(ctx, reader, format, mapping); err != nil { + adx.Log.Errorf("sending ingestion request to Azure Data Explorer for table %q failed: %v", tableName, err) + } + return nil +} + +func (adx *AzureDataExplorer) getIngestor(ctx context.Context, tableName string) (localIngestor, error) { + ingestor := adx.ingesters[tableName] + + if ingestor == nil { + if err := adx.createAzureDataExplorerTable(ctx, tableName); err != nil { + return nil, fmt.Errorf("creating table for %q failed: %v", tableName, err) + } + //create a new ingestor client for the table + tempIngestor, err := adx.createIngestor(adx.client, adx.Database, tableName) + if err != nil { + return nil, fmt.Errorf("creating ingestor for %q failed: %v", tableName, err) + } + adx.ingesters[tableName] = tempIngestor + ingestor = tempIngestor + } + return ingestor, nil +} + +func (adx *AzureDataExplorer) createAzureDataExplorerTable(ctx context.Context, tableName string) error { + createStmt := kusto.NewStmt("", kusto.UnsafeStmt(unsafe.Stmt{Add: true, SuppressWarning: true})).UnsafeAdd(fmt.Sprintf(createTableCommand, tableName)) + if _, err := adx.client.Mgmt(ctx, adx.Database, createStmt); err != nil { + return err + } + + createTableMappingstmt := kusto.NewStmt("", kusto.UnsafeStmt(unsafe.Stmt{Add: true, SuppressWarning: true})).UnsafeAdd(fmt.Sprintf(createTableMappingCommand, tableName, tableName)) + if _, err := adx.client.Mgmt(ctx, adx.Database, createTableMappingstmt); err != nil { + return err + } + + return nil +} + +func (adx *AzureDataExplorer) Init() error { + if adx.Endpoint == "" { + return errors.New("Endpoint configuration cannot be empty") + } + if adx.Database == "" { + return errors.New("Database configuration cannot be empty") + } + + adx.MetricsGrouping = strings.ToLower(adx.MetricsGrouping) + if adx.MetricsGrouping == singleTable && adx.TableName == "" { + return errors.New("Table name cannot be empty for SingleTable metrics grouping type") + } + if adx.MetricsGrouping == "" { + adx.MetricsGrouping = tablePerMetric + } + if !(adx.MetricsGrouping == singleTable || adx.MetricsGrouping == tablePerMetric) { + return errors.New("Metrics grouping type is not valid") + } + + serializer, err := json.NewSerializer(time.Second) + if err != nil { + return err + } + adx.serializer = serializer + return nil +} + +func init() { + outputs.Add("azure_data_explorer", func() telegraf.Output { + return &AzureDataExplorer{ + Timeout: config.Duration(20 * time.Second), + } + }) +} + +func createRealIngestor(client localClient, database string, tableName string) (localIngestor, error) { + ingestor, err := ingest.New(client.(*kusto.Client), database, tableName) + if ingestor != nil { + return ingestor, nil + } + return nil, err +} diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go new file mode 100644 index 0000000000000..f85d074cb1f6f --- /dev/null +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go @@ -0,0 +1,200 @@ +package azure_data_explorer + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "testing" + "time" + + "github.com/Azure/azure-kusto-go/kusto" + "github.com/Azure/azure-kusto-go/kusto/ingest" + "github.com/influxdata/telegraf" + telegrafJson "github.com/influxdata/telegraf/plugins/serializers/json" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const createTableCommandExpected = `.create-merge table ['%s'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime);` +const createTableMappingCommandExpected = `.create-or-alter table ['%s'] ingestion json mapping '%s_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]'` + +func TestWrite(t *testing.T) { + testCases := []struct { + name string + inputMetric []telegraf.Metric + client *fakeClient + createIngestor ingestorFactory + metricsGrouping string + tableName string + expected map[string]interface{} + expectedWriteError string + }{ + { + name: "Valid metric", + inputMetric: testutil.MockMetrics(), + client: &fakeClient{ + queries: make([]string, 0), + internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { + f.queries = append(f.queries, query.String()) + return &kusto.RowIterator{}, nil + }, + }, + createIngestor: createFakeIngestor, + metricsGrouping: tablePerMetric, + expected: map[string]interface{}{ + "metricName": "test1", + "fields": map[string]interface{}{ + "value": 1.0, + }, + "tags": map[string]interface{}{ + "tag1": "value1", + }, + "timestamp": float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).UnixNano() / int64(time.Second)), + }, + }, + { + name: "Error in Mgmt", + inputMetric: testutil.MockMetrics(), + client: &fakeClient{ + queries: make([]string, 0), + internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { + return nil, errors.New("Something went wrong") + }, + }, + createIngestor: createFakeIngestor, + metricsGrouping: tablePerMetric, + expected: map[string]interface{}{ + "metricName": "test1", + "fields": map[string]interface{}{ + "value": 1.0, + }, + "tags": map[string]interface{}{ + "tag1": "value1", + }, + "timestamp": float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).UnixNano() / int64(time.Second)), + }, + expectedWriteError: "creating table for \"test1\" failed: Something went wrong", + }, + { + name: "SingleTable metric grouping type", + inputMetric: testutil.MockMetrics(), + client: &fakeClient{ + queries: make([]string, 0), + internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { + f.queries = append(f.queries, query.String()) + return &kusto.RowIterator{}, nil + }, + }, + createIngestor: createFakeIngestor, + metricsGrouping: singleTable, + expected: map[string]interface{}{ + "metricName": "test1", + "fields": map[string]interface{}{ + "value": 1.0, + }, + "tags": map[string]interface{}{ + "tag1": "value1", + }, + "timestamp": float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).UnixNano() / int64(time.Second)), + }, + }, + } + + for _, tC := range testCases { + t.Run(tC.name, func(t *testing.T) { + serializer, err := telegrafJson.NewSerializer(time.Second) + require.NoError(t, err) + + plugin := AzureDataExplorer{ + Endpoint: "someendpoint", + Database: "databasename", + Log: testutil.Logger{}, + MetricsGrouping: tC.metricsGrouping, + TableName: tC.tableName, + client: tC.client, + ingesters: map[string]localIngestor{}, + createIngestor: tC.createIngestor, + serializer: serializer, + } + + errorInWrite := plugin.Write(testutil.MockMetrics()) + + if tC.expectedWriteError != "" { + require.EqualError(t, errorInWrite, tC.expectedWriteError) + } else { + require.NoError(t, errorInWrite) + + expectedNameOfMetric := tC.expected["metricName"].(string) + expectedNameOfTable := expectedNameOfMetric + createdIngestor := plugin.ingesters[expectedNameOfMetric] + + if tC.metricsGrouping == singleTable { + expectedNameOfTable = tC.tableName + createdIngestor = plugin.ingesters[expectedNameOfTable] + } + + require.NotNil(t, createdIngestor) + createdFakeIngestor := createdIngestor.(*fakeIngestor) + require.Equal(t, expectedNameOfMetric, createdFakeIngestor.actualOutputMetric["name"]) + + expectedFields := tC.expected["fields"].(map[string]interface{}) + require.Equal(t, expectedFields, createdFakeIngestor.actualOutputMetric["fields"]) + + expectedTags := tC.expected["tags"].(map[string]interface{}) + require.Equal(t, expectedTags, createdFakeIngestor.actualOutputMetric["tags"]) + + expectedTime := tC.expected["timestamp"].(float64) + require.Equal(t, expectedTime, createdFakeIngestor.actualOutputMetric["timestamp"]) + + createTableString := fmt.Sprintf(createTableCommandExpected, expectedNameOfTable) + require.Equal(t, createTableString, tC.client.queries[0]) + + createTableMappingString := fmt.Sprintf(createTableMappingCommandExpected, expectedNameOfTable, expectedNameOfTable) + require.Equal(t, createTableMappingString, tC.client.queries[1]) + } + }) + } +} + +func TestInitBlankEndpoint(t *testing.T) { + plugin := AzureDataExplorer{ + Log: testutil.Logger{}, + client: &fakeClient{}, + ingesters: map[string]localIngestor{}, + createIngestor: createFakeIngestor, + } + + errorInit := plugin.Init() + require.Error(t, errorInit) + require.Equal(t, "Endpoint configuration cannot be empty", errorInit.Error()) +} + +type fakeClient struct { + queries []string + internalMgmt func(client *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) +} + +func (f *fakeClient) Mgmt(ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { + return f.internalMgmt(f, ctx, db, query, options...) +} + +type fakeIngestor struct { + actualOutputMetric map[string]interface{} +} + +func createFakeIngestor(client localClient, database string, tableName string) (localIngestor, error) { + return &fakeIngestor{}, nil +} +func (f *fakeIngestor) FromReader(ctx context.Context, reader io.Reader, options ...ingest.FileOption) (*ingest.Result, error) { + scanner := bufio.NewScanner(reader) + scanner.Scan() + firstLine := scanner.Text() + err := json.Unmarshal([]byte(firstLine), &f.actualOutputMetric) + if err != nil { + return nil, err + } + return &ingest.Result{}, nil +} From 4591c62cfc5ac5f806f1e4cd6cf9b63ba37610ac Mon Sep 17 00:00:00 2001 From: Jacob Hochstetler Date: Tue, 13 Jul 2021 16:58:49 -0500 Subject: [PATCH 513/761] Http plugin add cookie auth (#9395) --- plugins/common/cookie/cookie.go | 95 ++++++++++ plugins/common/cookie/cookie_test.go | 269 +++++++++++++++++++++++++++ plugins/common/http/config.go | 11 +- plugins/inputs/http/README.md | 13 ++ plugins/inputs/http/http.go | 12 +- plugins/outputs/http/README.md | 13 ++ plugins/outputs/http/http.go | 12 +- 7 files changed, 422 insertions(+), 3 deletions(-) create mode 100644 plugins/common/cookie/cookie.go create mode 100644 plugins/common/cookie/cookie_test.go diff --git a/plugins/common/cookie/cookie.go b/plugins/common/cookie/cookie.go new file mode 100644 index 0000000000000..92dab9104dcc5 --- /dev/null +++ b/plugins/common/cookie/cookie.go @@ -0,0 +1,95 @@ +package cookie + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" +) + +type CookieAuthConfig struct { + URL string `toml:"cookie_auth_url"` + Method string `toml:"cookie_auth_method"` + + // HTTP Basic Auth Credentials + Username string `toml:"cookie_auth_username"` + Password string `toml:"cookie_auth_password"` + + Body string `toml:"cookie_auth_body"` + Renewal config.Duration `toml:"cookie_auth_renewal"` + + client *http.Client +} + +func (c *CookieAuthConfig) Start(client *http.Client, log telegraf.Logger) (err error) { + c.client = client + + if c.Method == "" { + c.Method = http.MethodPost + } + + // add cookie jar to HTTP client + if c.client.Jar, err = cookiejar.New(nil); err != nil { + return err + } + + if err = c.auth(); err != nil { + return err + } + + // continual auth renewal if set + if c.Renewal > 0 { + ticker := time.NewTicker(time.Duration(c.Renewal)) + go func() { + for range ticker.C { + if err := c.auth(); err != nil && log != nil { + log.Errorf("renewal failed for %q: %v", c.URL, err) + } + } + }() + } + + return nil +} + +func (c *CookieAuthConfig) auth() error { + var body io.ReadCloser + if c.Body != "" { + body = ioutil.NopCloser(strings.NewReader(c.Body)) + defer body.Close() + } + + req, err := http.NewRequest(c.Method, c.URL, body) + if err != nil { + return err + } + + if c.Username != "" { + req.SetBasicAuth(c.Username, c.Password) + } + + resp, err := c.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("cookie auth renewal received status code: %v (%v)", + resp.StatusCode, + http.StatusText(resp.StatusCode), + ) + } + + return nil +} diff --git a/plugins/common/cookie/cookie_test.go b/plugins/common/cookie/cookie_test.go new file mode 100644 index 0000000000000..0231e10dd2eda --- /dev/null +++ b/plugins/common/cookie/cookie_test.go @@ -0,0 +1,269 @@ +package cookie_test + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/cookie" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const ( + reqUser = "testUser" + reqPasswd = "testPassword" + reqBody = "a body" + + authEndpointNoCreds = "/auth" + authEndpointWithBasicAuth = "/authWithCreds" + authEndpointWithBasicAuthOnlyUsername = "/authWithCredsUser" + authEndpointWithBody = "/authWithBody" +) + +var fakeCookie = &http.Cookie{ + Name: "test-cookie", + Value: "this is an auth cookie", +} + +type fakeServer struct { + *httptest.Server + *int32 +} + +func newFakeServer(t *testing.T) fakeServer { + var c int32 + return fakeServer{ + Server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + authed := func() { + atomic.AddInt32(&c, 1) // increment auth counter + http.SetCookie(w, fakeCookie) // set fake cookie + } + switch r.URL.Path { + case authEndpointNoCreds: + authed() + case authEndpointWithBody: + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + if !cmp.Equal([]byte(reqBody), body) { + w.WriteHeader(http.StatusUnauthorized) + return + } + authed() + case authEndpointWithBasicAuth: + u, p, ok := r.BasicAuth() + if !ok || u != reqUser || p != reqPasswd { + w.WriteHeader(http.StatusUnauthorized) + return + } + authed() + case authEndpointWithBasicAuthOnlyUsername: + u, p, ok := r.BasicAuth() + if !ok || u != reqUser || p != "" { + w.WriteHeader(http.StatusUnauthorized) + return + } + authed() + default: + // ensure cookie exists on request + if _, err := r.Cookie(fakeCookie.Name); err != nil { + w.WriteHeader(http.StatusForbidden) + return + } + _, _ = w.Write([]byte("good test response")) + } + })), + int32: &c, + } +} + +func (s fakeServer) checkResp(t *testing.T, expCode int) { + t.Helper() + resp, err := s.Client().Get(s.URL + "/endpoint") + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, expCode, resp.StatusCode) + + if expCode == http.StatusOK { + require.Len(t, resp.Request.Cookies(), 1) + require.Equal(t, "test-cookie", resp.Request.Cookies()[0].Name) + } +} + +func (s fakeServer) checkAuthCount(t *testing.T, atLeast int32) { + t.Helper() + require.GreaterOrEqual(t, atomic.LoadInt32(s.int32), atLeast) +} + +func TestAuthConfig_Start(t *testing.T) { + const ( + renewal = 50 * time.Millisecond + renewalCheck = 5 * renewal + ) + type fields struct { + Method string + Username string + Password string + Body string + } + type args struct { + renewal time.Duration + endpoint string + } + tests := []struct { + name string + fields fields + args args + wantErr error + assert func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) + }{ + { + name: "zero renewal does not renew", + args: args{ + renewal: 0, + endpoint: authEndpointNoCreds, + }, + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + // should have Cookie Authed once + srv.checkAuthCount(t, 1) + srv.checkResp(t, http.StatusOK) + time.Sleep(renewalCheck) + // should have never Cookie Authed again + srv.checkAuthCount(t, 1) + srv.checkResp(t, http.StatusOK) + }, + }, + { + name: "success no creds, no body, default method", + args: args{ + renewal: renewal, + endpoint: authEndpointNoCreds, + }, + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + // should have Cookie Authed once + srv.checkAuthCount(t, 1) + // default method set + require.Equal(t, http.MethodPost, c.Method) + srv.checkResp(t, http.StatusOK) + time.Sleep(renewalCheck) + // should have Cookie Authed at least twice more + srv.checkAuthCount(t, 3) + srv.checkResp(t, http.StatusOK) + }, + }, + { + name: "success with creds, no body", + fields: fields{ + Method: http.MethodPost, + Username: reqUser, + Password: reqPasswd, + }, + args: args{ + renewal: renewal, + endpoint: authEndpointWithBasicAuth, + }, + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + // should have Cookie Authed once + srv.checkAuthCount(t, 1) + srv.checkResp(t, http.StatusOK) + time.Sleep(renewalCheck) + // should have Cookie Authed at least twice more + srv.checkAuthCount(t, 3) + srv.checkResp(t, http.StatusOK) + }, + }, + { + name: "failure with bad creds", + fields: fields{ + Method: http.MethodPost, + Username: reqUser, + Password: "a bad password", + }, + args: args{ + renewal: renewal, + endpoint: authEndpointWithBasicAuth, + }, + wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + // should have never Cookie Authed + srv.checkAuthCount(t, 0) + srv.checkResp(t, http.StatusForbidden) + time.Sleep(renewalCheck) + // should have still never Cookie Authed + srv.checkAuthCount(t, 0) + srv.checkResp(t, http.StatusForbidden) + }, + }, + { + name: "success with no creds, with good body", + fields: fields{ + Method: http.MethodPost, + Body: reqBody, + }, + args: args{ + renewal: renewal, + endpoint: authEndpointWithBody, + }, + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + // should have Cookie Authed once + srv.checkAuthCount(t, 1) + srv.checkResp(t, http.StatusOK) + time.Sleep(renewalCheck) + // should have Cookie Authed at least twice more + srv.checkAuthCount(t, 3) + srv.checkResp(t, http.StatusOK) + }, + }, + { + name: "failure with bad body", + fields: fields{ + Method: http.MethodPost, + Body: "a bad body", + }, + args: args{ + renewal: renewal, + endpoint: authEndpointWithBody, + }, + wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + // should have never Cookie Authed + srv.checkAuthCount(t, 0) + srv.checkResp(t, http.StatusForbidden) + time.Sleep(renewalCheck) + // should have still never Cookie Authed + srv.checkAuthCount(t, 0) + srv.checkResp(t, http.StatusForbidden) + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + srv := newFakeServer(t) + c := &cookie.CookieAuthConfig{ + URL: srv.URL + tt.args.endpoint, + Method: tt.fields.Method, + Username: tt.fields.Username, + Password: tt.fields.Password, + Body: tt.fields.Body, + Renewal: config.Duration(tt.args.renewal), + } + + if err := c.Start(srv.Client(), testutil.Logger{Name: "cookie_auth"}); tt.wantErr != nil { + require.EqualError(t, err, tt.wantErr.Error()) + } else { + require.NoError(t, err) + } + + if tt.assert != nil { + tt.assert(t, c, srv) + } + }) + } +} diff --git a/plugins/common/http/config.go b/plugins/common/http/config.go index b61a346be7868..07b486cba294e 100644 --- a/plugins/common/http/config.go +++ b/plugins/common/http/config.go @@ -5,7 +5,9 @@ import ( "net/http" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/cookie" oauthConfig "github.com/influxdata/telegraf/plugins/common/oauth" "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/common/tls" @@ -19,9 +21,10 @@ type HTTPClientConfig struct { proxy.HTTPProxy tls.ClientConfig oauthConfig.OAuth2Config + cookie.CookieAuthConfig } -func (h *HTTPClientConfig) CreateClient(ctx context.Context) (*http.Client, error) { +func (h *HTTPClientConfig) CreateClient(ctx context.Context, log telegraf.Logger) (*http.Client, error) { tlsCfg, err := h.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -50,5 +53,11 @@ func (h *HTTPClientConfig) CreateClient(ctx context.Context) (*http.Client, erro client = h.OAuth2Config.CreateOauth2Client(ctx, client) + if h.CookieAuthConfig.URL != "" { + if err := h.CookieAuthConfig.Start(client, log); err != nil { + return nil, err + } + } + return client, nil } diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md index 4b799043b5edc..95591b9f0ad22 100644 --- a/plugins/inputs/http/README.md +++ b/plugins/inputs/http/README.md @@ -50,6 +50,15 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Optional Cookie authentication + # cookie_auth_url = "https://localhost/authMe" + # cookie_auth_method = "POST" + # cookie_auth_username = "username" + # cookie_auth_password = "pa$$word" + # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' + ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie + # cookie_auth_renewal = "5m" + ## Amount of time allowed to complete the HTTP request # timeout = "5s" @@ -73,3 +82,7 @@ The default values below are added if the input format does not specify a value: - http - tags: - url + +### Optional Cookie Authentication Settings: + +The optional Cookie Authentication Settings will retrieve a cookie from the given authorization endpoint, and use it in subsequent API requests. This is useful for services that do not provide OAuth or Basic Auth authentication, e.g. the [Tesla Powerwall API](https://www.tesla.com/support/energy/powerwall/own/monitoring-from-home-network), which uses a Cookie Auth Body to retrieve an authorization cookie. The Cookie Auth Renewal interval will renew the authorization by retrieving a new cookie at the given interval. diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index a0cffd07d6486..c61465a54c36f 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -35,6 +35,7 @@ type HTTP struct { client *http.Client httpconfig.HTTPClientConfig + Log telegraf.Logger `toml:"-"` // The parser will automatically be set by Telegraf core code because // this plugin implements the ParserInput interface (i.e. the SetParser method) @@ -84,6 +85,15 @@ var sampleConfig = ` ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Optional Cookie authentication + # cookie_auth_url = "https://localhost/authMe" + # cookie_auth_method = "POST" + # cookie_auth_username = "username" + # cookie_auth_password = "pa$$word" + # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' + ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie + # cookie_auth_renewal = "5m" + ## Amount of time allowed to complete the HTTP request # timeout = "5s" @@ -109,7 +119,7 @@ func (*HTTP) Description() string { func (h *HTTP) Init() error { ctx := context.Background() - client, err := h.HTTPClientConfig.CreateClient(ctx) + client, err := h.HTTPClientConfig.CreateClient(ctx, h.Log) if err != nil { return err } diff --git a/plugins/outputs/http/README.md b/plugins/outputs/http/README.md index 27de975c0761a..d90192b705a4f 100644 --- a/plugins/outputs/http/README.md +++ b/plugins/outputs/http/README.md @@ -34,6 +34,15 @@ data formats. For data_formats that support batching, metrics are sent in batch ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Optional Cookie authentication + # cookie_auth_url = "https://localhost/authMe" + # cookie_auth_method = "POST" + # cookie_auth_username = "username" + # cookie_auth_password = "pa$$word" + # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' + ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie + # cookie_auth_renewal = "5m" + ## Data format to output. ## Each data format has it's own unique set of configuration options, read ## more about them here: @@ -54,3 +63,7 @@ data formats. For data_formats that support batching, metrics are sent in batch ## Zero means no limit. # idle_conn_timeout = 0 ``` + +### Optional Cookie Authentication Settings: + +The optional Cookie Authentication Settings will retrieve a cookie from the given authorization endpoint, and use it in subsequent API requests. This is useful for services that do not provide OAuth or Basic Auth authentication, e.g. the [Tesla Powerwall API](https://www.tesla.com/support/energy/powerwall/own/monitoring-from-home-network), which uses a Cookie Auth Body to retrieve an authorization cookie. The Cookie Auth Renewal interval will renew the authorization by retrieving a new cookie at the given interval. diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 76d97aa9040bc..83faef0dae241 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -48,6 +48,15 @@ var sampleConfig = ` ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Optional Cookie authentication + # cookie_auth_url = "https://localhost/authMe" + # cookie_auth_method = "POST" + # cookie_auth_username = "username" + # cookie_auth_password = "pa$$word" + # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' + ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie + # cookie_auth_renewal = "5m" + ## Data format to output. ## Each data format has it's own unique set of configuration options, read ## more about them here: @@ -83,6 +92,7 @@ type HTTP struct { Headers map[string]string `toml:"headers"` ContentEncoding string `toml:"content_encoding"` httpconfig.HTTPClientConfig + Log telegraf.Logger `toml:"-"` client *http.Client serializer serializers.Serializer @@ -102,7 +112,7 @@ func (h *HTTP) Connect() error { } ctx := context.Background() - client, err := h.HTTPClientConfig.CreateClient(ctx) + client, err := h.HTTPClientConfig.CreateClient(ctx, h.Log) if err != nil { return err } From 2a72295734eaee0343dac595598070c97fa725b7 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 15 Jul 2021 11:11:58 -0500 Subject: [PATCH 514/761] Detect changes to config and reload telegraf (copy of pr #8529) (#9485) --- cmd/telegraf/telegraf.go | 53 ++++++++++++++++++++++++++++++++++++++- go.mod | 3 ++- go.sum | 4 +-- internal/usage.go | 3 +++ internal/usage_windows.go | 3 +++ 5 files changed, 62 insertions(+), 4 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 02acdbbdebeb4..688c1e5bdd6c5 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -15,6 +15,7 @@ import ( "syscall" "time" + "github.com/influxdata/tail/watch" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/config" @@ -27,6 +28,7 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" _ "github.com/influxdata/telegraf/plugins/outputs/all" _ "github.com/influxdata/telegraf/plugins/processors/all" + "gopkg.in/tomb.v1" ) type sliceFlags []string @@ -53,7 +55,7 @@ var fTestWait = flag.Int("test-wait", 0, "wait up to this many seconds for servi var fConfigs sliceFlags var fConfigDirs sliceFlags - +var fWatchConfig = flag.String("watch-config", "", "Monitoring config changes [notify, poll]") var fVersion = flag.Bool("version", false, "display the version and exit") var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration") @@ -115,6 +117,15 @@ func reloadLoop( signals := make(chan os.Signal, 1) signal.Notify(signals, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGINT) + if *fWatchConfig != "" { + for _, fConfig := range fConfigs { + if _, err := os.Stat(fConfig); err == nil { + go watchLocalConfig(signals, fConfig) + } else { + log.Printf("W! Cannot watch config %s: %s", fConfig, err) + } + } + } go func() { select { case sig := <-signals: @@ -136,6 +147,46 @@ func reloadLoop( } } +func watchLocalConfig(signals chan os.Signal, fConfig string) { + var mytomb tomb.Tomb + var watcher watch.FileWatcher + if *fWatchConfig == "poll" { + watcher = watch.NewPollingFileWatcher(fConfig) + } else { + watcher = watch.NewInotifyFileWatcher(fConfig) + } + changes, err := watcher.ChangeEvents(&mytomb, 0) + if err != nil { + log.Printf("E! Error watching config: %s\n", err) + return + } + log.Println("I! Config watcher started") + select { + case <-changes.Modified: + log.Println("I! Config file modified") + case <-changes.Deleted: + // deleted can mean moved. wait a bit a check existence + <-time.After(time.Second) + if _, err := os.Stat(fConfig); err == nil { + log.Println("I! Config file overwritten") + } else { + log.Println("W! Config file deleted") + if err := watcher.BlockUntilExists(&mytomb); err != nil { + log.Printf("E! Cannot watch for config: %s\n", err.Error()) + return + } + log.Println("I! Config file appeared") + } + case <-changes.Truncated: + log.Println("I! Config file truncated") + case <-mytomb.Dying(): + log.Println("I! Config watcher ended") + return + } + mytomb.Done() + signals <- syscall.SIGHUP +} + func runAgent(ctx context.Context, inputFilters []string, outputFilters []string, diff --git a/go.mod b/go.mod index 23ce741745a8c..05d870a52af08 100644 --- a/go.mod +++ b/go.mod @@ -80,7 +80,7 @@ require ( github.com/influxdata/influxdb-observability/common v0.0.0-20210429174543-86ae73cafd31 github.com/influxdata/influxdb-observability/otel2influx v0.0.0-20210429174543-86ae73cafd31 github.com/influxdata/influxdb-observability/otlp v0.0.0-20210429174543-86ae73cafd31 - github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 + github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 github.com/jackc/pgx/v4 v4.6.0 @@ -153,6 +153,7 @@ require ( gopkg.in/ldap.v3 v3.1.0 gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 gopkg.in/olivere/elastic.v5 v5.0.70 + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/yaml.v2 v2.4.0 gotest.tools v2.2.0+incompatible k8s.io/api v0.20.4 diff --git a/go.sum b/go.sum index 99a3ff0d30668..073cab6b455d3 100644 --- a/go.sum +++ b/go.sum @@ -886,8 +886,8 @@ github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZg github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= -github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 h1:K3A5vHPs/p8OjI4SL3l1+hs/98mhxTVDcV1Ap0c265E= -github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM= +github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 h1:0rQOs1VHLVFpAAOIR0mJEvVOIaMYFgYdreeVbgI9sII= +github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= diff --git a/internal/usage.go b/internal/usage.go index 6eff30e6b0b21..1a4b3a3496281 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -16,6 +16,9 @@ The commands & flags are: --aggregator-filter filter the aggregators to enable, separator is : --config configuration file to load --config-directory directory containing additional *.conf files + --watch-config Telegraf will restart on local config changes. Monitor changes + using either fs notifications or polling. Valid values: 'inotify' or 'poll'. + Monitoring is off by default. --plugin-directory directory containing *.so files, this directory will be searched recursively. Any Plugin found will be loaded and namespaced. diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 7fee6a1f1595c..236e1426b345c 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -16,6 +16,9 @@ The commands & flags are: --aggregator-filter filter the aggregators to enable, separator is : --config configuration file to load --config-directory directory containing additional *.conf files + --watch-config Telegraf will restart on local config changes. Monitor changes + using either fs notifications or polling. Valid values: 'inotify' or 'poll'. + Monitoring is off by default. --debug turn on debug logging --input-filter filter the inputs to enable, separator is : --input-list print available input plugins. From ff8ed3776246d3756fb8ddb46e2c882b1a02bd84 Mon Sep 17 00:00:00 2001 From: Mya Date: Mon, 19 Jul 2021 08:53:07 -0600 Subject: [PATCH 515/761] fixed percentiles not being able to be ints (#9447) --- plugins/inputs/statsd/statsd.go | 17 +++++++++++++++-- plugins/inputs/statsd/statsd_test.go | 15 ++++++++++++--- 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 4416a19f4624e..fbbfef251adf9 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -37,6 +37,19 @@ const ( var errParsing = errors.New("error parsing statsd line") +// Number will get parsed as an int or float depending on what is passed +type Number float64 + +func (n *Number) UnmarshalTOML(b []byte) error { + value, err := strconv.ParseFloat(string(b), 64) + if err != nil { + return err + } + + *n = Number(value) + return nil +} + // Statsd allows the importing of statsd and dogstatsd data. type Statsd struct { // Protocol used on listener - udp or tcp @@ -51,7 +64,7 @@ type Statsd struct { // Percentiles specifies the percentiles that will be calculated for timing // and histogram stats. - Percentiles []float64 + Percentiles []Number PercentileLimit int DeleteGauges bool @@ -307,7 +320,7 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { fields[prefix+"count"] = stats.Count() for _, percentile := range s.Percentiles { name := fmt.Sprintf("%s%v_percentile", prefix, percentile) - fields[name] = stats.Percentile(percentile) + fields[name] = stats.Percentile(float64(percentile)) } } diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index bef21b8de9eff..a236d638ba330 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -397,7 +397,7 @@ func TestParse_Counters(t *testing.T) { // Tests low-level functionality of timings func TestParse_Timings(t *testing.T) { s := NewTestStatsd() - s.Percentiles = []float64{90.0} + s.Percentiles = []Number{90.0} acc := &testutil.Accumulator{} // Test that timings work @@ -1186,7 +1186,7 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{"measurement.field"} - s.Percentiles = []float64{90.0} + s.Percentiles = []Number{90.0} acc := &testutil.Accumulator{} validLines := []string{ @@ -1234,7 +1234,7 @@ func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{} - s.Percentiles = []float64{90.0} + s.Percentiles = []Number{90.0} acc := &testutil.Accumulator{} validLines := []string{ @@ -1664,3 +1664,12 @@ func TestUdp(t *testing.T) { testutil.IgnoreTime(), ) } + +func TestParse_Ints(t *testing.T) { + s := NewTestStatsd() + s.Percentiles = []Number{90} + acc := &testutil.Accumulator{} + + require.NoError(t, s.Gather(acc)) + require.Equal(t, s.Percentiles, []Number{90.0}) +} From 2eb0ee2e1ec1b2a41e594f841609546012a3a502 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Mon, 19 Jul 2021 20:23:12 -0700 Subject: [PATCH 516/761] Add support for large uint64 and int64 numbers (#9520) --- plugins/parsers/json_v2/parser.go | 14 ++++++------ plugins/parsers/json_v2/parser_test.go | 4 ++++ .../testdata/large_numbers/expected.out | 3 +++ .../json_v2/testdata/large_numbers/input.json | 17 ++++++++++++++ .../testdata/large_numbers/telegraf.conf | 22 +++++++++++++++++++ 5 files changed, 53 insertions(+), 7 deletions(-) create mode 100644 plugins/parsers/json_v2/testdata/large_numbers/expected.out create mode 100644 plugins/parsers/json_v2/testdata/large_numbers/input.json create mode 100644 plugins/parsers/json_v2/testdata/large_numbers/telegraf.conf diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index da128880d1d01..ef8981dffc859 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -323,7 +323,7 @@ func (p *Parser) expandArray(result MetricNode) ([]MetricNode, error) { if result.Tag { result.DesiredType = "string" } - v, err := p.convertType(result.Value(), result.DesiredType, result.SetName) + v, err := p.convertType(result.Result, result.DesiredType, result.SetName) if err != nil { return nil, err } @@ -525,8 +525,8 @@ func (p *Parser) SetDefaultTags(tags map[string]string) { } // convertType will convert the value parsed from the input JSON to the specified type in the config -func (p *Parser) convertType(input interface{}, desiredType string, name string) (interface{}, error) { - switch inputType := input.(type) { +func (p *Parser) convertType(input gjson.Result, desiredType string, name string) (interface{}, error) { + switch inputType := input.Value().(type) { case string: if desiredType != "string" { switch desiredType { @@ -537,7 +537,7 @@ func (p *Parser) convertType(input interface{}, desiredType string, name string) } return r, nil case "int": - r, err := strconv.Atoi(inputType) + r, err := strconv.ParseInt(inputType, 10, 64) if err != nil { return nil, fmt.Errorf("Unable to convert field '%s' to type int: %v", name, err) } @@ -579,9 +579,9 @@ func (p *Parser) convertType(input interface{}, desiredType string, name string) case "string": return fmt.Sprint(inputType), nil case "int": - return int64(inputType), nil + return input.Int(), nil case "uint": - return uint64(inputType), nil + return input.Uint(), nil case "bool": if inputType == 0 { return false, nil @@ -596,5 +596,5 @@ func (p *Parser) convertType(input interface{}, desiredType string, name string) return nil, fmt.Errorf("unknown format '%T' for field '%s'", inputType, name) } - return input, nil + return input.Value(), nil } diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index 50c981c4d51f9..9321d7256fada 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -77,6 +77,10 @@ func TestData(t *testing.T) { name: "Test field with null", test: "null", }, + { + name: "Test large numbers (int64, uin64, float64)", + test: "large_numbers", + }, } for _, tc := range tests { diff --git a/plugins/parsers/json_v2/testdata/large_numbers/expected.out b/plugins/parsers/json_v2/testdata/large_numbers/expected.out new file mode 100644 index 0000000000000..1edb0565f6313 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/large_numbers/expected.out @@ -0,0 +1,3 @@ +file large=4294967296i,larger=9007199254740991i,largest=9223372036854775807i +file large=9007199254740991u,larger=9223372036854775807u,largest=18446744073709551615u +file large=4294967296,larger=4.294967296663e+09,largest=9007199254740991 diff --git a/plugins/parsers/json_v2/testdata/large_numbers/input.json b/plugins/parsers/json_v2/testdata/large_numbers/input.json new file mode 100644 index 0000000000000..a800d0cd0d4e5 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/large_numbers/input.json @@ -0,0 +1,17 @@ +{ + "int": { + "large": 4294967296, + "larger": 9007199254740991, + "largest": 9223372036854775807 + }, + "uint": { + "large": 9007199254740991, + "larger": 9223372036854775807, + "largest": 18446744073709551615 + }, + "float": { + "large": 4294967296, + "larger": 4.294967296663e+09, + "largest": 9007199254740991 + } +} diff --git a/plugins/parsers/json_v2/testdata/large_numbers/telegraf.conf b/plugins/parsers/json_v2/testdata/large_numbers/telegraf.conf new file mode 100644 index 0000000000000..a0b9736a045a6 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/large_numbers/telegraf.conf @@ -0,0 +1,22 @@ +[[inputs.file]] + files = ["./testdata/large_numbers/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "int" + [inputs.file.json_v2.object.fields] + large = "int" + larger = "int" + largest = "int" + [[inputs.file.json_v2.object]] + path = "uint" + [inputs.file.json_v2.object.fields] + large = "uint" + larger = "uint" + largest = "uint" + [[inputs.file.json_v2.object]] + path = "float" + [inputs.file.json_v2.object.fields] + large = "float" + larger = "float" + largest = "float" From 8965291f294aa5d5b478ab30f19224beb5fe25a0 Mon Sep 17 00:00:00 2001 From: Imran Ismail Date: Wed, 21 Jul 2021 05:08:29 +0800 Subject: [PATCH 517/761] Fix prometheus cadvisor authentication (#9497) --- plugins/inputs/prometheus/kubernetes.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 7a85d88e2c59b..c1fb3828114bc 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -88,7 +88,7 @@ func (p *Prometheus) start(ctx context.Context) error { return case <-time.After(time.Second): if p.isNodeScrapeScope { - err = p.cAdvisor(ctx) + err = p.cAdvisor(ctx, config.BearerToken) if err != nil { p.Log.Errorf("Unable to monitor pods with node scrape scope: %s", err.Error()) } @@ -145,10 +145,13 @@ func (p *Prometheus) watchPod(ctx context.Context, client *kubernetes.Clientset) return nil } -func (p *Prometheus) cAdvisor(ctx context.Context) error { +func (p *Prometheus) cAdvisor(ctx context.Context, bearerToken string) error { // The request will be the same each time podsURL := fmt.Sprintf("https://%s:10250/pods", p.NodeIP) req, err := http.NewRequest("GET", podsURL, nil) + req.Header.Set("Authorization", "Bearer "+bearerToken) + req.Header.Add("Accept", "application/json") + if err != nil { return fmt.Errorf("error when creating request to %s to get pod list: %w", podsURL, err) } From 403ce477c1f494edaf5d57e848aa9e89b95f6f71 Mon Sep 17 00:00:00 2001 From: Daniel Dyla Date: Wed, 21 Jul 2021 10:53:23 -0400 Subject: [PATCH 518/761] [output dynatrace] Initialize loggedMetrics map (#9491) --- plugins/outputs/dynatrace/dynatrace.go | 20 +++--- plugins/outputs/dynatrace/dynatrace_test.go | 70 +++++++++++++++++++++ 2 files changed, 81 insertions(+), 9 deletions(-) diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 0cca17985598d..c66bc8da2171e 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -210,17 +210,18 @@ func (d *Dynatrace) send(msg string) error { } defer resp.Body.Close() - // print metric line results as info log - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusBadRequest { - bodyBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - d.Log.Errorf("Dynatrace error reading response") - } - bodyString := string(bodyBytes) - d.Log.Debugf("Dynatrace returned: %s", bodyString) - } else { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusBadRequest { return fmt.Errorf("request failed with response code:, %d", resp.StatusCode) } + + // print metric line results as info log + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + d.Log.Errorf("Dynatrace error reading response") + } + bodyString := string(bodyBytes) + d.Log.Debugf("Dynatrace returned: %s", bodyString) + return nil } @@ -253,6 +254,7 @@ func (d *Dynatrace) Init() error { } d.normalizedDefaultDimensions = dimensions.NewNormalizedDimensionList(dims...) d.normalizedStaticDimensions = dimensions.NewNormalizedDimensionList(dimensions.NewDimension("dt.metrics.source", "telegraf")) + d.loggedMetrics = make(map[string]bool) return nil } diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index a994f0ef569f6..d9076906c1020 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -515,3 +515,73 @@ func TestSendCounterMetricWithoutTags(t *testing.T) { err = d.Write(metrics) require.NoError(t, err) } + +var warnfCalledTimes int + +type loggerStub struct { + testutil.Logger +} + +func (l loggerStub) Warnf(format string, args ...interface{}) { + warnfCalledTimes++ +} + +func TestSendUnsupportedMetric(t *testing.T) { + warnfCalledTimes = 0 + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Fatal("should not export because the only metric is an invalid type") + })) + defer ts.Close() + + d := &Dynatrace{} + + logStub := loggerStub{} + + d.URL = ts.URL + d.APIToken = "123" + d.Log = logStub + err := d.Init() + require.NoError(t, err) + err = d.Connect() + require.NoError(t, err) + + // Init metrics + + m1 := metric.New( + "mymeasurement", + map[string]string{}, + map[string]interface{}{"metric1": "unsupported_type"}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1} + + err = d.Write(metrics) + require.NoError(t, err) + // Warnf called for invalid export + require.Equal(t, 1, warnfCalledTimes) + + err = d.Write(metrics) + require.NoError(t, err) + // Warnf skipped for more invalid exports with the same name + require.Equal(t, 1, warnfCalledTimes) + + m2 := metric.New( + "mymeasurement", + map[string]string{}, + map[string]interface{}{"metric2": "unsupported_type"}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics = []telegraf.Metric{m2} + + err = d.Write(metrics) + require.NoError(t, err) + // Warnf called again for invalid export with a new metric name + require.Equal(t, 2, warnfCalledTimes) + + err = d.Write(metrics) + require.NoError(t, err) + // Warnf skipped for more invalid exports with the same name + require.Equal(t, 2, warnfCalledTimes) +} From cae338814bc9296fd07a844f2a59619d17be6dfd Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Thu, 22 Jul 2021 13:50:23 -0700 Subject: [PATCH 519/761] Switch MongoDB libraries (#9493) --- docs/LICENSE_OF_DEPENDENCIES.md | 6 +- go.mod | 15 +- go.sum | 41 +++-- plugins/inputs/mongodb/mongodb.go | 168 ++++++++---------- plugins/inputs/mongodb/mongodb_server.go | 107 +++++++---- plugins/inputs/mongodb/mongodb_server_test.go | 6 +- plugins/inputs/mongodb/mongodb_test.go | 55 ++---- plugins/inputs/mongodb/mongostat.go | 61 ++++--- 8 files changed, 243 insertions(+), 216 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 9fb1221cfe0a5..22b8393dcdaeb 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -214,9 +214,14 @@ following works: - github.com/wavefronthq/wavefront-sdk-go [Apache License 2.0](https://github.com/wavefrontHQ/wavefront-sdk-go/blob/master/LICENSE) - github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE) - github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) +- github.com/xdg-go/pbkdf2 [Apache License 2.0](https://github.com/xdg-go/pbkdf2/blob/main/LICENSE) +- github.com/xdg-go/scram [Apache License 2.0](https://github.com/xdg-go/scram/blob/master/LICENSE) +- github.com/xdg-go/stringprep [Apache License 2.0](https://github.com/xdg-go/stringprep/blob/master/LICENSE) - github.com/xdg/scram [Apache License 2.0](https://github.com/xdg-go/scram/blob/master/LICENSE) - github.com/xdg/stringprep [Apache License 2.0](https://github.com/xdg-go/stringprep/blob/master/LICENSE) +- github.com/youmark/pkcs8 [MIT License](https://github.com/youmark/pkcs8/blob/master/LICENSE) - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) +- go.mongodb.org/mongo-driver [Apache License 2.0](https://github.com/mongodb/mongo-go-driver/blob/master/LICENSE) - go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) - go.starlark.net [BSD 3-Clause "New" or "Revised" License](https://github.com/google/starlark-go/blob/master/LICENSE) - go.uber.org/atomic [MIT License](https://pkg.go.dev/go.uber.org/atomic?tab=licenses) @@ -248,7 +253,6 @@ following works: - gopkg.in/jcmturner/gokrb5.v7 [Apache License 2.0](https://github.com/jcmturner/gokrb5/tree/v7.5.0/LICENSE) - gopkg.in/jcmturner/rpc.v1 [Apache License 2.0](https://github.com/jcmturner/rpc/blob/v1.1.0/LICENSE) - gopkg.in/ldap.v3 [MIT License](https://github.com/go-ldap/ldap/blob/v3.1.7/LICENSE) -- gopkg.in/mgo.v2 [BSD 2-Clause "Simplified" License](https://github.com/go-mgo/mgo/blob/v2/LICENSE) - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) - gopkg.in/tomb.v2 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v2/LICENSE) diff --git a/go.mod b/go.mod index 05d870a52af08..053765ee69c94 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/apache/thrift v0.13.0 github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 - github.com/aws/aws-sdk-go v1.34.34 + github.com/aws/aws-sdk-go v1.38.69 github.com/aws/aws-sdk-go-v2 v1.3.2 github.com/aws/aws-sdk-go-v2/config v1.1.5 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 @@ -63,7 +63,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/protobuf v1.5.1 - github.com/golang/snappy v0.0.1 + github.com/golang/snappy v0.0.3 github.com/google/go-cmp v0.5.5 github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.13 @@ -90,6 +90,7 @@ require ( github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 + github.com/klauspost/compress v1.13.1 // indirect github.com/lib/pq v1.3.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b @@ -133,14 +134,17 @@ require ( github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c + github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect + go.mongodb.org/mongo-driver v1.5.3 go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/multierr v1.6.0 // indirect + golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 - golang.org/x/text v0.3.4 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 + golang.org/x/text v0.3.6 golang.org/x/tools v0.1.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.29.0 @@ -151,7 +155,6 @@ require ( gopkg.in/fatih/pool.v2 v2.0.0 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/ldap.v3 v3.1.0 - gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 gopkg.in/olivere/elastic.v5 v5.0.70 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 073cab6b455d3..ffd7695ca2a72 100644 --- a/go.sum +++ b/go.sum @@ -222,8 +222,9 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYKI= -github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.38.69 h1:V489lmrdkIQSfF6OAGZZ1Cavcm7eczCm2JcGvX+yHRg= +github.com/aws/aws-sdk-go v1.38.69/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= github.com/aws/aws-sdk-go-v2 v1.3.2 h1:RQj8l98yKUm0UV2Wd3w/Ms+TXV9Rs1E6Kr5tRRMfyU4= @@ -720,8 +721,9 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26W17m+OIQgE6gQ24gC1M6pUaMBAbFrTIDtwG/E= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= @@ -1000,8 +1002,10 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ= +github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -1257,6 +1261,7 @@ github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChl github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= @@ -1497,6 +1502,12 @@ github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOF github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= @@ -1508,6 +1519,9 @@ github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1528,6 +1542,8 @@ go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.5.3 h1:wWbFB6zaGHpzguF3f7tW94sVE8sFl3lHx8OZx/4OuFI= +go.mongodb.org/mongo-driver v1.5.3/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1577,6 +1593,7 @@ golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1584,8 +1601,10 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1694,8 +1713,9 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1796,8 +1816,8 @@ golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1807,8 +1827,10 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2033,7 +2055,6 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE= gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= -gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE= diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 82a1b75c4e4fb..0366636200064 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -1,10 +1,10 @@ package mongodb import ( + "context" "crypto/tls" "crypto/x509" "fmt" - "net" "net/url" "strings" "sync" @@ -13,13 +13,14 @@ import ( "github.com/influxdata/telegraf" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "gopkg.in/mgo.v2" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/readpref" ) type MongoDB struct { Servers []string Ssl Ssl - mongos map[string]*Server GatherClusterStatus bool GatherPerdbStats bool GatherColStats bool @@ -27,7 +28,9 @@ type MongoDB struct { ColStatsDbs []string tlsint.ClientConfig - Log telegraf.Logger + Log telegraf.Logger `toml:"-"` + + clients []*Server } type Ssl struct { @@ -78,118 +81,103 @@ func (*MongoDB) Description() string { return "Read metrics from one or many MongoDB servers" } -var localhost = &url.URL{Host: "mongodb://127.0.0.1:27017"} +func (m *MongoDB) Init() error { + var tlsConfig *tls.Config + if m.Ssl.Enabled { + // Deprecated TLS config + tlsConfig = &tls.Config{ + InsecureSkipVerify: m.ClientConfig.InsecureSkipVerify, + } + if len(m.Ssl.CaCerts) == 0 { + return fmt.Errorf("you must explicitly set insecure_skip_verify to skip cerificate validation") + } + + roots := x509.NewCertPool() + for _, caCert := range m.Ssl.CaCerts { + if ok := roots.AppendCertsFromPEM([]byte(caCert)); !ok { + return fmt.Errorf("failed to parse root certificate") + } + } + tlsConfig.RootCAs = roots + } else { + var err error + tlsConfig, err = m.ClientConfig.TLSConfig() + if err != nil { + return err + } + } -// Reads stats from all configured servers accumulates stats. -// Returns one of the errors encountered while gather stats (if any). -func (m *MongoDB) Gather(acc telegraf.Accumulator) error { if len(m.Servers) == 0 { - return m.gatherServer(m.getMongoServer(localhost), acc) + m.Servers = []string{"mongodb://127.0.0.1:27017"} } - var wg sync.WaitGroup - for i, serv := range m.Servers { - if !strings.HasPrefix(serv, "mongodb://") { + for _, connURL := range m.Servers { + if !strings.HasPrefix(connURL, "mongodb://") && !strings.HasPrefix(connURL, "mongodb+srv://") { // Preserve backwards compatibility for hostnames without a // scheme, broken in go 1.8. Remove in Telegraf 2.0 - serv = "mongodb://" + serv - m.Log.Warnf("Using %q as connection URL; please update your configuration to use an URL", serv) - m.Servers[i] = serv + connURL = "mongodb://" + connURL + m.Log.Warnf("Using %q as connection URL; please update your configuration to use an URL", connURL) } - u, err := url.Parse(serv) + u, err := url.Parse(connURL) if err != nil { - m.Log.Errorf("Unable to parse address %q: %s", serv, err.Error()) - continue - } - if u.Host == "" { - m.Log.Errorf("Unable to parse address %q", serv) - continue + return fmt.Errorf("unable to parse connection URL: %q", err) } - wg.Add(1) - go func(srv *Server) { - defer wg.Done() - err := m.gatherServer(srv, acc) - if err != nil { - m.Log.Errorf("Error in plugin: %v", err) - } - }(m.getMongoServer(u)) - } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() //nolint:revive - wg.Wait() - return nil -} - -func (m *MongoDB) getMongoServer(url *url.URL) *Server { - if _, ok := m.mongos[url.Host]; !ok { - m.mongos[url.Host] = &Server{ - Log: m.Log, - URL: url, + opts := options.Client().ApplyURI(connURL) + if tlsConfig != nil { + opts.TLSConfig = tlsConfig } - } - return m.mongos[url.Host] -} - -func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { - if server.Session == nil { - var dialAddrs []string - if server.URL.User != nil { - dialAddrs = []string{server.URL.String()} - } else { - dialAddrs = []string{server.URL.Host} + if opts.ReadPreference == nil { + opts.ReadPreference = readpref.Nearest() } - dialInfo, err := mgo.ParseURL(dialAddrs[0]) + + client, err := mongo.Connect(ctx, opts) if err != nil { - return fmt.Errorf("unable to parse URL %q: %s", dialAddrs[0], err.Error()) - } - dialInfo.Direct = true - dialInfo.Timeout = 5 * time.Second - - var tlsConfig *tls.Config - - if m.Ssl.Enabled { - // Deprecated TLS config - tlsConfig = &tls.Config{} - if len(m.Ssl.CaCerts) > 0 { - roots := x509.NewCertPool() - for _, caCert := range m.Ssl.CaCerts { - ok := roots.AppendCertsFromPEM([]byte(caCert)) - if !ok { - return fmt.Errorf("failed to parse root certificate") - } - } - tlsConfig.RootCAs = roots - } else { - tlsConfig.InsecureSkipVerify = true - } - } else { - tlsConfig, err = m.ClientConfig.TLSConfig() - if err != nil { - return err - } + return fmt.Errorf("unable to connect to MongoDB: %q", err) } - // If configured to use TLS, add a dial function - if tlsConfig != nil { - dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) { - return tls.Dial("tcp", addr.String(), tlsConfig) - } + err = client.Ping(ctx, opts.ReadPreference) + if err != nil { + return fmt.Errorf("unable to connect to MongoDB: %s", err) } - sess, err := mgo.DialWithInfo(dialInfo) - if err != nil { - return fmt.Errorf("unable to connect to MongoDB: %s", err.Error()) + server := &Server{ + client: client, + hostname: u.Host, + Log: m.Log, } - server.Session = sess + m.clients = append(m.clients, server) } - return server.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.GatherTopStat, m.ColStatsDbs) + + return nil +} + +// Reads stats from all configured servers accumulates stats. +// Returns one of the errors encountered while gather stats (if any). +func (m *MongoDB) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + for _, client := range m.clients { + wg.Add(1) + go func(srv *Server) { + defer wg.Done() + err := srv.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.GatherTopStat, m.ColStatsDbs) + if err != nil { + m.Log.Errorf("failed to gather data: %q", err) + } + }(client) + } + + wg.Wait() + return nil } func init() { inputs.Add("mongodb", func() telegraf.Input { return &MongoDB{ - mongos: make(map[string]*Server), GatherClusterStatus: true, GatherPerdbStats: false, GatherColStats: false, diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index e362a0bd7f008..723b0698b9ac8 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -1,19 +1,22 @@ package mongodb import ( + "context" "fmt" - "net/url" + "go.mongodb.org/mongo-driver/bson/primitive" "strings" "time" "github.com/influxdata/telegraf" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/x/bsonx" ) type Server struct { - URL *url.URL - Session *mgo.Session + client *mongo.Client + hostname string lastResult *MongoStatus Log telegraf.Logger @@ -21,12 +24,12 @@ type Server struct { func (s *Server) getDefaultTags() map[string]string { tags := make(map[string]string) - tags["hostname"] = s.URL.Host + tags["hostname"] = s.hostname return tags } type oplogEntry struct { - Timestamp bson.MongoTimestamp `bson:"ts"` + Timestamp primitive.Timestamp `bson:"ts"` } func IsAuthorization(err error) bool { @@ -41,15 +44,23 @@ func (s *Server) authLog(err error) { } } +func (s *Server) runCommand(database string, cmd interface{}, result interface{}) error { + r := s.client.Database(database).RunCommand(context.Background(), cmd) + if r.Err() != nil { + return r.Err() + } + return r.Decode(result) +} + func (s *Server) gatherServerStatus() (*ServerStatus, error) { serverStatus := &ServerStatus{} - err := s.Session.DB("admin").Run(bson.D{ + err := s.runCommand("admin", bson.D{ { - Name: "serverStatus", + Key: "serverStatus", Value: 1, }, { - Name: "recordStats", + Key: "recordStats", Value: 0, }, }, serverStatus) @@ -61,9 +72,9 @@ func (s *Server) gatherServerStatus() (*ServerStatus, error) { func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) { replSetStatus := &ReplSetStatus{} - err := s.Session.DB("admin").Run(bson.D{ + err := s.runCommand("admin", bson.D{ { - Name: "replSetGetStatus", + Key: "replSetGetStatus", Value: 1, }, }, replSetStatus) @@ -74,35 +85,52 @@ func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) { } func (s *Server) gatherTopStatData() (*TopStats, error) { - topStats := &TopStats{} - err := s.Session.DB("admin").Run(bson.D{ + dest := &bsonx.Doc{} + err := s.runCommand("admin", bson.D{ { - Name: "top", + Key: "top", Value: 1, }, - }, topStats) + }, dest) + if err != nil { + return nil, err + } + + // From: https://github.com/mongodb/mongo-tools/blob/master/mongotop/mongotop.go#L49-L70 + // Remove 'note' field that prevents easy decoding, then round-trip + // again to simplify unpacking into the nested data structure + totals, err := dest.LookupErr("totals") + if err != nil { + return nil, err + } + recoded, err := totals.Document().Delete("note").MarshalBSON() if err != nil { return nil, err } - return topStats, nil + topInfo := make(map[string]TopStatCollection) + if err := bson.Unmarshal(recoded, &topInfo); err != nil { + return nil, err + } + + return &TopStats{Totals: topInfo}, nil } func (s *Server) gatherClusterStatus() (*ClusterStatus, error) { - chunkCount, err := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count() + chunkCount, err := s.client.Database("config").Collection("chunks").CountDocuments(context.Background(), bson.M{"jumbo": true}) if err != nil { return nil, err } return &ClusterStatus{ - JumboChunksCount: int64(chunkCount), + JumboChunksCount: chunkCount, }, nil } func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) { shardStats := &ShardStats{} - err := s.Session.DB("admin").Run(bson.D{ + err := s.runCommand("admin", bson.D{ { - Name: "shardConnPoolStats", + Key: "shardConnPoolStats", Value: 1, }, }, &shardStats) @@ -114,9 +142,9 @@ func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) { func (s *Server) gatherDBStats(name string) (*Db, error) { stats := &DbStatsData{} - err := s.Session.DB(name).Run(bson.D{ + err := s.runCommand(name, bson.D{ { - Name: "dbStats", + Key: "dbStats", Value: 1, }, }, stats) @@ -134,19 +162,25 @@ func (s *Server) getOplogReplLag(collection string) (*OplogStats, error) { query := bson.M{"ts": bson.M{"$exists": true}} var first oplogEntry - err := s.Session.DB("local").C(collection).Find(query).Sort("$natural").Limit(1).One(&first) - if err != nil { + firstResult := s.client.Database("local").Collection(collection).FindOne(context.Background(), query, options.FindOne().SetSort(bson.M{"$natural": 1})) + if firstResult.Err() != nil { + return nil, firstResult.Err() + } + if err := firstResult.Decode(&first); err != nil { return nil, err } var last oplogEntry - err = s.Session.DB("local").C(collection).Find(query).Sort("-$natural").Limit(1).One(&last) - if err != nil { + lastResult := s.client.Database("local").Collection(collection).FindOne(context.Background(), query, options.FindOne().SetSort(bson.M{"$natural": -1})) + if lastResult.Err() != nil { + return nil, lastResult.Err() + } + if err := lastResult.Decode(&last); err != nil { return nil, err } - firstTime := time.Unix(int64(first.Timestamp>>32), 0) - lastTime := time.Unix(int64(last.Timestamp>>32), 0) + firstTime := time.Unix(int64(first.Timestamp.T), 0) + lastTime := time.Unix(int64(last.Timestamp.T), 0) stats := &OplogStats{ TimeDiff: int64(lastTime.Sub(firstTime).Seconds()), } @@ -168,7 +202,7 @@ func (s *Server) gatherOplogStats() (*OplogStats, error) { } func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) { - names, err := s.Session.DatabaseNames() + names, err := s.client.ListDatabaseNames(context.Background(), bson.D{}) if err != nil { return nil, err } @@ -177,16 +211,16 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) for _, dbName := range names { if stringInSlice(dbName, colStatsDbs) || len(colStatsDbs) == 0 { var colls []string - colls, err = s.Session.DB(dbName).CollectionNames() + colls, err = s.client.Database(dbName).ListCollectionNames(context.Background(), bson.D{}) if err != nil { s.Log.Errorf("Error getting collection names: %s", err.Error()) continue } for _, colName := range colls { colStatLine := &ColStatsData{} - err = s.Session.DB(dbName).Run(bson.D{ + err = s.runCommand(dbName, bson.D{ { - Name: "collStats", + Key: "collStats", Value: colName, }, }, colStatLine) @@ -207,9 +241,6 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) } func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, gatherDbStats bool, gatherColStats bool, gatherTopStat bool, colStatsDbs []string) error { - s.Session.SetMode(mgo.Eventual, true) - s.Session.SetSocketTimeout(0) - serverStatus, err := s.gatherServerStatus() if err != nil { return err @@ -257,7 +288,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, dbStats := &DbStats{} if gatherDbStats { - names, err := s.Session.DatabaseNames() + names, err := s.client.ListDatabaseNames(context.Background(), bson.D{}) if err != nil { return err } @@ -300,7 +331,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, durationInSeconds = 1 } data := NewMongodbData( - NewStatLine(*s.lastResult, *result, s.URL.Host, true, durationInSeconds), + NewStatLine(*s.lastResult, *result, s.hostname, true, durationInSeconds), s.getDefaultTags(), ) data.AddDefaultStats() diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index 463d7af1b1f65..2cf58689a6eab 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -15,7 +15,7 @@ func TestGetDefaultTags(t *testing.T) { in string out string }{ - {"hostname", server.Url.Host}, + {"hostname", server.hostname}, } defaultTags := server.getDefaultTags() for _, tt := range tagTests { @@ -28,11 +28,11 @@ func TestGetDefaultTags(t *testing.T) { func TestAddDefaultStats(t *testing.T) { var acc testutil.Accumulator - err := server.gatherData(&acc, false) + err := server.gatherData(&acc, false, true, true, true, []string{"local"}) require.NoError(t, err) // need to call this twice so it can perform the diff - err = server.gatherData(&acc, false) + err = server.gatherData(&acc, false, true, true, true, []string{"local"}) require.NoError(t, err) for key := range defaultStats { diff --git a/plugins/inputs/mongodb/mongodb_test.go b/plugins/inputs/mongodb/mongodb_test.go index cd3b741e250d8..9484118dd19ab 100644 --- a/plugins/inputs/mongodb/mongodb_test.go +++ b/plugins/inputs/mongodb/mongodb_test.go @@ -3,60 +3,41 @@ package mongodb import ( + "context" "log" "math/rand" - "net/url" "os" "testing" "time" - "gopkg.in/mgo.v2" + "github.com/influxdata/telegraf/testutil" ) -var connect_url string var server *Server -func init() { - connect_url = os.Getenv("MONGODB_URL") - if connect_url == "" { - connect_url = "127.0.0.1:27017" - server = &Server{URL: &url.URL{Host: connect_url}} - } else { - full_url, err := url.Parse(connect_url) - if err != nil { - log.Fatalf("Unable to parse URL (%s), %s\n", full_url, err.Error()) - } - server = &Server{URL: full_url} +func testSetup(_ *testing.M) { + connectionString := os.Getenv("MONGODB_URL") + if connectionString == "" { + connectionString = "mongodb://127.0.0.1:27017" } -} -func testSetup(m *testing.M) { - var err error - var dialAddrs []string - if server.URL.User != nil { - dialAddrs = []string{server.URL.String()} - } else { - dialAddrs = []string{server.URL.Host} - } - dialInfo, err := mgo.ParseURL(dialAddrs[0]) - if err != nil { - log.Fatalf("Unable to parse URL (%s), %s\n", dialAddrs[0], err.Error()) - } - dialInfo.Direct = true - dialInfo.Timeout = 5 * time.Second - sess, err := mgo.DialWithInfo(dialInfo) - if err != nil { - log.Fatalf("Unable to connect to MongoDB, %s\n", err.Error()) + m := &MongoDB{ + Log: testutil.Logger{}, + Servers: []string{connectionString}, } - server.Session = sess - server.Session, _ = mgo.Dial(server.URL.Host) + err := m.Init() if err != nil { - log.Fatalln(err.Error()) + log.Fatalf("Failed to connect to MongoDB: %v", err) } + + server = m.clients[0] } -func testTeardown(m *testing.M) { - server.Session.Close() +func testTeardown(_ *testing.M) { + err := server.client.Disconnect(context.Background()) + if err != nil { + log.Fatalf("failed to disconnect: %v", err) + } } func TestMain(m *testing.M) { diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index c4cfa45c5c0e7..41f735d389c7a 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -41,6 +41,8 @@ type MongoStatus struct { } type ServerStatus struct { + SampleTime time.Time `bson:""` + Flattened map[string]interface{} `bson:""` Host string `bson:"host"` Version string `bson:"version"` Process string `bson:"process"` @@ -64,7 +66,7 @@ type ServerStatus struct { Mem *MemStats `bson:"mem"` Repl *ReplStatus `bson:"repl"` ShardCursorType map[string]interface{} `bson:"shardCursorType"` - StorageEngine map[string]string `bson:"storageEngine"` + StorageEngine *StorageEngine `bson:"storageEngine"` WiredTiger *WiredTiger `bson:"wiredTiger"` Metrics *MetricsStats `bson:"metrics"` TCMallocStats *TCMallocStats `bson:"tcmalloc"` @@ -171,11 +173,7 @@ type ShardHostStatsData struct { } type TopStats struct { - Totals map[string]TopStatCollections `bson:"totals"` -} - -type TopStatCollections struct { - TSCollection TopStatCollection `bson:",inline"` + Totals map[string]TopStatCollection `bson:"totals"` } type TopStatCollection struct { @@ -238,6 +236,10 @@ type CacheStats struct { UnmodifiedPagesEvicted int64 `bson:"unmodified pages evicted"` } +type StorageEngine struct { + Name string `bson:"name"` +} + // TransactionStats stores transaction checkpoints in WiredTiger. type TransactionStats struct { TransCheckpointsTotalTimeMsecs int64 `bson:"transaction checkpoint total time (msecs)"` @@ -246,7 +248,7 @@ type TransactionStats struct { // ReplStatus stores data related to replica sets. type ReplStatus struct { - SetName interface{} `bson:"setName"` + SetName string `bson:"setName"` IsMaster interface{} `bson:"ismaster"` Secondary interface{} `bson:"secondary"` IsReplicaSet interface{} `bson:"isreplicaset"` @@ -932,8 +934,8 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.TotalCreatedC = newStat.Connections.TotalCreated // set the storage engine appropriately - if newStat.StorageEngine != nil && newStat.StorageEngine["name"] != "" { - returnVal.StorageEngine = newStat.StorageEngine["name"] + if newStat.StorageEngine != nil && newStat.StorageEngine.Name != "" { + returnVal.StorageEngine = newStat.StorageEngine.Name } else { returnVal.StorageEngine = "mmapv1" } @@ -1159,10 +1161,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.Repl != nil { - setName, isReplSet := newStat.Repl.SetName.(string) - if isReplSet { - returnVal.ReplSetName = setName - } + returnVal.ReplSetName = newStat.Repl.SetName // BEGIN code modification if newStat.Repl.IsMaster.(bool) { returnVal.NodeType = "PRI" @@ -1407,24 +1406,24 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec for collection, data := range newMongo.TopStats.Totals { topStatDataLine := &TopStatLine{ CollectionName: collection, - TotalTime: data.TSCollection.Total.Time, - TotalCount: data.TSCollection.Total.Count, - ReadLockTime: data.TSCollection.ReadLock.Time, - ReadLockCount: data.TSCollection.ReadLock.Count, - WriteLockTime: data.TSCollection.WriteLock.Time, - WriteLockCount: data.TSCollection.WriteLock.Count, - QueriesTime: data.TSCollection.Queries.Time, - QueriesCount: data.TSCollection.Queries.Count, - GetMoreTime: data.TSCollection.GetMore.Time, - GetMoreCount: data.TSCollection.GetMore.Count, - InsertTime: data.TSCollection.Insert.Time, - InsertCount: data.TSCollection.Insert.Count, - UpdateTime: data.TSCollection.Update.Time, - UpdateCount: data.TSCollection.Update.Count, - RemoveTime: data.TSCollection.Remove.Time, - RemoveCount: data.TSCollection.Remove.Count, - CommandsTime: data.TSCollection.Commands.Time, - CommandsCount: data.TSCollection.Commands.Count, + TotalTime: data.Total.Time, + TotalCount: data.Total.Count, + ReadLockTime: data.ReadLock.Time, + ReadLockCount: data.ReadLock.Count, + WriteLockTime: data.WriteLock.Time, + WriteLockCount: data.WriteLock.Count, + QueriesTime: data.Queries.Time, + QueriesCount: data.Queries.Count, + GetMoreTime: data.GetMore.Time, + GetMoreCount: data.GetMore.Count, + InsertTime: data.Insert.Time, + InsertCount: data.Insert.Count, + UpdateTime: data.Update.Time, + UpdateCount: data.Update.Count, + RemoveTime: data.Remove.Time, + RemoveCount: data.Remove.Count, + CommandsTime: data.Commands.Time, + CommandsCount: data.Commands.Count, } returnVal.TopStatLines = append(returnVal.TopStatLines, *topStatDataLine) } From d6b7d4da2cfc53598f95bcbd295d9914b9350ce2 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 22 Jul 2021 19:09:01 -0500 Subject: [PATCH 520/761] Simplify how nesting is handled (#9504) --- plugins/parsers/json_v2/parser.go | 65 ++++--------------- plugins/parsers/json_v2/parser_test.go | 4 ++ .../testdata/complex_nesting/expected.out | 3 + .../testdata/complex_nesting/input.json | 31 +++++++++ .../testdata/complex_nesting/telegraf.conf | 9 +++ .../multiple_arrays_in_object/expected.out | 15 +++-- 6 files changed, 70 insertions(+), 57 deletions(-) create mode 100644 plugins/parsers/json_v2/testdata/complex_nesting/expected.out create mode 100644 plugins/parsers/json_v2/testdata/complex_nesting/input.json create mode 100644 plugins/parsers/json_v2/testdata/complex_nesting/telegraf.conf diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index ef8981dffc859..fa0946621cde4 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -184,11 +184,7 @@ func (p *Parser) processMetric(data []DataSet, input []byte, tag bool) ([]telegr return nil, err } - var m []telegraf.Metric - for _, n := range nodes { - m = append(m, n.Metric) - } - metrics = append(metrics, m) + metrics = append(metrics, nodes) } for i := 1; i < len(metrics); i++ { @@ -229,8 +225,8 @@ func mergeMetric(a telegraf.Metric, m telegraf.Metric) { } // expandArray will recursively create a new MetricNode for each element in a JSON array or single value -func (p *Parser) expandArray(result MetricNode) ([]MetricNode, error) { - var results []MetricNode +func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { + var results []telegraf.Metric if result.IsObject() { if !p.iterateObjects { @@ -262,8 +258,7 @@ func (p *Parser) expandArray(result MetricNode) ([]MetricNode, error) { Metric: m, Result: val, } - var r []MetricNode - r, err = p.combineObject(n) + r, err := p.combineObject(n) if err != nil { return false } @@ -274,7 +269,7 @@ func (p *Parser) expandArray(result MetricNode) ([]MetricNode, error) { } if len(results) != 0 { for _, newResult := range results { - mergeMetric(result.Metric, newResult.Metric) + mergeMetric(result.Metric, newResult) } } return true @@ -294,8 +289,7 @@ func (p *Parser) expandArray(result MetricNode) ([]MetricNode, error) { Metric: m, Result: val, } - var r []MetricNode - r, err = p.expandArray(n) + r, err := p.expandArray(n) if err != nil { return false } @@ -335,7 +329,7 @@ func (p *Parser) expandArray(result MetricNode) ([]MetricNode, error) { } } - results = append(results, result) + results = append(results, result.Metric) } return results, nil @@ -369,9 +363,7 @@ func (p *Parser) processObjects(objects []JSONObject, input []byte) ([]telegraf. if err != nil { return nil, err } - for _, m := range metrics { - t = append(t, m.Metric) - } + t = append(t, metrics...) } return t, nil @@ -379,12 +371,10 @@ func (p *Parser) processObjects(objects []JSONObject, input []byte) ([]telegraf. // combineObject will add all fields/tags to a single metric // If the object has multiple array's as elements it won't comine those, they will remain separate metrics -func (p *Parser) combineObject(result MetricNode) ([]MetricNode, error) { - var results []MetricNode - var combineObjectResult []MetricNode +func (p *Parser) combineObject(result MetricNode) ([]telegraf.Metric, error) { + var results []telegraf.Metric if result.IsArray() || result.IsObject() { var err error - var prevArray bool result.ForEach(func(key, val gjson.Result) bool { // Determine if field/tag set name is configured var setName string @@ -436,38 +426,18 @@ func (p *Parser) combineObject(result MetricNode) ([]MetricNode, error) { } arrayNode.Tag = tag + if val.IsObject() { - prevArray = false - combineObjectResult, err = p.combineObject(arrayNode) + results, err = p.combineObject(arrayNode) if err != nil { return false } } else { - var r []MetricNode - r, err = p.expandArray(arrayNode) + r, err := p.expandArray(arrayNode) if err != nil { return false } - if prevArray { - if !arrayNode.IsArray() { - // If another non-array element was found, merge it into all previous gathered metrics - if len(results) != 0 { - for _, newResult := range results { - mergeMetric(result.Metric, newResult.Metric) - } - } - } else { - // Multiple array's won't be merged but kept separate, add additional metrics gathered from an array - results = append(results, r...) - } - } else { - // Continue using the same metric if its an object - results = r - } - - if val.IsArray() { - prevArray = true - } + results = cartesianProduct(results, r) } return true @@ -477,13 +447,6 @@ func (p *Parser) combineObject(result MetricNode) ([]MetricNode, error) { return nil, err } } - - if len(results) == 0 { - // If the results are empty, use the results of the call to combine object - // This happens with nested objects in array's, see the test array_of_objects - results = combineObjectResult - } - return results, nil } diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index 9321d7256fada..f0f018034dc5b 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -21,6 +21,10 @@ func TestData(t *testing.T) { name string test string }{ + { + name: "Test complex nesting", + test: "complex_nesting", + }, { name: "Test having an array of objects", test: "array_of_objects", diff --git a/plugins/parsers/json_v2/testdata/complex_nesting/expected.out b/plugins/parsers/json_v2/testdata/complex_nesting/expected.out new file mode 100644 index 0000000000000..265549c57abce --- /dev/null +++ b/plugins/parsers/json_v2/testdata/complex_nesting/expected.out @@ -0,0 +1,3 @@ +file,properties_place=Antelope\ Valley\,\ CA geometry_coordinates=-119.4998333,geometry_type="Point",id="nc73584926",properties_mag=6,properties_updated=1.626277167263e+12,type="Feature" +file,properties_place=Antelope\ Valley\,\ CA geometry_coordinates=38.5075,geometry_type="Point",id="nc73584926",properties_mag=6,properties_updated=1.626277167263e+12,type="Feature" +file,properties_place=Antelope\ Valley\,\ CA geometry_coordinates=7.45,geometry_type="Point",id="nc73584926",properties_mag=6,properties_updated=1.626277167263e+12,type="Feature" diff --git a/plugins/parsers/json_v2/testdata/complex_nesting/input.json b/plugins/parsers/json_v2/testdata/complex_nesting/input.json new file mode 100644 index 0000000000000..69bff40a45983 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/complex_nesting/input.json @@ -0,0 +1,31 @@ +{ + "type": "FeatureCollection", + "metadata": { + "generated": 1626285886000, + "url": "https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/significant_week.geojson", + "title": "USGS Significant Earthquakes, Past Week", + "status": 200, + "api": "1.10.3", + "count": 1 + }, + "features": [ + { + "type": "Feature", + "properties": { + "mag": 6, + "place": "Antelope Valley, CA", + "time": 1625784588110, + "updated": 1626277167263 + }, + "geometry": { + "type": "Point", + "coordinates": [ + -119.4998333, + 38.5075, + 7.45 + ] + }, + "id": "nc73584926" + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/complex_nesting/telegraf.conf b/plugins/parsers/json_v2/testdata/complex_nesting/telegraf.conf new file mode 100644 index 0000000000000..66347da8410b9 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/complex_nesting/telegraf.conf @@ -0,0 +1,9 @@ +[[inputs.file]] + files = ["./testdata/complex_nesting/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "features" + timestamp_key = "properties_time" + timestamp_format = "unix_ms" + tags = ["properties_place"] diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out index 814d044ce6b6f..2948da1720f64 100644 --- a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out @@ -1,6 +1,9 @@ -file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party" -file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past" -file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Bilbo",species="hobbit" -file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Frodo",species="hobbit" -file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=1 -file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=2 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party",name="Bilbo",species="hobbit",random=1 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party",name="Bilbo",species="hobbit",random=2 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party",name="Frodo",species="hobbit",random=1 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party",name="Frodo",species="hobbit",random=2 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Bilbo",species="hobbit",random=1 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Bilbo",species="hobbit",random=2 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Frodo",species="hobbit",random=1 +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Frodo",species="hobbit",random=2 + From 32d4234ae4edb39a9ae86915d457974d167a0d0c Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Thu, 22 Jul 2021 17:44:36 -0700 Subject: [PATCH 521/761] Prevent x509_cert from hanging on UDP connection (#9323) --- docs/LICENSE_OF_DEPENDENCIES.md | 4 ++ go.mod | 3 +- go.sum | 18 ++++++++- plugins/inputs/x509_cert/README.md | 5 ++- plugins/inputs/x509_cert/x509_cert.go | 44 ++++++++++++++++++++-- plugins/inputs/x509_cert/x509_cert_test.go | 32 ++++++++++++++++ 6 files changed, 100 insertions(+), 6 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 22b8393dcdaeb..7ae13c1143db4 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -176,6 +176,10 @@ following works: - github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) - github.com/philhofer/fwd [MIT License](https://github.com/philhofer/fwd/blob/master/LICENSE.md) - github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE) +- github.com/pion/dtls [MIT License](https://github.com/pion/dtls/blob/master/LICENSE) +- github.com/pion/logging [MIT License](https://github.com/pion/logging/blob/master/LICENSE) +- github.com/pion/transport [MIT License](https://github.com/pion/transport/blob/master/LICENSE) +- github.com/pion/udp [MIT License](https://github.com/pion/udp/blob/master/LICENSE) - github.com/pkg/browser [BSD 2-Clause "Simplified" License](https://github.com/pkg/browser/blob/master/LICENSE) - github.com/pkg/errors [BSD 2-Clause "Simplified" License](https://github.com/pkg/errors/blob/master/LICENSE) - github.com/pmezard/go-difflib [BSD 3-Clause Clear License](https://github.com/pmezard/go-difflib/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 053765ee69c94..f0f36e2df8717 100644 --- a/go.mod +++ b/go.mod @@ -105,6 +105,7 @@ require ( github.com/nsqio/go-nsq v1.0.8 github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 github.com/openzipkin/zipkin-go-opentracing v0.3.4 + github.com/pion/dtls/v2 v2.0.9 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.7.1 github.com/prometheus/client_model v0.2.0 @@ -140,7 +141,7 @@ require ( go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect - golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 + golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 diff --git a/go.sum b/go.sum index ffd7695ca2a72..20b7759feef97 100644 --- a/go.sum +++ b/go.sum @@ -1272,6 +1272,15 @@ github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pion/dtls/v2 v2.0.9 h1:7Ow+V++YSZQMYzggI0P9vLJz/hUFcffsfGMfT/Qy+u8= +github.com/pion/dtls/v2 v2.0.9/go.mod h1:O0Wr7si/Zj5/EBFlDzDd6UtVxx25CE1r7XM7BQKYQho= +github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/transport v0.12.2/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q= +github.com/pion/transport v0.12.3 h1:vdBfvfU/0Wq8kd2yhUMSDB/x+O4Z9MYVl2fJ5BT4JZw= +github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZIWJ6q9A= +github.com/pion/udp v0.1.1 h1:8UAPvyqmsxK8oOjloDk4wUt63TzFe9WEJkg5lChlj7o= +github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1602,6 +1611,7 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -1691,12 +1701,16 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c h1:KHUzaHIpjWVlVVNh65G3hhuj3KB1HnjY6Cq5cTvRQT8= +golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1815,7 +1829,9 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md index f206f6c0979a5..5211c38e9a9c2 100644 --- a/plugins/inputs/x509_cert/README.md +++ b/plugins/inputs/x509_cert/README.md @@ -3,6 +3,8 @@ This plugin provides information about X509 certificate accessible via local file or network connection. +When using a UDP address as a certificate source, the server must support [DTLS](https://en.wikipedia.org/wiki/Datagram_Transport_Layer_Security). + ### Configuration @@ -11,7 +13,8 @@ file or network connection. [[inputs.x509_cert]] ## List certificate sources, support wildcard expands for files ## Prefix your entry with 'file://' if you intend to use relative paths - sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443", + sources = ["tcp://example.org:443", "https://influxdata.com:443", + "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] ## Timeout for SSL connection diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index 4ac115931a26a..b106f91b772f6 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -7,6 +7,7 @@ import ( "crypto/x509" "encoding/pem" "fmt" + "github.com/pion/dtls/v2" "io/ioutil" "net" "net/url" @@ -24,7 +25,8 @@ import ( const sampleConfig = ` ## List certificate sources ## Prefix your entry with 'file://' if you intend to use relative paths - sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443", + sources = ["tcp://example.org:443", "https://influxdata.com:443", + "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] ## Timeout for SSL connection @@ -104,11 +106,47 @@ func (c *X509Cert) serverName(u *url.URL) (string, error) { func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certificate, error) { protocol := u.Scheme switch u.Scheme { + case "udp", "udp4", "udp6": + ipConn, err := net.DialTimeout(u.Scheme, u.Host, timeout) + if err != nil { + return nil, err + } + defer ipConn.Close() + + serverName, err := c.serverName(u) + if err != nil { + return nil, err + } + + dtlsCfg := &dtls.Config{ + InsecureSkipVerify: true, + Certificates: c.tlsCfg.Certificates, + RootCAs: c.tlsCfg.RootCAs, + ServerName: serverName, + } + conn, err := dtls.Client(ipConn, dtlsCfg) + if err != nil { + return nil, err + } + defer conn.Close() + + rawCerts := conn.ConnectionState().PeerCertificates + var certs []*x509.Certificate + for _, rawCert := range rawCerts { + parsed, err := x509.ParseCertificate(rawCert) + if err != nil { + return nil, err + } + + if parsed != nil { + certs = append(certs, parsed) + } + } + + return certs, nil case "https": protocol = "tcp" fallthrough - case "udp", "udp4", "udp6": - fallthrough case "tcp", "tcp4", "tcp6": ipConn, err := net.DialTimeout(protocol, u.Host, timeout) if err != nil { diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 4f09b903b4c24..9c42c09bdabda 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -4,8 +4,10 @@ import ( "crypto/tls" "encoding/base64" "fmt" + "github.com/pion/dtls/v2" "io/ioutil" "math/big" + "net" "net/url" "os" "path/filepath" @@ -260,6 +262,36 @@ func TestGatherChain(t *testing.T) { } } +func TestGatherUDPCert(t *testing.T) { + pair, err := tls.X509KeyPair([]byte(pki.ReadServerCert()), []byte(pki.ReadServerKey())) + require.NoError(t, err) + + cfg := &dtls.Config{ + Certificates: []tls.Certificate{pair}, + } + + addr := &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 0} + listener, err := dtls.Listen("udp", addr, cfg) + require.NoError(t, err) + defer listener.Close() + + go func() { + _, _ = listener.Accept() + }() + + m := &X509Cert{ + Sources: []string{"udp://" + listener.Addr().String()}, + Log: testutil.Logger{}, + } + require.NoError(t, m.Init()) + + var acc testutil.Accumulator + require.NoError(t, m.Gather(&acc)) + + assert.Len(t, acc.Errors, 0) + assert.True(t, acc.HasMeasurement("x509_cert")) +} + func TestStrings(t *testing.T) { sc := X509Cert{} require.NoError(t, sc.Init()) From 754b7ff4c14b6dc11722c79e397894844993571e Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Mon, 26 Jul 2021 19:36:09 +0200 Subject: [PATCH 522/761] Example input plugin (#9409) --- .../{EXAMPLE_README.md => example/README.md} | 0 plugins/inputs/example/example.go | 136 ++++++ plugins/inputs/example/example_test.go | 439 ++++++++++++++++++ plugins/inputs/mock_Plugin.go | 39 -- 4 files changed, 575 insertions(+), 39 deletions(-) rename plugins/inputs/{EXAMPLE_README.md => example/README.md} (100%) create mode 100644 plugins/inputs/example/example.go create mode 100644 plugins/inputs/example/example_test.go delete mode 100644 plugins/inputs/mock_Plugin.go diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/example/README.md similarity index 100% rename from plugins/inputs/EXAMPLE_README.md rename to plugins/inputs/example/README.md diff --git a/plugins/inputs/example/example.go b/plugins/inputs/example/example.go new file mode 100644 index 0000000000000..c8f5992fe660a --- /dev/null +++ b/plugins/inputs/example/example.go @@ -0,0 +1,136 @@ +package example + +import ( + "fmt" + "math/rand" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Example struct should be named the same as the Plugin +type Example struct { + // Example for a mandatory option to set a tag + DeviceName string `toml:"device_name"` + + // Config options are converted to the correct type automatically + NumberFields int64 `toml:"number_fields"` + + // We can also use booleans and have diverging names between user-configuration options and struct members + EnableRandomVariable bool `toml:"enable_random"` + + // Example of passing a duration option allowing the format of e.g. "100ms", "5m" or "1h" + Timeout config.Duration `toml:"timeout"` + + // Telegraf logging facility + // The exact name is important to allow automatic initialization by telegraf. + Log telegraf.Logger `toml:"-"` + + // This is a non-exported internal state. + count int64 +} + +// Usually the default (example) configuration is contained in this constant. +// Please use '## '' to denote comments and '# ' to specify default settings and start each line with two spaces. +const sampleConfig = ` + ## Device name used as a tag + ## This is a mandatory option that needs to be set by the user, so we do not + ## comment it. + device_name = "" + + ## Number of fields contained in the output + ## This should be greater than zero and less then ten. + ## Here, two is the default, so we comment the option with the default value shown. + # number_fields = 2 + + ## Enable setting the field(s) to random values + ## By default, the field values are set to zero. + # enable_random = false + + ## Specify a duration allowing time-unit suffixes ('ns','ms', 's', 'm', etc.) + # timeout = "100ms" +` + +// Description will appear directly above the plugin definition in the config file +func (m *Example) Description() string { + return `This is an example plugin` +} + +// SampleConfig will populate the sample configuration portion of the plugin's configuration +func (m *Example) SampleConfig() string { + return sampleConfig +} + +// Init can be implemented to do one-time processing stuff like initializing variables +func (m *Example) Init() error { + // Check your options according to your requirements + if m.DeviceName == "" { + return fmt.Errorf("device name cannot be empty") + } + + // Set your defaults. + // Please note: In golang all fields are initialzed to their nil value, so you should not + // set these fields if the nil value is what you want (e.g. for booleans). + if m.NumberFields < 1 { + m.Log.Debugf("Setting number of fields to default from invalid value %d", m.NumberFields) + m.NumberFields = 2 + } + + // Initialze your internal states + m.count = 1 + + return nil +} + +// Gather defines what data the plugin will gather. +func (m *Example) Gather(acc telegraf.Accumulator) error { + // Imagine some completely arbitrary error occuring here + if m.NumberFields > 10 { + return fmt.Errorf("too many fields") + } + + // For illustration we gather three metrics in one go + for run := 0; run < 3; run++ { + // Imagine an error occurs here but you want to keep the other + // metrics, then you cannot simply return, as this would drop + // all later metrics. Simply accumulate errors in this case + // and ignore the metric. + if m.EnableRandomVariable && m.DeviceName == "flappy" && run > 1 { + acc.AddError(fmt.Errorf("too many runs for random values")) + continue + } + + // Construct the fields + fields := map[string]interface{}{"count": m.count} + for i := int64(1); i < m.NumberFields; i++ { + name := fmt.Sprintf("field%d", i) + value := 0.0 + if m.EnableRandomVariable { + value = rand.Float64() + } + fields[name] = value + } + + // Construct the tags + tags := map[string]string{"device": m.DeviceName} + + // Add the metric with the current timestamp + acc.AddFields("example", fields, tags) + + m.count++ + } + + return nil +} + +// Register the plugin +func init() { + inputs.Add("example", func() telegraf.Input { + return &Example{ + // Set the default timeout here to distinguish it from the user setting it to zero + Timeout: config.Duration(100 * time.Millisecond), + } + }) +} diff --git a/plugins/inputs/example/example_test.go b/plugins/inputs/example/example_test.go new file mode 100644 index 0000000000000..1c3b4b0a5e66e --- /dev/null +++ b/plugins/inputs/example/example_test.go @@ -0,0 +1,439 @@ +package example + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" +) + +// This file should contain a set of unit-tests to cover your plugin. This will ease +// spotting bugs and mistakes when later modifying or extending the functionality. +// To do so, please write one 'TestXYZ' function per 'case' e.g. default init, +// things that should fail or expected values from a mockup. + +func TestInitDefault(t *testing.T) { + // This test should succeed with the default initialization. + + // Use whatever you use in the init() function plus the mandatory options. + // ATTENTION: Always initialze the "Log" as you will get SIGSEGV otherwise. + plugin := &Example{ + DeviceName: "test", + Timeout: config.Duration(100 * time.Millisecond), + Log: testutil.Logger{}, + } + + // Test the initialization succeeds + require.NoError(t, plugin.Init()) + + // Also test that default values are set correctly + require.Equal(t, config.Duration(100*time.Millisecond), plugin.Timeout) + require.Equal(t, "test", plugin.DeviceName) + require.Equal(t, int64(2), plugin.NumberFields) +} + +func TestInitFail(t *testing.T) { + // You should also test for your safety nets to work i.e. you get errors for + // invalid configuration-option values. So check your error paths in Init() + // and check if you reach them + + // We setup a table-test here to specify "setting" - "expected error" values. + // Eventhough it seems overkill here for the example plugin, we reuse this structure + // later for checking the metrics + tests := []struct { + name string + plugin *Example + expected string + }{ + { + name: "all empty", + plugin: &Example{}, + expected: "device name cannot be empty", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Always initialze the logger to avoid SIGSEGV. This is done automatically by + // telegraf during normal operation. + tt.plugin.Log = testutil.Logger{} + err := tt.plugin.Init() + require.Error(t, err) + require.EqualError(t, err, tt.expected) + }) + } +} + +func TestFixedValue(t *testing.T) { + // You can organize the test e.g. by operation mode (like we do here random vs. fixed), by features or + // by different metrics gathered. Please choose the partitioning most suited for your plugin + + // We again setup a table-test here to specify "setting" - "expected output metric" pairs. + tests := []struct { + name string + plugin *Example + expected []telegraf.Metric + }{ + { + name: "count only", + plugin: &Example{ + DeviceName: "test", + NumberFields: 1, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 1, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 2, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 3, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "default settings", + plugin: &Example{ + DeviceName: "test", + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 1, + "field1": float64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 2, + "field1": float64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 3, + "field1": float64(0), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "more fields", + plugin: &Example{ + DeviceName: "test", + NumberFields: 4, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 1, + "field1": float64(0), + "field2": float64(0), + "field3": float64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 2, + "field1": float64(0), + "field2": float64(0), + "field3": float64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 3, + "field1": float64(0), + "field2": float64(0), + "field3": float64(0), + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + + tt.plugin.Log = testutil.Logger{} + require.NoError(t, tt.plugin.Init()) + + // Call gather and check no error occurs. In case you use acc.AddError() somewhere + // in your code, it is not sufficient to only check the return value of Gather(). + require.NoError(t, tt.plugin.Gather(&acc)) + require.Len(t, acc.Errors, 0, "found errors accumulated by acc.AddError()") + + // Wait for the expected number of metrics to avoid flaky tests due to + // race conditions. + acc.Wait(len(tt.expected)) + + // Compare the metrics in a convenient way. Here we ignore + // the metric time during comparision as we cannot inject the time + // during test. For more comparision options check testutil package. + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} + +func TestRandomValue(t *testing.T) { + // Sometimes, you cannot know the exact outcome of the gather cycle e.g. if the gathering involves random data. + // However, you should check the result nevertheless, applying as many conditions as you can. + + // We again setup a table-test here to specify "setting" - "expected output metric" pairs. + tests := []struct { + name string + plugin *Example + template telegraf.Metric + }{ + { + name: "count only", + plugin: &Example{ + DeviceName: "test", + NumberFields: 1, + EnableRandomVariable: true, + }, + template: testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 1, + }, + time.Unix(0, 0), + ), + }, + { + name: "default settings", + plugin: &Example{ + DeviceName: "test", + EnableRandomVariable: true, + }, + template: testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 1, + "field1": float64(0), + }, + time.Unix(0, 0), + ), + }, + { + name: "more fields", + plugin: &Example{ + DeviceName: "test", + NumberFields: 4, + EnableRandomVariable: true, + }, + template: testutil.MustMetric( + "example", + map[string]string{ + "device": "test", + }, + map[string]interface{}{ + "count": 1, + "field1": float64(0), + "field2": float64(0), + "field3": float64(0), + }, + time.Unix(0, 0), + ), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + + tt.plugin.Log = testutil.Logger{} + require.NoError(t, tt.plugin.Init()) + + // Call gather and check no error occurs. In case you use acc.AddError() somewhere + // in your code, it is not sufficient to only check the return value of Gather(). + require.NoError(t, tt.plugin.Gather(&acc)) + require.Len(t, acc.Errors, 0, "found errors accumulated by acc.AddError()") + + // Wait for the expected number of metrics to avoid flaky tests due to + // race conditions. + acc.Wait(3) + + // Compare all aspects of the metric that are known to you + for i, m := range acc.GetTelegrafMetrics() { + require.Equal(t, m.Name(), tt.template.Name()) + require.Equal(t, m.Tags(), tt.template.Tags()) + + // Check if all expected fields are there + fields := m.Fields() + for k := range tt.template.Fields() { + if k == "count" { + require.Equal(t, fields["count"], int64(i+1)) + continue + } + _, found := fields[k] + require.Truef(t, found, "field %q not found", k) + } + } + }) + } +} + +func TestGatherFail(t *testing.T) { + // You should also test for error conditions in your Gather() method. Try to cover all error paths. + + // We again setup a table-test here to specify "setting" - "expected error" pair. + tests := []struct { + name string + plugin *Example + expected string + }{ + { + name: "too many fields", + plugin: &Example{ + DeviceName: "test", + NumberFields: 11, + }, + expected: "too many fields", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + + tt.plugin.Log = testutil.Logger{} + require.NoError(t, tt.plugin.Init()) + + err := tt.plugin.Gather(&acc) + require.Error(t, err) + require.EqualError(t, err, tt.expected) + }) + } +} + +func TestRandomValueFailPartial(t *testing.T) { + // You should also test for error conditions in your Gather() with partial output. This is required when + // using acc.AddError() as Gather() might succeed (return nil) but there are some metrics missing. + + // We again setup a table-test here to specify "setting" - "expected output metric" and "errors". + tests := []struct { + name string + plugin *Example + expected []telegraf.Metric + expectedErr string + }{ + { + name: "flappy gather", + plugin: &Example{ + DeviceName: "flappy", + NumberFields: 1, + EnableRandomVariable: true, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "example", + map[string]string{ + "device": "flappy", + }, + map[string]interface{}{ + "count": 1, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "example", + map[string]string{ + "device": "flappy", + }, + map[string]interface{}{ + "count": 2, + }, + time.Unix(0, 0), + ), + }, + expectedErr: "too many runs for random values", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + + tt.plugin.Log = testutil.Logger{} + require.NoError(t, tt.plugin.Init()) + + // Call gather and check no error occurs. However, we expect an error accumulated by acc.AddError() + require.NoError(t, tt.plugin.Gather(&acc)) + + // Wait for the expected number of metrics to avoid flaky tests due to + // race conditions. + acc.Wait(len(tt.expected)) + + // Check the accumulated errors + require.Len(t, acc.Errors, 1) + require.EqualError(t, acc.Errors[0], tt.expectedErr) + + // Compare the expected partial metrics. + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/inputs/mock_Plugin.go b/plugins/inputs/mock_Plugin.go deleted file mode 100644 index 7270954dc5f8d..0000000000000 --- a/plugins/inputs/mock_Plugin.go +++ /dev/null @@ -1,39 +0,0 @@ -package inputs - -import ( - "github.com/influxdata/telegraf" - - "github.com/stretchr/testify/mock" -) - -// MockPlugin struct should be named the same as the Plugin -type MockPlugin struct { - mock.Mock - - constructedVariable string -} - -// Description will appear directly above the plugin definition in the config file -func (m *MockPlugin) Description() string { - return `This is an example plugin` -} - -// SampleConfig will populate the sample configuration portion of the plugin's configuration -func (m *MockPlugin) SampleConfig() string { - return ` sampleVar = 'foo'` -} - -// Init can be implemented to do one-time processing stuff like initializing variables -func (m *MockPlugin) Init() error { - m.constructedVariable = "I'm initialized now." - return nil -} - -// Gather defines what data the plugin will gather. -func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error { - ret := m.Called(_a0) - - r0 := ret.Error(0) - - return r0 -} From a1dae0d2c186534286b172ac2929feb0f0089f45 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 26 Jul 2021 20:39:23 -0500 Subject: [PATCH 523/761] Update Go to v1.16.6 (#9542) --- .circleci/config.yml | 6 +++--- Makefile | 4 ++-- scripts/alpine.docker | 2 +- scripts/buster.docker | 2 +- scripts/ci-1.16.docker | 2 +- scripts/mac_installgo.sh | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 03fe58b17a739..010c54a0fedfd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -13,7 +13,7 @@ executors: go-1_16: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.16.5' + - image: 'quay.io/influxdb/telegraf-ci:1.16.6' environment: GOFLAGS: -p=8 mac: @@ -150,7 +150,7 @@ jobs: steps: - checkout - check-changed-files-or-halt-windows - - run: choco upgrade golang --version=1.16.5 + - run: choco upgrade golang --version=1.16.6 - run: choco install make - run: git config --system core.longpaths true - run: make test-windows @@ -448,4 +448,4 @@ workflows: filters: branches: only: - - master \ No newline at end of file + - master diff --git a/Makefile b/Makefile index 4f6ef13e8e4af..5cf7d2383604f 100644 --- a/Makefile +++ b/Makefile @@ -201,8 +201,8 @@ ci-1.15: .PHONY: ci-1.16 ci-1.16: - docker build -t quay.io/influxdb/telegraf-ci:1.16.5 - < scripts/ci-1.16.docker - docker push quay.io/influxdb/telegraf-ci:1.16.5 + docker build -t quay.io/influxdb/telegraf-ci:1.16.6 - < scripts/ci-1.16.docker + docker push quay.io/influxdb/telegraf-ci:1.16.6 .PHONY: install install: $(buildbin) diff --git a/scripts/alpine.docker b/scripts/alpine.docker index 673498c6f598e..d5b8b85f6abb7 100644 --- a/scripts/alpine.docker +++ b/scripts/alpine.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.5 as builder +FROM golang:1.16.6 as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/buster.docker b/scripts/buster.docker index 4276730b4bf1e..685d30067e0ef 100644 --- a/scripts/buster.docker +++ b/scripts/buster.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.5-buster as builder +FROM golang:1.16.6-buster as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/ci-1.16.docker b/scripts/ci-1.16.docker index 585abc137e060..f0b2badafd521 100644 --- a/scripts/ci-1.16.docker +++ b/scripts/ci-1.16.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.5 +FROM golang:1.16.6 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/mac_installgo.sh b/scripts/mac_installgo.sh index 285db8b315fc2..aab4731c22f30 100644 --- a/scripts/mac_installgo.sh +++ b/scripts/mac_installgo.sh @@ -3,8 +3,8 @@ set -eux GO_ARCH="darwin-amd64" -GO_VERSION="1.16.5" -GO_VERSION_SHA="be761716d5bfc958a5367440f68ba6563509da2f539ad1e1864bd42fe553f277" # from https://golang.org/dl +GO_VERSION="1.16.6" +GO_VERSION_SHA="e4e83e7c6891baa00062ed37273ce95835f0be77ad8203a29ec56dbf3d87508a" # from https://golang.org/dl # This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.) path="/usr/local/Cellar" From 57ecd1d21b51eed1982c522c4fb1ed9b4b1231e2 Mon Sep 17 00:00:00 2001 From: Mark Wilkinson - m82labs Date: Mon, 26 Jul 2021 21:55:03 -0400 Subject: [PATCH 524/761] Worktable workfile stats (#8587) --- plugins/inputs/sqlserver/azuresqlqueries.go | 6 ++++-- plugins/inputs/sqlserver/sqlserverqueries.go | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go index 318509ac28ee5..17361c20d41f8 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqlqueries.go @@ -527,8 +527,8 @@ WITH PerfCounters AS ( ,'Mirrored Write Transactions/sec' ,'Group Commit Time' ,'Group Commits/Sec' - ,'Distributed Query' - ,'DTC calls' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' ,'Query Store CPU usage' ) OR ( spi.[object_name] LIKE '%User Settable%' @@ -1068,6 +1068,8 @@ WITH PerfCounters AS ( ,'Mirrored Write Transactions/sec' ,'Group Commit Time' ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' ,'Distributed Query' ,'DTC calls' ,'Query Store CPU usage' diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index 1d46e5cd91277..49bde3fb915a2 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -409,6 +409,8 @@ SELECT DISTINCT ,'Mirrored Write Transactions/sec' ,'Group Commit Time' ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' ,'Distributed Query' ,'DTC calls' ,'Query Store CPU usage' From a48e11d0d1ea8ca1c056a888b0b3d7955a8ca2d8 Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 27 Jul 2021 14:34:35 -0600 Subject: [PATCH 525/761] Bug Fix Snmp empty metric name (#9519) --- plugins/processors/ifname/ifname.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index 714578779a7a0..10623c041dd2d 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -349,6 +349,7 @@ func init() { func makeTableNoMock(fieldName string) (*si.Table, error) { var err error tab := si.Table{ + Name: "ifTable", IndexAsTag: true, Fields: []si.Field{ {Oid: fieldName}, From 348c18db7571e25f987ce4cefce58535dd964ab1 Mon Sep 17 00:00:00 2001 From: Dominik Rosiek <58699848+sumo-drosiek@users.noreply.github.com> Date: Tue, 27 Jul 2021 23:10:50 +0200 Subject: [PATCH 526/761] feat(http_listener_v2): allows multiple paths and add path_tag (#9529) --- plugins/inputs/http_listener_v2/README.md | 11 +++- .../http_listener_v2/http_listener_v2.go | 32 +++++++++-- .../http_listener_v2/http_listener_v2_test.go | 56 +++++++++++++++++++ .../parsers/prometheusremotewrite/README.md | 4 +- 4 files changed, 93 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md index 108a8d50a9a2a..a87ec3f833890 100644 --- a/plugins/inputs/http_listener_v2/README.md +++ b/plugins/inputs/http_listener_v2/README.md @@ -19,7 +19,14 @@ This is a sample configuration for the plugin. service_address = ":8080" ## Path to listen to. - # path = "/telegraf" + ## This option is deprecated and only available for backward-compatibility. Please use paths instead. + # path = "" + + ## Paths to listen to. + # paths = ["/telegraf"] + + ## Save path as http_listener_v2_path tag if set to true + # path_tag = false ## HTTP methods to accept. # methods = ["POST", "PUT"] @@ -59,7 +66,7 @@ This is a sample configuration for the plugin. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "json" + data_format = "influx" ``` ### Metrics: diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 89714bb0818b1..5b511de57fb54 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -15,6 +15,7 @@ import ( "github.com/golang/snappy" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" @@ -26,8 +27,9 @@ import ( const defaultMaxBodySize = 500 * 1024 * 1024 const ( - body = "body" - query = "query" + body = "body" + query = "query" + pathTag = "http_listener_v2_path" ) // TimeFunc provides a timestamp for the metrics @@ -37,6 +39,8 @@ type TimeFunc func() time.Time type HTTPListenerV2 struct { ServiceAddress string `toml:"service_address"` Path string `toml:"path"` + Paths []string `toml:"paths"` + PathTag bool `toml:"path_tag"` Methods []string `toml:"methods"` DataSource string `toml:"data_source"` ReadTimeout config.Duration `toml:"read_timeout"` @@ -64,7 +68,14 @@ const sampleConfig = ` service_address = ":8080" ## Path to listen to. - # path = "/telegraf" + ## This option is deprecated and only available for backward-compatibility. Please use paths instead. + # path = "" + + ## Paths to listen to. + # paths = ["/telegraf"] + + ## Save path as http_listener_v2_path tag if set to true + # path_tag = false ## HTTP methods to accept. # methods = ["POST", "PUT"] @@ -75,7 +86,7 @@ const sampleConfig = ` # write_timeout = "10s" ## Maximum allowed http request body size in bytes. - ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) + ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) # max_body_size = "500MB" ## Part of the request to consume. Available options are "body" and @@ -136,6 +147,11 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { h.WriteTimeout = config.Duration(time.Second * 10) } + // Append h.Path to h.Paths + if h.Path != "" && !choice.Contains(h.Path, h.Paths) { + h.Paths = append(h.Paths, h.Path) + } + h.acc = acc tlsConf, err := h.ServerConfig.TLSConfig() @@ -189,7 +205,7 @@ func (h *HTTPListenerV2) Stop() { func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { handler := h.serveWrite - if req.URL.Path != h.Path { + if !choice.Contains(req.URL.Path, h.Paths) { handler = http.NotFound } @@ -251,6 +267,10 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) } } + if h.PathTag { + m.AddTag(pathTag, req.URL.Path) + } + h.acc.AddMetric(m) } @@ -370,7 +390,7 @@ func init() { return &HTTPListenerV2{ ServiceAddress: ":8080", TimeFunc: time.Now, - Path: "/telegraf", + Paths: []string{"/telegraf"}, Methods: []string{"POST", "PUT"}, DataSource: body, } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index 6b906f9cec3e3..5daaf2785ffe3 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -230,6 +230,62 @@ func TestWriteHTTP(t *testing.T) { ) } +// http listener should add request path as configured path_tag +func TestWriteHTTPWithPathTag(t *testing.T) { + listener := newTestHTTPListenerV2() + listener.PathTag = true + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "http_listener_v2_path": "/write"}, + ) +} + +// http listener should add request path as configured path_tag (trimming it before) +func TestWriteHTTPWithMultiplePaths(t *testing.T) { + listener := newTestHTTPListenerV2() + listener.Paths = []string{"/alternative_write"} + listener.PathTag = true + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to /write + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + // post single message to /alternative_write + resp, err = http.Post(createURL(listener, "http", "/alternative_write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "http_listener_v2_path": "/write"}, + ) + + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "http_listener_v2_path": "/alternative_write"}, + ) +} + // http listener should add a newline at the end of the buffer if it's not there func TestWriteHTTPNoNewline(t *testing.T) { listener := newTestHTTPListenerV2() diff --git a/plugins/parsers/prometheusremotewrite/README.md b/plugins/parsers/prometheusremotewrite/README.md index b409e9e6d5c8f..6d2c17ef898dc 100644 --- a/plugins/parsers/prometheusremotewrite/README.md +++ b/plugins/parsers/prometheusremotewrite/README.md @@ -9,8 +9,8 @@ Converts prometheus remote write samples directly into Telegraf metrics. It can ## Address and port to host HTTP listener on service_address = ":1234" - ## Path to listen to. - path = "/receive" + ## Paths to listen to. + paths = ["/receive"] ## Data format to consume. data_format = "prometheusremotewrite" From 51720f3bd73beb25170e82ec1decd65c477e5779 Mon Sep 17 00:00:00 2001 From: Raphael Yu Date: Wed, 28 Jul 2021 05:13:12 +0800 Subject: [PATCH 527/761] Attach the pod labels to the `kubernetes_pod_volume` & `kubernetes_pod_network` metrics. (#9438) --- plugins/inputs/kubernetes/kubernetes.go | 28 +++++++++++++------- plugins/inputs/kubernetes/kubernetes_test.go | 4 +++ 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index 32bfc04a061e6..ab1cf4bfe4afc 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -234,6 +234,17 @@ func (k *Kubernetes) LoadJSON(url string, v interface{}) error { func buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, acc telegraf.Accumulator) { for _, pod := range summaryMetrics.Pods { + podLabels := make(map[string]string) + for _, info := range podInfo { + if info.Name == pod.PodRef.Name && info.Namespace == pod.PodRef.Namespace { + for k, v := range info.Labels { + if labelFilter.Match(k) { + podLabels[k] = v + } + } + } + } + for _, container := range pod.Containers { tags := map[string]string{ "node_name": summaryMetrics.Node.NodeName, @@ -241,16 +252,9 @@ func buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFi "container_name": container.Name, "pod_name": pod.PodRef.Name, } - for _, info := range podInfo { - if info.Name == pod.PodRef.Name && info.Namespace == pod.PodRef.Namespace { - for k, v := range info.Labels { - if labelFilter.Match(k) { - tags[k] = v - } - } - } + for k, v := range podLabels { + tags[k] = v } - fields := make(map[string]interface{}) fields["cpu_usage_nanocores"] = container.CPU.UsageNanoCores fields["cpu_usage_core_nanoseconds"] = container.CPU.UsageCoreNanoSeconds @@ -275,6 +279,9 @@ func buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFi "namespace": pod.PodRef.Namespace, "volume_name": volume.Name, } + for k, v := range podLabels { + tags[k] = v + } fields := make(map[string]interface{}) fields["available_bytes"] = volume.AvailableBytes fields["capacity_bytes"] = volume.CapacityBytes @@ -287,6 +294,9 @@ func buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFi "pod_name": pod.PodRef.Name, "namespace": pod.PodRef.Namespace, } + for k, v := range podLabels { + tags[k] = v + } fields := make(map[string]interface{}) fields["rx_bytes"] = pod.Network.RXBytes fields["rx_errors"] = pod.Network.RXErrors diff --git a/plugins/inputs/kubernetes/kubernetes_test.go b/plugins/inputs/kubernetes/kubernetes_test.go index 531dd13f950c8..864905448780d 100644 --- a/plugins/inputs/kubernetes/kubernetes_test.go +++ b/plugins/inputs/kubernetes/kubernetes_test.go @@ -141,6 +141,8 @@ func TestKubernetesStats(t *testing.T) { "volume_name": "volume1", "namespace": "foons", "pod_name": "foopod", + "app": "foo", + "superkey": "foobar", } acc.AssertContainsTaggedFields(t, "kubernetes_pod_volume", fields, tags) @@ -154,6 +156,8 @@ func TestKubernetesStats(t *testing.T) { "node_name": "node1", "namespace": "foons", "pod_name": "foopod", + "app": "foo", + "superkey": "foobar", } acc.AssertContainsTaggedFields(t, "kubernetes_pod_network", fields, tags) } From 27b98083f4480564f0b62c2bb32042773db47cb6 Mon Sep 17 00:00:00 2001 From: bhsu-ms <72472578+bhsu-ms@users.noreply.github.com> Date: Tue, 27 Jul 2021 14:14:49 -0700 Subject: [PATCH 528/761] Change the timeout from all queries to per query (#9471) --- plugins/inputs/sql/README.md | 1 + plugins/inputs/sql/sql.go | 8 +++----- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/sql/README.md b/plugins/inputs/sql/README.md index 9c002df18dbd3..cc8a464016d28 100644 --- a/plugins/inputs/sql/README.md +++ b/plugins/inputs/sql/README.md @@ -22,6 +22,7 @@ generate it using `telegraf --usage `. dsn = "username:password@mysqlserver:3307/dbname?param=value" ## Timeout for any operation + ## Note that the timeout for queries is per query not per gather. # timeout = "5s" ## Connection time limits diff --git a/plugins/inputs/sql/sql.go b/plugins/inputs/sql/sql.go index 383f04c40c454..c6c4658d83959 100644 --- a/plugins/inputs/sql/sql.go +++ b/plugins/inputs/sql/sql.go @@ -30,6 +30,7 @@ const sampleConfig = ` dsn = "username:password@mysqlserver:3307/dbname?param=value" ## Timeout for any operation + ## Note that the timeout for queries is per query not per gather. # timeout = "5s" ## Connection time limits @@ -486,16 +487,13 @@ func (s *SQL) Stop() { func (s *SQL) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup - - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout)) - defer cancel() - tstart := time.Now() for _, query := range s.Queries { wg.Add(1) - go func(q Query) { defer wg.Done() + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout)) + defer cancel() if err := s.executeQuery(ctx, acc, q, tstart); err != nil { acc.AddError(err) } From 3f9643dd7e66df83bb18f78490685d31929e5dbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Jaber?= Date: Tue, 27 Jul 2021 16:16:53 -0500 Subject: [PATCH 529/761] [Docs] Clarify tagging behavior (#9461) --- docs/CONFIGURATION.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 4965a4337f8d8..70e7981c9450b 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -144,6 +144,7 @@ combining an integer value and time unit as a string value. Valid time units ar Global tags can be specified in the `[global_tags]` table in key="value" format. All metrics that are gathered will be tagged with the tags specified. +Global tags are overriden by tags set by plugins. ```toml [global_tags] @@ -432,7 +433,7 @@ Parameters that can be used with any aggregator plugin: the name of the input). - **name_prefix**: Specifies a prefix to attach to the measurement name. - **name_suffix**: Specifies a suffix to attach to the measurement name. -- **tags**: A map of tags to apply to a specific input's measurements. +- **tags**: A map of tags to apply to the measurement - behavior varies based on aggregator. The [metric filtering][] parameters can be used to limit what metrics are handled by the aggregator. Excluded metrics are passed downstream to the next From f241f91112e3c5217bad0d2279e841c9d49c0267 Mon Sep 17 00:00:00 2001 From: Hwanjin Jeong Date: Wed, 28 Jul 2021 06:17:42 +0900 Subject: [PATCH 530/761] Support Landing page on Prometheus landing page (#8641) --- .../prometheus_client/prometheus_client.go | 6 ++++- .../prometheus_client_v1_test.go | 27 +++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 13ba74f822e8f..9c54c2dade83a 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -159,12 +159,16 @@ func (p *PrometheusClient) Init() error { authHandler := internal.AuthHandler(p.BasicUsername, p.BasicPassword, "prometheus", onAuthError) rangeHandler := internal.IPRangeHandler(ipRange, onError) promHandler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}) + landingPageHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Telegraf Output Plugin: Prometheus Client ")) + }) mux := http.NewServeMux() if p.Path == "" { - p.Path = "/" + p.Path = "/metrics" } mux.Handle(p.Path, authHandler(rangeHandler(promHandler))) + mux.Handle("/", authHandler(rangeHandler(landingPageHandler))) tlsConfig, err := p.TLSConfig() if err != nil { diff --git a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go index adf18c9f0f076..39b8fec262095 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "net/http" "net/http/httptest" + "net/url" "strings" "testing" "time" @@ -400,3 +401,29 @@ rpc_duration_seconds_count 2693 }) } } + +func TestLandingPage(t *testing.T) { + Logger := testutil.Logger{Name: "outputs.prometheus_client"} + output := PrometheusClient{ + Listen: ":0", + CollectorsExclude: []string{"process"}, + MetricVersion: 1, + Log: Logger, + } + expected := "Telegraf Output Plugin: Prometheus Client" + + err := output.Init() + require.NoError(t, err) + + err = output.Connect() + require.NoError(t, err) + + u, err := url.Parse(fmt.Sprintf("http://%s/", output.url.Host)) + resp, err := http.Get(u.String()) + require.NoError(t, err) + + actual, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, expected, strings.TrimSpace(string(actual))) +} From 1a42c7d289977b0d262e6fb456741ff060a577ca Mon Sep 17 00:00:00 2001 From: Martin Molnar Date: Tue, 27 Jul 2021 23:23:01 +0200 Subject: [PATCH 531/761] For Prometheus Input add ability to query Consul Service catalog (#5464) --- plugins/inputs/prometheus/README.md | 33 ++++ plugins/inputs/prometheus/consul.go | 208 ++++++++++++++++++++++++ plugins/inputs/prometheus/kubernetes.go | 5 +- plugins/inputs/prometheus/prometheus.go | 45 ++++- 4 files changed, 280 insertions(+), 11 deletions(-) create mode 100644 plugins/inputs/prometheus/consul.go diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index 88aa5be4941f2..c826fd0e015ab 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -58,6 +58,19 @@ in Prometheus format. # field selector to target pods # eg. To scrape pods on a specific node # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" + + ## Scrape Services available in Consul Catalog + # [inputs.prometheus.consul] + # enabled = true + # agent = "http://localhost:8500" + # query_interval = "5m" + + # [[inputs.prometheus.consul.query]] + # name = "a service name" + # tag = "a service tag" + # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' + # [inputs.prometheus.consul.query.tags] + # host = "{{.Node}}" ## Use bearer token for authorization. ('bearer_token' takes priority) # bearer_token = "/path/to/bearer/token" @@ -117,6 +130,26 @@ env: If using node level scrape scope, `pod_scrape_interval` specifies how often (in seconds) the pod list for scraping should updated. If not specified, the default is 60 seconds. +#### Consul Service Discovery + +Enabling this option and configuring consul `agent` url will allow the plugin to query +consul catalog for available services. Using `query_interval` the plugin will periodically +query the consul catalog for services with `name` and `tag` and refresh the list of scraped urls. +It can use the information from the catalog to build the scraped url and additional tags from a template. + +Multiple consul queries can be configured, each for different service. +The following example fields can be used in url or tag templates: +* Node +* Address +* NodeMeta +* ServicePort +* ServiceAddress +* ServiceTags +* ServiceMeta + +For full list of available fields and their type see struct CatalogService in +https://github.com/hashicorp/consul/blob/master/api/catalog.go + #### Bearer Token If set, the file specified by the `bearer_token` parameter will be read on diff --git a/plugins/inputs/prometheus/consul.go b/plugins/inputs/prometheus/consul.go new file mode 100644 index 0000000000000..2f008a495c09b --- /dev/null +++ b/plugins/inputs/prometheus/consul.go @@ -0,0 +1,208 @@ +package prometheus + +import ( + "bytes" + "context" + "fmt" + "net/url" + "strings" + "text/template" + "time" + + "github.com/hashicorp/consul/api" + "github.com/influxdata/telegraf/config" +) + +type ConsulConfig struct { + // Address of the Consul agent. The address must contain a hostname or an IP address + // and optionally a port (format: "host:port"). + Enabled bool `toml:"enabled"` + Agent string `toml:"agent"` + QueryInterval config.Duration `toml:"query_interval"` + Queries []*ConsulQuery `toml:"query"` +} + +// One Consul service discovery query +type ConsulQuery struct { + // A name of the searched services (not ID) + ServiceName string `toml:"name"` + + // A tag of the searched services + ServiceTag string `toml:"tag"` + + // A DC of the searched services + ServiceDc string `toml:"dc"` + + // A template URL of the Prometheus gathering interface. The hostname part + // of the URL will be replaced by discovered address and port. + ServiceURL string `toml:"url"` + + // Extra tags to add to metrics found in Consul + ServiceExtraTags map[string]string `toml:"tags"` + + serviceURLTemplate *template.Template + serviceExtraTagsTemplate map[string]*template.Template + + // Store last error status and change log level depending on repeated occurence + lastQueryFailed bool +} + +func (p *Prometheus) startConsul(ctx context.Context) error { + consulAPIConfig := api.DefaultConfig() + if p.ConsulConfig.Agent != "" { + consulAPIConfig.Address = p.ConsulConfig.Agent + } + + consul, err := api.NewClient(consulAPIConfig) + if err != nil { + return fmt.Errorf("cannot connect to the Consul agent: %v", err) + } + + // Parse the template for metrics URL, drop queries with template parse errors + i := 0 + for _, q := range p.ConsulConfig.Queries { + serviceURLTemplate, err := template.New("URL").Parse(q.ServiceURL) + if err != nil { + p.Log.Errorf("Could not parse the Consul query URL template (%s), skipping it. Error: %s", q.ServiceURL, err) + continue + } + q.serviceURLTemplate = serviceURLTemplate + + // Allow to use join function in tags + templateFunctions := template.FuncMap{"join": strings.Join} + // Parse the tag value templates + q.serviceExtraTagsTemplate = make(map[string]*template.Template) + for tagName, tagTemplateString := range q.ServiceExtraTags { + tagTemplate, err := template.New(tagName).Funcs(templateFunctions).Parse(tagTemplateString) + if err != nil { + p.Log.Errorf("Could not parse the Consul query Extra Tag template (%s), skipping it. Error: %s", tagTemplateString, err) + continue + } + q.serviceExtraTagsTemplate[tagName] = tagTemplate + } + p.ConsulConfig.Queries[i] = q + i++ + } + // Prevent memory leak by erasing truncated values + for j := i; j < len(p.ConsulConfig.Queries); j++ { + p.ConsulConfig.Queries[j] = nil + } + p.ConsulConfig.Queries = p.ConsulConfig.Queries[:i] + + catalog := consul.Catalog() + + p.wg.Add(1) + go func() { + // Store last error status and change log level depending on repeated occurence + var refreshFailed = false + defer p.wg.Done() + err := p.refreshConsulServices(catalog) + if err != nil { + refreshFailed = true + p.Log.Errorf("Unable to refreh Consul services: %v", err) + } + for { + select { + case <-ctx.Done(): + return + case <-time.After(time.Duration(p.ConsulConfig.QueryInterval)): + err := p.refreshConsulServices(catalog) + if err != nil { + message := fmt.Sprintf("Unable to refreh Consul services: %v", err) + if refreshFailed { + p.Log.Debug(message) + } else { + p.Log.Warn(message) + } + refreshFailed = true + } else if refreshFailed { + refreshFailed = false + p.Log.Info("Successfully refreshed Consul services after previous errors") + } + } + } + }() + + return nil +} + +func (p *Prometheus) refreshConsulServices(c *api.Catalog) error { + consulServiceURLs := make(map[string]URLAndAddress) + + p.Log.Debugf("Refreshing Consul services") + + for _, q := range p.ConsulConfig.Queries { + queryOptions := api.QueryOptions{} + if q.ServiceDc != "" { + queryOptions.Datacenter = q.ServiceDc + } + + // Request services from Consul + consulServices, _, err := c.Service(q.ServiceName, q.ServiceTag, &queryOptions) + if err != nil { + return err + } + if len(consulServices) == 0 { + p.Log.Debugf("Queried Consul for Service (%s, %s) but did not find any instances", q.ServiceName, q.ServiceTag) + continue + } + p.Log.Debugf("Queried Consul for Service (%s, %s) and found %d instances", q.ServiceName, q.ServiceTag, len(consulServices)) + + for _, consulService := range consulServices { + uaa, err := p.getConsulServiceURL(q, consulService) + if err != nil { + message := fmt.Sprintf("Unable to get scrape URLs from Consul for Service (%s, %s): %s", q.ServiceName, q.ServiceTag, err) + if q.lastQueryFailed { + p.Log.Debug(message) + } else { + p.Log.Warn(message) + } + q.lastQueryFailed = true + break + } + if q.lastQueryFailed { + p.Log.Infof("Created scrape URLs from Consul for Service (%s, %s)", q.ServiceName, q.ServiceTag) + } + q.lastQueryFailed = false + p.Log.Debugf("Adding scrape URL from Consul for Service (%s, %s): %s", q.ServiceName, q.ServiceTag, uaa.URL.String()) + consulServiceURLs[uaa.URL.String()] = *uaa + } + } + + p.lock.Lock() + p.consulServices = consulServiceURLs + p.lock.Unlock() + + return nil +} + +func (p *Prometheus) getConsulServiceURL(q *ConsulQuery, s *api.CatalogService) (*URLAndAddress, error) { + var buffer bytes.Buffer + buffer.Reset() + err := q.serviceURLTemplate.Execute(&buffer, s) + if err != nil { + return nil, err + } + serviceURL, err := url.Parse(buffer.String()) + if err != nil { + return nil, err + } + + extraTags := make(map[string]string) + for tagName, tagTemplate := range q.serviceExtraTagsTemplate { + buffer.Reset() + err = tagTemplate.Execute(&buffer, s) + if err != nil { + return nil, err + } + extraTags[tagName] = buffer.String() + } + + p.Log.Debugf("Will scrape metrics from Consul Service %s", serviceURL.String()) + + return &URLAndAddress{ + URL: serviceURL, + OriginalURL: serviceURL, + Tags: extraTags, + }, nil +} diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index c1fb3828114bc..e78c64af3fcd4 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -12,7 +12,6 @@ import ( "net/url" "os/user" "path/filepath" - "sync" "time" "github.com/ghodss/yaml" @@ -55,7 +54,7 @@ func loadClient(kubeconfigPath string) (*kubernetes.Clientset, error) { return kubernetes.NewForConfig(&config) } -func (p *Prometheus) start(ctx context.Context) error { +func (p *Prometheus) startK8s(ctx context.Context) error { config, err := rest.InClusterConfig() if err != nil { return fmt.Errorf("failed to get InClusterConfig - %v", err) @@ -77,8 +76,6 @@ func (p *Prometheus) start(ctx context.Context) error { } } - p.wg = sync.WaitGroup{} - p.wg.Add(1) go func() { defer p.wg.Done() diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 4a3b71408c552..adeb452253a37 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -41,6 +41,9 @@ type Prometheus struct { // Field Selector/s for Kubernetes KubernetesFieldSelector string `toml:"kubernetes_field_selector"` + // Consul SD configuration + ConsulConfig ConsulConfig `toml:"consul"` + // Bearer Token authorization file path BearerToken string `toml:"bearer_token"` BearerTokenString string `toml:"bearer_token_string"` @@ -77,6 +80,9 @@ type Prometheus struct { podLabelSelector labels.Selector podFieldSelector fields.Selector isNodeScrapeScope bool + + // List of consul services to scrape + consulServices map[string]URLAndAddress } var sampleConfig = ` @@ -127,6 +133,19 @@ var sampleConfig = ` # eg. To scrape pods on a specific node # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" + ## Scrape Services available in Consul Catalog + # [inputs.prometheus.consul] + # enabled = true + # agent = "http://localhost:8500" + # query_interval = "5m" + + # [[inputs.prometheus.consul.query]] + # name = "a service name" + # tag = "a service tag" + # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' + # [inputs.prometheus.consul.query.tags] + # host = "{{.Node}}" + ## Use bearer token for authorization. ('bearer_token' takes priority) # bearer_token = "/path/to/bearer/token" ## OR @@ -238,6 +257,10 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { p.lock.Lock() defer p.lock.Unlock() + // add all services collected from consul + for k, v := range p.consulServices { + allURLs[k] = v + } // loop through all pods scraped via the prometheus annotation on the pods for k, v := range p.kubernetesPods { allURLs[k] = v @@ -463,20 +486,27 @@ func fieldSelectorIsSupported(fieldSelector fields.Selector) (bool, string) { return true, "" } -// Start will start the Kubernetes scraping if enabled in the configuration +// Start will start the Kubernetes and/or Consul scraping if enabled in the configuration func (p *Prometheus) Start(_ telegraf.Accumulator) error { + var ctx context.Context + p.wg = sync.WaitGroup{} + ctx, p.cancel = context.WithCancel(context.Background()) + + if p.ConsulConfig.Enabled && len(p.ConsulConfig.Queries) > 0 { + if err := p.startConsul(ctx); err != nil { + return err + } + } if p.MonitorPods { - var ctx context.Context - ctx, p.cancel = context.WithCancel(context.Background()) - return p.start(ctx) + if err := p.startK8s(ctx); err != nil { + return err + } } return nil } func (p *Prometheus) Stop() { - if p.MonitorPods { - p.cancel() - } + p.cancel() p.wg.Wait() } @@ -485,6 +515,7 @@ func init() { return &Prometheus{ ResponseTimeout: config.Duration(time.Second * 3), kubernetesPods: map[string]URLAndAddress{}, + consulServices: map[string]URLAndAddress{}, URLTag: "url", } }) From 87c94e4ac3ed4dd4124fcb26a00e067157a5ebca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 27 Jul 2021 23:28:26 +0200 Subject: [PATCH 532/761] Linter fixes for plugins/inputs/[fg]* (#9387) --- plugins/inputs/fail2ban/fail2ban_test.go | 6 + plugins/inputs/file/file_test.go | 8 +- .../filecount/filesystem_helpers_test.go | 5 +- plugins/inputs/filestat/filestat.go | 4 +- plugins/inputs/filestat/filestat_test.go | 2 +- plugins/inputs/fluentd/fluentd.go | 4 +- plugins/inputs/fluentd/fluentd_test.go | 5 +- plugins/inputs/github/github.go | 31 ++-- plugins/inputs/gnmi/gnmi.go | 136 +++++++++--------- plugins/inputs/gnmi/gnmi_test.go | 128 ++++++++--------- 10 files changed, 172 insertions(+), 157 deletions(-) diff --git a/plugins/inputs/fail2ban/fail2ban_test.go b/plugins/inputs/fail2ban/fail2ban_test.go index 8ec313a1fbdda..1afac3d789abd 100644 --- a/plugins/inputs/fail2ban/fail2ban_test.go +++ b/plugins/inputs/fail2ban/fail2ban_test.go @@ -103,29 +103,35 @@ func TestHelperProcess(_ *testing.T) { if !strings.HasSuffix(cmd, "fail2ban-client") { //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // os.Exit called intentionally os.Exit(1) } if len(args) == 1 && args[0] == "status" { //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusOutput) + //nolint:revive // os.Exit called intentionally os.Exit(0) } else if len(args) == 2 && args[0] == "status" { if args[1] == "sshd" { //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusSshdOutput) + //nolint:revive // os.Exit called intentionally os.Exit(0) } else if args[1] == "postfix" { //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusPostfixOutput) + //nolint:revive // os.Exit called intentionally os.Exit(0) } else if args[1] == "dovecot" { //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusDovecotOutput) + //nolint:revive // os.Exit called intentionally os.Exit(0) } } //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, "invalid argument") + //nolint:revive // os.Exit called intentionally os.Exit(1) } diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go index f8f7d773f719d..e633559236bd2 100644 --- a/plugins/inputs/file/file_test.go +++ b/plugins/inputs/file/file_test.go @@ -11,15 +11,18 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestRefreshFilePaths(t *testing.T) { wd, err := os.Getwd() + require.NoError(t, err) + r := File{ Files: []string{filepath.Join(wd, "dev/testfiles/**.log")}, } @@ -100,7 +103,8 @@ func TestGrokParser(t *testing.T) { require.NoError(t, err) err = r.Gather(&acc) - require.Equal(t, len(acc.Metrics), 2) + require.NoError(t, err) + require.Len(t, acc.Metrics, 2) } func TestCharacterEncoding(t *testing.T) { diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go index b1dacc25bc731..8a6d9cf2aa035 100644 --- a/plugins/inputs/filecount/filesystem_helpers_test.go +++ b/plugins/inputs/filecount/filesystem_helpers_test.go @@ -53,11 +53,12 @@ func TestRealFS(t *testing.T) { fs = getTestFileSystem() // now, the same test as above will return an error as the file doesn't exist in our fake fs expectedError := "Stat " + getTestdataDir() + "/qux: No such file or directory" - fileInfo, err = fs.Stat(getTestdataDir() + "/qux") - require.Equal(t, expectedError, err.Error()) + _, err = fs.Stat(getTestdataDir() + "/qux") + require.Error(t, err, expectedError) // and verify that what we DO expect to find, we do fileInfo, err = fs.Stat("/testdata/foo") require.NoError(t, err) + require.NotNil(t, fileInfo) } func getTestFileSystem() fakeFileSystem { diff --git a/plugins/inputs/filestat/filestat.go b/plugins/inputs/filestat/filestat.go index 9450f9a41b77c..7d1143b74aaed 100644 --- a/plugins/inputs/filestat/filestat.go +++ b/plugins/inputs/filestat/filestat.go @@ -114,11 +114,11 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error { } if f.Md5 { - md5, err := getMd5(fileName) + md5Hash, err := getMd5(fileName) if err != nil { acc.AddError(err) } else { - fields["md5_sum"] = md5 + fields["md5_sum"] = md5Hash } } diff --git a/plugins/inputs/filestat/filestat_test.go b/plugins/inputs/filestat/filestat_test.go index 1c827f8dbe9ea..ea1bee47e4fb4 100644 --- a/plugins/inputs/filestat/filestat_test.go +++ b/plugins/inputs/filestat/filestat_test.go @@ -198,7 +198,7 @@ func TestGetMd5(t *testing.T) { require.NoError(t, err) require.Equal(t, "5a7e9b77fa25e7bb411dbd17cf403c1f", md5) - md5, err = getMd5("/tmp/foo/bar/fooooo") + _, err = getMd5("/tmp/foo/bar/fooooo") require.Error(t, err) } diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index dac25769a207c..03f46c67ce515 100644 --- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -63,11 +63,11 @@ func parse(data []byte) (datapointArray []pluginData, err error) { if err = json.Unmarshal(data, &endpointData); err != nil { err = fmt.Errorf("processing JSON structure") - return + return nil, err } datapointArray = append(datapointArray, endpointData.Payload...) - return + return datapointArray, err } // Description - display description diff --git a/plugins/inputs/fluentd/fluentd_test.go b/plugins/inputs/fluentd/fluentd_test.go index 61cd6576ec648..a822c763f1402 100644 --- a/plugins/inputs/fluentd/fluentd_test.go +++ b/plugins/inputs/fluentd/fluentd_test.go @@ -8,8 +8,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) // sampleJSON from fluentd version '0.14.9' @@ -127,6 +128,8 @@ func Test_Gather(t *testing.T) { })) requestURL, err := url.Parse(fluentdTest.Endpoint) + require.NoError(t, err) + require.NotNil(t, requestURL) ts.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go index 020775cb43e8c..31fcc56aecdae 100644 --- a/plugins/inputs/github/github.go +++ b/plugins/inputs/github/github.go @@ -8,12 +8,13 @@ import ( "sync" "time" - "github.com/google/go-github/v32/github" + githubLib "github.com/google/go-github/v32/github" + "golang.org/x/oauth2" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/selfstat" - "golang.org/x/oauth2" ) // GitHub - plugin main structure @@ -23,7 +24,7 @@ type GitHub struct { AdditionalFields []string `toml:"additional_fields"` EnterpriseBaseURL string `toml:"enterprise_base_url"` HTTPTimeout config.Duration `toml:"http_timeout"` - githubClient *github.Client + githubClient *githubLib.Client obfuscatedToken string @@ -68,7 +69,7 @@ func (g *GitHub) Description() string { } // Create GitHub Client -func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) { +func (g *GitHub) createGitHubClient(ctx context.Context) (*githubLib.Client, error) { httpClient := &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, @@ -93,11 +94,11 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) return g.newGithubClient(httpClient) } -func (g *GitHub) newGithubClient(httpClient *http.Client) (*github.Client, error) { +func (g *GitHub) newGithubClient(httpClient *http.Client) (*githubLib.Client, error) { if g.EnterpriseBaseURL != "" { - return github.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient) + return githubLib.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient) } - return github.NewClient(httpClient), nil + return githubLib.NewClient(httpClient), nil } // Gather GitHub Metrics @@ -172,16 +173,16 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { return nil } -func (g *GitHub) handleRateLimit(response *github.Response, err error) { +func (g *GitHub) handleRateLimit(response *githubLib.Response, err error) { if err == nil { g.RateLimit.Set(int64(response.Rate.Limit)) g.RateRemaining.Set(int64(response.Rate.Remaining)) - } else if _, ok := err.(*github.RateLimitError); ok { + } else if _, ok := err.(*githubLib.RateLimitError); ok { g.RateLimitErrors.Incr(1) } } -func splitRepositoryName(repositoryName string) (string, string, error) { +func splitRepositoryName(repositoryName string) (owner string, repository string, err error) { splits := strings.SplitN(repositoryName, "/", 2) if len(splits) != 2 { @@ -191,7 +192,7 @@ func splitRepositoryName(repositoryName string) (string, string, error) { return splits[0], splits[1], nil } -func getLicense(rI *github.Repository) string { +func getLicense(rI *githubLib.Repository) string { if licenseName := rI.GetLicense().GetName(); licenseName != "" { return licenseName } @@ -199,7 +200,7 @@ func getLicense(rI *github.Repository) string { return "None" } -func getTags(repositoryInfo *github.Repository) map[string]string { +func getTags(repositoryInfo *githubLib.Repository) map[string]string { return map[string]string{ "owner": repositoryInfo.GetOwner().GetLogin(), "name": repositoryInfo.GetName(), @@ -208,7 +209,7 @@ func getTags(repositoryInfo *github.Repository) map[string]string { } } -func getFields(repositoryInfo *github.Repository) map[string]interface{} { +func getFields(repositoryInfo *githubLib.Repository) map[string]interface{} { return map[string]interface{}{ "stars": repositoryInfo.GetStargazersCount(), "subscribers": repositoryInfo.GetSubscribersCount(), @@ -221,9 +222,9 @@ func getFields(repositoryInfo *github.Repository) map[string]interface{} { } func (g *GitHub) getPullRequestFields(ctx context.Context, owner, repo string) (map[string]interface{}, error) { - options := github.SearchOptions{ + options := githubLib.SearchOptions{ TextMatch: false, - ListOptions: github.ListOptions{ + ListOptions: githubLib.ListOptions{ PerPage: 100, Page: 1, }, diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 34bea672d7925..a6a3c3a2c6ef3 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -14,16 +14,17 @@ import ( "sync" "time" + gnmiLib "github.com/openconfig/gnmi/proto/gnmi" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" internaltls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" - "github.com/openconfig/gnmi/proto/gnmi" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" ) // gNMI plugin instance @@ -51,10 +52,10 @@ type GNMI struct { internaltls.ClientConfig // Internal state - aliases map[string]string - acc telegraf.Accumulator - cancel context.CancelFunc - wg sync.WaitGroup + internalAliases map[string]string + acc telegraf.Accumulator + cancel context.CancelFunc + wg sync.WaitGroup Log telegraf.Logger } @@ -79,7 +80,7 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { var err error var ctx context.Context var tlscfg *tls.Config - var request *gnmi.SubscribeRequest + var request *gnmiLib.SubscribeRequest c.acc = acc ctx, c.cancel = context.WithCancel(context.Background()) @@ -102,9 +103,9 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { } // Invert explicit alias list and prefill subscription names - c.aliases = make(map[string]string, len(c.Subscriptions)+len(c.Aliases)) + c.internalAliases = make(map[string]string, len(c.Subscriptions)+len(c.Aliases)) for _, subscription := range c.Subscriptions { - var gnmiLongPath, gnmiShortPath *gnmi.Path + var gnmiLongPath, gnmiShortPath *gnmiLib.Path // Build the subscription path without keys if gnmiLongPath, err = parsePath(subscription.Origin, subscription.Path, ""); err != nil { @@ -129,12 +130,12 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { name = path.Base(shortPath) } if len(name) > 0 { - c.aliases[longPath] = name - c.aliases[shortPath] = name + c.internalAliases[longPath] = name + c.internalAliases[shortPath] = name } } - for alias, path := range c.Aliases { - c.aliases[path] = alias + for alias, encodingPath := range c.Aliases { + c.internalAliases[encodingPath] = alias } // Create a goroutine for each device, dial and subscribe @@ -158,21 +159,21 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { } // Create a new gNMI SubscribeRequest -func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { +func (c *GNMI) newSubscribeRequest() (*gnmiLib.SubscribeRequest, error) { // Create subscription objects - subscriptions := make([]*gnmi.Subscription, len(c.Subscriptions)) + subscriptions := make([]*gnmiLib.Subscription, len(c.Subscriptions)) for i, subscription := range c.Subscriptions { gnmiPath, err := parsePath(subscription.Origin, subscription.Path, "") if err != nil { return nil, err } - mode, ok := gnmi.SubscriptionMode_value[strings.ToUpper(subscription.SubscriptionMode)] + mode, ok := gnmiLib.SubscriptionMode_value[strings.ToUpper(subscription.SubscriptionMode)] if !ok { return nil, fmt.Errorf("invalid subscription mode %s", subscription.SubscriptionMode) } - subscriptions[i] = &gnmi.Subscription{ + subscriptions[i] = &gnmiLib.Subscription{ Path: gnmiPath, - Mode: gnmi.SubscriptionMode(mode), + Mode: gnmiLib.SubscriptionMode(mode), SampleInterval: uint64(time.Duration(subscription.SampleInterval).Nanoseconds()), SuppressRedundant: subscription.SuppressRedundant, HeartbeatInterval: uint64(time.Duration(subscription.HeartbeatInterval).Nanoseconds()), @@ -189,12 +190,12 @@ func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { return nil, fmt.Errorf("unsupported encoding %s", c.Encoding) } - return &gnmi.SubscribeRequest{ - Request: &gnmi.SubscribeRequest_Subscribe{ - Subscribe: &gnmi.SubscriptionList{ + return &gnmiLib.SubscribeRequest{ + Request: &gnmiLib.SubscribeRequest_Subscribe{ + Subscribe: &gnmiLib.SubscriptionList{ Prefix: gnmiPath, - Mode: gnmi.SubscriptionList_STREAM, - Encoding: gnmi.Encoding(gnmi.Encoding_value[strings.ToUpper(c.Encoding)]), + Mode: gnmiLib.SubscriptionList_STREAM, + Encoding: gnmiLib.Encoding(gnmiLib.Encoding_value[strings.ToUpper(c.Encoding)]), Subscription: subscriptions, UpdatesOnly: c.UpdatesOnly, }, @@ -203,7 +204,7 @@ func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { } // SubscribeGNMI and extract telemetry data -func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Config, request *gnmi.SubscribeRequest) error { +func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Config, request *gnmiLib.SubscribeRequest) error { var opt grpc.DialOption if tlscfg != nil { opt = grpc.WithTransportCredentials(credentials.NewTLS(tlscfg)) @@ -217,7 +218,7 @@ func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Co } defer client.Close() - subscribeClient, err := gnmi.NewGNMIClient(client).Subscribe(ctx) + subscribeClient, err := gnmiLib.NewGNMIClient(client).Subscribe(ctx) if err != nil { return fmt.Errorf("failed to setup subscription: %v", err) } @@ -233,7 +234,7 @@ func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Co c.Log.Debugf("Connection to gNMI device %s established", address) defer c.Log.Debugf("Connection to gNMI device %s closed", address) for ctx.Err() == nil { - var reply *gnmi.SubscribeResponse + var reply *gnmiLib.SubscribeResponse if reply, err = subscribeClient.Recv(); err != nil { if err != io.EOF && ctx.Err() == nil { return fmt.Errorf("aborted gNMI subscription: %v", err) @@ -246,17 +247,17 @@ func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Co return nil } -func (c *GNMI) handleSubscribeResponse(address string, reply *gnmi.SubscribeResponse) { +func (c *GNMI) handleSubscribeResponse(address string, reply *gnmiLib.SubscribeResponse) { switch response := reply.Response.(type) { - case *gnmi.SubscribeResponse_Update: + case *gnmiLib.SubscribeResponse_Update: c.handleSubscribeResponseUpdate(address, response) - case *gnmi.SubscribeResponse_Error: + case *gnmiLib.SubscribeResponse_Error: c.Log.Errorf("Subscribe error (%d), %q", response.Error.Code, response.Error.Message) } } // Handle SubscribeResponse_Update message from gNMI and parse contained telemetry data -func (c *GNMI) handleSubscribeResponseUpdate(address string, response *gnmi.SubscribeResponse_Update) { +func (c *GNMI) handleSubscribeResponseUpdate(address string, response *gnmiLib.SubscribeResponse_Update) { var prefix, prefixAliasPath string grouper := metric.NewSeriesGrouper() timestamp := time.Unix(0, response.Update.Timestamp) @@ -289,7 +290,7 @@ func (c *GNMI) handleSubscribeResponseUpdate(address string, response *gnmi.Subs // Lookup alias if alias-path has changed if aliasPath != lastAliasPath { name = prefix - if alias, ok := c.aliases[aliasPath]; ok { + if alias, ok := c.internalAliases[aliasPath]; ok { name = alias } else { c.Log.Debugf("No measurement alias for gNMI path: %s", name) @@ -325,13 +326,13 @@ func (c *GNMI) handleSubscribeResponseUpdate(address string, response *gnmi.Subs } // Add grouped measurements - for _, metric := range grouper.Metrics() { - c.acc.AddMetric(metric) + for _, metricToAdd := range grouper.Metrics() { + c.acc.AddMetric(metricToAdd) } } // HandleTelemetryField and add it to a measurement -func (c *GNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, prefix string) (string, map[string]interface{}) { +func (c *GNMI) handleTelemetryField(update *gnmiLib.Update, tags map[string]string, prefix string) (string, map[string]interface{}) { gpath, aliasPath, err := c.handlePath(update.Path, tags, prefix) if err != nil { c.Log.Errorf("handling path %q failed: %v", update.Path, err) @@ -347,25 +348,25 @@ func (c *GNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, } switch val := update.Val.Value.(type) { - case *gnmi.TypedValue_AsciiVal: + case *gnmiLib.TypedValue_AsciiVal: value = val.AsciiVal - case *gnmi.TypedValue_BoolVal: + case *gnmiLib.TypedValue_BoolVal: value = val.BoolVal - case *gnmi.TypedValue_BytesVal: + case *gnmiLib.TypedValue_BytesVal: value = val.BytesVal - case *gnmi.TypedValue_DecimalVal: + case *gnmiLib.TypedValue_DecimalVal: value = float64(val.DecimalVal.Digits) / math.Pow(10, float64(val.DecimalVal.Precision)) - case *gnmi.TypedValue_FloatVal: + case *gnmiLib.TypedValue_FloatVal: value = val.FloatVal - case *gnmi.TypedValue_IntVal: + case *gnmiLib.TypedValue_IntVal: value = val.IntVal - case *gnmi.TypedValue_StringVal: + case *gnmiLib.TypedValue_StringVal: value = val.StringVal - case *gnmi.TypedValue_UintVal: + case *gnmiLib.TypedValue_UintVal: value = val.UintVal - case *gnmi.TypedValue_JsonIetfVal: + case *gnmiLib.TypedValue_JsonIetfVal: jsondata = val.JsonIetfVal - case *gnmi.TypedValue_JsonVal: + case *gnmiLib.TypedValue_JsonVal: jsondata = val.JsonVal } @@ -387,13 +388,12 @@ func (c *GNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, } // Parse path to path-buffer and tag-field -func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string) (string, string, error) { - var aliasPath string +func (c *GNMI) handlePath(gnmiPath *gnmiLib.Path, tags map[string]string, prefix string) (pathBuffer string, aliasPath string, err error) { builder := bytes.NewBufferString(prefix) // Prefix with origin - if len(path.Origin) > 0 { - if _, err := builder.WriteString(path.Origin); err != nil { + if len(gnmiPath.Origin) > 0 { + if _, err := builder.WriteString(gnmiPath.Origin); err != nil { return "", "", err } if _, err := builder.WriteRune(':'); err != nil { @@ -402,7 +402,7 @@ func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string } // Parse generic keys from prefix - for _, elem := range path.Elem { + for _, elem := range gnmiPath.Elem { if len(elem.Name) > 0 { if _, err := builder.WriteRune('/'); err != nil { return "", "", err @@ -413,7 +413,7 @@ func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string } name := builder.String() - if _, exists := c.aliases[name]; exists { + if _, exists := c.internalAliases[name]; exists { aliasPath = name } @@ -435,21 +435,21 @@ func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string } //ParsePath from XPath-like string to gNMI path structure -func parsePath(origin string, path string, target string) (*gnmi.Path, error) { +func parsePath(origin string, pathToParse string, target string) (*gnmiLib.Path, error) { var err error - gnmiPath := gnmi.Path{Origin: origin, Target: target} + gnmiPath := gnmiLib.Path{Origin: origin, Target: target} - if len(path) > 0 && path[0] != '/' { - return nil, fmt.Errorf("path does not start with a '/': %s", path) + if len(pathToParse) > 0 && pathToParse[0] != '/' { + return nil, fmt.Errorf("path does not start with a '/': %s", pathToParse) } - elem := &gnmi.PathElem{} + elem := &gnmiLib.PathElem{} start, name, value, end := 0, -1, -1, -1 - path = path + "/" + pathToParse = pathToParse + "/" - for i := 0; i < len(path); i++ { - if path[i] == '[' { + for i := 0; i < len(pathToParse); i++ { + if pathToParse[i] == '[' { if name >= 0 { break } @@ -458,37 +458,37 @@ func parsePath(origin string, path string, target string) (*gnmi.Path, error) { elem.Key = make(map[string]string) } name = i + 1 - } else if path[i] == '=' { + } else if pathToParse[i] == '=' { if name <= 0 || value >= 0 { break } value = i + 1 - } else if path[i] == ']' { + } else if pathToParse[i] == ']' { if name <= 0 || value <= name { break } - elem.Key[path[name:value-1]] = strings.Trim(path[value:i], "'\"") + elem.Key[pathToParse[name:value-1]] = strings.Trim(pathToParse[value:i], "'\"") name, value = -1, -1 - } else if path[i] == '/' { + } else if pathToParse[i] == '/' { if name < 0 { if end < 0 { end = i } if end > start { - elem.Name = path[start:end] + elem.Name = pathToParse[start:end] gnmiPath.Elem = append(gnmiPath.Elem, elem) - gnmiPath.Element = append(gnmiPath.Element, path[start:i]) + gnmiPath.Element = append(gnmiPath.Element, pathToParse[start:i]) } start, name, value, end = i+1, -1, -1, -1 - elem = &gnmi.PathElem{} + elem = &gnmiLib.PathElem{} } } } if name >= 0 || value >= 0 { - err = fmt.Errorf("Invalid gNMI path: %s", path) + err = fmt.Errorf("Invalid gNMI path: %s", pathToParse) } if err != nil { diff --git a/plugins/inputs/gnmi/gnmi_test.go b/plugins/inputs/gnmi/gnmi_test.go index cfc43e8246186..17a955c4875dc 100644 --- a/plugins/inputs/gnmi/gnmi_test.go +++ b/plugins/inputs/gnmi/gnmi_test.go @@ -9,54 +9,54 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/testutil" - "github.com/openconfig/gnmi/proto/gnmi" - "github.com/stretchr/testify/assert" + gnmiLib "github.com/openconfig/gnmi/proto/gnmi" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/metadata" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" ) func TestParsePath(t *testing.T) { path := "/foo/bar/bla[shoo=woo][shoop=/woop/]/z" parsed, err := parsePath("theorigin", path, "thetarget") - assert.NoError(t, err) - assert.Equal(t, parsed.Origin, "theorigin") - assert.Equal(t, parsed.Target, "thetarget") - assert.Equal(t, parsed.Element, []string{"foo", "bar", "bla[shoo=woo][shoop=/woop/]", "z"}) - assert.Equal(t, parsed.Elem, []*gnmi.PathElem{{Name: "foo"}, {Name: "bar"}, - {Name: "bla", Key: map[string]string{"shoo": "woo", "shoop": "/woop/"}}, {Name: "z"}}) + require.NoError(t, err) + require.Equal(t, "theorigin", parsed.Origin) + require.Equal(t, "thetarget", parsed.Target) + require.Equal(t, []string{"foo", "bar", "bla[shoo=woo][shoop=/woop/]", "z"}, parsed.Element) + require.Equal(t, []*gnmiLib.PathElem{{Name: "foo"}, {Name: "bar"}, + {Name: "bla", Key: map[string]string{"shoo": "woo", "shoop": "/woop/"}}, {Name: "z"}}, parsed.Elem) parsed, err = parsePath("", "", "") - assert.NoError(t, err) - assert.Equal(t, *parsed, gnmi.Path{}) + require.NoError(t, err) + require.Equal(t, gnmiLib.Path{}, *parsed) parsed, err = parsePath("", "/foo[[", "") - assert.Nil(t, parsed) - assert.Equal(t, errors.New("Invalid gNMI path: /foo[[/"), err) + require.Nil(t, parsed) + require.Equal(t, errors.New("Invalid gNMI path: /foo[[/"), err) } type MockServer struct { - SubscribeF func(gnmi.GNMI_SubscribeServer) error + SubscribeF func(gnmiLib.GNMI_SubscribeServer) error GRPCServer *grpc.Server } -func (s *MockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { +func (s *MockServer) Capabilities(context.Context, *gnmiLib.CapabilityRequest) (*gnmiLib.CapabilityResponse, error) { return nil, nil } -func (s *MockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) { +func (s *MockServer) Get(context.Context, *gnmiLib.GetRequest) (*gnmiLib.GetResponse, error) { return nil, nil } -func (s *MockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) { +func (s *MockServer) Set(context.Context, *gnmiLib.SetRequest) (*gnmiLib.SetResponse, error) { return nil, nil } -func (s *MockServer) Subscribe(server gnmi.GNMI_SubscribeServer) error { +func (s *MockServer) Subscribe(server gnmiLib.GNMI_SubscribeServer) error { return s.SubscribeF(server) } @@ -66,12 +66,12 @@ func TestWaitError(t *testing.T) { grpcServer := grpc.NewServer() gnmiServer := &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { return fmt.Errorf("testerror") }, GRPCServer: grpcServer, } - gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) plugin := &GNMI{ Log: testutil.Logger{}, @@ -107,7 +107,7 @@ func TestUsernamePassword(t *testing.T) { grpcServer := grpc.NewServer() gnmiServer := &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { metadata, ok := metadata.FromIncomingContext(server.Context()) if !ok { return errors.New("failed to get metadata") @@ -127,7 +127,7 @@ func TestUsernamePassword(t *testing.T) { }, GRPCServer: grpcServer, } - gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) plugin := &GNMI{ Log: testutil.Logger{}, @@ -159,12 +159,12 @@ func TestUsernamePassword(t *testing.T) { errors.New("aborted gNMI subscription: rpc error: code = Unknown desc = success")) } -func mockGNMINotification() *gnmi.Notification { - return &gnmi.Notification{ +func mockGNMINotification() *gnmiLib.Notification { + return &gnmiLib.Notification{ Timestamp: 1543236572000000000, - Prefix: &gnmi.Path{ + Prefix: &gnmiLib.Path{ Origin: "type", - Elem: []*gnmi.PathElem{ + Elem: []*gnmiLib.PathElem{ { Name: "model", Key: map[string]string{"foo": "bar"}, @@ -172,35 +172,35 @@ func mockGNMINotification() *gnmi.Notification { }, Target: "subscription", }, - Update: []*gnmi.Update{ + Update: []*gnmiLib.Update{ { - Path: &gnmi.Path{ - Elem: []*gnmi.PathElem{ + Path: &gnmiLib.Path{ + Elem: []*gnmiLib.PathElem{ {Name: "some"}, { Name: "path", Key: map[string]string{"name": "str", "uint64": "1234"}}, }, }, - Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_IntVal{IntVal: 5678}}, + Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_IntVal{IntVal: 5678}}, }, { - Path: &gnmi.Path{ - Elem: []*gnmi.PathElem{ + Path: &gnmiLib.Path{ + Elem: []*gnmiLib.PathElem{ {Name: "other"}, {Name: "path"}, }, }, - Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "foobar"}}, + Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_StringVal{StringVal: "foobar"}}, }, { - Path: &gnmi.Path{ - Elem: []*gnmi.PathElem{ + Path: &gnmiLib.Path{ + Elem: []*gnmiLib.PathElem{ {Name: "other"}, {Name: "this"}, }, }, - Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "that"}}, + Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_StringVal{StringVal: "that"}}, }, }, } @@ -229,20 +229,20 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { notification := mockGNMINotification() - err := server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) if err != nil { return err } - err = server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}) + err = server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}) if err != nil { return err } notification.Prefix.Elem[0].Key["foo"] = "bar2" notification.Update[0].Path.Elem[1].Key["name"] = "str2" - notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} - return server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + notification.Update[0].Val = &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} + return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) }, }, expected: []telegraf.Metric{ @@ -318,14 +318,14 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { - response := &gnmi.SubscribeResponse{ - Response: &gnmi.SubscribeResponse_Update{ - Update: &gnmi.Notification{ + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { + response := &gnmiLib.SubscribeResponse{ + Response: &gnmiLib.SubscribeResponse_Update{ + Update: &gnmiLib.Notification{ Timestamp: 1543236572000000000, - Prefix: &gnmi.Path{ + Prefix: &gnmiLib.Path{ Origin: "type", - Elem: []*gnmi.PathElem{ + Elem: []*gnmiLib.PathElem{ { Name: "state", }, @@ -342,11 +342,11 @@ func TestNotification(t *testing.T) { }, Target: "subscription", }, - Update: []*gnmi.Update{ + Update: []*gnmiLib.Update{ { - Path: &gnmi.Path{}, - Val: &gnmi.TypedValue{ - Value: &gnmi.TypedValue_IntVal{IntVal: 42}, + Path: &gnmiLib.Path{}, + Val: &gnmiLib.TypedValue{ + Value: &gnmiLib.TypedValue_IntVal{IntVal: 42}, }, }, }, @@ -382,7 +382,7 @@ func TestNotification(t *testing.T) { grpcServer := grpc.NewServer() tt.server.GRPCServer = grpcServer - gnmi.RegisterGNMIServer(grpcServer, tt.server) + gnmiLib.RegisterGNMIServer(grpcServer, tt.server) var acc testutil.Accumulator err = tt.plugin.Start(&acc) @@ -424,10 +424,10 @@ func TestSubscribeResponseError(t *testing.T) { ml := &MockLogger{} plugin := &GNMI{Log: ml} // TODO: FIX SA1019: gnmi.Error is deprecated: Do not use. - errorResponse := &gnmi.SubscribeResponse_Error{Error: &gnmi.Error{Message: me, Code: mc}} - plugin.handleSubscribeResponse("127.0.0.1:0", &gnmi.SubscribeResponse{Response: errorResponse}) + errorResponse := &gnmiLib.SubscribeResponse_Error{Error: &gnmiLib.Error{Message: me, Code: mc}} + plugin.handleSubscribeResponse("127.0.0.1:0", &gnmiLib.SubscribeResponse{Response: errorResponse}) require.NotEmpty(t, ml.lastFormat) - require.Equal(t, ml.lastArgs, []interface{}{mc, me}) + require.Equal(t, []interface{}{mc, me}, ml.lastArgs) } func TestRedial(t *testing.T) { @@ -443,13 +443,13 @@ func TestRedial(t *testing.T) { grpcServer := grpc.NewServer() gnmiServer := &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { notification := mockGNMINotification() - return server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) }, GRPCServer: grpcServer, } - gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) var wg sync.WaitGroup wg.Add(1) @@ -473,16 +473,16 @@ func TestRedial(t *testing.T) { grpcServer = grpc.NewServer() gnmiServer = &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { notification := mockGNMINotification() notification.Prefix.Elem[0].Key["foo"] = "bar2" notification.Update[0].Path.Elem[1].Key["name"] = "str2" - notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: false}} - return server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + notification.Update[0].Val = &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_BoolVal{BoolVal: false}} + return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) }, GRPCServer: grpcServer, } - gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) wg.Add(1) go func() { From 837eb31b3fc6ef328d5076373efe5d13e2d336d8 Mon Sep 17 00:00:00 2001 From: Roger Coll Aumatell Date: Tue, 27 Jul 2021 23:31:24 +0200 Subject: [PATCH 533/761] Suricata alerts (#9322) --- plugins/inputs/suricata/README.md | 19 +++++- plugins/inputs/suricata/suricata.go | 76 +++++++++++++++------ plugins/inputs/suricata/suricata_test.go | 64 +++++++++++++++++ plugins/inputs/suricata/testdata/test3.json | 1 + 4 files changed, 140 insertions(+), 20 deletions(-) create mode 100644 plugins/inputs/suricata/testdata/test3.json diff --git a/plugins/inputs/suricata/README.md b/plugins/inputs/suricata/README.md index 18b26298e7af4..61f940a8df01d 100644 --- a/plugins/inputs/suricata/README.md +++ b/plugins/inputs/suricata/README.md @@ -4,6 +4,7 @@ This plugin reports internal performance counters of the Suricata IDS/IPS engine, such as captured traffic volume, memory usage, uptime, flow counters, and much more. It provides a socket for the Suricata log output to write JSON stats output to, and processes the incoming data to fit Telegraf's format. +It can also report for triggered Suricata IDS/IPS alerts. ### Configuration @@ -17,6 +18,9 @@ stats output to, and processes the incoming data to fit Telegraf's format. # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" # becomes "detect_alert" when delimiter is "_". delimiter = "_" + + # Detect alert logs + alerts = false ``` ### Metrics @@ -26,7 +30,7 @@ stats output. See http://suricata.readthedocs.io/en/latest/performance/statistics.html for more information. -All fields are numeric. +All fields for Suricata stats are numeric. - suricata - tags: - thread: `Global` for global statistics (if enabled), thread IDs (e.g. `W#03-enp0s31f6`) for thread-specific statistics @@ -94,6 +98,19 @@ All fields are numeric. - tcp_synack - ... +Some fields of the Suricata alerts are strings, for example the signatures. See https://suricata.readthedocs.io/en/suricata-6.0.0/output/eve/eve-json-format.html?highlight=priority#event-type-alert for more information. + +- suricata_alert + - fields: + - action + - gid + - severity + - signature + - source_ip + - source_port + - target_port + - target_port + - ... #### Suricata configuration diff --git a/plugins/inputs/suricata/suricata.go b/plugins/inputs/suricata/suricata.go index 8fd48b5cfd747..5e1dc384478b7 100644 --- a/plugins/inputs/suricata/suricata.go +++ b/plugins/inputs/suricata/suricata.go @@ -25,6 +25,7 @@ const ( type Suricata struct { Source string `toml:"source"` Delimiter string `toml:"delimiter"` + Alerts bool `toml:"alerts"` inputListener *net.UnixListener cancel context.CancelFunc @@ -36,11 +37,11 @@ type Suricata struct { // Description returns the plugin description. func (s *Suricata) Description() string { - return "Suricata stats plugin" + return "Suricata stats and alerts plugin" } const sampleConfig = ` - ## Data sink for Suricata stats log + ## Data sink for Suricata stats and alerts logs # This is expected to be a filename of a # unix socket to be created for listening. source = "/var/run/suricata-stats.sock" @@ -48,6 +49,9 @@ const sampleConfig = ` # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" # becomes "detect_alert" when delimiter is "_". delimiter = "_" + + ## Detect alert logs + # alerts = false ` // SampleConfig returns a sample TOML section to illustrate configuration @@ -100,8 +104,12 @@ func (s *Suricata) readInput(ctx context.Context, acc telegraf.Accumulator, conn line, rerr := reader.ReadBytes('\n') if rerr != nil { return rerr - } else if len(line) > 0 { - s.parse(acc, line) + } + if len(line) > 0 { + err := s.parse(acc, line) + if err != nil { + acc.AddError(err) + } } } } @@ -158,28 +166,35 @@ func flexFlatten(outmap map[string]interface{}, field string, v interface{}, del case string: outmap[field] = v case float64: - outmap[field] = v.(float64) + outmap[field] = t default: return fmt.Errorf("unsupported type %T encountered", t) } return nil } -func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { - // initial parsing - var result map[string]interface{} - err := json.Unmarshal(sjson, &result) - if err != nil { - acc.AddError(err) +func (s *Suricata) parseAlert(acc telegraf.Accumulator, result map[string]interface{}) { + if _, ok := result["alert"].(map[string]interface{}); !ok { + s.Log.Debug("'alert' sub-object does not have required structure") return } - // check for presence of relevant stats - if _, ok := result["stats"]; !ok { - s.Log.Debug("Input does not contain necessary 'stats' sub-object") - return + totalmap := make(map[string]interface{}) + for k, v := range result["alert"].(map[string]interface{}) { + //source and target fields are maps + err := flexFlatten(totalmap, k, v, s.Delimiter) + if err != nil { + s.Log.Debugf("Flattening alert failed: %v", err) + // we skip this subitem as something did not parse correctly + continue + } } + //threads field do not exist in alert output, always global + acc.AddFields("suricata_alert", totalmap, nil) +} + +func (s *Suricata) parseStats(acc telegraf.Accumulator, result map[string]interface{}) { if _, ok := result["stats"].(map[string]interface{}); !ok { s.Log.Debug("The 'stats' sub-object does not have required structure") return @@ -193,9 +208,9 @@ func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { for k, t := range v { outmap := make(map[string]interface{}) if threadStruct, ok := t.(map[string]interface{}); ok { - err = flexFlatten(outmap, "", threadStruct, s.Delimiter) + err := flexFlatten(outmap, "", threadStruct, s.Delimiter) if err != nil { - s.Log.Debug(err) + s.Log.Debugf("Flattening alert failed: %v", err) // we skip this thread as something did not parse correctly continue } @@ -206,10 +221,11 @@ func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { s.Log.Debug("The 'threads' sub-object does not have required structure") } } else { - err = flexFlatten(totalmap, k, v, s.Delimiter) + err := flexFlatten(totalmap, k, v, s.Delimiter) if err != nil { - s.Log.Debug(err.Error()) + s.Log.Debugf("Flattening alert failed: %v", err) // we skip this subitem as something did not parse correctly + continue } } } @@ -224,6 +240,28 @@ func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { } } +func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) error { + // initial parsing + var result map[string]interface{} + err := json.Unmarshal(sjson, &result) + if err != nil { + return err + } + // check for presence of relevant stats or alert + _, ok := result["stats"] + _, ok2 := result["alert"] + if !ok && !ok2 { + s.Log.Debugf("Invalid input without 'stats' or 'alert' object: %v", result) + return fmt.Errorf("input does not contain 'stats' or 'alert' object") + } + if ok { + s.parseStats(acc, result) + } else if ok2 && s.Alerts { + s.parseAlert(acc, result) + } + return nil +} + // Gather measures and submits one full set of telemetry to Telegraf. // Not used here, submission is completely input-driven. func (s *Suricata) Gather(_ telegraf.Accumulator) error { diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index ab03de057c18c..9b620efc3e216 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -29,6 +29,7 @@ func TestSuricataLarge(t *testing.T) { s := Suricata{ Source: tmpfn, Delimiter: ".", + Alerts: true, Log: testutil.Logger{ Name: "inputs.suricata", }, @@ -40,6 +41,46 @@ func TestSuricataLarge(t *testing.T) { data, err := ioutil.ReadFile("testdata/test1.json") require.NoError(t, err) + c, err := net.Dial("unix", tmpfn) + require.NoError(t, err) + _, err = c.Write(data) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + + //test suricata alerts + data2, err := ioutil.ReadFile("testdata/test2.json") + require.NoError(t, err) + _, err = c.Write(data2) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) + + acc.Wait(1) +} + +func TestSuricataAlerts(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + require.NoError(t, err) + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Delimiter: ".", + Alerts: true, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + require.NoError(t, s.Start(&acc)) + defer s.Stop() + + data, err := ioutil.ReadFile("testdata/test3.json") + require.NoError(t, err) + c, err := net.Dial("unix", tmpfn) require.NoError(t, err) _, err = c.Write(data) @@ -49,6 +90,29 @@ func TestSuricataLarge(t *testing.T) { require.NoError(t, c.Close()) acc.Wait(1) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "suricata_alert", + map[string]string{}, + map[string]interface{}{ + "action": "allowed", + "category": "Misc activity", + "gid": float64(1), + "rev": float64(0), + "signature": "Corrupted HTTP body", + "signature_id": float64(6), + "severity": float64(3), + "source.ip": "10.0.0.5", + "target.ip": "179.60.192.3", + "source.port": float64(18715), + "target.port": float64(80), + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } func TestSuricata(t *testing.T) { diff --git a/plugins/inputs/suricata/testdata/test3.json b/plugins/inputs/suricata/testdata/test3.json new file mode 100644 index 0000000000000..3e8649e66a14a --- /dev/null +++ b/plugins/inputs/suricata/testdata/test3.json @@ -0,0 +1 @@ +{"timestamp":"2021-05-30T20:07:13.208777+0200","flow_id":1696236471136137,"in_iface":"s1-suricata","event_type":"alert","src_ip":"10.0.0.5","src_port":18715,"dest_ip":"179.60.192.3","dest_port":80,"proto":"TCP","alert":{"action":"allowed","gid":1,"source":{"ip":"10.0.0.5","port":18715},"target":{"ip":"179.60.192.3","port":80},"signature_id":6,"rev":0,"signature":"Corrupted HTTP body","severity": 3,"category":"Misc activity","severity":3},"flow":{"pkts_toserver":1,"pkts_toclient":0,"bytes_toserver":174,"bytes_toclient":0,"start":"2021-05-30T20:07:13.208777+0200"}} From ecf27ab9560cfa0be55af3cc857d836daf50bcd0 Mon Sep 17 00:00:00 2001 From: Marcus Ilgner Date: Tue, 27 Jul 2021 23:32:00 +0200 Subject: [PATCH 534/761] Fix handling bool in sql input plugin (#9540) --- internal/type_conversions.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/type_conversions.go b/internal/type_conversions.go index ed4ed374a3ffd..e2506a9068de3 100644 --- a/internal/type_conversions.go +++ b/internal/type_conversions.go @@ -191,6 +191,8 @@ func ToBool(value interface{}) (bool, error) { return v > 0, nil case float64: return v > 0, nil + case bool: + return v, nil case nil: return false, nil } From 80829b3b5afbd173adf0692be2144015c0508c91 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 27 Jul 2021 23:39:43 +0200 Subject: [PATCH 535/761] Fix attempt to connect to an empty list of servers. (#9503) --- plugins/inputs/nsq_consumer/nsq_consumer.go | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index 718a2ed3e321c..34360472ab0b9 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -2,6 +2,7 @@ package nsq_consumer import ( "context" + "fmt" "sync" "github.com/influxdata/telegraf" @@ -134,15 +135,28 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { return nil })) + // For backward compatibility + if n.Server != "" { + n.Nsqd = append(n.Nsqd, n.Server) + } + + // Check if we have anything to connect to + if len(n.Nsqlookupd) == 0 && len(n.Nsqd) == 0 { + return fmt.Errorf("either 'nsqd' or 'nsqlookupd' needs to be specified") + } + if len(n.Nsqlookupd) > 0 { err := n.consumer.ConnectToNSQLookupds(n.Nsqlookupd) if err != nil && err != nsq.ErrAlreadyConnected { return err } } - err := n.consumer.ConnectToNSQDs(append(n.Nsqd, n.Server)) - if err != nil && err != nsq.ErrAlreadyConnected { - return err + + if len(n.Nsqd) > 0 { + err := n.consumer.ConnectToNSQDs(n.Nsqd) + if err != nil && err != nsq.ErrAlreadyConnected { + return err + } } n.wg.Add(1) From 5843b27d75aad266fc829ee7a81978bf00f25d8f Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 27 Jul 2021 16:51:56 -0500 Subject: [PATCH 536/761] Update procstat to support cgroup globs & include systemd unit children (Copy of #7890) (#9488) --- plugins/inputs/procstat/README.md | 4 +- plugins/inputs/procstat/procstat.go | 187 ++++++++++++++++------- plugins/inputs/procstat/procstat_test.go | 26 +++- 3 files changed, 155 insertions(+), 62 deletions(-) diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 8d43d86eaf568..9e573be521c84 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -26,8 +26,9 @@ Processes can be selected for monitoring using one of several methods: # pattern = "nginx" ## user as argument for pgrep (ie, pgrep -u ) # user = "nginx" - ## Systemd unit name + ## Systemd unit name, supports globs when include_systemd_children is set to true # systemd_unit = "nginx.service" + # systemd_all = true ## CGroup name or path # cgroup = "systemd/system.slice/nginx.service" @@ -80,6 +81,7 @@ the `win_perf_counters` input plugin as a more mature alternative. - user (when selected) - systemd_unit (when defined) - cgroup (when defined) + - cgroup_full (when cgroup or systemd_unit is used with glob) - win_service (when defined) - fields: - child_major_faults (int) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index b3fa30a56992d..b838df651f636 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io/ioutil" + "os" "os/exec" "path/filepath" "runtime" @@ -24,19 +25,20 @@ var ( type PID int32 type Procstat struct { - PidFinder string `toml:"pid_finder"` - PidFile string `toml:"pid_file"` - Exe string - Pattern string - Prefix string - CmdLineTag bool `toml:"cmdline_tag"` - ProcessName string - User string - SystemdUnit string - CGroup string `toml:"cgroup"` - PidTag bool - WinService string `toml:"win_service"` - Mode string + PidFinder string `toml:"pid_finder"` + PidFile string `toml:"pid_file"` + Exe string + Pattern string + Prefix string + CmdLineTag bool `toml:"cmdline_tag"` + ProcessName string + User string + SystemdUnit string `toml:"systemd_unit"` + IncludeSystemdChildren bool `toml:"include_systemd_children"` + CGroup string `toml:"cgroup"` + PidTag bool + WinService string `toml:"win_service"` + Mode string solarisMode bool @@ -56,9 +58,10 @@ var sampleConfig = ` # pattern = "nginx" ## user as argument for pgrep (ie, pgrep -u ) # user = "nginx" - ## Systemd unit name + ## Systemd unit name, supports globs when include_systemd_children is set to true # systemd_unit = "nginx.service" - ## CGroup name or path + # include_systemd_children = false + ## CGroup name or path, supports globs # cgroup = "systemd/system.slice/nginx.service" ## Windows service name @@ -100,6 +103,12 @@ func (p *Procstat) Description() string { return "Monitor process cpu and memory usage" } +type PidsTags struct { + PIDS []PID + Tags map[string]string + Err error +} + func (p *Procstat) Gather(acc telegraf.Accumulator) error { if p.createPIDFinder == nil { switch p.PidFinder { @@ -116,33 +125,48 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { p.createProcess = defaultProcess } - pids, tags, err := p.findPids() + pidCount := 0 now := time.Now() - - if err != nil { - fields := map[string]interface{}{ - "pid_count": 0, - "running": 0, - "result_code": 1, + newProcs := make(map[PID]Process, len(p.procs)) + pidTags := p.findPids() + for _, pidTag := range pidTags { + pids := pidTag.PIDS + tags := pidTag.Tags + err := pidTag.Err + pidCount += len(pids) + if err != nil { + fields := map[string]interface{}{ + "pid_count": 0, + "running": 0, + "result_code": 1, + } + tags := map[string]string{ + "pid_finder": p.PidFinder, + "result": "lookup_error", + } + acc.AddFields("procstat_lookup", fields, tags, now) + return err } - tags := map[string]string{ - "pid_finder": p.PidFinder, - "result": "lookup_error", + + err = p.updateProcesses(pids, tags, p.procs, newProcs) + if err != nil { + acc.AddError(fmt.Errorf("procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", + p.Exe, p.PidFile, p.Pattern, p.User, err.Error())) } - acc.AddFields("procstat_lookup", fields, tags, now) - return err } - p.procs = p.updateProcesses(pids, tags, p.procs) + p.procs = newProcs + for _, proc := range p.procs { p.addMetric(proc, acc, now) } fields := map[string]interface{}{ - "pid_count": len(pids), + "pid_count": pidCount, "running": len(p.procs), "result_code": 0, } + tags := make(map[string]string) tags["pid_finder"] = p.PidFinder tags["result"] = "success" acc.AddFields("procstat_lookup", fields, tags, now) @@ -183,9 +207,9 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time //If cmd_line tag is true and it is not already set add cmdline as a tag if p.CmdLineTag { if _, ok := proc.Tags()["cmdline"]; !ok { - Cmdline, err := proc.Cmdline() + cmdline, err := proc.Cmdline() if err == nil { - proc.Tags()["cmdline"] = Cmdline + proc.Tags()["cmdline"] = cmdline } } } @@ -313,9 +337,7 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time } // Update monitored Processes -func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo map[PID]Process) map[PID]Process { - procs := make(map[PID]Process, len(prevInfo)) - +func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo map[PID]Process, procs map[PID]Process) error { for _, pid := range pids { info, ok := prevInfo[pid] if ok { @@ -350,8 +372,7 @@ func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo } } } - - return procs + return nil } // Create and return PIDGatherer lazily @@ -367,16 +388,34 @@ func (p *Procstat) getPIDFinder() (PIDFinder, error) { } // Get matching PIDs and their initial tags -func (p *Procstat) findPids() ([]PID, map[string]string, error) { +func (p *Procstat) findPids() []PidsTags { + var pidTags []PidsTags + + if p.SystemdUnit != "" { + groups := p.systemdUnitPIDs() + return groups + } else if p.CGroup != "" { + groups := p.cgroupPIDs() + return groups + } else { + f, err := p.getPIDFinder() + if err != nil { + pidTags = append(pidTags, PidsTags{nil, nil, err}) + return pidTags + } + pids, tags, err := p.SimpleFindPids(f) + pidTags = append(pidTags, PidsTags{pids, tags, err}) + } + + return pidTags +} + +// Get matching PIDs and their initial tags +func (p *Procstat) SimpleFindPids(f PIDFinder) ([]PID, map[string]string, error) { var pids []PID tags := make(map[string]string) var err error - f, err := p.getPIDFinder() - if err != nil { - return nil, nil, err - } - if p.PidFile != "" { pids, err = f.PidFile(p.PidFile) tags = map[string]string{"pidfile": p.PidFile} @@ -389,12 +428,6 @@ func (p *Procstat) findPids() ([]PID, map[string]string, error) { } else if p.User != "" { pids, err = f.UID(p.User) tags = map[string]string{"user": p.User} - } else if p.SystemdUnit != "" { - pids, err = p.systemdUnitPIDs() - tags = map[string]string{"systemd_unit": p.SystemdUnit} - } else if p.CGroup != "" { - pids, err = p.cgroupPIDs() - tags = map[string]string{"cgroup": p.CGroup} } else if p.WinService != "" { pids, err = p.winServicePIDs() tags = map[string]string{"win_service": p.WinService} @@ -408,8 +441,23 @@ func (p *Procstat) findPids() ([]PID, map[string]string, error) { // execCommand is so tests can mock out exec.Command usage. var execCommand = exec.Command -func (p *Procstat) systemdUnitPIDs() ([]PID, error) { +func (p *Procstat) systemdUnitPIDs() []PidsTags { + if p.IncludeSystemdChildren { + p.CGroup = fmt.Sprintf("systemd/system.slice/%s", p.SystemdUnit) + return p.cgroupPIDs() + } + + var pidTags []PidsTags + + pids, err := p.simpleSystemdUnitPIDs() + tags := map[string]string{"systemd_unit": p.SystemdUnit} + pidTags = append(pidTags, PidsTags{pids, tags, err}) + return pidTags +} + +func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) { var pids []PID + cmd := execCommand("systemctl", "show", p.SystemdUnit) out, err := cmd.Output() if err != nil { @@ -426,23 +474,48 @@ func (p *Procstat) systemdUnitPIDs() ([]PID, error) { if len(kv[1]) == 0 || bytes.Equal(kv[1], []byte("0")) { return nil, nil } - pid, err := strconv.ParseInt(string(kv[1]), 10, 32) + pid, err := strconv.Atoi(string(kv[1])) if err != nil { return nil, fmt.Errorf("invalid pid '%s'", kv[1]) } pids = append(pids, PID(pid)) } + return pids, nil } -func (p *Procstat) cgroupPIDs() ([]PID, error) { - var pids []PID +func (p *Procstat) cgroupPIDs() []PidsTags { + var pidTags []PidsTags procsPath := p.CGroup if procsPath[0] != '/' { procsPath = "/sys/fs/cgroup/" + procsPath } - procsPath = filepath.Join(procsPath, "cgroup.procs") + items, err := filepath.Glob(procsPath) + if err != nil { + pidTags = append(pidTags, PidsTags{nil, nil, fmt.Errorf("glob failed '%s'", err)}) + return pidTags + } + for _, item := range items { + pids, err := p.singleCgroupPIDs(item) + tags := map[string]string{"cgroup": p.CGroup, "cgroup_full": item} + pidTags = append(pidTags, PidsTags{pids, tags, err}) + } + + return pidTags +} + +func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { + var pids []PID + + ok, err := isDir(path) + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("not a directory %s", path) + } + procsPath := filepath.Join(path, "cgroup.procs") out, err := ioutil.ReadFile(procsPath) if err != nil { return nil, err @@ -461,6 +534,14 @@ func (p *Procstat) cgroupPIDs() ([]PID, error) { return pids, nil } +func isDir(path string) (bool, error) { + result, err := os.Stat(path) + if err != nil { + return false, err + } + return result.IsDir(), nil +} + func (p *Procstat) winServicePIDs() ([]PID, error) { var pids []PID diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index e9289493b2c58..2d8687e75013b 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -369,10 +369,15 @@ func TestGather_systemdUnitPIDs(t *testing.T) { createPIDFinder: pidFinder([]PID{}), SystemdUnit: "TestGather_systemdUnitPIDs", } - pids, tags, err := p.findPids() - require.NoError(t, err) - assert.Equal(t, []PID{11408}, pids) - assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) + pidsTags := p.findPids() + for _, pidsTag := range pidsTags { + pids := pidsTag.PIDS + tags := pidsTag.Tags + err := pidsTag.Err + require.NoError(t, err) + assert.Equal(t, []PID{11408}, pids) + assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) + } } func TestGather_cgroupPIDs(t *testing.T) { @@ -390,10 +395,15 @@ func TestGather_cgroupPIDs(t *testing.T) { createPIDFinder: pidFinder([]PID{}), CGroup: td, } - pids, tags, err := p.findPids() - require.NoError(t, err) - assert.Equal(t, []PID{1234, 5678}, pids) - assert.Equal(t, td, tags["cgroup"]) + pidsTags := p.findPids() + for _, pidsTag := range pidsTags { + pids := pidsTag.PIDS + tags := pidsTag.Tags + err := pidsTag.Err + require.NoError(t, err) + assert.Equal(t, []PID{1234, 5678}, pids) + assert.Equal(t, td, tags["cgroup"]) + } } func TestProcstatLookupMetric(t *testing.T) { From fdec5f1f3147cac65064852d14c326ae61cf4c99 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Wed, 28 Jul 2021 13:50:18 -0700 Subject: [PATCH 537/761] Prevent segfault in persistent volume claims (#9549) --- .../kube_inventory/persistentvolumeclaim.go | 8 ++-- .../persistentvolumeclaim_test.go | 46 +++++++++++++++++++ 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim.go b/plugins/inputs/kube_inventory/persistentvolumeclaim.go index 10a6abbf72e39..a5d30d6dca6f4 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim.go +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim.go @@ -39,9 +39,11 @@ func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc corev1.Persistent "phase": string(pvc.Status.Phase), "storageclass": *pvc.Spec.StorageClassName, } - for key, val := range pvc.Spec.Selector.MatchLabels { - if ki.selectorFilter.Match(key) { - tags["selector_"+key] = val + if pvc.Spec.Selector != nil { + for key, val := range pvc.Spec.Selector.MatchLabels { + if ki.selectorFilter.Match(key) { + tags["selector_"+key] = val + } } } diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go index 796b055f90d9c..b4e468acd71e7 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go @@ -88,6 +88,52 @@ func TestPersistentVolumeClaim(t *testing.T) { }, hasError: false, }, + { + name: "no label selectors", + hasError: false, + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{ + Items: []corev1.PersistentVolumeClaim{ + { + Status: corev1.PersistentVolumeClaimStatus{ + Phase: "bound", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8", + StorageClassName: toStrPtr("ebs-1"), + Selector: nil, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "pc1", + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: metav1.Time{Time: now}, + }, + }, + }, + }, + }, + }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_persistentvolumeclaim", + map[string]string{ + "pvc_name": "pc1", + "namespace": "ns1", + "storageclass": "ebs-1", + "phase": "bound", + }, + map[string]interface{}{ + "phase_type": 0, + }, + time.Unix(0, 0), + ), + }, + }, } for _, v := range tests { From 8d2b1e8dc1899685657ccb99d72327dd29d8978e Mon Sep 17 00:00:00 2001 From: Mya Date: Wed, 28 Jul 2021 14:55:23 -0600 Subject: [PATCH 538/761] Fix metrics reported as written but not actually written (#9526) --- plugins/outputs/influxdb/http_test.go | 17 +++++++++++++++++ plugins/outputs/influxdb/influxdb.go | 24 ++++++++++++++---------- 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 39ac2b108da91..e19d8d2e580c9 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -1077,6 +1077,19 @@ func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) { handlers := &MockHandlerChain{ handlers: []http.HandlerFunc{ + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/query": + if r.FormValue("q") != `CREATE DATABASE "telegraf"` { + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusForbidden) + w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`)) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/write": @@ -1133,8 +1146,12 @@ func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) { err = output.Connect() require.NoError(t, err) + + // this write fails, but we're expecting it to drop the metrics and not retry, so no error. err = output.Write(metrics) require.NoError(t, err) + + // expects write to succeed err = output.Write(metrics) require.NoError(t, err) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 36b38a9c906c5..1ea39a5e56505 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -1,3 +1,4 @@ +//nolint package influxdb import ( @@ -224,17 +225,20 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { switch apiError := err.(type) { case *DatabaseNotFoundError: - if !i.SkipDatabaseCreation { - allErrorsAreDatabaseNotFoundErrors = false - err := client.CreateDatabase(ctx, apiError.Database) - if err != nil { - i.Log.Errorf("When writing to [%s]: database %q not found and failed to recreate", - client.URL(), apiError.Database) - } else { - // try another client, if all clients fail with this error, do not return error - continue - } + if i.SkipDatabaseCreation { + continue } + // retry control + // error so the write is retried + err := client.CreateDatabase(ctx, apiError.Database) + if err != nil { + i.Log.Errorf("When writing to [%s]: database %q not found and failed to recreate", + client.URL(), apiError.Database) + } else { + return errors.New("database created; retry write") + } + default: + allErrorsAreDatabaseNotFoundErrors = false } } From 126825d94463ea678fdce345eb38ea4e3354ad24 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 28 Jul 2021 17:22:37 -0500 Subject: [PATCH 539/761] Fix procstat plugin README to match sample config (#9553) --- plugins/inputs/procstat/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 9e573be521c84..f0b9858601ade 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -28,7 +28,7 @@ Processes can be selected for monitoring using one of several methods: # user = "nginx" ## Systemd unit name, supports globs when include_systemd_children is set to true # systemd_unit = "nginx.service" - # systemd_all = true + # include_systemd_children = false ## CGroup name or path # cgroup = "systemd/system.slice/nginx.service" From ecafff3d4a5c9ba8917947919f264d06429be205 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 28 Jul 2021 17:10:26 -0600 Subject: [PATCH 540/761] Update changelog (cherry picked from commit 0e5741eda7c819b3089aefd2fda6662ed499a837) --- CHANGELOG.md | 33 +++++++++++++++++++++++++++++++++ etc/telegraf.conf | 7 ++++++- 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6dcf1617400e1..f7ecd8d59ed24 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,36 @@ +## v1.19.2 [2021-07-28] + +#### Release Notes + + - [#9542](https://github.com/influxdata/telegraf/pull/9542) Update Go to v1.16.6 + +#### Bugfixes + + - [#9363](https://github.com/influxdata/telegraf/pull/9363) `outputs.dynatrace` Update dynatrace output to allow optional default dimensions + - [#9526](https://github.com/influxdata/telegraf/pull/9526) `outputs.influxdb` Fix metrics reported as written but not actually written + - [#9549](https://github.com/influxdata/telegraf/pull/9549) `inputs.kube_inventory` Prevent segfault in persistent volume claims + - [#9503](https://github.com/influxdata/telegraf/pull/9503) `inputs.nsq_consumer` Fix connection error when not using server setting + - [#9540](https://github.com/influxdata/telegraf/pull/9540) `inputs.sql` Fix handling bool column + - [#9387](https://github.com/influxdata/telegraf/pull/9387) Linter fixes for plugins/inputs/[fg]* + - [#9438](https://github.com/influxdata/telegraf/pull/9438) `inputs.kubernetes` Attach the pod labels to kubernetes_pod_volume and kubernetes_pod_network metrics + - [#9519](https://github.com/influxdata/telegraf/pull/9519) `processors.ifname` Fix SNMP empty metric name + - [#8587](https://github.com/influxdata/telegraf/pull/8587) `inputs.sqlserver` Add tempdb troubleshooting stats and missing V2 query metrics + - [#9323](https://github.com/influxdata/telegraf/pull/9323) `inputs.x509_cert` Prevent x509_cert from hanging on UDP connection + - [#9504](https://github.com/influxdata/telegraf/pull/9504) `parsers.json_v2` Simplify how nesting is handled + - [#9493](https://github.com/influxdata/telegraf/pull/9493) `inputs.mongodb` Switch to official mongo-go-driver module to fix SSL auth failure + - [#9491](https://github.com/influxdata/telegraf/pull/9491) `outputs.dynatrace` Fix panic caused by uninitialized loggedMetrics map + - [#9497](https://github.com/influxdata/telegraf/pull/9497) `inputs.prometheus` Fix prometheus cadvisor authentication + - [#9520](https://github.com/influxdata/telegraf/pull/9520) `parsers.json_v2` Add support for large uint64 and int64 numbers + - [#9447](https://github.com/influxdata/telegraf/pull/9447) `inputs.statsd` Fix regression that didn't allow integer percentiles + - [#9466](https://github.com/influxdata/telegraf/pull/9466) `inputs.sqlserver` Provide detailed error message in telegraf log + - [#9399](https://github.com/influxdata/telegraf/pull/9399) Update dynatrace-metric-utils-go module to v0.2.0 + - [#8108](https://github.com/influxdata/telegraf/pull/8108) `inputs.cgroup` Allow multiple keys when parsing cgroups + - [#9479](https://github.com/influxdata/telegraf/pull/9479) `parsers.json_v2` Fix json_v2 parser to handle nested objects in arrays properly + +#### Features + + - [#9485](https://github.com/influxdata/telegraf/pull/9485) Add option to automatically reload settings when config file is modified + ## v1.19.1 [2021-07-07] #### Bugfixes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 6d11fa692706d..6c3c0e98b36bb 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -562,6 +562,10 @@ # # ## If you want to convert values represented as gauges to counters, add the metric names here # additional_counters = [ ] +# +# ## Optional dimensions to be added to every metric +# # [outputs.dynatrace.default_dimensions] +# # default_key = "default value" # # Configuration for Elasticsearch to send metrics to. @@ -6317,7 +6321,8 @@ # [[inputs.x509_cert]] # ## List certificate sources # ## Prefix your entry with 'file://' if you intend to use relative paths -# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443", +# sources = ["tcp://example.org:443", "https://influxdata.com:443", +# "udp://127.0.0.1:4433", "/etc/ssl/certs/ssl-cert-snakeoil.pem", # "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] # # ## Timeout for SSL connection From 9fcd5a5b54636a1fcadda7e476741aa0a6a2c5e8 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Fri, 30 Jul 2021 15:14:23 -0400 Subject: [PATCH 541/761] fix test so it hits a fake service (#9564) --- plugins/inputs/phpfpm/phpfpm_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index c3a3f29f570f5..50d8d604efb5b 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -274,7 +274,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { //When not passing server config, we default to localhost //We just want to make sure we did request stat from localhost func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { - r := &phpfpm{} + r := &phpfpm{Urls: []string{"http://bad.localhost:62001/status"}} require.NoError(t, r.Init()) @@ -282,7 +282,7 @@ func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Contains(t, err.Error(), "127.0.0.1/status") + assert.Contains(t, err.Error(), "/status") } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) { From 3633853235151ece94fc52ae463409b55812b10b Mon Sep 17 00:00:00 2001 From: R290 <46033588+R290@users.noreply.github.com> Date: Fri, 30 Jul 2021 22:26:47 +0200 Subject: [PATCH 542/761] Do not skip good quality nodes after a bad quality node is encountered (#9550) --- plugins/inputs/opcua/opcua_client.go | 28 +++++++++++++---------- plugins/inputs/opcua/opcua_client_test.go | 10 ++++++-- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index ac7becbe09e4d..7654887387ef2 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -31,6 +31,7 @@ type OpcUA struct { RequestTimeout config.Duration `toml:"request_timeout"` RootNodes []NodeSettings `toml:"nodes"` Groups []GroupSettings `toml:"group"` + Log telegraf.Logger `toml:"-"` nodes []Node nodeData []OPCData @@ -470,15 +471,16 @@ func (o *OpcUA) getData() error { } o.ReadSuccess.Incr(1) for i, d := range resp.Results { + o.nodeData[i].Quality = d.Status if d.Status != ua.StatusOK { - return fmt.Errorf("status not OK: %v", d.Status) + o.Log.Errorf("status not OK for node %v: %v", o.nodes[i].tag.FieldName, d.Status) + continue } o.nodeData[i].TagName = o.nodes[i].tag.FieldName if d.Value != nil { o.nodeData[i].Value = d.Value.Value() o.nodeData[i].DataType = d.Value.Type() } - o.nodeData[i].Quality = d.Status o.nodeData[i].TimeStamp = d.ServerTimestamp.String() o.nodeData[i].Time = d.SourceTimestamp.String() } @@ -532,17 +534,19 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { } for i, n := range o.nodes { - fields := make(map[string]interface{}) - tags := map[string]string{ - "id": n.idStr, - } - for k, v := range n.metricTags { - tags[k] = v - } + if o.nodeData[i].Quality == ua.StatusOK { + fields := make(map[string]interface{}) + tags := map[string]string{ + "id": n.idStr, + } + for k, v := range n.metricTags { + tags[k] = v + } - fields[o.nodeData[i].TagName] = o.nodeData[i].Value - fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.nodeData[i].Quality)) - acc.AddFields(n.metricName, fields, tags) + fields[o.nodeData[i].TagName] = o.nodeData[i].Value + fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.nodeData[i].Quality)) + acc.AddFields(n.metricName, fields, tags) + } } return nil } diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index ffa8521dd05a8..b509d2eaf67a3 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -16,7 +17,7 @@ type OPCTags struct { Namespace string IdentifierType string Identifier string - Want string + Want interface{} } func TestClient1Integration(t *testing.T) { @@ -28,6 +29,8 @@ func TestClient1Integration(t *testing.T) { {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, {"ProductUri", "0", "i", "2262", "http://open62541.org"}, {"ManufacturerName", "0", "i", "2263", "open62541"}, + {"badnode", "1", "i", "1337", nil}, + {"goodnode", "1", "s", "the.answer", "42"}, } var o OpcUA @@ -40,6 +43,8 @@ func TestClient1Integration(t *testing.T) { o.RequestTimeout = config.Duration(1 * time.Second) o.SecurityPolicy = "None" o.SecurityMode = "None" + o.Log = testutil.Logger{} + for _, tags := range testopctags { o.RootNodes = append(o.RootNodes, MapOPCTag(tags)) } @@ -60,7 +65,7 @@ func TestClient1Integration(t *testing.T) { if compare != testopctags[i].Want { t.Errorf("Tag %s: Values %v for type %s does not match record", o.nodes[i].tag.FieldName, value.Interface(), types) } - } else { + } else if testopctags[i].Want != nil { t.Errorf("Tag: %s has value: %v", o.nodes[i].tag.FieldName, v.Value) } } @@ -250,6 +255,7 @@ func TestValidateOPCTags(t *testing.T) { t.Run(tt.name, func(t *testing.T) { o := OpcUA{ nodes: tt.nodes, + Log: testutil.Logger{}, } require.Equal(t, tt.err, o.validateOPCTags()) }) From c9bbb3241c07b5e930e895de86948da372823a83 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Fri, 30 Jul 2021 15:49:49 -0500 Subject: [PATCH 543/761] Update vmware/govmomi to v0.26.0 (#9552) --- go.mod | 2 +- go.sum | 7 +++++-- plugins/inputs/vsphere/README.md | 5 ++++- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index f0f36e2df8717..18b57dfef4930 100644 --- a/go.mod +++ b/go.mod @@ -130,7 +130,7 @@ require ( github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 github.com/vjeantet/grok v1.0.1 - github.com/vmware/govmomi v0.19.0 + github.com/vmware/govmomi v0.26.0 github.com/wavefronthq/wavefront-sdk-go v0.9.7 github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect diff --git a/go.sum b/go.sum index 20b7759feef97..26a15a78e006f 100644 --- a/go.sum +++ b/go.sum @@ -166,6 +166,7 @@ github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrU github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 h1:5sXbqlSomvdjlRbWyNqkPsJ3Fg+tQZCbgeX1VGljbQY= github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/a8m/tree v0.0.0-20210115125333-10a5fd5b637d/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg= github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE= github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= @@ -432,6 +433,7 @@ github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhr github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= @@ -1500,8 +1502,9 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7Zo github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= -github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY= -github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/vmware/govmomi v0.26.0 h1:JMZR5c7MHH3nCEAVYS3WyRIA35W3+b3tLwAqxVzq1Rw= +github.com/vmware/govmomi v0.26.0/go.mod h1:daTuJEcQosNMXYJOeku0qdBJP9SOLLWB3Mqz8THtv6o= +github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/wavefronthq/wavefront-sdk-go v0.9.7 h1:SrtABcXXeKCW5SerQYsnCzHo15GeggjZmL+DjtTy6CI= github.com/wavefronthq/wavefront-sdk-go v0.9.7/go.mod h1:JTGsu+KKgxx+GitC65VVdftN2iep1nVpQi/8EGR6v4Y= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index d43f559b16eb8..7d73ea7e35855 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -8,7 +8,10 @@ The VMware vSphere plugin uses the vSphere API to gather metrics from multiple v * Datastores ## Supported versions of vSphere -This plugin supports vSphere version 5.5 through 6.7. + +This plugin supports vSphere version 6.5, 6.7 and 7.0. It may work with versions 5.1, 5.5 and 6.0, but neither are officially supported. + +Compatibility information was found [here](https://github.com/vmware/govmomi/tree/v0.26.0#compatibility) ## Configuration From 83984c57fdc7add0c1812b2a2e219fbaa3c78ecc Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Fri, 30 Jul 2021 16:54:36 -0400 Subject: [PATCH 544/761] Upgrade hashicorp/consul/api to 1.9.1 (#9565) --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 18b57dfef4930..1e9f93bf7ebf8 100644 --- a/go.mod +++ b/go.mod @@ -74,7 +74,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 - github.com/hashicorp/consul/api v1.8.1 + github.com/hashicorp/consul/api v1.9.1 github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/influxdata/go-syslog/v3 v3.0.0 github.com/influxdata/influxdb-observability/common v0.0.0-20210429174543-86ae73cafd31 diff --git a/go.sum b/go.sum index 26a15a78e006f..ab296be021048 100644 --- a/go.sum +++ b/go.sum @@ -808,12 +808,12 @@ github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvG github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.6.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= -github.com/hashicorp/consul/api v1.8.1 h1:BOEQaMWoGMhmQ29fC26bi0qb7/rId9JzZP2V0Xmx7m8= -github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= +github.com/hashicorp/consul/api v1.9.1 h1:SngrdG2L62qqLsUz85qcPhFZ78rPf8tcD5qjMgs6MME= +github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= -github.com/hashicorp/consul/sdk v0.7.0 h1:H6R9d008jDcHPQPAqPNuydAshJ4v5/8URdFnUvK/+sc= -github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= From 1872037eb759d5ca82506592e53bd0d2c3df28d8 Mon Sep 17 00:00:00 2001 From: Anatoly Ivanov Date: Mon, 2 Aug 2021 16:32:32 -0700 Subject: [PATCH 545/761] docs: Adding links to net_irtt and dht_sensor external plugins (#9569) --- EXTERNAL_PLUGINS.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index 225497e84ef53..fc71044d6172d 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -20,6 +20,8 @@ Pull requests welcome. - [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - Gather statistics from 389ds and from LDAP trees. - [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - Gather information from your X509 CRL files - [s7comm](https://github.com/nicolasme/s7comm) - Gather information from Siemens PLC +- [net_irtt](https://github.com/iAnatoly/telegraf-input-net_irtt) - Gather information from IRTT network test +- [dht_sensor](https://github.com/iAnatoly/telegraf-input-dht_sensor) - Gather temperature and humidity from DHTXX sensors ## Outputs - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. From 4928d5b30e3c6bef4007bab11fdafb919fc6e5c1 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 3 Aug 2021 16:03:33 -0500 Subject: [PATCH 546/761] docs: Add logo (#9574) --- README.md | 7 ++++++- TelegrafTiger.png | Bin 0 -> 7748 bytes 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 TelegrafTiger.png diff --git a/README.md b/README.md index d0d67cb1932dd..ebd3b9a663c95 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,9 @@ -# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) [![Total alerts](https://img.shields.io/lgtm/alerts/g/influxdata/telegraf.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/influxdata/telegraf/alerts/) + +# Telegraf + +![tiger](TelegrafTiger.png "tiger") + +[![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) [![Total alerts](https://img.shields.io/lgtm/alerts/g/influxdata/telegraf.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/influxdata/telegraf/alerts/) [![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack) Telegraf is an agent for collecting, processing, aggregating, and writing metrics. diff --git a/TelegrafTiger.png b/TelegrafTiger.png new file mode 100644 index 0000000000000000000000000000000000000000..f6765a8d77b42c8e246f64e7883d28491e2bcf10 GIT binary patch literal 7748 zcmai3MN}J((?p5}ha$zHc#0M&T3m}ug1bY3;>EQ<(L%7|#VrslxVyVkT!R+3?|1(n zzW4Uc?#y8?bDG^Kbyay>?048mNJzMf3No5VNXWqd<`GQP|8P6Sc<8_O#zyjsBob0h zJmAR;?LUtEO;cV9scM|+@IRuguA(h_VF>!)l6O)0U-AD{YMho>|65DpuBoc^LKeG= zfr5v0b~~exT|K-;%iZB5?8l;Q>N$|fg@i=yrYIw+?Y+Eec$o30gh=+ig8J&hZQ%6C z7J7)LKZjFs`U{VVRfau*>sBxNLoerY%#n496ejST+dVIEO;qh_RuJp{34<*9iRMr? zI>5}8Sx-MjsQ9%4niQP3VN`Ljxj~}KAoOsZ^5vWhJH|Ys9a{UE;Vg^g@Dg1?z&;SD z2J*O}2GesMbT9*X&$+Mwg)<$@6-81e27Q4_^#q3)@IAJ(OyLVrVf70!FuiyVPxkxk z#hn>Z91zY}rXNx_$HGn$JE?hEMh49xzaL}T~Rlxrx>tH~CYTn*V^{z{~K%zIJ? zjIdjMqnmnAUd|`TpgpWu$F^O?Z33*2H3#$T)cfLM=C+Wq8@xORp6BpiGJA-VEGdSj z__is7!u?oR_wRw%9mx+J681Vc{%d;l4{L%W?4u@SrDvm$wQ!UC0&N`cXM8>6hsg00+!c{b|rHH?zwXq=T73x+HLQ5l4hY2+jl&!X*sy zKb-T`M5Jtv@BFQQ4bf=?3_isoiXpg76f3Ki&LR)Zc6Jhye&hF}G=!dj& zaJ)1}0YCNjUgty@3wx(RKdQCM->yC`CdmQ+M7^CRjl~aR#ByReYI6fps~vxf5DiR+ zl1wK5`+3?x7@aFN2{-Jm2U8x#yj^G^kRpy0|4n3oJwL6-gTV%5b^o}ZUyfKUk9kk^ z5fJ+qTx=^u5oynow>kQP>#v@fHusen60N&RM3e5+_l`3da+5i=S=s}r;+vWQaA>|+ zYexHR^clcKJTH%2dK_y$xt33;b=doz`GYGM)Ab;JTF>0=TN~fW{GIP1K>{^ zjECHyzee}zvLJW$QmYjdpnLoyTducO-ylblfICYHlfNEG8>aGOPaAKprOlbwd$*HB zFGoI&1aVezVx@1-MrOz<*^$?h?(t8h(sLDsHNyuk8!73{gXaumAy^24oW<*~vM5>b z3~}Db55Ks!!94t9X;yFA^&(IVRNJ=Zqztv$_J^MnU9$)@&0Uzj;Yw+X>h^s@vWP0B zccC@Ks}1&{Dxa?C+)#Sy>p@A9{K#VI*1Lj zQ>c9#+wm}t@9X#I&BLcI+4+(YXj~4~NXN=CErDO}MF zjgG`(qgd$fA`CA>*@txE`JXocQVErT=B1u;Yc0F)g|k-oQ|eA0ejG9QY3dFogbUm7 z@x=f83C8on@9X~D-=AIiuTK!*4^3m6)CfWDecwRilpU;Z=ur*BR#UEii{1Ac;LalE z%wadvR~LAa|7a647LP-hE(2EN?)-GGXnq{bPud7e+|B*QWvdn5E$Owv5xWAUHnHKz z8}}PGJsdHz@-DIFp1P~WS3Nj4_fdr3PQ{^@7SN~D7K54nhu zHCfzbe-h(~B$Oo)mRu%=o2A*kZVlYu*Dl!}M8{*t{QPbTBA00675%v6M zAoqT7#QlA`5csK>}n1oyQPc4Ov;-d1jf*;~F^dymUkUAH76OvWE zG1chi@%w+G?`#)zJLrVlydZ=p8ikbf`*EzgOE({p&0;0eZAk}HH7@0>756TR4?Ujc zDeZW&cs^d0#{Tm0%O?@2p|vOdt?$$pyB=jz#?dxJKz_%SF3-}IUD{mC)d2wG)twW0 zU9IBgtH1}S^CwCv%Fm0Rl7jlJJ%l2{*#4%qWnAao;s06=JOO0Qmzil36NJ52lpv{U zttBj#cR%7Fm?z?TZd!y-2Gw_1;#yqW^qre9;F+&Tsq+aKB`$->cQ(iK{`{i7=FB3! zlfU(#A95fSpz~`1FXnlto8k>^7CG z=-6>B)y9kJ$VEPsP^epqp%w9`t6XAtYKoKRU!H#aM^iFEA!_&qLXssjdELjP4iAfl zh?N-k*r^)tBdbB4vcDb86=y`+wxLmaJwu1k8NS=VbjO&oQc+D;?r|(M0pTE_`?7ZCQFDz;Y_r|MTrTn^EIKGn!F?tIP~1 zUZ1${cLBd3Srg4sYZ|QJ!Dw!}4`ZMqW7-R9hhT@yw3ry@Ag2RPX7&di{fuY24`He^ zYb_t56F%Ier!e8m#ESgsvR$_CHl8Msx>gTpqPEFuvO{fB$|dVg|>Ui}DpS3-Yn+?XAF}57UH2id0h1gCl$NEUg-T|AkN>s{YSRSJfd*$NY!gWpK_j2 zGHucBjhKfq@^wz@sPJ*F>`C3+N%P2P6r(CuwZARz%u+#l0*248-J)I`BELpy30;Ga zW1z7_Fr~E%r`OWn*JV^e@&XPlotlzq`eMG_8dAez!4vi!$@S53VLIgRIGP^%EB2wM zy-_dh8$*$Q*#Ml5rds;5oC2!MFE-oQuZ>VDkych+C!>$CuL_Kd`0Vz@VR79K^w~Ws zOQjUfb}_llBM!km_8|Uwgo@9p%+~?D7^uW**e`Ga!k|OhQEcZuKeBP{%e`x%@!?1fI~-0OhGEaNXV#)FR#j60+6oD)3D#)?3S}v299}uT7lmw z7SuOqR>D&0YbSqmS=Rn5*Z7;A62#_4nK3Xa*FJ@~*sUNt;d}S7{tEmnGN>@shKYoGa}8xhxrkgW8+@A?qCP1>7@krDN2LFt*AiuP)uaDk*P~&_z7d;E6G$~fd1YP7Q;%4R8dUe>r(PF z{GjacB%ZDe4=x7rmoBPu&6>!rpD}t5tIqhT)d)5`Ect`B(6FNa6?iS!bMkGuh#3nQ z`Plh1zkC?+L&s4fnh4M4!kyYO@vF$hnU9?7B|_O=rGBKJ<*QxCWjbC{RtJYuE3x=` z=UG+9kwKY{h@4(ztK>-$CvtCVvB89i)I@RD6w^Er64R{`iPQ)O=-;lj9NBzcn=73C zPRW%TC;c6h{-FgarY*uNfr7RjlU=Vwr=D8GjL2=FZkD`4tN-_=Zmd_m^mo42lk75m z$Z|!O>GOyBPg!UJ<(@&ZZ8}-PFc*IK5yctHY$`;+7+Vb*Ao6WBC1hk-A2)0SV*H*2 zsJYxi^UeBGh(!W)bj_4a0KfwWX`g$S(DIdcm#Y3si^S;P*IJRw^o>+$!Z#9v)$MT? z+QX>s*&sURk{Q^M8jJIp!m z{jn*mu;;Aj9-aE?C1A3yz5IL7dU~PnA|(iUG$h?*qvApO z7+j&WKFSX_M{oAQa#6KSA^?nBgZL@$7HgL5; zU5?fpD zo67ADCH7k1&X0wh2Nc7;6_$+<7{+329iL zq9VaK+XHpaW(`S5W-!Y&JXOS{k@g(Yh1HJ<=XWsujf?7xT`%%gRLrd5M}yBVv?vBL zHo7;>Ha0QydMEK>O;>*9?A7fSOP7={e-SLkEE0Cok!lE@R}MOK^H#oPGd zBR5C2)c{3+qfe@+9Dvhfeh#}y1jbvVktavADv%#^_cC(K3&tsx*mxVJE0#KCx{3Mg z2jjsggZ5sFk8h;T4<1yl>hx9Gi7)g%NP0l&iag>&JseV6Xm}?>e~aiU_Kzz8AGxhA zhF^BJA>$QUaujR>w{+~iHzW)rgGp_-S>3Lj=@cHQ=aITC)cWlB;NoRWGgp`;QYQhJr2a#)dKfsOf&`5+jAhM9#(``xRXCzSAG2G+kV+zxc2nk7ns6R-Yb^N z9_|OSQzTqb4|mKJjUqUfIT6600xa$;YZ^M z0tv->OgCGknEvkBwO&IxCE0s2jgWJj@l|T-`8dwm-dP}_0D;Yij@WZXSZYve#-f&x zFo*hVqZ4Bb)AkLXJSsptA0I4#R;*=Asz&kS-e)~KI-YKW$dCWLgnueeHPlV)KPD3r z>C;(a0E&+Vt0*0Hi9ojhQfe*orwhdB*7FJ#CynyPWPj9MjKz?Z^v!9k8OC9e`T?vH z)w-nc;x!c{twU`!?rF(ahCgX9+ZO05wJnWZlTU(*;g8UMgGJ!1t8vzmv@yE%uD2Z# z@oPv48mER5?%Y`EWxm2UkH<;t3+j%Ntin659tz{7%k-e=R%z+3h6<&>HK<%PsMv z#taW;9X@PT%1b6;ke7pQ-?2j0Us)d3hXcY|O6o=#ZQ0Z@tSWDC zzw@Py0x*~K;5va(85PD{~NJ?v;xQ;O9{uF}~t;!hmOS#)wD5k%q>X&sc) z2_G9?_})~U&E>74RPg2rrG#c`ACHz5q@l%I404)6nB1RBJsM?`O9GlT1Cjd~QvzL$ zk=#Ey!T@Qq6>VUN+EW1xeXDtM1uE6OmXNNa?b}Hg>&@=5YS3*TY1zs3plQ+7kLB&i z^^T$zBA`^+^@#1;8w*Dp1w}1d&{P+Ku27Q*Ch)rXtkc5Z%IAP%h}Cpfsi>}+6F$rR zs(=Do0QUGdi9ILn+An*!g%RM6O66stZOqB=#2Dg|ejR%Xi6Fl(GxRAo64R+dKv}CG zo@X|ngu5BxG=$xzgrgl^Nn_3Cf}+2VX~sLObZWV~FUG4+o~n$CdGCjRRT*4c!cxKC zhMk2h0~dk|fu0NxN+<{+lzp$a`x3VuDVQ1U6Q$GdoM8m7nw`<(5wgK(M1nJ{)zu!- z8n6@wi$YsQniW@hofTQ(Lg^shdkbjpb9do3#$1&U!|&2CkPoLkoP%AHeIjLTqtG)6BEyo;ahmt2;gp5JHxYY$;)Tjuo_L-dwN0DhkgLmB)#Xu8gkr*_FQ^7#)U3> zU%+rt{R0(%gD5PR7ff|N>jO(zQ^?S34H&G?MgDeO&HT%cpt36o=ZP|*O*E~99vZLC z24pNa`>WYt`FrGC{TF;E6zZYXtX^nF<`_|pt*)k$j7eKZ*#nN*fbJlEoPp+%F^#x= zp%Uul3w`S@nt!1~kcIl=h&cZX^GF3Kovu`%XxwckSr|9xph9Nomw1-@uU$#m(mwnk z7nkqP9it($>nc^~4)EtrpQalkyB^^eANdfbwhj76lNyxU{l^F3an!Th{-!Xrd0Qxe z2jsHc;XOZ%FVK_M{2M$iTD2P(O={Ul{6Hwr2cQ{^mm@QQQV6WaeKN7oRk62}s7o$f zq2#)b`Mtgw+TtM%whGpcT-Liz>qO-H}GL6l-o}Kqv4J zxrInEgJ6zXn}x>|46m^CJ({a0*~R~}LpB;xTRAW5JsZAKbreWSXu1?YH+{FmIUTe= z`TUFmB-ojcL#TUth02vTSh3Q&Z~I*VrT?h7Gk-U3?44D82kQkd^N(XpBYKp?tPIp6 zHumGnZ%#TuVtXZ7L1p8A(w(qOgAcx44=fm){&j2#o@L&Mx@MQvoJA%*T@CbdF%GJK zLI);L2t}~=Y&D~Tboty+M#g6Zeu8Ch&mpLvNAtX2`a7k08pZIqJdVADsKEHNq$P~YSW*n|=kHUqv#JcR)Aby|w?Dw;u|h7KuRp;k6&l>j>+)(4g_$G4QIy?? zZFkmGv-)O4rlZfcrS4x|NBJ&E3|Qxq8g@)EH=sefMTQ*bDGGe*KjWhbuz?T|f11wd zMs~dH*a%`;Xq(FlzS=f}`Hi|^Ewu>%()g2&ttEqsvIE~h9C0o1bC|e~&xozjLeZRc zf4jUeLS+;N%YKV^O6QEBY^G#_gGbautX^Fcn$?c!6HrJk*G~?iHiW~#8p?nuaA7={ zE}F96WCc8#J)Lm@e8b3T9FTHDJ8SBBoJ0V6cEG zo|k0hQynUDIs-ltlULh6dQK)Yxo$bU1DP+>+;_=DXY5kx#>iIwIhnpL$t#3diG*=F zc~KD5gtyYGWGwozVgYThO#!vYCJ&l%ef6NkTv7n9`d4w@prPefH}J6edhv)_2zY2T zgfoJ)0IiP)UhQq*DeYdKQ^-2nWFs{jUMbe7fr6$kAAkiZ{;{v!P50e$-AK~di-|pv z$+W0I#IZ^6)X*bNpju~A?NJxucjjbI2ui|FPy^+;#lt zS8#uLTjO+!X%|eOK_|plupgBIoBxoV$wJgsnPEWYpfX@5a9PA@NZ22t3Z_UgE=s|K z?1V|uqQ?7v?fc;HL1fMT%Rp8y@sRV>=MW1*DTxPv_9`m1nSTZMk%hz@6cw6fe%u=a z#zRsgX1`;LBon?7wekcNOnj?M9!a#wjq8+~cl~Ocg#p$9w6T@E5CNBxzey_}-f+}B z=&iylIWSQ)gu-~xHAJ@`CG57=UDbJhwb!n&x#HR}ADgJnhlq{y-*&r=!vpMQUGIxQ z!YUHSPOBx)7`@xr(VIT(ma)nO9N+GmL2=8~%v-`@OC)?Popb%U=cA7(pBXRSrZ6|H z)xA6SWXXl;CNGdVZa47rqPrSxt!GY=FUgk(4+8ev#eD9tT?|h(?XS-%~4vJoME`i$avlD7Qj@W2Z{yUr2F-!cql(35l=51sA4k>OD;qfJDC1= r)W|CfnsBizzSUymj_diCh(O+T^__llv_=2#MT?{;t143^Wg7B-hxs2Q literal 0 HcmV?d00001 From 8bf97809a8f304b8db340626b3af7ee39edf410f Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 3 Aug 2021 16:05:22 -0500 Subject: [PATCH 547/761] docs: information on new conventional commit format (#9573) --- .github/PULL_REQUEST_TEMPLATE.md | 1 + CONTRIBUTING.md | 11 +++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 822d809c46255..1c717ddbb1a15 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -5,6 +5,7 @@ show completion. --> - [ ] Updated associated README.md. - [ ] Wrote appropriate unit tests. +- [ ] Pull request title or commits are in [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) (e.g. feat: or fix:) - - Nom - - - + Nom String + - - Code - String - - - Cur - String - + + + + Code + String + + + + Cur + String + @@ -40,8 +40,8 @@ LIFETIME(MIN 300 MAX 600); 3306 - wrong - wrong + default + 127.0.0.1 1 @@ -56,8 +56,7 @@ LIFETIME(MIN 300 MAX 600); - - - + + 300 From 83bd10b4db19c282eedfbb492ac74b0ed2e94374 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Tue, 10 Aug 2021 14:47:23 -0700 Subject: [PATCH 561/761] feat: Pull metrics from multiple AWS CloudWatch namespaces (#9386) --- plugins/inputs/cloudwatch/README.md | 6 +- plugins/inputs/cloudwatch/cloudwatch.go | 179 ++++++++++--------- plugins/inputs/cloudwatch/cloudwatch_test.go | 53 ++++-- 3 files changed, 144 insertions(+), 94 deletions(-) diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index e09acc518de57..97592f5197ab7 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -75,8 +75,10 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## Configure the TTL for the internal cache of metrics. # cache_ttl = "1h" - ## Metric Statistic Namespace (required) - namespace = "AWS/ELB" + ## Metric Statistic Namespaces (required) + namespaces = ["AWS/ELB"] + # A single metric statistic namespace that will be appended to namespaces on startup + # namespace = "AWS/ELB" ## Maximum requests per second. Note that the global default AWS rate limit is ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index fff2da1d3a8e0..1cd7958301611 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -34,6 +34,7 @@ type CloudWatch struct { Period config.Duration `toml:"period"` Delay config.Duration `toml:"delay"` Namespace string `toml:"namespace"` + Namespaces []string `toml:"namespaces"` Metrics []*Metric `toml:"metrics"` CacheTTL config.Duration `toml:"cache_ttl"` RateLimit int `toml:"ratelimit"` @@ -71,7 +72,7 @@ type metricCache struct { ttl time.Duration built time.Time metrics []filteredMetric - queries []*cwClient.MetricDataQuery + queries map[string][]*cwClient.MetricDataQuery } type cloudwatchClient interface { @@ -139,8 +140,10 @@ func (c *CloudWatch) SampleConfig() string { ## Configure the TTL for the internal cache of metrics. # cache_ttl = "1h" - ## Metric Statistic Namespace (required) - namespace = "AWS/ELB" + ## Metric Statistic Namespaces (required) + namespaces = ["AWS/ELB"] + # A single metric statistic namespace that will be appended to namespaces on startup + # namespace = "AWS/ELB" ## Maximum requests per second. Note that the global default AWS rate limit is ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a @@ -181,25 +184,28 @@ func (c *CloudWatch) Description() string { return "Pull Metric Statistics from Amazon CloudWatch" } -// Gather takes in an accumulator and adds the metrics that the Input -// gathers. This is called every "interval". -func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { - if c.statFilter == nil { - var err error - // Set config level filter (won't change throughout life of plugin). - c.statFilter, err = filter.NewIncludeExcludeFilter(c.StatisticInclude, c.StatisticExclude) - if err != nil { - return err - } +func (c *CloudWatch) Init() error { + if len(c.Namespace) != 0 { + c.Namespaces = append(c.Namespaces, c.Namespace) } - if c.client == nil { - err := c.initializeCloudWatch() - if err != nil { - return err - } + err := c.initializeCloudWatch() + if err != nil { + return err } + // Set config level filter (won't change throughout life of plugin). + c.statFilter, err = filter.NewIncludeExcludeFilter(c.StatisticInclude, c.StatisticExclude) + if err != nil { + return err + } + + return nil +} + +// Gather takes in an accumulator and adds the metrics that the Input +// gathers. This is called every "interval". +func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { filteredMetrics, err := getFilteredMetrics(c) if err != nil { return err @@ -221,32 +227,34 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { wg := sync.WaitGroup{} rLock := sync.Mutex{} - results := []*cwClient.MetricDataResult{} + results := map[string][]*cwClient.MetricDataResult{} - // 500 is the maximum number of metric data queries a `GetMetricData` request can contain. - batchSize := 500 - var batches [][]*cwClient.MetricDataQuery + for namespace, namespacedQueries := range queries { + // 500 is the maximum number of metric data queries a `GetMetricData` request can contain. + batchSize := 500 + var batches [][]*cwClient.MetricDataQuery - for batchSize < len(queries) { - queries, batches = queries[batchSize:], append(batches, queries[0:batchSize:batchSize]) - } - batches = append(batches, queries) - - for i := range batches { - wg.Add(1) - <-lmtr.C - go func(inm []*cwClient.MetricDataQuery) { - defer wg.Done() - result, err := c.gatherMetrics(c.getDataInputs(inm)) - if err != nil { - acc.AddError(err) - return - } + for batchSize < len(namespacedQueries) { + namespacedQueries, batches = namespacedQueries[batchSize:], append(batches, namespacedQueries[0:batchSize:batchSize]) + } + batches = append(batches, namespacedQueries) + + for i := range batches { + wg.Add(1) + <-lmtr.C + go func(n string, inm []*cwClient.MetricDataQuery) { + defer wg.Done() + result, err := c.gatherMetrics(c.getDataInputs(inm)) + if err != nil { + acc.AddError(err) + return + } - rLock.Lock() - results = append(results, result...) - rLock.Unlock() - }(batches[i]) + rLock.Lock() + results[n] = append(results[n], result...) + rLock.Unlock() + }(namespace, batches[i]) + } } wg.Wait() @@ -323,11 +331,13 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { } } for _, name := range m.MetricNames { - metrics = append(metrics, &cwClient.Metric{ - Namespace: aws.String(c.Namespace), - MetricName: aws.String(name), - Dimensions: dimensions, - }) + for _, namespace := range c.Namespaces { + metrics = append(metrics, &cwClient.Metric{ + Namespace: aws.String(namespace), + MetricName: aws.String(name), + Dimensions: dimensions, + }) + } } } else { allMetrics, err := c.fetchNamespaceMetrics() @@ -337,11 +347,13 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { for _, name := range m.MetricNames { for _, metric := range allMetrics { if isSelected(name, metric, m.Dimensions) { - metrics = append(metrics, &cwClient.Metric{ - Namespace: aws.String(c.Namespace), - MetricName: aws.String(name), - Dimensions: metric.Dimensions, - }) + for _, namespace := range c.Namespaces { + metrics = append(metrics, &cwClient.Metric{ + Namespace: aws.String(namespace), + MetricName: aws.String(name), + Dimensions: metric.Dimensions, + }) + } } } } @@ -399,24 +411,26 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]*cwClient.Metric, error) { recentlyActive = nil } params = &cwClient.ListMetricsInput{ - Namespace: aws.String(c.Namespace), Dimensions: []*cwClient.DimensionFilter{}, NextToken: token, MetricName: nil, RecentlyActive: recentlyActive, } - for { - resp, err := c.client.ListMetrics(params) - if err != nil { - return nil, err - } + for _, namespace := range c.Namespaces { + params.Namespace = aws.String(namespace) + for { + resp, err := c.client.ListMetrics(params) + if err != nil { + return nil, err + } - metrics = append(metrics, resp.Metrics...) - if resp.NextToken == nil { - break - } + metrics = append(metrics, resp.Metrics...) + if resp.NextToken == nil { + break + } - params.NextToken = resp.NextToken + params.NextToken = resp.NextToken + } } return metrics, nil @@ -437,21 +451,21 @@ func (c *CloudWatch) updateWindow(relativeTo time.Time) { } // getDataQueries gets all of the possible queries so we can maximize the request payload. -func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) []*cwClient.MetricDataQuery { +func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string][]*cwClient.MetricDataQuery { if c.metricCache != nil && c.metricCache.queries != nil && c.metricCache.isValid() { return c.metricCache.queries } c.queryDimensions = map[string]*map[string]string{} - dataQueries := []*cwClient.MetricDataQuery{} + dataQueries := map[string][]*cwClient.MetricDataQuery{} for i, filtered := range filteredMetrics { for j, metric := range filtered.metrics { id := strconv.Itoa(j) + "_" + strconv.Itoa(i) dimension := ctod(metric.Dimensions) if filtered.statFilter.Match("average") { c.queryDimensions["average_"+id] = dimension - dataQueries = append(dataQueries, &cwClient.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], &cwClient.MetricDataQuery{ Id: aws.String("average_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_average")), MetricStat: &cwClient.MetricStat{ @@ -463,7 +477,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) []*cwClien } if filtered.statFilter.Match("maximum") { c.queryDimensions["maximum_"+id] = dimension - dataQueries = append(dataQueries, &cwClient.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], &cwClient.MetricDataQuery{ Id: aws.String("maximum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_maximum")), MetricStat: &cwClient.MetricStat{ @@ -475,7 +489,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) []*cwClien } if filtered.statFilter.Match("minimum") { c.queryDimensions["minimum_"+id] = dimension - dataQueries = append(dataQueries, &cwClient.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], &cwClient.MetricDataQuery{ Id: aws.String("minimum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_minimum")), MetricStat: &cwClient.MetricStat{ @@ -487,7 +501,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) []*cwClien } if filtered.statFilter.Match("sum") { c.queryDimensions["sum_"+id] = dimension - dataQueries = append(dataQueries, &cwClient.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], &cwClient.MetricDataQuery{ Id: aws.String("sum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_sum")), MetricStat: &cwClient.MetricStat{ @@ -499,7 +513,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) []*cwClien } if filtered.statFilter.Match("sample_count") { c.queryDimensions["sample_count_"+id] = dimension - dataQueries = append(dataQueries, &cwClient.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], &cwClient.MetricDataQuery{ Id: aws.String("sample_count_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_sample_count")), MetricStat: &cwClient.MetricStat{ @@ -554,24 +568,27 @@ func (c *CloudWatch) gatherMetrics( func (c *CloudWatch) aggregateMetrics( acc telegraf.Accumulator, - metricDataResults []*cwClient.MetricDataResult, + metricDataResults map[string][]*cwClient.MetricDataResult, ) error { var ( - grouper = internalMetric.NewSeriesGrouper() - namespace = sanitizeMeasurement(c.Namespace) + grouper = internalMetric.NewSeriesGrouper() ) - for _, result := range metricDataResults { - tags := map[string]string{} + for namespace, results := range metricDataResults { + namespace = sanitizeMeasurement(namespace) - if dimensions, ok := c.queryDimensions[*result.Id]; ok { - tags = *dimensions - } - tags["region"] = c.Region + for _, result := range results { + tags := map[string]string{} - for i := range result.Values { - if err := grouper.Add(namespace, tags, *result.Timestamps[i], *result.Label, *result.Values[i]); err != nil { - acc.AddError(err) + if dimensions, ok := c.queryDimensions[*result.Id]; ok { + tags = *dimensions + } + tags["region"] = c.Region + + for i := range result.Values { + if err := grouper.Add(namespace, tags, *result.Timestamps[i], *result.Label, *result.Values[i]); err != nil { + acc.AddError(err) + } } } } diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 860bf41d97e3c..3114240ec77a9 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -116,8 +116,9 @@ func TestGather(t *testing.T) { } var acc testutil.Accumulator - c.client = &mockGatherCloudWatchClient{} + require.NoError(t, c.Init()) + c.client = &mockGatherCloudWatchClient{} require.NoError(t, acc.GatherError(c.Gather)) fields := map[string]interface{}{} @@ -135,6 +136,26 @@ func TestGather(t *testing.T) { acc.AssertContainsTaggedFields(t, "cloudwatch_aws_elb", fields, tags) } +func TestGather_MultipleNamespaces(t *testing.T) { + duration, _ := time.ParseDuration("1m") + internalDuration := config.Duration(duration) + c := &CloudWatch{ + Namespaces: []string{"AWS/ELB", "AWS/EC2"}, + Delay: internalDuration, + Period: internalDuration, + RateLimit: 200, + } + + var acc testutil.Accumulator + + require.NoError(t, c.Init()) + c.client = &mockGatherCloudWatchClient{} + require.NoError(t, acc.GatherError(c.Gather)) + + require.True(t, acc.HasMeasurement("cloudwatch_aws_elb")) + require.True(t, acc.HasMeasurement("cloudwatch_aws_ec2")) +} + type mockSelectMetricsCloudWatchClient struct{} func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ *cwClient.ListMetricsInput) (*cwClient.ListMetricsOutput, error) { @@ -215,8 +236,7 @@ func TestSelectMetrics(t *testing.T) { }, }, } - err := c.initializeCloudWatch() - require.NoError(t, err) + require.NoError(t, c.Init()) c.client = &mockSelectMetricsCloudWatchClient{} filtered, err := getFilteredMetrics(c) // We've asked for 2 (out of 4) metrics, over all 3 load balancers in all 2 @@ -231,18 +251,20 @@ func TestGenerateStatisticsInputParams(t *testing.T) { Value: aws.String("p-example"), } + namespace := "AWS/ELB" m := &cwClient.Metric{ MetricName: aws.String("Latency"), Dimensions: []*cwClient.Dimension{d}, + Namespace: &namespace, } duration, _ := time.ParseDuration("1m") internalDuration := config.Duration(duration) c := &CloudWatch{ - Namespace: "AWS/ELB", - Delay: internalDuration, - Period: internalDuration, + Namespaces: []string{namespace}, + Delay: internalDuration, + Period: internalDuration, } require.NoError(t, c.initializeCloudWatch()) @@ -253,7 +275,7 @@ func TestGenerateStatisticsInputParams(t *testing.T) { statFilter, _ := filter.NewIncludeExcludeFilter(nil, nil) queries := c.getDataQueries([]filteredMetric{{metrics: []*cwClient.Metric{m}, statFilter: statFilter}}) - params := c.getDataInputs(queries) + params := c.getDataInputs(queries[namespace]) require.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) require.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) @@ -268,18 +290,20 @@ func TestGenerateStatisticsInputParamsFiltered(t *testing.T) { Value: aws.String("p-example"), } + namespace := "AWS/ELB" m := &cwClient.Metric{ MetricName: aws.String("Latency"), Dimensions: []*cwClient.Dimension{d}, + Namespace: &namespace, } duration, _ := time.ParseDuration("1m") internalDuration := config.Duration(duration) c := &CloudWatch{ - Namespace: "AWS/ELB", - Delay: internalDuration, - Period: internalDuration, + Namespaces: []string{namespace}, + Delay: internalDuration, + Period: internalDuration, } require.NoError(t, c.initializeCloudWatch()) @@ -290,7 +314,7 @@ func TestGenerateStatisticsInputParamsFiltered(t *testing.T) { statFilter, _ := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil) queries := c.getDataQueries([]filteredMetric{{metrics: []*cwClient.Metric{m}, statFilter: statFilter}}) - params := c.getDataInputs(queries) + params := c.getDataInputs(queries[namespace]) require.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) require.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) @@ -354,3 +378,10 @@ func TestProxyFunction(t *testing.T) { require.NoError(t, err) require.Equal(t, "www.penguins.com", proxyResult.Host) } + +func TestCombineNamespaces(t *testing.T) { + c := &CloudWatch{Namespace: "AWS/ELB", Namespaces: []string{"AWS/EC2", "AWS/Billing"}} + + require.NoError(t, c.Init()) + require.Equal(t, []string{"AWS/EC2", "AWS/Billing", "AWS/ELB"}, c.Namespaces) +} From eb41218fe07a0855f79845bdd559b3e2ce66d0d1 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Tue, 10 Aug 2021 14:48:02 -0700 Subject: [PATCH 562/761] fix: CrateDB replace dots in tag keys with underscores (#9566) --- plugins/outputs/cratedb/README.md | 2 + plugins/outputs/cratedb/cratedb.go | 46 +++++++++++------- plugins/outputs/cratedb/cratedb_test.go | 62 +++++++++++++++---------- 3 files changed, 69 insertions(+), 41 deletions(-) diff --git a/plugins/outputs/cratedb/README.md b/plugins/outputs/cratedb/README.md index 50386fbbc94d1..11214092d26c2 100644 --- a/plugins/outputs/cratedb/README.md +++ b/plugins/outputs/cratedb/README.md @@ -35,4 +35,6 @@ config option, see below. table = "metrics" # If true, and the metrics table does not exist, create it automatically. table_create = true + # The character(s) to replace any '.' in an object key with + key_separator = "_" ``` diff --git a/plugins/outputs/cratedb/cratedb.go b/plugins/outputs/cratedb/cratedb.go index a28e29dc0e47c..b56787114d709 100644 --- a/plugins/outputs/cratedb/cratedb.go +++ b/plugins/outputs/cratedb/cratedb.go @@ -20,11 +20,12 @@ import ( const MaxInt64 = int64(^uint64(0) >> 1) type CrateDB struct { - URL string - Timeout config.Duration - Table string - TableCreate bool `toml:"table_create"` - DB *sql.DB + URL string + Timeout config.Duration + Table string + TableCreate bool `toml:"table_create"` + KeySeparator string `toml:"key_separator"` + DB *sql.DB } var sampleConfig = ` @@ -37,6 +38,8 @@ var sampleConfig = ` table = "metrics" # If true, and the metrics table does not exist, create it automatically. table_create = true + # The character(s) to replace any '.' in an object key with + key_separator = "_" ` func (c *CrateDB) Connect() error { @@ -68,15 +71,21 @@ CREATE TABLE IF NOT EXISTS ` + c.Table + ` ( func (c *CrateDB) Write(metrics []telegraf.Metric) error { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.Timeout)) defer cancel() - if sql, err := insertSQL(c.Table, metrics); err != nil { + + generatedSQL, err := insertSQL(c.Table, c.KeySeparator, metrics) + if err != nil { return err - } else if _, err := c.DB.ExecContext(ctx, sql); err != nil { + } + + _, err = c.DB.ExecContext(ctx, generatedSQL) + if err != nil { return err } + return nil } -func insertSQL(table string, metrics []telegraf.Metric) (string, error) { +func insertSQL(table string, keyReplacement string, metrics []telegraf.Metric) (string, error) { rows := make([]string, len(metrics)) for i, m := range metrics { cols := []interface{}{ @@ -89,7 +98,7 @@ func insertSQL(table string, metrics []telegraf.Metric) (string, error) { escapedCols := make([]string, len(cols)) for i, col := range cols { - escaped, err := escapeValue(col) + escaped, err := escapeValue(col, keyReplacement) if err != nil { return "", err } @@ -113,7 +122,7 @@ VALUES // inputs. // // [1] https://github.com/influxdata/telegraf/pull/3210#issuecomment-339273371 -func escapeValue(val interface{}) (string, error) { +func escapeValue(val interface{}, keyReplacement string) (string, error) { switch t := val.(type) { case string: return escapeString(t, `'`), nil @@ -131,11 +140,11 @@ func escapeValue(val interface{}) (string, error) { return strconv.FormatBool(t), nil case time.Time: // see https://crate.io/docs/crate/reference/sql/data_types.html#timestamp - return escapeValue(t.Format("2006-01-02T15:04:05.999-0700")) + return escapeValue(t.Format("2006-01-02T15:04:05.999-0700"), keyReplacement) case map[string]string: - return escapeObject(convertMap(t)) + return escapeObject(convertMap(t), keyReplacement) case map[string]interface{}: - return escapeObject(t) + return escapeObject(t, keyReplacement) default: // This might be panic worthy under normal circumstances, but it's probably // better to not shut down the entire telegraf process because of one @@ -154,7 +163,7 @@ func convertMap(m map[string]string) map[string]interface{} { return c } -func escapeObject(m map[string]interface{}) (string, error) { +func escapeObject(m map[string]interface{}, keyReplacement string) (string, error) { // There is a decent chance that the implementation below doesn't catch all // edge cases, but it's hard to tell since the format seems to be a bit // underspecified. @@ -171,12 +180,15 @@ func escapeObject(m map[string]interface{}) (string, error) { // Now we build our key = val pairs pairs := make([]string, 0, len(m)) for _, k := range keys { - // escape the value of our key k (potentially recursive) - val, err := escapeValue(m[k]) + key := escapeString(strings.ReplaceAll(k, ".", keyReplacement), `"`) + + // escape the value of the value at k (potentially recursive) + val, err := escapeValue(m[k], keyReplacement) if err != nil { return "", err } - pairs = append(pairs, escapeString(k, `"`)+" = "+val) + + pairs = append(pairs, key+" = "+val) } return `{` + strings.Join(pairs, ", ") + `}`, nil } diff --git a/plugins/outputs/cratedb/cratedb_test.go b/plugins/outputs/cratedb/cratedb_test.go index 66a2bfa794cc9..0bdfd8d3e2652 100644 --- a/plugins/outputs/cratedb/cratedb_test.go +++ b/plugins/outputs/cratedb/cratedb_test.go @@ -2,7 +2,6 @@ package cratedb import ( "database/sql" - "fmt" "os" "strings" "testing" @@ -49,9 +48,9 @@ func TestConnectAndWriteIntegration(t *testing.T) { // the rows using their primary keys in order to take advantage of // read-after-write consistency in CrateDB. for _, m := range metrics { - hashIDVal, err := escapeValue(hashID(m)) + hashIDVal, err := escapeValue(hashID(m), "_") require.NoError(t, err) - timestamp, err := escapeValue(m.Time()) + timestamp, err := escapeValue(m.Time(), "_") require.NoError(t, err) var id int64 @@ -85,7 +84,7 @@ VALUES } for _, test := range tests { - if got, err := insertSQL("my_table", test.Metrics); err != nil { + if got, err := insertSQL("my_table", "_", test.Metrics); err != nil { t.Error(err) } else if got != test.Want { t.Errorf("got:\n%s\n\nwant:\n%s", got, test.Want) @@ -93,17 +92,13 @@ VALUES } } -func Test_escapeValueIntegration(t *testing.T) { - t.Skip("Skipping due to trust authentication failure") - - if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" { - t.Skip("Skipping test on CircleCI due to docker failures") - } +type escapeValueTest struct { + Value interface{} + Want string +} - tests := []struct { - Val interface{} - Want string - }{ +func escapeValueTests() []escapeValueTest { + return []escapeValueTest{ // string {`foo`, `'foo'`}, {`foo'bar 'yeah`, `'foo''bar ''yeah'`}, @@ -122,6 +117,7 @@ func Test_escapeValueIntegration(t *testing.T) { {map[string]string(nil), `{}`}, {map[string]string{"foo": "bar"}, `{"foo" = 'bar'}`}, {map[string]string{"foo": "bar", "one": "more"}, `{"foo" = 'bar', "one" = 'more'}`}, + {map[string]string{"f.oo": "bar", "o.n.e": "more"}, `{"f_oo" = 'bar', "o_n_e" = 'more'}`}, // map[string]interface{} {map[string]interface{}{}, `{}`}, {map[string]interface{}(nil), `{}`}, @@ -130,29 +126,47 @@ func Test_escapeValueIntegration(t *testing.T) { {map[string]interface{}{"foo": map[string]interface{}{"one": "more"}}, `{"foo" = {"one" = 'more'}}`}, {map[string]interface{}{`fo"o`: `b'ar`, `ab'c`: `xy"z`, `on"""e`: `mo'''re`}, `{"ab'c" = 'xy"z', "fo""o" = 'b''ar', "on""""""e" = 'mo''''''re'}`}, } +} - url := testURL() - fmt.Println("url", url) - db, err := sql.Open("pgx", url) +func Test_escapeValueIntegration(t *testing.T) { + t.Skip("Skipping due to trust authentication failure") + + if os.Getenv("CIRCLE_PROJECT_REPONAME") != "" { + t.Skip("Skipping test on CircleCI due to docker failures") + } + + db, err := sql.Open("pgx", testURL()) require.NoError(t, err) defer db.Close() + tests := escapeValueTests() for _, test := range tests { - got, err := escapeValue(test.Val) - if err != nil { - t.Errorf("val: %#v: %s", test.Val, err) - } else if got != test.Want { - t.Errorf("got:\n%s\n\nwant:\n%s", got, test.Want) - } + got, err := escapeValue(test.Value, "_") + require.NoError(t, err, "value: %#v", test.Value) // This is a smoke test that will blow up if our escaping causing a SQL - // syntax error, which may allow for an attack. + // syntax error, which may allow for an attack.= var reply interface{} row := db.QueryRow("SELECT " + got) require.NoError(t, row.Scan(&reply)) } } +func Test_escapeValue(t *testing.T) { + tests := escapeValueTests() + for _, test := range tests { + got, err := escapeValue(test.Value, "_") + require.NoError(t, err, "value: %#v", test.Value) + require.Equal(t, got, test.Want) + } +} + +func Test_circumeventingStringEscape(t *testing.T) { + value, err := escapeObject(map[string]interface{}{"a.b": "c"}, `_"`) + require.NoError(t, err) + require.Equal(t, value, `{"a_""b" = 'c'}`) +} + func Test_hashID(t *testing.T) { tests := []struct { Name string From cfd7348e5c13612fe63bad3237e9e254eebc9c9b Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Tue, 17 Aug 2021 14:13:43 -0700 Subject: [PATCH 563/761] docs: improve redis commands documentation (#9606) --- plugins/inputs/redis/README.md | 12 ++++++++--- plugins/inputs/redis/redis.go | 38 +++++++++++++++++++++++++--------- 2 files changed, 37 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index 4327a28bb98ee..bd89ea75346b2 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -10,15 +10,21 @@ ## e.g. ## tcp://localhost:6379 ## tcp://:password@192.168.99.100 + ## unix:///var/run/redis.sock ## ## If no servers are specified, then localhost is used as the host. ## If no port is specified, 6379 is used servers = ["tcp://localhost:6379"] + ## Optional. Specify redis commands to retrieve values # [[inputs.redis.commands]] - # command = ["get", "sample-key"] - # field = "sample-key-value" - # type = "string" + # # The command to run where each argument is a separate element + # command = ["get", "sample-key"] + # # The field to store the result in + # field = "sample-key-value" + # # The type of the result + # # Can be "string", "integer", or "float" + # type = "string" ## specify server password # password = "s#cr@t%" diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index fdc5dcd14cb12..b66d4ea41d36b 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -32,8 +32,8 @@ type Redis struct { Log telegraf.Logger - clients []Client - initialized bool + clients []Client + connected bool } type Client interface { @@ -201,9 +201,13 @@ var sampleConfig = ` ## Optional. Specify redis commands to retrieve values # [[inputs.redis.commands]] - # command = ["get", "sample-key"] - # field = "sample-key-value" - # type = "string" + # # The command to run where each argument is a separate element + # command = ["get", "sample-key"] + # # The field to store the result in + # field = "sample-key-value" + # # The type of the result + # # Can be "string", "integer", or "float" + # type = "string" ## specify server password # password = "s#cr@t%" @@ -230,8 +234,18 @@ var Tracking = map[string]string{ "role": "replication_role", } -func (r *Redis) init() error { - if r.initialized { +func (r *Redis) Init() error { + for _, command := range r.Commands { + if command.Type != "string" && command.Type != "integer" && command.Type != "float" { + return fmt.Errorf(`unknown result type: expected one of "string", "integer", "float"; got %q`, command.Type) + } + } + + return nil +} + +func (r *Redis) connect() error { + if r.connected { return nil } @@ -299,15 +313,15 @@ func (r *Redis) init() error { } } - r.initialized = true + r.connected = true return nil } // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). func (r *Redis) Gather(acc telegraf.Accumulator) error { - if !r.initialized { - err := r.init() + if !r.connected { + err := r.connect() if err != nil { return err } @@ -333,6 +347,10 @@ func (r *Redis) gatherCommandValues(client Client, acc telegraf.Accumulator) err for _, command := range r.Commands { val, err := client.Do(command.Type, command.Command...) if err != nil { + if strings.Contains(err.Error(), "unexpected type=") { + return fmt.Errorf("could not get command result: %s", err) + } + return err } From 967e31e3036fa7a1fb9be83eb9249d8bca265ae8 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 17 Aug 2021 16:14:15 -0500 Subject: [PATCH 564/761] fix: wireguard unknown revision when using direct (#9620) --- go.mod | 6 ++++++ go.sum | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 3cbedd83680a8..0f6bdcd55c1cc 100644 --- a/go.mod +++ b/go.mod @@ -171,3 +171,9 @@ replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible // replaced due to https//github.com/mdlayher/apcupsd/issues/10 replace github.com/mdlayher/apcupsd => github.com/influxdata/apcupsd v0.0.0-20210427145308-694d5caead0e + +//proxy.golang.org has versions of golang.zx2c4.com/wireguard with leading v's, whereas the git repo has tags without leading v's: https://git.zx2c4.com/wireguard-go/refs/tags +//So, fetching this module with version v0.0.20200121 (as done by the transitive dependency +//https://github.com/WireGuard/wgctrl-go/blob/e35592f146e40ce8057113d14aafcc3da231fbac/go.mod#L12 ) was not working when using GOPROXY=direct. +//Replacing with the pseudo-version works around this. +replace golang.zx2c4.com/wireguard v0.0.20200121 => golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090 diff --git a/go.sum b/go.sum index 90806c2041df6..d17f8209df7da 100644 --- a/go.sum +++ b/go.sum @@ -1943,8 +1943,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.zx2c4.com/wireguard v0.0.20200121 h1:vcswa5Q6f+sylDfjqyrVNNrjsFUUbPsgAQTBCAg/Qf8= -golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= +golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090 h1:LJ5Rrj8y0yBul+KpB2v9dFhYuHRs1s9caVu4VK6MgMo= +golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= From 41c384a978743edba728bf64134d1d48447d2eb5 Mon Sep 17 00:00:00 2001 From: Nicolai Scheer <5212218+nscheer@users.noreply.github.com> Date: Tue, 17 Aug 2021 23:19:02 +0200 Subject: [PATCH 565/761] feat: add bool datatype for sql output plugin (#9598) Co-authored-by: Nicolai Scheer --- plugins/outputs/sql/README.md | 1 + plugins/outputs/sql/sql.go | 4 ++++ plugins/outputs/sql/sql_test.go | 8 ++++++++ plugins/outputs/sql/testdata/mariadb/expected.sql | 6 ++++-- plugins/outputs/sql/testdata/postgres/expected.sql | 8 +++++--- 5 files changed, 22 insertions(+), 5 deletions(-) diff --git a/plugins/outputs/sql/README.md b/plugins/outputs/sql/README.md index 6fb215612ecaf..77b89762a7a87 100644 --- a/plugins/outputs/sql/README.md +++ b/plugins/outputs/sql/README.md @@ -104,6 +104,7 @@ through the convert settings. # timestamp = "TIMESTAMP" # defaultvalue = "TEXT" # unsigned = "UNSIGNED" + # bool = "BOOL" ``` ## Driver-specific information diff --git a/plugins/outputs/sql/sql.go b/plugins/outputs/sql/sql.go index 3e003d3309873..fecaf2f6e7661 100644 --- a/plugins/outputs/sql/sql.go +++ b/plugins/outputs/sql/sql.go @@ -22,6 +22,7 @@ type ConvertStruct struct { Timestamp string Defaultvalue string Unsigned string + Bool string } type SQL struct { @@ -103,6 +104,8 @@ func (p *SQL) deriveDatatype(value interface{}) string { datatype = p.Convert.Real case string: datatype = p.Convert.Text + case bool: + datatype = p.Convert.Bool default: datatype = p.Convert.Defaultvalue p.Log.Errorf("Unknown datatype: '%T' %v", value, value) @@ -272,6 +275,7 @@ func newSQL() *SQL { Timestamp: "TIMESTAMP", Defaultvalue: "TEXT", Unsigned: "UNSIGNED", + Bool: "BOOL", }, } } diff --git a/plugins/outputs/sql/sql_test.go b/plugins/outputs/sql/sql_test.go index c57570442c617..5dad6752d4cfe 100644 --- a/plugins/outputs/sql/sql_test.go +++ b/plugins/outputs/sql/sql_test.go @@ -100,6 +100,14 @@ var ( Key: "int64_two", Value: int64(2345), }, + { + Key: "bool_one", + Value: true, + }, + { + Key: "bool_two", + Value: false, + }, }, ts, ), diff --git a/plugins/outputs/sql/testdata/mariadb/expected.sql b/plugins/outputs/sql/testdata/mariadb/expected.sql index 49a3095db4da2..43e0fa5e545b0 100644 --- a/plugins/outputs/sql/testdata/mariadb/expected.sql +++ b/plugins/outputs/sql/testdata/mariadb/expected.sql @@ -21,10 +21,12 @@ CREATE TABLE `metric_one` ( `tag_one` text DEFAULT NULL, `tag_two` text DEFAULT NULL, `int64_one` int(11) DEFAULT NULL, - `int64_two` int(11) DEFAULT NULL + `int64_two` int(11) DEFAULT NULL, + `bool_one` tinyint(1) DEFAULT NULL, + `bool_two` tinyint(1) DEFAULT NULL ); /*!40101 SET character_set_client = @saved_cs_client */; -INSERT INTO `metric_one` VALUES ('2021-05-17 22:04:45','tag1','tag2',1234,2345); +INSERT INTO `metric_one` VALUES ('2021-05-17 22:04:45','tag1','tag2',1234,2345,1,0); /*!40101 SET @saved_cs_client = @@character_set_client */; /*!40101 SET character_set_client = utf8 */; CREATE TABLE `metric_two` ( diff --git a/plugins/outputs/sql/testdata/postgres/expected.sql b/plugins/outputs/sql/testdata/postgres/expected.sql index 8bc2b2fc83018..c1ee733ac12d4 100644 --- a/plugins/outputs/sql/testdata/postgres/expected.sql +++ b/plugins/outputs/sql/testdata/postgres/expected.sql @@ -21,7 +21,9 @@ CREATE TABLE public.metric_one ( tag_one text, tag_two text, int64_one integer, - int64_two integer + int64_two integer, + bool_one boolean, + bool_two boolean ); ALTER TABLE public.metric_one OWNER TO postgres; CREATE TABLE public.metric_two ( @@ -33,8 +35,8 @@ ALTER TABLE public.metric_two OWNER TO postgres; COPY public."metric three" ("timestamp", "tag four", "string two") FROM stdin; 2021-05-17 22:04:45 tag4 string2 \. -COPY public.metric_one ("timestamp", tag_one, tag_two, int64_one, int64_two) FROM stdin; -2021-05-17 22:04:45 tag1 tag2 1234 2345 +COPY public.metric_one ("timestamp", tag_one, tag_two, int64_one, int64_two, bool_one, bool_two) FROM stdin; +2021-05-17 22:04:45 tag1 tag2 1234 2345 t f \. COPY public.metric_two ("timestamp", tag_three, string_one) FROM stdin; 2021-05-17 22:04:45 tag3 string1 From 02ccbec348aa5171da7fe237d70f7f6d28782723 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 17 Aug 2021 16:22:14 -0500 Subject: [PATCH 566/761] fix: cookie test (#9608) --- plugins/common/cookie/cookie.go | 5 ++-- plugins/common/cookie/cookie_test.go | 34 +++++++++++++++------------- plugins/common/http/config.go | 3 ++- 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/plugins/common/cookie/cookie.go b/plugins/common/cookie/cookie.go index 92dab9104dcc5..10213f78d9b37 100644 --- a/plugins/common/cookie/cookie.go +++ b/plugins/common/cookie/cookie.go @@ -9,6 +9,7 @@ import ( "strings" "time" + clockutil "github.com/benbjohnson/clock" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" ) @@ -27,7 +28,7 @@ type CookieAuthConfig struct { client *http.Client } -func (c *CookieAuthConfig) Start(client *http.Client, log telegraf.Logger) (err error) { +func (c *CookieAuthConfig) Start(client *http.Client, log telegraf.Logger, clock clockutil.Clock) (err error) { c.client = client if c.Method == "" { @@ -45,7 +46,7 @@ func (c *CookieAuthConfig) Start(client *http.Client, log telegraf.Logger) (err // continual auth renewal if set if c.Renewal > 0 { - ticker := time.NewTicker(time.Duration(c.Renewal)) + ticker := clock.Ticker(time.Duration(c.Renewal)) go func() { for range ticker.C { if err := c.auth(); err != nil && log != nil { diff --git a/plugins/common/cookie/cookie_test.go b/plugins/common/cookie/cookie_test.go index 0231e10dd2eda..036ca2b5bb5a7 100644 --- a/plugins/common/cookie/cookie_test.go +++ b/plugins/common/cookie/cookie_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + clockutil "github.com/benbjohnson/clock" "github.com/google/go-cmp/cmp" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/cookie" @@ -121,7 +122,7 @@ func TestAuthConfig_Start(t *testing.T) { fields fields args args wantErr error - assert func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) + assert func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) }{ { name: "zero renewal does not renew", @@ -129,12 +130,11 @@ func TestAuthConfig_Start(t *testing.T) { renewal: 0, endpoint: authEndpointNoCreds, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { // should have Cookie Authed once srv.checkAuthCount(t, 1) srv.checkResp(t, http.StatusOK) - time.Sleep(renewalCheck) - // should have never Cookie Authed again + mock.Add(renewalCheck) srv.checkAuthCount(t, 1) srv.checkResp(t, http.StatusOK) }, @@ -145,13 +145,13 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointNoCreds, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { // should have Cookie Authed once srv.checkAuthCount(t, 1) // default method set require.Equal(t, http.MethodPost, c.Method) srv.checkResp(t, http.StatusOK) - time.Sleep(renewalCheck) + mock.Add(renewalCheck) // should have Cookie Authed at least twice more srv.checkAuthCount(t, 3) srv.checkResp(t, http.StatusOK) @@ -168,11 +168,11 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointWithBasicAuth, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { // should have Cookie Authed once srv.checkAuthCount(t, 1) srv.checkResp(t, http.StatusOK) - time.Sleep(renewalCheck) + mock.Add(renewalCheck) // should have Cookie Authed at least twice more srv.checkAuthCount(t, 3) srv.checkResp(t, http.StatusOK) @@ -190,11 +190,11 @@ func TestAuthConfig_Start(t *testing.T) { endpoint: authEndpointWithBasicAuth, }, wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { // should have never Cookie Authed srv.checkAuthCount(t, 0) srv.checkResp(t, http.StatusForbidden) - time.Sleep(renewalCheck) + mock.Add(renewalCheck) // should have still never Cookie Authed srv.checkAuthCount(t, 0) srv.checkResp(t, http.StatusForbidden) @@ -210,11 +210,11 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointWithBody, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { // should have Cookie Authed once srv.checkAuthCount(t, 1) srv.checkResp(t, http.StatusOK) - time.Sleep(renewalCheck) + mock.Add(renewalCheck) // should have Cookie Authed at least twice more srv.checkAuthCount(t, 3) srv.checkResp(t, http.StatusOK) @@ -231,11 +231,11 @@ func TestAuthConfig_Start(t *testing.T) { endpoint: authEndpointWithBody, }, wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer) { + assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { // should have never Cookie Authed srv.checkAuthCount(t, 0) srv.checkResp(t, http.StatusForbidden) - time.Sleep(renewalCheck) + mock.Add(renewalCheck) // should have still never Cookie Authed srv.checkAuthCount(t, 0) srv.checkResp(t, http.StatusForbidden) @@ -255,15 +255,17 @@ func TestAuthConfig_Start(t *testing.T) { Renewal: config.Duration(tt.args.renewal), } - if err := c.Start(srv.Client(), testutil.Logger{Name: "cookie_auth"}); tt.wantErr != nil { + mock := clockutil.NewMock() + if err := c.Start(srv.Client(), testutil.Logger{Name: "cookie_auth"}, mock); tt.wantErr != nil { require.EqualError(t, err, tt.wantErr.Error()) } else { require.NoError(t, err) } if tt.assert != nil { - tt.assert(t, c, srv) + tt.assert(t, c, srv, mock) } + srv.Close() }) } } diff --git a/plugins/common/http/config.go b/plugins/common/http/config.go index 07b486cba294e..bd6ce4fefa308 100644 --- a/plugins/common/http/config.go +++ b/plugins/common/http/config.go @@ -5,6 +5,7 @@ import ( "net/http" "time" + "github.com/benbjohnson/clock" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/cookie" @@ -54,7 +55,7 @@ func (h *HTTPClientConfig) CreateClient(ctx context.Context, log telegraf.Logger client = h.OAuth2Config.CreateOauth2Client(ctx, client) if h.CookieAuthConfig.URL != "" { - if err := h.CookieAuthConfig.Start(client, log); err != nil { + if err := h.CookieAuthConfig.Start(client, log, clock.New()); err != nil { return nil, err } } From fe144e7c990e03bb4f32e8ae2a3eee24919eafd9 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Tue, 17 Aug 2021 14:54:55 -0700 Subject: [PATCH 567/761] fix: issues with prometheus kubernetes pod discovery (#9605) --- plugins/inputs/prometheus/kubernetes.go | 50 ++++++++++++++----------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index e78c64af3fcd4..0e658003a7122 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -111,35 +111,43 @@ func (p *Prometheus) watchPod(ctx context.Context, client *kubernetes.Clientset) LabelSelector: p.KubernetesLabelSelector, FieldSelector: p.KubernetesFieldSelector, }) + defer watcher.Stop() if err != nil { return err } - pod := &corev1.Pod{} - go func() { - for event := range watcher.ResultChan() { - pod = &corev1.Pod{} - // If the pod is not "ready", there will be no ip associated with it. - if pod.Annotations["prometheus.io/scrape"] != "true" || - !podReady(pod.Status.ContainerStatuses) { - continue - } - switch event.Type { - case watch.Added: - registerPod(pod, p) - case watch.Modified: - // To avoid multiple actions for each event, unregister on the first event - // in the delete sequence, when the containers are still "ready". - if pod.GetDeletionTimestamp() != nil { - unregisterPod(pod, p) - } else { + for { + select { + case <-ctx.Done(): + return nil + default: + for event := range watcher.ResultChan() { + pod, ok := event.Object.(*corev1.Pod) + if !ok { + return fmt.Errorf("Unexpected object when getting pods") + } + + // If the pod is not "ready", there will be no ip associated with it. + if pod.Annotations["prometheus.io/scrape"] != "true" || + !podReady(pod.Status.ContainerStatuses) { + continue + } + + switch event.Type { + case watch.Added: registerPod(pod, p) + case watch.Modified: + // To avoid multiple actions for each event, unregister on the first event + // in the delete sequence, when the containers are still "ready". + if pod.GetDeletionTimestamp() != nil { + unregisterPod(pod, p) + } else { + registerPod(pod, p) + } } } } - }() - - return nil + } } func (p *Prometheus) cAdvisor(ctx context.Context, bearerToken string) error { From ee5c50988a5a09ad1da5362f89c4de04823bb6f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Aug 2021 15:20:17 -0600 Subject: [PATCH 568/761] fix: Bump github.com/aws/aws-sdk-go-v2 from 1.3.2 to 1.8.0 (#9636) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 5 +++-- go.sum | 8 ++++++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 1ec09fe87f486..ca0ef3e401bd9 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -38,6 +38,7 @@ following works: - github.com/aws/aws-sdk-go-v2/credentials [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/ec2/imds [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/ec2/imds/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/s3/manager [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/internal/ini [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/ec2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/accept-encoding/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/presigned-url/LICENSE.txt) diff --git a/go.mod b/go.mod index 0f6bdcd55c1cc..cf18136373821 100644 --- a/go.mod +++ b/go.mod @@ -29,11 +29,12 @@ require ( github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/aws/aws-sdk-go v1.38.69 - github.com/aws/aws-sdk-go-v2 v1.3.2 + github.com/aws/aws-sdk-go-v2 v1.8.0 github.com/aws/aws-sdk-go-v2/config v1.1.5 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 + github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 // indirect github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 - github.com/aws/smithy-go v1.3.1 + github.com/aws/smithy-go v1.7.0 github.com/benbjohnson/clock v1.0.3 github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmatcuk/doublestar/v3 v3.0.0 diff --git a/go.sum b/go.sum index d17f8209df7da..9433c42e6e0ca 100644 --- a/go.sum +++ b/go.sum @@ -228,8 +228,9 @@ github.com/aws/aws-sdk-go v1.38.69 h1:V489lmrdkIQSfF6OAGZZ1Cavcm7eczCm2JcGvX+yHR github.com/aws/aws-sdk-go v1.38.69/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= -github.com/aws/aws-sdk-go-v2 v1.3.2 h1:RQj8l98yKUm0UV2Wd3w/Ms+TXV9Rs1E6Kr5tRRMfyU4= github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= +github.com/aws/aws-sdk-go-v2 v1.8.0 h1:HcN6yDnHV9S7D69E7To0aUppJhiJNEzQSNcUxc7r3qo= +github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= github.com/aws/aws-sdk-go-v2/config v1.1.5 h1:imDWOGwlIrRpHLallJ9mli2SIQ4egtGKtFUFsuGRIaQ= github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= github.com/aws/aws-sdk-go-v2/credentials v1.1.5 h1:R9v/eN5cXv5yMLC619xRYl5PgCSuy5SarizmM7+qqSA= @@ -238,6 +239,8 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 h1:zoOz5V56jO/rGixsCDnrQtAz github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2 h1:Doa5wabOIDA0XZzBX5yCTAPGwDCVZ8Ux0wh29AUDmN4= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 h1:xu45foJnwMwBqSkIMKyJP9kbyHi5hdhZ/WiJ7D2sHZ0= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 h1:+VnEgB1yp+7KlOsk6FXX/v/fU9uL5oSujIMkKQBBmp8= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0/go.mod h1:/6514fU/SRcY3+ousB1zjUqiXjruSuti2qcfE70osOc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4 h1:8yeByqOL6UWBsOOXsHnW93/ukwL66O008tRfxXxnTwA= @@ -254,8 +257,9 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/ github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 h1:fKw6QSGcFlvZCBPYx3fo4sL0HfTmaT06ZtMHJfQQNQQ= github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= -github.com/aws/smithy-go v1.3.1 h1:xJFO4pK0y9J8fCl34uGsSJX5KNnGbdARDlA5BPhXnwE= github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.7.0 h1:+cLHMRrDZvQ4wk+KuQ9yH6eEg6KZEJ9RI2IkDqnygCg= +github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= From 65a7fadaa92b28256517ca4b4d64389f1eac2180 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Aug 2021 15:22:45 -0600 Subject: [PATCH 569/761] fix: Bump github.com/golang/snappy from 0.0.3 to 0.0.4 (#9637) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index cf18136373821..7e2528392d227 100644 --- a/go.mod +++ b/go.mod @@ -64,7 +64,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/protobuf v1.5.2 - github.com/golang/snappy v0.0.3 + github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.5.6 github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.13 diff --git a/go.sum b/go.sum index 9433c42e6e0ca..484da4129fb51 100644 --- a/go.sum +++ b/go.sum @@ -729,8 +729,9 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26W17m+OIQgE6gQ24gC1M6pUaMBAbFrTIDtwG/E= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= From 229b46eb682981375504464fc817a9b44aa28d46 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Aug 2021 15:23:49 -0600 Subject: [PATCH 570/761] fix: Bump github.com/testcontainers/testcontainers-go from 0.11.0 to 0.11.1 (#9638) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 7e2528392d227..b48f4850a1d84 100644 --- a/go.mod +++ b/go.mod @@ -47,7 +47,7 @@ require ( github.com/denisenkom/go-mssqldb v0.10.0 github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 - github.com/docker/docker v20.10.6+incompatible + github.com/docker/docker v20.10.7+incompatible github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0 github.com/eclipse/paho.mqtt.golang v1.3.0 @@ -124,7 +124,7 @@ require ( github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/testcontainers/testcontainers-go v0.11.0 + github.com/testcontainers/testcontainers-go v0.11.1 github.com/tidwall/gjson v1.8.0 github.com/tinylib/msgp v1.1.5 github.com/tklauser/go-sysconf v0.3.5 // indirect diff --git a/go.sum b/go.sum index 484da4129fb51..91bd038463435 100644 --- a/go.sum +++ b/go.sum @@ -461,8 +461,8 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= -github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= +github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -1468,8 +1468,8 @@ github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOs github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/testcontainers/testcontainers-go v0.11.0 h1:HO5YOx2DYBHqcg4MzVWPj3FuHAv7USWVu94vCSsgiaM= -github.com/testcontainers/testcontainers-go v0.11.0/go.mod h1:HztBCODzuA+YpMXGK8amjO8j50jz2gcT0BOzSKUiYIs= +github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= +github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= github.com/tidwall/gjson v1.8.0 h1:Qt+orfosKn0rbNTZqHYDqBrmm3UDA4KRkv70fDzG+PQ= github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= From 47bf30cb1849af52d20d1ce08a3451e81c21bb0d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Aug 2021 15:24:30 -0600 Subject: [PATCH 571/761] fix: Bump github.com/sirupsen/logrus from 1.7.0 to 1.8.1 (#9639) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index b48f4850a1d84..1267d6e431f4b 100644 --- a/go.mod +++ b/go.mod @@ -118,7 +118,7 @@ require ( github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/signalfx/golib/v3 v3.3.34 - github.com/sirupsen/logrus v1.7.0 + github.com/sirupsen/logrus v1.8.1 github.com/sleepinggenius2/gosmi v0.4.3 github.com/snowflakedb/gosnowflake v1.5.0 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 diff --git a/go.sum b/go.sum index 91bd038463435..c638e769fc74b 100644 --- a/go.sum +++ b/go.sum @@ -1415,8 +1415,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sleepinggenius2/gosmi v0.4.3 h1:99Zwzy1Cvgsh396sw07oR2G4ab88ILGZFMxSlGWnR6o= github.com/sleepinggenius2/gosmi v0.4.3/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bTY2CNivIhsnDT0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= From c8a9aa225962a327a26b1ef1c9cc65036fa4707c Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Wed, 18 Aug 2021 18:27:27 -0600 Subject: [PATCH 572/761] Update changelog (cherry picked from commit 25b04d4720926c47eef54e61ce79951fc8b34d49) --- CHANGELOG.md | 23 +++++++++++++++++++++++ etc/telegraf.conf | 12 +++++++++--- 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f7ecd8d59ed24..053e9ee59bbf7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,26 @@ +## v1.19.3 [2021-08-18] + +#### Bugfixes + + - [#9639](https://github.com/influxdata/telegraf/pull/9639) Update sirupsen/logrus module from 1.7.0 to 1.8.1 + - [#9638](https://github.com/influxdata/telegraf/pull/9638) Update testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 + - [#9637](https://github.com/influxdata/telegraf/pull/9637) Update golang/snappy module from 0.0.3 to 0.0.4 + - [#9636](https://github.com/influxdata/telegraf/pull/9636) Update aws/aws-sdk-go-v2 module from 1.3.2 to 1.8.0 + - [#9605](https://github.com/influxdata/telegraf/pull/9605) `inputs.prometheus` Fix prometheus kubernetes pod discovery + - [#9606](https://github.com/influxdata/telegraf/pull/9606) `inputs.redis` Improve redis commands documentation + - [#9566](https://github.com/influxdata/telegraf/pull/9566) `outputs.cratedb` Replace dots in tag keys with underscores + - [#9401](https://github.com/influxdata/telegraf/pull/9401) `inputs.clickhouse` Fix panic, improve handling empty result set + - [#9583](https://github.com/influxdata/telegraf/pull/9583) `inputs.opcua` Avoid closing session on a closed connection + - [#9576](https://github.com/influxdata/telegraf/pull/9576) `processors.aws` Refactor ec2 init for config-api + - [#9571](https://github.com/influxdata/telegraf/pull/9571) `outputs.loki` Sort logs by timestamp before writing to Loki + - [#9524](https://github.com/influxdata/telegraf/pull/9524) `inputs.opcua` Fix reconnection regression introduced in 1.19.1 + - [#9581](https://github.com/influxdata/telegraf/pull/9581) `inputs.kube_inventory` Fix k8s nodes and pods parsing error + - [#9577](https://github.com/influxdata/telegraf/pull/9577) Update sensu/go module to v2.9.0 + - [#9554](https://github.com/influxdata/telegraf/pull/9554) `inputs.postgresql` Normalize unix socket path + - [#9565](https://github.com/influxdata/telegraf/pull/9565) Update hashicorp/consul/api module to 1.9.1 + - [#9552](https://github.com/influxdata/telegraf/pull/9552) `inputs.vsphere` Update vmware/govmomi module to v0.26.0 in order to support vSphere 7.0 + - [#9550](https://github.com/influxdata/telegraf/pull/9550) `inputs.opcua` Do not skip good quality nodes after a bad quality node is encountered + ## v1.19.2 [2021-07-28] #### Release Notes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 6c3c0e98b36bb..c49761c947bc4 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -506,6 +506,8 @@ # table = "metrics" # # If true, and the metrics table does not exist, create it automatically. # table_create = true +# # The character(s) to replace any '.' in an object key with +# key_separator = "_" # # Configuration for DataDog API to send metrics to. @@ -5741,9 +5743,13 @@ # # ## Optional. Specify redis commands to retrieve values # # [[inputs.redis.commands]] -# # command = ["get", "sample-key"] -# # field = "sample-key-value" -# # type = "string" +# # # The command to run where each argument is a separate element +# # command = ["get", "sample-key"] +# # # The field to store the result in +# # field = "sample-key-value" +# # # The type of the result +# # # Can be "string", "integer", or "float" +# # type = "string" # # ## specify server password # # password = "s#cr@t%" From 3d7d5f2b360baa1bb5848eddfea2fffa24096c84 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 19 Aug 2021 15:30:37 -0500 Subject: [PATCH 573/761] fix: prefix dependabot commits with "fix:" (#9641) --- .github/dependabot.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index c1de7d8fd2824..2068f1f06444d 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -7,3 +7,5 @@ updates: ignore: # Dependabot isn't able to update this packages that do not match the source, so anything with a version - dependency-name: "*.v*" + commit-message: + prefix: "fix:" From 34565a303db841c359c5a20fd5909ea58837fa1c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Aug 2021 15:31:04 -0500 Subject: [PATCH 574/761] fix: Bump github.com/aws/aws-sdk-go-v2/config from 1.1.5 to 1.6.0 --- go.mod | 5 ++--- go.sum | 18 ++++++++++++------ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 1267d6e431f4b..de630f9a84fd2 100644 --- a/go.mod +++ b/go.mod @@ -30,9 +30,8 @@ require ( github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/aws/aws-sdk-go v1.38.69 github.com/aws/aws-sdk-go-v2 v1.8.0 - github.com/aws/aws-sdk-go-v2/config v1.1.5 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 - github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.6.0 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 github.com/aws/smithy-go v1.7.0 github.com/benbjohnson/clock v1.0.3 diff --git a/go.sum b/go.sum index c638e769fc74b..1b482f7449834 100644 --- a/go.sum +++ b/go.sum @@ -231,12 +231,15 @@ github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rk github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= github.com/aws/aws-sdk-go-v2 v1.8.0 h1:HcN6yDnHV9S7D69E7To0aUppJhiJNEzQSNcUxc7r3qo= github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= -github.com/aws/aws-sdk-go-v2/config v1.1.5 h1:imDWOGwlIrRpHLallJ9mli2SIQ4egtGKtFUFsuGRIaQ= github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= -github.com/aws/aws-sdk-go-v2/credentials v1.1.5 h1:R9v/eN5cXv5yMLC619xRYl5PgCSuy5SarizmM7+qqSA= +github.com/aws/aws-sdk-go-v2/config v1.6.0 h1:rtoCnNObhVm7me+v9sA2aY+NtHNZjjWWC3ifXVci+wE= +github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= github.com/aws/aws-sdk-go-v2/credentials v1.1.5/go.mod h1:Ir1R6tPiR1/2y1hes8yOijFMz54hzSmgcmCDo6F45Qc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 h1:zoOz5V56jO/rGixsCDnrQtAzYRYM2hGA/43U6jVMFbo= +github.com/aws/aws-sdk-go-v2/credentials v1.3.2 h1:Uud/fZzm0lqqhE8kvXYJFAJ3PGnagKoUcvHq1hXfBZw= +github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 h1:SGqDJun6tydgsSIFxv9+EYBJVqVUwg2QMJp6PbNq8C8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2 h1:Doa5wabOIDA0XZzBX5yCTAPGwDCVZ8Ux0wh29AUDmN4= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 h1:xu45foJnwMwBqSkIMKyJP9kbyHi5hdhZ/WiJ7D2sHZ0= @@ -246,16 +249,19 @@ github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0/go.mod h1:/6514fU/SRcY3+ousB1zjU github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4 h1:8yeByqOL6UWBsOOXsHnW93/ukwL66O008tRfxXxnTwA= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4/go.mod h1:BCfU3Uo2fhKcMZFp9zU5QQGQxqWCOYmZ/27Dju3S/do= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1/go.mod h1:PISaKWylTYAyruocNk4Lr9miOOJjOcVBd7twCPbydDk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6 h1:ldYIsOP4WyjdzW8t6RC/aSieajrlx+3UN3UCZy1KM5Y= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6/go.mod h1:L0KWr0ASo83PRZu9NaZaDsw3koS6PspKv137DMDZjHo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2 h1:Xv1rGYgsRRn0xw9JFNnfpBMZam54PrWpC4rJOJ9koA8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2 h1:aU8H58DoYxNo8R1TaSPTofkuxfQNnoqZmWL+G3+k/vA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2/go.mod h1:nnutjMLuna0s3GVY/MAkpLX03thyNER06gXvnMAPj5g= github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 h1:VbwXUI3L0hyhVmrFxbDxrs6cBX8TNFX0YxCpooMNjvY= github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0/go.mod h1:uwA7gs93Qcss43astPUb1eq4RyceNmYWAQjZFDOAMLo= -github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 h1:B7ec5wE4+3Ldkurmq0C4gfQFtElGTG+/iTpi/YPMzi4= github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/J0tzWCMXHbw6FZ0j1GkWM= -github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 h1:fKw6QSGcFlvZCBPYx3fo4sL0HfTmaT06ZtMHJfQQNQQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.2 h1:b+U3WrF9ON3f32FH19geqmiod4uKcMv/q+wosQjjyyM= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.1 h1:1Pls85C5CFjhE3aH+h85/hyAk89kQNlAWlEQtIkaFyc= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.7.0 h1:+cLHMRrDZvQ4wk+KuQ9yH6eEg6KZEJ9RI2IkDqnygCg= From 3a7d9b6d9801aedeed62ee7f7d5ec5ce9ad608f0 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 23 Aug 2021 15:37:44 -0500 Subject: [PATCH 575/761] fix: support 1.17 & 1.16.7 Go versions (#9642) --- .circleci/config.yml | 86 +- Makefile | 14 +- agent/agent_posix.go | 1 + agent/agent_windows.go | 1 + cmd/telegraf/telegraf_posix.go | 1 + cmd/telegraf/telegraf_windows.go | 1 + docs/LICENSE_OF_DEPENDENCIES.md | 1 - go.mod | 181 +- go.sum | 40 +- internal/exec_unix.go | 1 + internal/exec_windows.go | 1 + internal/globpath/globpath_test.go | 1 + internal/goplugin/noplugin.go | 1 + internal/goplugin/plugin.go | 1 + internal/process/process_posix.go | 1 + internal/process/process_test.go | 1 + internal/process/process_windows.go | 1 + internal/usage.go | 1 + internal/usage_windows.go | 1 + logger/event_logger.go | 3 +- logger/event_logger_test.go | 3 +- plugins/inputs/bcache/bcache.go | 1 + plugins/inputs/bcache/bcache_test.go | 1 + plugins/inputs/bcache/bcache_windows.go | 1 + plugins/inputs/cgroup/cgroup_linux.go | 1 + plugins/inputs/cgroup/cgroup_notlinux.go | 1 + plugins/inputs/cgroup/cgroup_test.go | 1 + plugins/inputs/conntrack/conntrack.go | 1 + .../inputs/conntrack/conntrack_notlinux.go | 1 + plugins/inputs/conntrack/conntrack_test.go | 1 + plugins/inputs/diskio/diskio_linux_test.go | 1 + plugins/inputs/diskio/diskio_other.go | 1 + plugins/inputs/dmcache/dmcache_linux.go | 1 + plugins/inputs/dmcache/dmcache_linux_test.go | 1 + plugins/inputs/dmcache/dmcache_notlinux.go | 1 + plugins/inputs/dpdk/dpdk.go | 1 + plugins/inputs/dpdk/dpdk_connector.go | 1 + plugins/inputs/dpdk/dpdk_connector_test.go | 1 + plugins/inputs/dpdk/dpdk_notlinux.go | 1 + plugins/inputs/dpdk/dpdk_test.go | 1 + plugins/inputs/dpdk/dpdk_utils.go | 1 + plugins/inputs/dpdk/dpdk_utils_test.go | 1 + plugins/inputs/ethtool/ethtool_linux.go | 1 + plugins/inputs/ethtool/ethtool_notlinux.go | 1 + plugins/inputs/ethtool/ethtool_test.go | 1 + plugins/inputs/exec/exec_test.go | 1 + plugins/inputs/execd/execd_posix.go | 1 + plugins/inputs/execd/execd_windows.go | 1 + plugins/inputs/execd/shim/goshim_posix.go | 1 + plugins/inputs/execd/shim/goshim_windows.go | 1 + plugins/inputs/execd/shim/shim_posix_test.go | 1 + plugins/inputs/file/file_test.go | 1 + plugins/inputs/filecount/filecount_test.go | 1 + .../filecount/filesystem_helpers_test.go | 1 + plugins/inputs/filestat/filestat_test.go | 1 + .../http_response/http_response_test.go | 1 + plugins/inputs/infiniband/infiniband_linux.go | 1 + .../inputs/infiniband/infiniband_notlinux.go | 1 + plugins/inputs/infiniband/infiniband_test.go | 1 + plugins/inputs/intel_powerstat/file.go | 1 + .../inputs/intel_powerstat/intel_powerstat.go | 1 + .../intel_powerstat_notlinux.go | 1 + .../intel_powerstat/intel_powerstat_test.go | 1 + plugins/inputs/intel_powerstat/msr.go | 1 + plugins/inputs/intel_powerstat/msr_test.go | 1 + plugins/inputs/intel_powerstat/rapl.go | 1 + plugins/inputs/intel_powerstat/rapl_test.go | 1 + .../inputs/intel_powerstat/unit_converter.go | 1 + plugins/inputs/intel_rdt/intel_rdt.go | 1 + plugins/inputs/intel_rdt/intel_rdt_test.go | 1 + plugins/inputs/intel_rdt/intel_rdt_windows.go | 1 + plugins/inputs/intel_rdt/processes.go | 1 + plugins/inputs/intel_rdt/publisher.go | 1 + plugins/inputs/intel_rdt/publisher_test.go | 1 + plugins/inputs/iptables/iptables.go | 1 + plugins/inputs/iptables/iptables_nocompile.go | 1 + plugins/inputs/iptables/iptables_test.go | 1 + plugins/inputs/ipvs/ipvs.go | 1 + plugins/inputs/ipvs/ipvs_notlinux.go | 1 + plugins/inputs/kernel/kernel.go | 1 + plugins/inputs/kernel/kernel_notlinux.go | 1 + plugins/inputs/kernel/kernel_test.go | 1 + plugins/inputs/kernel_vmstat/kernel_vmstat.go | 1 + .../kernel_vmstat/kernel_vmstat_notlinux.go | 1 + .../kernel_vmstat/kernel_vmstat_test.go | 1 + plugins/inputs/logparser/logparser.go | 1 + plugins/inputs/logparser/logparser_solaris.go | 1 + plugins/inputs/lustre2/lustre2.go | 1 + plugins/inputs/lustre2/lustre2_test.go | 1 + plugins/inputs/lustre2/lustre2_windows.go | 1 + plugins/inputs/mongodb/mongodb_server_test.go | 1 + plugins/inputs/mongodb/mongodb_test.go | 1 + plugins/inputs/nats/nats.go | 1 + plugins/inputs/nats/nats_freebsd.go | 1 + plugins/inputs/nats/nats_test.go | 1 + plugins/inputs/phpfpm/phpfpm_test.go | 1 + plugins/inputs/ping/ping_notwindows.go | 1 + plugins/inputs/ping/ping_test.go | 1 + plugins/inputs/ping/ping_windows.go | 1 + plugins/inputs/ping/ping_windows_test.go | 1 + plugins/inputs/postfix/postfix.go | 1 + plugins/inputs/postfix/postfix_test.go | 1 + plugins/inputs/postfix/postfix_windows.go | 1 + plugins/inputs/postfix/stat_ctim.go | 1 + plugins/inputs/postfix/stat_ctimespec.go | 1 + plugins/inputs/postfix/stat_none.go | 1 + .../inputs/processes/processes_notwindows.go | 1 + plugins/inputs/processes/processes_test.go | 1 + plugins/inputs/processes/processes_windows.go | 1 + .../procstat/native_finder_notwindows.go | 1 + .../inputs/procstat/win_service_notwindows.go | 1 + .../inputs/procstat/win_service_windows.go | 1 + plugins/inputs/ras/ras.go | 1 + plugins/inputs/ras/ras_notlinux.go | 1 + plugins/inputs/ras/ras_test.go | 1 + .../inputs/rethinkdb/rethinkdb_server_test.go | 1 + plugins/inputs/rethinkdb/rethinkdb_test.go | 1 + .../riemann_listener/riemann_listener.go | 2 +- plugins/inputs/sensors/sensors.go | 1 + plugins/inputs/sensors/sensors_notlinux.go | 1 + plugins/inputs/sensors/sensors_test.go | 1 + plugins/inputs/snmp/snmp_mocks_generate.go | 1 + plugins/inputs/sql/drivers_sqlite.go | 5 +- plugins/inputs/synproxy/synproxy_linux.go | 1 + plugins/inputs/synproxy/synproxy_notlinux.go | 1 + plugins/inputs/synproxy/synproxy_test.go | 1 + plugins/inputs/sysstat/sysstat.go | 1 + .../inputs/sysstat/sysstat_interval_test.go | 4 +- plugins/inputs/sysstat/sysstat_notlinux.go | 1 + plugins/inputs/sysstat/sysstat_test.go | 1 + .../systemd_units/systemd_units_notlinux.go | 1 + plugins/inputs/tail/tail.go | 1 + plugins/inputs/tail/tail_solaris.go | 1 + plugins/inputs/varnish/varnish.go | 1 + plugins/inputs/varnish/varnish_test.go | 1 + plugins/inputs/varnish/varnish_windows.go | 1 + plugins/inputs/win_eventlog/event.go | 3 +- .../inputs/win_eventlog/syscall_windows.go | 3 +- plugins/inputs/win_eventlog/util.go | 3 +- plugins/inputs/win_eventlog/util_test.go | 3 +- plugins/inputs/win_eventlog/win_eventlog.go | 3 +- .../win_eventlog/win_eventlog_notwindows.go | 1 + .../inputs/win_eventlog/win_eventlog_test.go | 3 +- .../inputs/win_eventlog/zsyscall_windows.go | 3 +- plugins/inputs/win_perf_counters/kernel32.go | 1 + plugins/inputs/win_perf_counters/pdh.go | 1 + plugins/inputs/win_perf_counters/pdh_386.go | 1 + plugins/inputs/win_perf_counters/pdh_amd64.go | 1 + .../win_perf_counters/performance_query.go | 1 + .../win_perf_counters/win_perf_counters.go | 1 + .../win_perf_counters_integration_test.go | 1 + .../win_perf_counters_notwindows.go | 1 + .../win_perf_counters_test.go | 1 + plugins/inputs/win_services/win_services.go | 1 + .../win_services_integration_test.go | 1 + .../win_services/win_services_notwindows.go | 1 + .../inputs/win_services/win_services_test.go | 1 + plugins/inputs/wireless/wireless_linux.go | 1 + plugins/inputs/wireless/wireless_notlinux.go | 1 + plugins/inputs/wireless/wireless_test.go | 1 + plugins/inputs/zfs/zfs_freebsd.go | 1 + plugins/inputs/zfs/zfs_freebsd_test.go | 1 + plugins/inputs/zfs/zfs_linux.go | 1 + plugins/inputs/zfs/zfs_linux_test.go | 1 + plugins/inputs/zfs/zfs_other.go | 1 + plugins/outputs/sql/sqlite.go | 5 +- plugins/outputs/sql/sqlite_test.go | 4 +- plugins/parsers/influx/machine.go | 3129 +++++++++-------- plugins/processors/filepath/filepath_test.go | 1 + plugins/processors/port_name/services_path.go | 1 + .../port_name/services_path_notwindows.go | 1 + scripts/alpine.docker | 2 +- scripts/buster.docker | 2 +- scripts/ci-1.16.docker | 2 +- scripts/{ci-1.15.docker => ci-1.17.docker} | 2 +- scripts/mac_installgo.sh | 4 +- 176 files changed, 2167 insertions(+), 1494 deletions(-) rename scripts/{ci-1.15.docker => ci-1.17.docker} (95%) diff --git a/.circleci/config.yml b/.circleci/config.yml index 010c54a0fedfd..3daec86da98b4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,16 +4,16 @@ orbs: aws-cli: circleci/aws-cli@1.4.0 executors: - go-1_15: + go-1_16: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.15.8' + - image: 'quay.io/influxdb/telegraf-ci:1.16.7' environment: GOFLAGS: -p=8 - go-1_16: + go-1_17: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.16.6' + - image: 'quay.io/influxdb/telegraf-ci:1.17.0' environment: GOFLAGS: -p=8 mac: @@ -88,7 +88,7 @@ commands: - 'dist' jobs: deps: - executor: go-1_16 + executor: go-1_17 steps: - checkout - restore_cache: @@ -105,21 +105,21 @@ jobs: root: '/go' paths: - '*' - test-go-1_15: - executor: go-1_15 + test-go-1_16: + executor: go-1_16 steps: - test-go - test-go-1_15-386: - executor: go-1_15 + test-go-1_16-386: + executor: go-1_16 steps: - test-go: goarch: "386" - test-go-1_16: - executor: go-1_16 + test-go-1_17: + executor: go-1_17 steps: - test-go - test-go-1_16-386: - executor: go-1_16 + test-go-1_17-386: + executor: go-1_17 steps: - test-go: goarch: "386" @@ -150,79 +150,79 @@ jobs: steps: - checkout - check-changed-files-or-halt-windows - - run: choco upgrade golang --version=1.16.6 + - run: choco upgrade golang --version=1.17.0 - run: choco install make - run: git config --system core.longpaths true - run: make test-windows windows-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: windows darwin-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: darwin i386-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: i386 ppc641e-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: ppc641e s390x-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: s390x armel-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: armel amd64-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: amd64 arm64-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: arm64 mipsel-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: mipsel mips-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: mips static-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: static armhf-package: - executor: go-1_16 + executor: go-1_17 steps: - package-build: type: armhf release: - executor: go-1_16 + executor: go-1_17 steps: - package-build: release: true nightly: - executor: go-1_16 + executor: go-1_17 steps: - package-build: nightly: true @@ -277,7 +277,7 @@ jobs: path: './dist' destination: 'build/dist' test-awaiter: - executor: go-1_16 + executor: go-1_17 steps: - run: command: | @@ -299,25 +299,25 @@ workflows: filters: tags: only: /.*/ - - 'test-go-1_15': + - 'test-go-1_16': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1_15-386': + - 'test-go-1_16-386': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1_16': + - 'test-go-1_17': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1_16-386': + - 'test-go-1_17-386': requires: - 'deps' filters: @@ -333,10 +333,10 @@ workflows: only: /.*/ - 'test-awaiter': requires: - - 'test-go-1_15' - - 'test-go-1_15-386' - 'test-go-1_16' - 'test-go-1_16-386' + - 'test-go-1_17' + - 'test-go-1_17-386' - 'windows-package': requires: - 'test-go-windows' @@ -395,10 +395,10 @@ workflows: requires: - 'test-go-windows' - 'test-go-mac' - - 'test-go-1_15' - - 'test-go-1_15-386' - 'test-go-1_16' - 'test-go-1_16-386' + - 'test-go-1_17' + - 'test-go-1_17-386' filters: tags: only: /.*/ @@ -420,16 +420,16 @@ workflows: nightly: jobs: - 'deps' - - 'test-go-1_15': + - 'test-go-1_16': requires: - 'deps' - - 'test-go-1_15-386': + - 'test-go-1_16-386': requires: - 'deps' - - 'test-go-1_16': + - 'test-go-1_17': requires: - 'deps' - - 'test-go-1_16-386': + - 'test-go-1_17-386': requires: - 'deps' - 'test-go-mac' @@ -438,10 +438,10 @@ workflows: requires: - 'test-go-windows' - 'test-go-mac' - - 'test-go-1_15' - - 'test-go-1_15-386' - 'test-go-1_16' - 'test-go-1_16-386' + - 'test-go-1_17' + - 'test-go-1_17-386' triggers: - schedule: cron: "0 7 * * *" diff --git a/Makefile b/Makefile index 5cf7d2383604f..f0bb01dd2a35e 100644 --- a/Makefile +++ b/Makefile @@ -194,15 +194,15 @@ plugin-%: @echo "Starting dev environment for $${$(@)} input plugin..." @docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up -.PHONY: ci-1.15 -ci-1.15: - docker build -t quay.io/influxdb/telegraf-ci:1.15.8 - < scripts/ci-1.15.docker - docker push quay.io/influxdb/telegraf-ci:1.15.8 - .PHONY: ci-1.16 ci-1.16: - docker build -t quay.io/influxdb/telegraf-ci:1.16.6 - < scripts/ci-1.16.docker - docker push quay.io/influxdb/telegraf-ci:1.16.6 + docker build -t quay.io/influxdb/telegraf-ci:1.16.7 - < scripts/ci-1.16.docker + docker push quay.io/influxdb/telegraf-ci:1.16.7 + +.PHONY: ci-1.17 +ci-1.17: + docker build -t quay.io/influxdb/telegraf-ci:1.17.0 - < scripts/ci-1.17.docker + docker push quay.io/influxdb/telegraf-ci:1.17.0 .PHONY: install install: $(buildbin) diff --git a/agent/agent_posix.go b/agent/agent_posix.go index 09552cac07026..e43c3a7817a88 100644 --- a/agent/agent_posix.go +++ b/agent/agent_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package agent diff --git a/agent/agent_windows.go b/agent/agent_windows.go index 94ed9d006acb2..3196dc70e78e2 100644 --- a/agent/agent_windows.go +++ b/agent/agent_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package agent diff --git a/cmd/telegraf/telegraf_posix.go b/cmd/telegraf/telegraf_posix.go index a2d6b1e4e365c..21ad935b7147e 100644 --- a/cmd/telegraf/telegraf_posix.go +++ b/cmd/telegraf/telegraf_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package main diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go index 8bd14d64eaa19..38222f2d0871d 100644 --- a/cmd/telegraf/telegraf_windows.go +++ b/cmd/telegraf/telegraf_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package main diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index ca0ef3e401bd9..1ec09fe87f486 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -38,7 +38,6 @@ following works: - github.com/aws/aws-sdk-go-v2/credentials [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/ec2/imds [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/ec2/imds/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/feature/s3/manager [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) -- github.com/aws/aws-sdk-go-v2/internal/ini [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/ec2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/accept-encoding/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/presigned-url/LICENSE.txt) diff --git a/go.mod b/go.mod index de630f9a84fd2..f09d594111d42 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/influxdata/telegraf -go 1.16 +go 1.17 require ( cloud.google.com/go v0.56.0 @@ -8,73 +8,137 @@ require ( cloud.google.com/go/pubsub v1.2.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.5.0 + github.com/Azure/azure-amqp-common-go/v3 v3.0.0 // indirect github.com/Azure/azure-event-hubs-go/v3 v3.2.0 github.com/Azure/azure-kusto-go v0.3.2 + github.com/Azure/azure-pipeline-go v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go v45.1.0+incompatible // indirect + github.com/Azure/azure-storage-blob-go v0.13.0 // indirect github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd + github.com/Azure/go-amqp v0.12.6 // indirect + github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.17 github.com/Azure/go-autorest/autorest/adal v0.9.10 github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 + github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect + github.com/Azure/go-autorest/logger v0.2.0 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/BurntSushi/toml v0.3.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee + github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 // indirect + github.com/Microsoft/hcsshim v0.8.16 // indirect github.com/Shopify/sarama v1.27.2 github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect github.com/aerospike/aerospike-client-go v1.27.0 + github.com/alecthomas/participle v0.4.1 // indirect github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 github.com/antchfx/jsonquery v1.1.4 github.com/antchfx/xmlquery v1.3.5 github.com/antchfx/xpath v1.1.11 + github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 // indirect github.com/apache/thrift v0.13.0 github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 + github.com/armon/go-metrics v0.3.3 // indirect github.com/aws/aws-sdk-go v1.38.69 - github.com/aws/aws-sdk-go-v2 v1.8.0 - github.com/aws/aws-sdk-go-v2/config v1.6.0 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 + github.com/aws/aws-sdk-go-v2 v1.3.2 + github.com/aws/aws-sdk-go-v2/config v1.1.5 + github.com/aws/aws-sdk-go-v2/credentials v1.1.5 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2 // indirect github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 - github.com/aws/smithy-go v1.7.0 + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 // indirect + github.com/aws/smithy-go v1.3.1 github.com/benbjohnson/clock v1.0.3 + github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmatcuk/doublestar/v3 v3.0.0 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 github.com/caio/go-tdigest v3.1.0+incompatible + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 + github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 // indirect + github.com/containerd/containerd v1.5.0-beta.4 // indirect + github.com/coreos/go-semver v0.3.0 // indirect github.com/couchbase/go-couchbase v0.1.0 github.com/couchbase/gomemcached v0.1.3 // indirect github.com/couchbase/goutils v0.1.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/denisenkom/go-mssqldb v0.10.0 + github.com/devigned/tab v0.1.1 // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 - github.com/docker/docker v20.10.7+incompatible + github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/docker v20.10.6+incompatible + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.4.0 // indirect github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0 + github.com/eapache/go-resiliency v1.2.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/echlebek/timeproxy v1.0.0 // indirect github.com/eclipse/paho.mqtt.golang v1.3.0 + github.com/fatih/color v1.9.0 // indirect github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.5.0 + github.com/go-logr/logr v0.4.0 // indirect + github.com/go-ole/go-ole v1.2.4 // indirect github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible github.com/go-sql-driver/mysql v1.6.0 + github.com/go-stack/stack v1.8.0 // indirect github.com/goburrow/modbus v0.1.0 // indirect github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v3.3.0+incompatible + github.com/gogo/googleapis v1.4.0 // indirect github.com/gogo/protobuf v1.3.2 + github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect github.com/golang/protobuf v1.5.2 - github.com/golang/snappy v0.0.4 + github.com/golang/snappy v0.0.3 + github.com/google/flatbuffers v1.11.0 // indirect github.com/google/go-cmp v0.5.6 github.com/google/go-github/v32 v32.1.0 + github.com/google/go-querystring v1.0.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.2.0 // indirect + github.com/googleapis/gax-go/v2 v2.0.5 // indirect + github.com/googleapis/gnostic v0.4.1 // indirect github.com/gopcua/opcua v0.1.13 github.com/gorilla/mux v1.7.3 github.com/gorilla/websocket v1.4.2 github.com/gosnmp/gosnmp v1.32.0 github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b + github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 github.com/hashicorp/consul/api v1.9.1 + github.com/hashicorp/go-cleanhttp v0.5.1 // indirect + github.com/hashicorp/go-hclog v0.12.2 // indirect + github.com/hashicorp/go-immutable-radix v1.2.0 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/serf v0.9.5 // indirect github.com/influxdata/go-syslog/v3 v3.0.0 github.com/influxdata/influxdb-observability/common v0.2.4 github.com/influxdata/influxdb-observability/influx2otel v0.2.4 @@ -82,88 +146,187 @@ require ( github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.5.0 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.0.1 // indirect + github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 // indirect + github.com/jackc/pgtype v1.3.0 // indirect github.com/jackc/pgx/v4 v4.6.0 + github.com/jaegertracing/jaeger v1.15.1 // indirect github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a + github.com/jcmturner/gofork v1.0.0 // indirect github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca github.com/jmespath/go-jmespath v0.4.0 + github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.10 // indirect + github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/klauspost/compress v1.13.1 // indirect + github.com/kr/pretty v0.2.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect github.com/lib/pq v1.3.0 // indirect + github.com/mailru/easyjson v0.7.1 // indirect + github.com/mattn/go-colorable v0.1.6 // indirect + github.com/mattn/go-ieproxy v0.0.1 // indirect + github.com/mattn/go-isatty v0.0.12 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b + github.com/mdlayher/genetlink v1.0.0 // indirect + github.com/mdlayher/netlink v1.1.0 // indirect github.com/microsoft/ApplicationInsights-Go v0.4.4 github.com/miekg/dns v1.1.31 + github.com/minio/highwayhash v1.0.1 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.2.2 // indirect github.com/moby/ipvs v1.0.1 + github.com/moby/sys/mount v0.2.0 // indirect + github.com/moby/sys/mountinfo v0.4.1 // indirect + github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/morikuni/aec v1.0.0 // indirect github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect + github.com/nats-io/jwt/v2 v2.0.2 // indirect github.com/nats-io/nats-server/v2 v2.2.6 github.com/nats-io/nats.go v1.11.0 + github.com/nats-io/nkeys v0.3.0 // indirect + github.com/nats-io/nuid v1.0.1 // indirect github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 github.com/nsqio/go-nsq v1.0.8 github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/opencontainers/runc v1.0.0-rc93 // indirect + github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go-opentracing v0.3.4 + github.com/philhofer/fwd v1.1.1 // indirect + github.com/pierrec/lz4 v2.5.2+incompatible // indirect github.com/pion/dtls/v2 v2.0.9 + github.com/pion/logging v0.2.2 // indirect + github.com/pion/transport v0.12.3 // indirect + github.com/pion/udp v0.1.1 // indirect + github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 // indirect github.com/pkg/errors v0.9.1 + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.7.1 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.15.0 github.com/prometheus/procfs v0.6.0 github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 + github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/riemann/riemann-go-client v0.5.0 + github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 + github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e // indirect + github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/sensu/sensu-go/api/core/v2 v2.9.0 github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect + github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect + github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect github.com/signalfx/golib/v3 v3.3.34 - github.com/sirupsen/logrus v1.8.1 + github.com/signalfx/sapm-proto v0.4.0 // indirect + github.com/sirupsen/logrus v1.7.0 github.com/sleepinggenius2/gosmi v0.4.3 github.com/snowflakedb/gosnowflake v1.5.0 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 + github.com/stretchr/objx v0.2.0 // indirect github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/testcontainers/testcontainers-go v0.11.1 + github.com/testcontainers/testcontainers-go v0.11.0 github.com/tidwall/gjson v1.8.0 + github.com/tidwall/match v1.0.3 // indirect + github.com/tidwall/pretty v1.1.0 // indirect github.com/tinylib/msgp v1.1.5 github.com/tklauser/go-sysconf v0.3.5 // indirect + github.com/tklauser/numcpus v0.2.2 // indirect github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 + github.com/vishvananda/netlink v1.1.0 // indirect + github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect github.com/vjeantet/grok v1.0.1 github.com/vmware/govmomi v0.26.0 github.com/wavefronthq/wavefront-sdk-go v0.9.7 github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.0.2 // indirect + github.com/xdg-go/stringprep v1.0.2 // indirect github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c + github.com/xdg/stringprep v1.0.0 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect + go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.mongodb.org/mongo-driver v1.5.3 + go.opencensus.io v0.22.3 // indirect go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c go.starlark.net v0.0.0-20210406145628-7a1108eaa012 + go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect + golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20210610132358-84b48f89b13b golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect golang.org/x/text v0.3.6 + golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect golang.org/x/tools v0.1.2 + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + golang.zx2c4.com/wireguard v0.0.20200121 // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.29.0 + google.golang.org/appengine v1.6.6 // indirect google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 google.golang.org/grpc v1.39.0 google.golang.org/protobuf v1.27.1 + gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/djherbis/times.v1 v1.2.0 gopkg.in/fatih/pool.v2 v2.0.0 // indirect + gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/gorethink/gorethink.v3 v3.0.5 + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.42.0 // indirect + gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect + gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect + gopkg.in/jcmturner/gokrb5.v7 v7.5.0 // indirect + gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect gopkg.in/ldap.v3 v3.1.0 gopkg.in/olivere/elastic.v5 v5.0.70 + gopkg.in/sourcemap.v1 v1.0.5 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 + gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gotest.tools v2.2.0+incompatible + honnef.co/go/tools v0.0.1-2020.1.4 // indirect k8s.io/api v0.20.4 k8s.io/apimachinery v0.21.1 k8s.io/client-go v0.20.4 + k8s.io/klog/v2 v2.8.0 // indirect + k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect + modernc.org/cc/v3 v3.33.5 // indirect + modernc.org/ccgo/v3 v3.9.4 // indirect + modernc.org/libc v1.9.5 // indirect + modernc.org/mathutil v1.2.2 // indirect + modernc.org/memory v1.0.4 // indirect + modernc.org/opt v0.1.1 // indirect modernc.org/sqlite v1.10.8 + modernc.org/strutil v1.1.0 // indirect + modernc.org/token v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.1.0 // indirect + sigs.k8s.io/yaml v1.2.0 // indirect ) // replaced due to https://github.com/satori/go.uuid/issues/73 diff --git a/go.sum b/go.sum index 1b482f7449834..d17f8209df7da 100644 --- a/go.sum +++ b/go.sum @@ -228,44 +228,34 @@ github.com/aws/aws-sdk-go v1.38.69 h1:V489lmrdkIQSfF6OAGZZ1Cavcm7eczCm2JcGvX+yHR github.com/aws/aws-sdk-go v1.38.69/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= +github.com/aws/aws-sdk-go-v2 v1.3.2 h1:RQj8l98yKUm0UV2Wd3w/Ms+TXV9Rs1E6Kr5tRRMfyU4= github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= -github.com/aws/aws-sdk-go-v2 v1.8.0 h1:HcN6yDnHV9S7D69E7To0aUppJhiJNEzQSNcUxc7r3qo= -github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2/config v1.1.5 h1:imDWOGwlIrRpHLallJ9mli2SIQ4egtGKtFUFsuGRIaQ= github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= -github.com/aws/aws-sdk-go-v2/config v1.6.0 h1:rtoCnNObhVm7me+v9sA2aY+NtHNZjjWWC3ifXVci+wE= -github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= +github.com/aws/aws-sdk-go-v2/credentials v1.1.5 h1:R9v/eN5cXv5yMLC619xRYl5PgCSuy5SarizmM7+qqSA= github.com/aws/aws-sdk-go-v2/credentials v1.1.5/go.mod h1:Ir1R6tPiR1/2y1hes8yOijFMz54hzSmgcmCDo6F45Qc= -github.com/aws/aws-sdk-go-v2/credentials v1.3.2 h1:Uud/fZzm0lqqhE8kvXYJFAJ3PGnagKoUcvHq1hXfBZw= -github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6 h1:zoOz5V56jO/rGixsCDnrQtAzYRYM2hGA/43U6jVMFbo= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 h1:SGqDJun6tydgsSIFxv9+EYBJVqVUwg2QMJp6PbNq8C8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2 h1:Doa5wabOIDA0XZzBX5yCTAPGwDCVZ8Ux0wh29AUDmN4= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 h1:xu45foJnwMwBqSkIMKyJP9kbyHi5hdhZ/WiJ7D2sHZ0= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 h1:+VnEgB1yp+7KlOsk6FXX/v/fU9uL5oSujIMkKQBBmp8= github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0/go.mod h1:/6514fU/SRcY3+ousB1zjUqiXjruSuti2qcfE70osOc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4 h1:8yeByqOL6UWBsOOXsHnW93/ukwL66O008tRfxXxnTwA= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4/go.mod h1:BCfU3Uo2fhKcMZFp9zU5QQGQxqWCOYmZ/27Dju3S/do= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1/go.mod h1:PISaKWylTYAyruocNk4Lr9miOOJjOcVBd7twCPbydDk= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6 h1:ldYIsOP4WyjdzW8t6RC/aSieajrlx+3UN3UCZy1KM5Y= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6/go.mod h1:L0KWr0ASo83PRZu9NaZaDsw3koS6PspKv137DMDZjHo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2 h1:Xv1rGYgsRRn0xw9JFNnfpBMZam54PrWpC4rJOJ9koA8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2 h1:aU8H58DoYxNo8R1TaSPTofkuxfQNnoqZmWL+G3+k/vA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2/go.mod h1:nnutjMLuna0s3GVY/MAkpLX03thyNER06gXvnMAPj5g= github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 h1:VbwXUI3L0hyhVmrFxbDxrs6cBX8TNFX0YxCpooMNjvY= github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0/go.mod h1:uwA7gs93Qcss43astPUb1eq4RyceNmYWAQjZFDOAMLo= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 h1:B7ec5wE4+3Ldkurmq0C4gfQFtElGTG+/iTpi/YPMzi4= github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/J0tzWCMXHbw6FZ0j1GkWM= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.2 h1:b+U3WrF9ON3f32FH19geqmiod4uKcMv/q+wosQjjyyM= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= +github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 h1:fKw6QSGcFlvZCBPYx3fo4sL0HfTmaT06ZtMHJfQQNQQ= github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.1 h1:1Pls85C5CFjhE3aH+h85/hyAk89kQNlAWlEQtIkaFyc= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= +github.com/aws/smithy-go v1.3.1 h1:xJFO4pK0y9J8fCl34uGsSJX5KNnGbdARDlA5BPhXnwE= github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.7.0 h1:+cLHMRrDZvQ4wk+KuQ9yH6eEg6KZEJ9RI2IkDqnygCg= -github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -467,8 +457,8 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= -github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= +github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -735,9 +725,8 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26W17m+OIQgE6gQ24gC1M6pUaMBAbFrTIDtwG/E= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= @@ -1421,9 +1410,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sleepinggenius2/gosmi v0.4.3 h1:99Zwzy1Cvgsh396sw07oR2G4ab88ILGZFMxSlGWnR6o= github.com/sleepinggenius2/gosmi v0.4.3/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bTY2CNivIhsnDT0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -1475,8 +1463,8 @@ github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOs github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= -github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= +github.com/testcontainers/testcontainers-go v0.11.0 h1:HO5YOx2DYBHqcg4MzVWPj3FuHAv7USWVu94vCSsgiaM= +github.com/testcontainers/testcontainers-go v0.11.0/go.mod h1:HztBCODzuA+YpMXGK8amjO8j50jz2gcT0BOzSKUiYIs= github.com/tidwall/gjson v1.8.0 h1:Qt+orfosKn0rbNTZqHYDqBrmm3UDA4KRkv70fDzG+PQ= github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= diff --git a/internal/exec_unix.go b/internal/exec_unix.go index 60b606cfb5f32..0f5d3fca037db 100644 --- a/internal/exec_unix.go +++ b/internal/exec_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package internal diff --git a/internal/exec_windows.go b/internal/exec_windows.go index 7bab1baf3ac3f..708051dda3a2c 100644 --- a/internal/exec_windows.go +++ b/internal/exec_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package internal diff --git a/internal/globpath/globpath_test.go b/internal/globpath/globpath_test.go index 33779f912a027..bc286bc75419e 100644 --- a/internal/globpath/globpath_test.go +++ b/internal/globpath/globpath_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/internal/goplugin/noplugin.go b/internal/goplugin/noplugin.go index 089972d465196..65fcee418e388 100644 --- a/internal/goplugin/noplugin.go +++ b/internal/goplugin/noplugin.go @@ -1,3 +1,4 @@ +//go:build !goplugin // +build !goplugin package goplugin diff --git a/internal/goplugin/plugin.go b/internal/goplugin/plugin.go index 7e58ec32e92c2..3af051833b6a7 100644 --- a/internal/goplugin/plugin.go +++ b/internal/goplugin/plugin.go @@ -1,3 +1,4 @@ +//go:build goplugin // +build goplugin package goplugin diff --git a/internal/process/process_posix.go b/internal/process/process_posix.go index 7b42b7da13214..8f736bc673592 100644 --- a/internal/process/process_posix.go +++ b/internal/process/process_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package process diff --git a/internal/process/process_test.go b/internal/process/process_test.go index b9cad3598ce13..228f2f1e1b28d 100644 --- a/internal/process/process_test.go +++ b/internal/process/process_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package process diff --git a/internal/process/process_windows.go b/internal/process/process_windows.go index 0995d52469b07..3aefd20f4aa9c 100644 --- a/internal/process/process_windows.go +++ b/internal/process/process_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package process diff --git a/internal/usage.go b/internal/usage.go index 1a4b3a3496281..916b5cb86e908 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package internal diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 236e1426b345c..9a1169851cd74 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package internal diff --git a/logger/event_logger.go b/logger/event_logger.go index 44d5bce656a04..bb0672de76c5c 100644 --- a/logger/event_logger.go +++ b/logger/event_logger.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows package logger diff --git a/logger/event_logger_test.go b/logger/event_logger_test.go index 05c27b1757e87..d268252779867 100644 --- a/logger/event_logger_test.go +++ b/logger/event_logger_test.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows package logger diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 8c21c701f3da3..3195cf4dabcbb 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // bcache doesn't aim for Windows diff --git a/plugins/inputs/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go index b9d786fa91bec..857538a8d6f72 100644 --- a/plugins/inputs/bcache/bcache_test.go +++ b/plugins/inputs/bcache/bcache_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package bcache diff --git a/plugins/inputs/bcache/bcache_windows.go b/plugins/inputs/bcache/bcache_windows.go index 9a580cc940106..faeba8888bb3b 100644 --- a/plugins/inputs/bcache/bcache_windows.go +++ b/plugins/inputs/bcache/bcache_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package bcache diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index 43aa68f233fc1..d1eda6e7a3b07 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package cgroup diff --git a/plugins/inputs/cgroup/cgroup_notlinux.go b/plugins/inputs/cgroup/cgroup_notlinux.go index 2bc227410a6e2..1c9c08ec41ac5 100644 --- a/plugins/inputs/cgroup/cgroup_notlinux.go +++ b/plugins/inputs/cgroup/cgroup_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package cgroup diff --git a/plugins/inputs/cgroup/cgroup_test.go b/plugins/inputs/cgroup/cgroup_test.go index bd7a191b31df7..ba74247eeb1f3 100644 --- a/plugins/inputs/cgroup/cgroup_test.go +++ b/plugins/inputs/cgroup/cgroup_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package cgroup diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go index bf6c021c80f4a..f1b04fb0d965a 100644 --- a/plugins/inputs/conntrack/conntrack.go +++ b/plugins/inputs/conntrack/conntrack.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package conntrack diff --git a/plugins/inputs/conntrack/conntrack_notlinux.go b/plugins/inputs/conntrack/conntrack_notlinux.go index 11948731bb88d..6ad8e4a10e3c5 100644 --- a/plugins/inputs/conntrack/conntrack_notlinux.go +++ b/plugins/inputs/conntrack/conntrack_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package conntrack diff --git a/plugins/inputs/conntrack/conntrack_test.go b/plugins/inputs/conntrack/conntrack_test.go index e554f4e90d262..50f56d831791e 100644 --- a/plugins/inputs/conntrack/conntrack_test.go +++ b/plugins/inputs/conntrack/conntrack_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package conntrack diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index ede35b5befead..1a97aabf40db5 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package diskio diff --git a/plugins/inputs/diskio/diskio_other.go b/plugins/inputs/diskio/diskio_other.go index 1c883e904f92c..458a64c13f7bb 100644 --- a/plugins/inputs/diskio/diskio_other.go +++ b/plugins/inputs/diskio/diskio_other.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package diskio diff --git a/plugins/inputs/dmcache/dmcache_linux.go b/plugins/inputs/dmcache/dmcache_linux.go index 8e8d7de918560..712e67900ba4d 100644 --- a/plugins/inputs/dmcache/dmcache_linux.go +++ b/plugins/inputs/dmcache/dmcache_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dmcache diff --git a/plugins/inputs/dmcache/dmcache_linux_test.go b/plugins/inputs/dmcache/dmcache_linux_test.go index 30e32b1e876a4..93cd1e85e79bb 100644 --- a/plugins/inputs/dmcache/dmcache_linux_test.go +++ b/plugins/inputs/dmcache/dmcache_linux_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dmcache diff --git a/plugins/inputs/dmcache/dmcache_notlinux.go b/plugins/inputs/dmcache/dmcache_notlinux.go index ee1065638cab7..96aa0c65712ff 100644 --- a/plugins/inputs/dmcache/dmcache_notlinux.go +++ b/plugins/inputs/dmcache/dmcache_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package dmcache diff --git a/plugins/inputs/dpdk/dpdk.go b/plugins/inputs/dpdk/dpdk.go index 293dbee90adf3..261784942232c 100644 --- a/plugins/inputs/dpdk/dpdk.go +++ b/plugins/inputs/dpdk/dpdk.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dpdk diff --git a/plugins/inputs/dpdk/dpdk_connector.go b/plugins/inputs/dpdk/dpdk_connector.go index 1129d16d31604..9cd9c81c4362b 100644 --- a/plugins/inputs/dpdk/dpdk_connector.go +++ b/plugins/inputs/dpdk/dpdk_connector.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dpdk diff --git a/plugins/inputs/dpdk/dpdk_connector_test.go b/plugins/inputs/dpdk/dpdk_connector_test.go index a322964979fe8..f5580417c3c67 100644 --- a/plugins/inputs/dpdk/dpdk_connector_test.go +++ b/plugins/inputs/dpdk/dpdk_connector_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dpdk diff --git a/plugins/inputs/dpdk/dpdk_notlinux.go b/plugins/inputs/dpdk/dpdk_notlinux.go index a86625ff5c93f..1831b1212ae78 100644 --- a/plugins/inputs/dpdk/dpdk_notlinux.go +++ b/plugins/inputs/dpdk/dpdk_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package dpdk diff --git a/plugins/inputs/dpdk/dpdk_test.go b/plugins/inputs/dpdk/dpdk_test.go index cfee021e9e6bb..41d2da3d07777 100644 --- a/plugins/inputs/dpdk/dpdk_test.go +++ b/plugins/inputs/dpdk/dpdk_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dpdk diff --git a/plugins/inputs/dpdk/dpdk_utils.go b/plugins/inputs/dpdk/dpdk_utils.go index 962186a424893..b7049d8365597 100644 --- a/plugins/inputs/dpdk/dpdk_utils.go +++ b/plugins/inputs/dpdk/dpdk_utils.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dpdk diff --git a/plugins/inputs/dpdk/dpdk_utils_test.go b/plugins/inputs/dpdk/dpdk_utils_test.go index 6697e9ab38113..87e8a6c8248c3 100644 --- a/plugins/inputs/dpdk/dpdk_utils_test.go +++ b/plugins/inputs/dpdk/dpdk_utils_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dpdk diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go index 08e21db50dede..6c0116e6e8089 100644 --- a/plugins/inputs/ethtool/ethtool_linux.go +++ b/plugins/inputs/ethtool/ethtool_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package ethtool diff --git a/plugins/inputs/ethtool/ethtool_notlinux.go b/plugins/inputs/ethtool/ethtool_notlinux.go index b022e0a46bb72..ce149ecd6e69c 100644 --- a/plugins/inputs/ethtool/ethtool_notlinux.go +++ b/plugins/inputs/ethtool/ethtool_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package ethtool diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go index 87bc136d2db11..14cf14d811683 100644 --- a/plugins/inputs/ethtool/ethtool_test.go +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package ethtool diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index bdd11433d1ab6..d0647476c77ae 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/execd/execd_posix.go b/plugins/inputs/execd/execd_posix.go index 9593aaba0af29..a90b1a92dddf5 100644 --- a/plugins/inputs/execd/execd_posix.go +++ b/plugins/inputs/execd/execd_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package execd diff --git a/plugins/inputs/execd/execd_windows.go b/plugins/inputs/execd/execd_windows.go index 15e6798f2389b..9b1f22204bdc4 100644 --- a/plugins/inputs/execd/execd_windows.go +++ b/plugins/inputs/execd/execd_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package execd diff --git a/plugins/inputs/execd/shim/goshim_posix.go b/plugins/inputs/execd/shim/goshim_posix.go index 4e4a04f141b65..8d7faa2268878 100644 --- a/plugins/inputs/execd/shim/goshim_posix.go +++ b/plugins/inputs/execd/shim/goshim_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package shim diff --git a/plugins/inputs/execd/shim/goshim_windows.go b/plugins/inputs/execd/shim/goshim_windows.go index 317f8a2f3d4cb..90adfeff6f6c9 100644 --- a/plugins/inputs/execd/shim/goshim_windows.go +++ b/plugins/inputs/execd/shim/goshim_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package shim diff --git a/plugins/inputs/execd/shim/shim_posix_test.go b/plugins/inputs/execd/shim/shim_posix_test.go index 75484c45c78a0..36e0afcd83167 100644 --- a/plugins/inputs/execd/shim/shim_posix_test.go +++ b/plugins/inputs/execd/shim/shim_posix_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package shim diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go index e633559236bd2..ab09753ca1145 100644 --- a/plugins/inputs/file/file_test.go +++ b/plugins/inputs/file/file_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 74a3e2ec391c5..d02c28fb6f170 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go index 8a6d9cf2aa035..a3a3310d3fb4e 100644 --- a/plugins/inputs/filecount/filesystem_helpers_test.go +++ b/plugins/inputs/filecount/filesystem_helpers_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/filestat/filestat_test.go b/plugins/inputs/filestat/filestat_test.go index ea1bee47e4fb4..ac2a9f9a9f75b 100644 --- a/plugins/inputs/filestat/filestat_test.go +++ b/plugins/inputs/filestat/filestat_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 557639027ff03..40917bba1bc39 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when https://github.com/influxdata/telegraf/issues/8451 is fixed diff --git a/plugins/inputs/infiniband/infiniband_linux.go b/plugins/inputs/infiniband/infiniband_linux.go index 224d35bc2fce0..2868c683e7ebb 100644 --- a/plugins/inputs/infiniband/infiniband_linux.go +++ b/plugins/inputs/infiniband/infiniband_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package infiniband diff --git a/plugins/inputs/infiniband/infiniband_notlinux.go b/plugins/inputs/infiniband/infiniband_notlinux.go index 5b19672d975d8..8ad6731c17bd7 100644 --- a/plugins/inputs/infiniband/infiniband_notlinux.go +++ b/plugins/inputs/infiniband/infiniband_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package infiniband diff --git a/plugins/inputs/infiniband/infiniband_test.go b/plugins/inputs/infiniband/infiniband_test.go index 7f747eb5fd89f..c382a1fdf9dd0 100644 --- a/plugins/inputs/infiniband/infiniband_test.go +++ b/plugins/inputs/infiniband/infiniband_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package infiniband diff --git a/plugins/inputs/intel_powerstat/file.go b/plugins/inputs/intel_powerstat/file.go index 7953726fd9ba8..a07dd57e16a57 100644 --- a/plugins/inputs/intel_powerstat/file.go +++ b/plugins/inputs/intel_powerstat/file.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/intel_powerstat.go b/plugins/inputs/intel_powerstat/intel_powerstat.go index 9340fdec814b1..181e7642da4b8 100644 --- a/plugins/inputs/intel_powerstat/intel_powerstat.go +++ b/plugins/inputs/intel_powerstat/intel_powerstat.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go b/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go index f46755cee92b7..256e64970094e 100644 --- a/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go +++ b/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/intel_powerstat_test.go b/plugins/inputs/intel_powerstat/intel_powerstat_test.go index d65756595927e..ce01e77997cdc 100644 --- a/plugins/inputs/intel_powerstat/intel_powerstat_test.go +++ b/plugins/inputs/intel_powerstat/intel_powerstat_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/msr.go b/plugins/inputs/intel_powerstat/msr.go index 8d39164d6e783..6c19b56eb7cc5 100644 --- a/plugins/inputs/intel_powerstat/msr.go +++ b/plugins/inputs/intel_powerstat/msr.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/msr_test.go b/plugins/inputs/intel_powerstat/msr_test.go index 945716e15a105..b03d2b00960a9 100644 --- a/plugins/inputs/intel_powerstat/msr_test.go +++ b/plugins/inputs/intel_powerstat/msr_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/rapl.go b/plugins/inputs/intel_powerstat/rapl.go index 1e4b465fd7974..32d60ac89c705 100644 --- a/plugins/inputs/intel_powerstat/rapl.go +++ b/plugins/inputs/intel_powerstat/rapl.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/rapl_test.go b/plugins/inputs/intel_powerstat/rapl_test.go index 551f06f890ea4..5333ec13aaa79 100644 --- a/plugins/inputs/intel_powerstat/rapl_test.go +++ b/plugins/inputs/intel_powerstat/rapl_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/unit_converter.go b/plugins/inputs/intel_powerstat/unit_converter.go index 43dc79e6efc4a..7dd8c0d0d1aa0 100644 --- a/plugins/inputs/intel_powerstat/unit_converter.go +++ b/plugins/inputs/intel_powerstat/unit_converter.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package intel_powerstat diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index 69cc914227fc8..89370062d730e 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/intel_rdt_test.go b/plugins/inputs/intel_rdt/intel_rdt_test.go index 7e876425724ec..1eecbc5018125 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_test.go +++ b/plugins/inputs/intel_rdt/intel_rdt_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/intel_rdt_windows.go b/plugins/inputs/intel_rdt/intel_rdt_windows.go index e3ab0978fb374..64f9ebbe94b68 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_windows.go +++ b/plugins/inputs/intel_rdt/intel_rdt_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/processes.go b/plugins/inputs/intel_rdt/processes.go index ff86a4e6b745c..dd172b6d92dd2 100644 --- a/plugins/inputs/intel_rdt/processes.go +++ b/plugins/inputs/intel_rdt/processes.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/publisher.go b/plugins/inputs/intel_rdt/publisher.go index a01d730382da9..a567e1aacb1fa 100644 --- a/plugins/inputs/intel_rdt/publisher.go +++ b/plugins/inputs/intel_rdt/publisher.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/publisher_test.go b/plugins/inputs/intel_rdt/publisher_test.go index 5248ede7a16db..7db71e9ac5afa 100644 --- a/plugins/inputs/intel_rdt/publisher_test.go +++ b/plugins/inputs/intel_rdt/publisher_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go index e56f8b31d5725..89924b88de7c8 100644 --- a/plugins/inputs/iptables/iptables.go +++ b/plugins/inputs/iptables/iptables.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package iptables diff --git a/plugins/inputs/iptables/iptables_nocompile.go b/plugins/inputs/iptables/iptables_nocompile.go index f71b4208e62fb..17c0eaced90e5 100644 --- a/plugins/inputs/iptables/iptables_nocompile.go +++ b/plugins/inputs/iptables/iptables_nocompile.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package iptables diff --git a/plugins/inputs/iptables/iptables_test.go b/plugins/inputs/iptables/iptables_test.go index 681d8bbfc130e..4c62ef6d6a86a 100644 --- a/plugins/inputs/iptables/iptables_test.go +++ b/plugins/inputs/iptables/iptables_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package iptables diff --git a/plugins/inputs/ipvs/ipvs.go b/plugins/inputs/ipvs/ipvs.go index 65db5efe3bf7f..7dea5240aab0f 100644 --- a/plugins/inputs/ipvs/ipvs.go +++ b/plugins/inputs/ipvs/ipvs.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package ipvs diff --git a/plugins/inputs/ipvs/ipvs_notlinux.go b/plugins/inputs/ipvs/ipvs_notlinux.go index bbbb1240b62a8..b46035f2c2b3c 100644 --- a/plugins/inputs/ipvs/ipvs_notlinux.go +++ b/plugins/inputs/ipvs/ipvs_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package ipvs diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index 404c62d88c2b8..22311e9a0f12d 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package kernel diff --git a/plugins/inputs/kernel/kernel_notlinux.go b/plugins/inputs/kernel/kernel_notlinux.go index 05f6e55c453c5..838a97071a6d4 100644 --- a/plugins/inputs/kernel/kernel_notlinux.go +++ b/plugins/inputs/kernel/kernel_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package kernel diff --git a/plugins/inputs/kernel/kernel_test.go b/plugins/inputs/kernel/kernel_test.go index 2068237d5b60f..462624c2eb40d 100644 --- a/plugins/inputs/kernel/kernel_test.go +++ b/plugins/inputs/kernel/kernel_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package kernel diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat.go b/plugins/inputs/kernel_vmstat/kernel_vmstat.go index 66e7c7d664748..2019e0cbfddb3 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package kernel_vmstat diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go index 11a5d2e553dff..d687b13a9e72d 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package kernel_vmstat diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go index eca873ff71896..6bbb9d7b5b12f 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package kernel_vmstat diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 5fec865eaa8d7..83f5abd210bdd 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -1,3 +1,4 @@ +//go:build !solaris // +build !solaris package logparser diff --git a/plugins/inputs/logparser/logparser_solaris.go b/plugins/inputs/logparser/logparser_solaris.go index 28afe26772846..da482b97d27be 100644 --- a/plugins/inputs/logparser/logparser_solaris.go +++ b/plugins/inputs/logparser/logparser_solaris.go @@ -1,3 +1,4 @@ +//go:build solaris // +build solaris package logparser diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 5327386339f84..00aa288b316a8 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // Package lustre2 (doesn't aim for Windows) diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index e89c33b5a46e3..52c7e87f08fc6 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package lustre2 diff --git a/plugins/inputs/lustre2/lustre2_windows.go b/plugins/inputs/lustre2/lustre2_windows.go index 0c4d970608e09..cd3aea1b534f1 100644 --- a/plugins/inputs/lustre2/lustre2_windows.go +++ b/plugins/inputs/lustre2/lustre2_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package lustre2 diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index 2cf58689a6eab..64fb191639105 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package mongodb diff --git a/plugins/inputs/mongodb/mongodb_test.go b/plugins/inputs/mongodb/mongodb_test.go index 9484118dd19ab..24aa2fe3e0d04 100644 --- a/plugins/inputs/mongodb/mongodb_test.go +++ b/plugins/inputs/mongodb/mongodb_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package mongodb diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index 53f688bb3bcd9..c2adab29b324d 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -1,3 +1,4 @@ +//go:build !freebsd || (freebsd && cgo) // +build !freebsd freebsd,cgo package nats diff --git a/plugins/inputs/nats/nats_freebsd.go b/plugins/inputs/nats/nats_freebsd.go index 08d08ba760df0..f50ba2cfcf678 100644 --- a/plugins/inputs/nats/nats_freebsd.go +++ b/plugins/inputs/nats/nats_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd && !cgo // +build freebsd,!cgo package nats diff --git a/plugins/inputs/nats/nats_test.go b/plugins/inputs/nats/nats_test.go index 7207df94cfd02..135951405feda 100644 --- a/plugins/inputs/nats/nats_test.go +++ b/plugins/inputs/nats/nats_test.go @@ -1,3 +1,4 @@ +//go:build !freebsd || (freebsd && cgo) // +build !freebsd freebsd,cgo package nats diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 50d8d604efb5b..d51c576aad7f0 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/ping/ping_notwindows.go b/plugins/inputs/ping/ping_notwindows.go index a014a8237e8e7..f6bd751c2a4e3 100644 --- a/plugins/inputs/ping/ping_notwindows.go +++ b/plugins/inputs/ping/ping_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package ping diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 895b9c1fdf5b9..7faba097c4562 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package ping diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go index f53d6f09a7373..1d3d933e7736b 100644 --- a/plugins/inputs/ping/ping_windows.go +++ b/plugins/inputs/ping/ping_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ping diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index 0986d58bc74a8..6df8af3732a5f 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ping diff --git a/plugins/inputs/postfix/postfix.go b/plugins/inputs/postfix/postfix.go index f72474a114f94..e2d271f51cba1 100644 --- a/plugins/inputs/postfix/postfix.go +++ b/plugins/inputs/postfix/postfix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // postfix doesn't aim for Windows diff --git a/plugins/inputs/postfix/postfix_test.go b/plugins/inputs/postfix/postfix_test.go index ad997eebdbbe7..782a0c78c95b9 100644 --- a/plugins/inputs/postfix/postfix_test.go +++ b/plugins/inputs/postfix/postfix_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package postfix diff --git a/plugins/inputs/postfix/postfix_windows.go b/plugins/inputs/postfix/postfix_windows.go index 122c1543da55d..3a2c5e5cb3619 100644 --- a/plugins/inputs/postfix/postfix_windows.go +++ b/plugins/inputs/postfix/postfix_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package postfix diff --git a/plugins/inputs/postfix/stat_ctim.go b/plugins/inputs/postfix/stat_ctim.go index 456df5ffd4dd2..06ddccb178fce 100644 --- a/plugins/inputs/postfix/stat_ctim.go +++ b/plugins/inputs/postfix/stat_ctim.go @@ -1,3 +1,4 @@ +//go:build dragonfly || linux || netbsd || openbsd || solaris // +build dragonfly linux netbsd openbsd solaris package postfix diff --git a/plugins/inputs/postfix/stat_ctimespec.go b/plugins/inputs/postfix/stat_ctimespec.go index 40e0de6cc4a40..03f4e0a435f2c 100644 --- a/plugins/inputs/postfix/stat_ctimespec.go +++ b/plugins/inputs/postfix/stat_ctimespec.go @@ -1,3 +1,4 @@ +//go:build darwin || freebsd // +build darwin freebsd package postfix diff --git a/plugins/inputs/postfix/stat_none.go b/plugins/inputs/postfix/stat_none.go index d9b67b1663af8..c1ca6a41c662f 100644 --- a/plugins/inputs/postfix/stat_none.go +++ b/plugins/inputs/postfix/stat_none.go @@ -1,3 +1,4 @@ +//go:build !dragonfly && !linux && !netbsd && !openbsd && !solaris && !darwin && !freebsd // +build !dragonfly,!linux,!netbsd,!openbsd,!solaris,!darwin,!freebsd package postfix diff --git a/plugins/inputs/processes/processes_notwindows.go b/plugins/inputs/processes/processes_notwindows.go index 9faec83afa7d0..3c685cf1ebf7f 100644 --- a/plugins/inputs/processes/processes_notwindows.go +++ b/plugins/inputs/processes/processes_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package processes diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go index de04fecb56fc1..144b80f3fc1ec 100644 --- a/plugins/inputs/processes/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package processes diff --git a/plugins/inputs/processes/processes_windows.go b/plugins/inputs/processes/processes_windows.go index 567373c7c7260..f798a1668c738 100644 --- a/plugins/inputs/processes/processes_windows.go +++ b/plugins/inputs/processes/processes_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package processes diff --git a/plugins/inputs/procstat/native_finder_notwindows.go b/plugins/inputs/procstat/native_finder_notwindows.go index 9d7409ba1df8e..528b083ae628b 100644 --- a/plugins/inputs/procstat/native_finder_notwindows.go +++ b/plugins/inputs/procstat/native_finder_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package procstat diff --git a/plugins/inputs/procstat/win_service_notwindows.go b/plugins/inputs/procstat/win_service_notwindows.go index a0a776d33736f..b7efcee17cdc1 100644 --- a/plugins/inputs/procstat/win_service_notwindows.go +++ b/plugins/inputs/procstat/win_service_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package procstat diff --git a/plugins/inputs/procstat/win_service_windows.go b/plugins/inputs/procstat/win_service_windows.go index 06dffc8472089..5d9c196e388c0 100644 --- a/plugins/inputs/procstat/win_service_windows.go +++ b/plugins/inputs/procstat/win_service_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package procstat diff --git a/plugins/inputs/ras/ras.go b/plugins/inputs/ras/ras.go index a8599c4a78d0f..a8d4ba727d7df 100644 --- a/plugins/inputs/ras/ras.go +++ b/plugins/inputs/ras/ras.go @@ -1,3 +1,4 @@ +//go:build linux && (386 || amd64 || arm || arm64) // +build linux // +build 386 amd64 arm arm64 diff --git a/plugins/inputs/ras/ras_notlinux.go b/plugins/inputs/ras/ras_notlinux.go index 74f0aaf9fc59f..b0795fd794f6f 100644 --- a/plugins/inputs/ras/ras_notlinux.go +++ b/plugins/inputs/ras/ras_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux || (linux && !386 && !amd64 && !arm && !arm64) // +build !linux linux,!386,!amd64,!arm,!arm64 package ras diff --git a/plugins/inputs/ras/ras_test.go b/plugins/inputs/ras/ras_test.go index a90258bb4423b..656200fde95cc 100644 --- a/plugins/inputs/ras/ras_test.go +++ b/plugins/inputs/ras/ras_test.go @@ -1,3 +1,4 @@ +//go:build linux && (386 || amd64 || arm || arm64) // +build linux // +build 386 amd64 arm arm64 diff --git a/plugins/inputs/rethinkdb/rethinkdb_server_test.go b/plugins/inputs/rethinkdb/rethinkdb_server_test.go index 82ff292804a8c..0119131900b61 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package rethinkdb diff --git a/plugins/inputs/rethinkdb/rethinkdb_test.go b/plugins/inputs/rethinkdb/rethinkdb_test.go index 9a09864cad91a..651042ab13783 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package rethinkdb diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index 24bdd11540e1b..a38d5989cb5d0 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -368,7 +368,7 @@ func (rsl *RiemannSocketListener) Start(acc telegraf.Accumulator) error { // Handle cancellations from the process func processOsSignals(cancelFunc context.CancelFunc) { - signalChan := make(chan os.Signal) + signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, os.Interrupt) for { sig := <-signalChan diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index d3a8ba762f379..f2590c105272a 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sensors diff --git a/plugins/inputs/sensors/sensors_notlinux.go b/plugins/inputs/sensors/sensors_notlinux.go index 62a6211598f4e..424e96181b46b 100644 --- a/plugins/inputs/sensors/sensors_notlinux.go +++ b/plugins/inputs/sensors/sensors_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package sensors diff --git a/plugins/inputs/sensors/sensors_test.go b/plugins/inputs/sensors/sensors_test.go index 6bf1b616cb985..be4cace6eab79 100644 --- a/plugins/inputs/sensors/sensors_test.go +++ b/plugins/inputs/sensors/sensors_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sensors diff --git a/plugins/inputs/snmp/snmp_mocks_generate.go b/plugins/inputs/snmp/snmp_mocks_generate.go index 7227771a7e4fa..f87f9029b0d06 100644 --- a/plugins/inputs/snmp/snmp_mocks_generate.go +++ b/plugins/inputs/snmp/snmp_mocks_generate.go @@ -1,3 +1,4 @@ +//go:build generate // +build generate package main diff --git a/plugins/inputs/sql/drivers_sqlite.go b/plugins/inputs/sql/drivers_sqlite.go index 26cf7e08b5170..945e2b8425a3b 100644 --- a/plugins/inputs/sql/drivers_sqlite.go +++ b/plugins/inputs/sql/drivers_sqlite.go @@ -1,4 +1,7 @@ -// +build linux,freebsd,darwin +//go:build linux && freebsd && darwin && (!mips || !mips64) +// +build linux +// +build freebsd +// +build darwin // +build !mips !mips64 package sql diff --git a/plugins/inputs/synproxy/synproxy_linux.go b/plugins/inputs/synproxy/synproxy_linux.go index bcc9729384282..93cd26e3343f3 100644 --- a/plugins/inputs/synproxy/synproxy_linux.go +++ b/plugins/inputs/synproxy/synproxy_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package synproxy diff --git a/plugins/inputs/synproxy/synproxy_notlinux.go b/plugins/inputs/synproxy/synproxy_notlinux.go index 71a223644d8ed..f12fc70656eba 100644 --- a/plugins/inputs/synproxy/synproxy_notlinux.go +++ b/plugins/inputs/synproxy/synproxy_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package synproxy diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go index fc5d67d6a064a..dd733253635b8 100644 --- a/plugins/inputs/synproxy/synproxy_test.go +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package synproxy diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 01b4db9fa4af9..7e69ff41ccdf2 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sysstat diff --git a/plugins/inputs/sysstat/sysstat_interval_test.go b/plugins/inputs/sysstat/sysstat_interval_test.go index 972eb9af936de..f714ec10b1c36 100644 --- a/plugins/inputs/sysstat/sysstat_interval_test.go +++ b/plugins/inputs/sysstat/sysstat_interval_test.go @@ -1,5 +1,5 @@ -// +build !race -// +build linux +//go:build !race && linux +// +build !race,linux package sysstat diff --git a/plugins/inputs/sysstat/sysstat_notlinux.go b/plugins/inputs/sysstat/sysstat_notlinux.go index e97e71e78280c..6b5dd6fcf18cb 100644 --- a/plugins/inputs/sysstat/sysstat_notlinux.go +++ b/plugins/inputs/sysstat/sysstat_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package sysstat diff --git a/plugins/inputs/sysstat/sysstat_test.go b/plugins/inputs/sysstat/sysstat_test.go index 1766130391bbb..64b596bb329ba 100644 --- a/plugins/inputs/sysstat/sysstat_test.go +++ b/plugins/inputs/sysstat/sysstat_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sysstat diff --git a/plugins/inputs/systemd_units/systemd_units_notlinux.go b/plugins/inputs/systemd_units/systemd_units_notlinux.go index f53cea3de6eba..32f5b97cc37ec 100644 --- a/plugins/inputs/systemd_units/systemd_units_notlinux.go +++ b/plugins/inputs/systemd_units/systemd_units_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package systemd_units diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index d84c09ff8d3c2..d5bda84732ad8 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -1,3 +1,4 @@ +//go:build !solaris // +build !solaris package tail diff --git a/plugins/inputs/tail/tail_solaris.go b/plugins/inputs/tail/tail_solaris.go index 802088da28248..093dd16a06c23 100644 --- a/plugins/inputs/tail/tail_solaris.go +++ b/plugins/inputs/tail/tail_solaris.go @@ -1,5 +1,6 @@ // Skipping plugin on Solaris due to fsnotify support // +//go:build solaris // +build solaris package tail diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index e4f18bee42ed3..d9872b9d81af7 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package varnish diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index 4ba9e941a52ee..088c08378c1ef 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package varnish diff --git a/plugins/inputs/varnish/varnish_windows.go b/plugins/inputs/varnish/varnish_windows.go index 0c85c106f2b4f..9fed7dfc2a3c8 100644 --- a/plugins/inputs/varnish/varnish_windows.go +++ b/plugins/inputs/varnish/varnish_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package varnish diff --git a/plugins/inputs/win_eventlog/event.go b/plugins/inputs/win_eventlog/event.go index 2169ce8b490b3..86ddefdcb95e0 100644 --- a/plugins/inputs/win_eventlog/event.go +++ b/plugins/inputs/win_eventlog/event.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/syscall_windows.go b/plugins/inputs/win_eventlog/syscall_windows.go index df02913eee2af..d7bc07d0a5d42 100644 --- a/plugins/inputs/win_eventlog/syscall_windows.go +++ b/plugins/inputs/win_eventlog/syscall_windows.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/util.go b/plugins/inputs/win_eventlog/util.go index 7435cdb09ceaf..276e7514228e0 100644 --- a/plugins/inputs/win_eventlog/util.go +++ b/plugins/inputs/win_eventlog/util.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/util_test.go b/plugins/inputs/win_eventlog/util_test.go index ce7428dd391d2..1dc90cc2326d3 100644 --- a/plugins/inputs/win_eventlog/util_test.go +++ b/plugins/inputs/win_eventlog/util_test.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/win_eventlog.go b/plugins/inputs/win_eventlog/win_eventlog.go index 8d0efe3119d97..2ee303d483530 100644 --- a/plugins/inputs/win_eventlog/win_eventlog.go +++ b/plugins/inputs/win_eventlog/win_eventlog.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/win_eventlog_notwindows.go b/plugins/inputs/win_eventlog/win_eventlog_notwindows.go index 005077aa64c7d..e78ad6133b367 100644 --- a/plugins/inputs/win_eventlog/win_eventlog_notwindows.go +++ b/plugins/inputs/win_eventlog/win_eventlog_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows //revive:disable-next-line:var-naming diff --git a/plugins/inputs/win_eventlog/win_eventlog_test.go b/plugins/inputs/win_eventlog/win_eventlog_test.go index 9f922431ed776..bd6a434f40088 100644 --- a/plugins/inputs/win_eventlog/win_eventlog_test.go +++ b/plugins/inputs/win_eventlog/win_eventlog_test.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/zsyscall_windows.go b/plugins/inputs/win_eventlog/zsyscall_windows.go index 5c7b0a504b0bf..34c17471691e8 100644 --- a/plugins/inputs/win_eventlog/zsyscall_windows.go +++ b/plugins/inputs/win_eventlog/zsyscall_windows.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_perf_counters/kernel32.go b/plugins/inputs/win_perf_counters/kernel32.go index 9cdadedc873bd..09cbd4be5f182 100644 --- a/plugins/inputs/win_perf_counters/kernel32.go +++ b/plugins/inputs/win_perf_counters/kernel32.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go index 3a24761b9d593..d4e5f14a1c267 100644 --- a/plugins/inputs/win_perf_counters/pdh.go +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/pdh_386.go b/plugins/inputs/win_perf_counters/pdh_386.go index 134d15c8d1461..ec572db72447e 100644 --- a/plugins/inputs/win_perf_counters/pdh_386.go +++ b/plugins/inputs/win_perf_counters/pdh_386.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/pdh_amd64.go b/plugins/inputs/win_perf_counters/pdh_amd64.go index ff3b39335bcd4..1afedc317260e 100644 --- a/plugins/inputs/win_perf_counters/pdh_amd64.go +++ b/plugins/inputs/win_perf_counters/pdh_amd64.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/performance_query.go b/plugins/inputs/win_perf_counters/performance_query.go index a59f96b84dc43..ab130a41dec3f 100644 --- a/plugins/inputs/win_perf_counters/performance_query.go +++ b/plugins/inputs/win_perf_counters/performance_query.go @@ -1,4 +1,5 @@ // Go API over pdh syscalls +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 4bcbbfb1b2318..3a74e34a5228a 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go index 43b20eb611577..a5ae58370ab4a 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go b/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go index 427f5d5461ff3..00af92b722552 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 930e923754ac8..969b518d0f2b0 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_services/win_services.go b/plugins/inputs/win_services/win_services.go index 185e9b6b67de4..38f873a99284d 100644 --- a/plugins/inputs/win_services/win_services.go +++ b/plugins/inputs/win_services/win_services.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_services diff --git a/plugins/inputs/win_services/win_services_integration_test.go b/plugins/inputs/win_services/win_services_integration_test.go index 998aa1ed5eb2f..3c831642a01cf 100644 --- a/plugins/inputs/win_services/win_services_integration_test.go +++ b/plugins/inputs/win_services/win_services_integration_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows //these tests must be run under administrator account diff --git a/plugins/inputs/win_services/win_services_notwindows.go b/plugins/inputs/win_services/win_services_notwindows.go index 062c11cfc8eed..aa2f3534ca74d 100644 --- a/plugins/inputs/win_services/win_services_notwindows.go +++ b/plugins/inputs/win_services/win_services_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package win_services diff --git a/plugins/inputs/win_services/win_services_test.go b/plugins/inputs/win_services/win_services_test.go index 7d1672e8f6515..69a75372dd086 100644 --- a/plugins/inputs/win_services/win_services_test.go +++ b/plugins/inputs/win_services/win_services_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_services diff --git a/plugins/inputs/wireless/wireless_linux.go b/plugins/inputs/wireless/wireless_linux.go index 75890a7901074..706f9700d12c9 100644 --- a/plugins/inputs/wireless/wireless_linux.go +++ b/plugins/inputs/wireless/wireless_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package wireless diff --git a/plugins/inputs/wireless/wireless_notlinux.go b/plugins/inputs/wireless/wireless_notlinux.go index 4769acc970e42..435559ca58529 100644 --- a/plugins/inputs/wireless/wireless_notlinux.go +++ b/plugins/inputs/wireless/wireless_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package wireless diff --git a/plugins/inputs/wireless/wireless_test.go b/plugins/inputs/wireless/wireless_test.go index 6c562887e54db..20c10de88a347 100644 --- a/plugins/inputs/wireless/wireless_test.go +++ b/plugins/inputs/wireless/wireless_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package wireless diff --git a/plugins/inputs/zfs/zfs_freebsd.go b/plugins/inputs/zfs/zfs_freebsd.go index 491388147d93c..24f6a50997612 100644 --- a/plugins/inputs/zfs/zfs_freebsd.go +++ b/plugins/inputs/zfs/zfs_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package zfs diff --git a/plugins/inputs/zfs/zfs_freebsd_test.go b/plugins/inputs/zfs/zfs_freebsd_test.go index 4d1fea0ae483a..816f82b6dbf5b 100644 --- a/plugins/inputs/zfs/zfs_freebsd_test.go +++ b/plugins/inputs/zfs/zfs_freebsd_test.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package zfs diff --git a/plugins/inputs/zfs/zfs_linux.go b/plugins/inputs/zfs/zfs_linux.go index 276880d7dea97..ac3ca6ee81d23 100644 --- a/plugins/inputs/zfs/zfs_linux.go +++ b/plugins/inputs/zfs/zfs_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package zfs diff --git a/plugins/inputs/zfs/zfs_linux_test.go b/plugins/inputs/zfs/zfs_linux_test.go index 7d8aff81c689c..52622582029a5 100644 --- a/plugins/inputs/zfs/zfs_linux_test.go +++ b/plugins/inputs/zfs/zfs_linux_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package zfs diff --git a/plugins/inputs/zfs/zfs_other.go b/plugins/inputs/zfs/zfs_other.go index 98de02be917dd..963afd3580ff8 100644 --- a/plugins/inputs/zfs/zfs_other.go +++ b/plugins/inputs/zfs/zfs_other.go @@ -1,3 +1,4 @@ +//go:build !linux && !freebsd // +build !linux,!freebsd package zfs diff --git a/plugins/outputs/sql/sqlite.go b/plugins/outputs/sql/sqlite.go index 3703f42923ac1..15666101a957d 100644 --- a/plugins/outputs/sql/sqlite.go +++ b/plugins/outputs/sql/sqlite.go @@ -1,4 +1,7 @@ -// +build linux,freebsd,darwin +//go:build linux && freebsd && darwin && (!mips || !mips64) +// +build linux +// +build freebsd +// +build darwin // +build !mips !mips64 package sql diff --git a/plugins/outputs/sql/sqlite_test.go b/plugins/outputs/sql/sqlite_test.go index 6ed08a2570662..d54ffe877a80f 100644 --- a/plugins/outputs/sql/sqlite_test.go +++ b/plugins/outputs/sql/sqlite_test.go @@ -1,4 +1,6 @@ -// +build linux,freebsd +//go:build linux && freebsd && (!mips || !mips64) +// +build linux +// +build freebsd // +build !mips !mips64 package sql diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go index 5d715af1c4aaf..2649cdb42bc37 100644 --- a/plugins/parsers/influx/machine.go +++ b/plugins/parsers/influx/machine.go @@ -1,4 +1,3 @@ - //line plugins/parsers/influx/machine.go.rl:1 package influx @@ -16,19 +15,16 @@ func (e *readErr) Error() string { } var ( - ErrNameParse = errors.New("expected measurement name") - ErrFieldParse = errors.New("expected field") - ErrTagParse = errors.New("expected tag") + ErrNameParse = errors.New("expected measurement name") + ErrFieldParse = errors.New("expected field") + ErrTagParse = errors.New("expected tag") ErrTimestampParse = errors.New("expected timestamp") - ErrParse = errors.New("parse error") - EOF = errors.New("EOF") + ErrParse = errors.New("parse error") + EOF = errors.New("EOF") ) - //line plugins/parsers/influx/machine.go.rl:318 - - //line plugins/parsers/influx/machine.go:33 const LineProtocol_start int = 46 const LineProtocol_first_final int = 46 @@ -39,7 +35,6 @@ const LineProtocol_en_discard_line int = 34 const LineProtocol_en_align int = 85 const LineProtocol_en_series int = 37 - //line plugins/parsers/influx/machine.go.rl:321 type Handler interface { @@ -69,26 +64,25 @@ type machine struct { func NewMachine(handler Handler) *machine { m := &machine{ - handler: handler, + handler: handler, initState: LineProtocol_en_align, } - //line plugins/parsers/influx/machine.go.rl:354 - + //line plugins/parsers/influx/machine.go.rl:355 - + //line plugins/parsers/influx/machine.go.rl:356 - + //line plugins/parsers/influx/machine.go.rl:357 - + //line plugins/parsers/influx/machine.go.rl:358 - + //line plugins/parsers/influx/machine.go.rl:359 - + //line plugins/parsers/influx/machine.go:90 { - ( m.cs) = LineProtocol_start + (m.cs) = LineProtocol_start } //line plugins/parsers/influx/machine.go.rl:360 @@ -98,24 +92,23 @@ func NewMachine(handler Handler) *machine { func NewSeriesMachine(handler Handler) *machine { m := &machine{ - handler: handler, + handler: handler, initState: LineProtocol_en_series, } - //line plugins/parsers/influx/machine.go.rl:371 - + //line plugins/parsers/influx/machine.go.rl:372 - + //line plugins/parsers/influx/machine.go.rl:373 - + //line plugins/parsers/influx/machine.go.rl:374 - + //line plugins/parsers/influx/machine.go.rl:375 - + //line plugins/parsers/influx/machine.go:117 { - ( m.cs) = LineProtocol_start + (m.cs) = LineProtocol_start } //line plugins/parsers/influx/machine.go.rl:376 @@ -135,10 +128,9 @@ func (m *machine) SetData(data []byte) { m.beginMetric = false m.finishMetric = false - //line plugins/parsers/influx/machine.go:140 { - ( m.cs) = LineProtocol_start + (m.cs) = LineProtocol_start } //line plugins/parsers/influx/machine.go.rl:393 @@ -163,391 +155,391 @@ func (m *machine) Next() error { func (m *machine) exec() error { var err error - + //line plugins/parsers/influx/machine.go:168 { - if ( m.p) == ( m.pe) { - goto _test_eof - } - goto _resume + if (m.p) == (m.pe) { + goto _test_eof + } + goto _resume -_again: - switch ( m.cs) { - case 46: - goto st46 - case 1: - goto st1 - case 2: - goto st2 - case 3: - goto st3 - case 0: - goto st0 - case 4: - goto st4 - case 5: - goto st5 - case 6: - goto st6 - case 47: - goto st47 - case 48: - goto st48 - case 49: - goto st49 - case 7: - goto st7 - case 8: - goto st8 - case 9: - goto st9 - case 10: - goto st10 - case 50: - goto st50 - case 51: - goto st51 - case 52: - goto st52 - case 53: - goto st53 - case 54: - goto st54 - case 55: - goto st55 - case 56: - goto st56 - case 57: - goto st57 - case 58: - goto st58 - case 59: - goto st59 - case 60: - goto st60 - case 61: - goto st61 - case 62: - goto st62 - case 63: - goto st63 - case 64: - goto st64 - case 65: - goto st65 - case 66: - goto st66 - case 67: - goto st67 - case 68: - goto st68 - case 69: - goto st69 - case 11: - goto st11 - case 12: - goto st12 - case 13: - goto st13 - case 14: - goto st14 - case 15: - goto st15 - case 70: - goto st70 - case 16: - goto st16 - case 17: - goto st17 - case 71: - goto st71 - case 72: - goto st72 - case 73: - goto st73 - case 74: - goto st74 - case 75: - goto st75 - case 76: - goto st76 - case 77: - goto st77 - case 78: - goto st78 - case 79: - goto st79 - case 18: - goto st18 - case 19: - goto st19 - case 20: - goto st20 - case 80: - goto st80 - case 21: - goto st21 - case 22: - goto st22 - case 23: - goto st23 - case 81: - goto st81 - case 24: - goto st24 - case 25: - goto st25 - case 82: - goto st82 - case 83: - goto st83 - case 26: - goto st26 - case 27: - goto st27 - case 28: - goto st28 - case 29: - goto st29 - case 30: - goto st30 - case 31: - goto st31 - case 32: - goto st32 - case 33: - goto st33 - case 34: - goto st34 - case 84: - goto st84 - case 37: - goto st37 - case 86: - goto st86 - case 87: - goto st87 - case 38: - goto st38 - case 39: - goto st39 - case 40: - goto st40 - case 41: - goto st41 - case 88: - goto st88 - case 42: - goto st42 - case 89: - goto st89 - case 43: - goto st43 - case 44: - goto st44 - case 45: - goto st45 - case 85: - goto st85 - case 35: - goto st35 - case 36: - goto st36 - } + _again: + switch m.cs { + case 46: + goto st46 + case 1: + goto st1 + case 2: + goto st2 + case 3: + goto st3 + case 0: + goto st0 + case 4: + goto st4 + case 5: + goto st5 + case 6: + goto st6 + case 47: + goto st47 + case 48: + goto st48 + case 49: + goto st49 + case 7: + goto st7 + case 8: + goto st8 + case 9: + goto st9 + case 10: + goto st10 + case 50: + goto st50 + case 51: + goto st51 + case 52: + goto st52 + case 53: + goto st53 + case 54: + goto st54 + case 55: + goto st55 + case 56: + goto st56 + case 57: + goto st57 + case 58: + goto st58 + case 59: + goto st59 + case 60: + goto st60 + case 61: + goto st61 + case 62: + goto st62 + case 63: + goto st63 + case 64: + goto st64 + case 65: + goto st65 + case 66: + goto st66 + case 67: + goto st67 + case 68: + goto st68 + case 69: + goto st69 + case 11: + goto st11 + case 12: + goto st12 + case 13: + goto st13 + case 14: + goto st14 + case 15: + goto st15 + case 70: + goto st70 + case 16: + goto st16 + case 17: + goto st17 + case 71: + goto st71 + case 72: + goto st72 + case 73: + goto st73 + case 74: + goto st74 + case 75: + goto st75 + case 76: + goto st76 + case 77: + goto st77 + case 78: + goto st78 + case 79: + goto st79 + case 18: + goto st18 + case 19: + goto st19 + case 20: + goto st20 + case 80: + goto st80 + case 21: + goto st21 + case 22: + goto st22 + case 23: + goto st23 + case 81: + goto st81 + case 24: + goto st24 + case 25: + goto st25 + case 82: + goto st82 + case 83: + goto st83 + case 26: + goto st26 + case 27: + goto st27 + case 28: + goto st28 + case 29: + goto st29 + case 30: + goto st30 + case 31: + goto st31 + case 32: + goto st32 + case 33: + goto st33 + case 34: + goto st34 + case 84: + goto st84 + case 37: + goto st37 + case 86: + goto st86 + case 87: + goto st87 + case 38: + goto st38 + case 39: + goto st39 + case 40: + goto st40 + case 41: + goto st41 + case 88: + goto st88 + case 42: + goto st42 + case 89: + goto st89 + case 43: + goto st43 + case 44: + goto st44 + case 45: + goto st45 + case 85: + goto st85 + case 35: + goto st35 + case 36: + goto st36 + } - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof - } -_resume: - switch ( m.cs) { - case 46: - goto st_case_46 - case 1: - goto st_case_1 - case 2: - goto st_case_2 - case 3: - goto st_case_3 - case 0: - goto st_case_0 - case 4: - goto st_case_4 - case 5: - goto st_case_5 - case 6: - goto st_case_6 - case 47: - goto st_case_47 - case 48: - goto st_case_48 - case 49: - goto st_case_49 - case 7: - goto st_case_7 - case 8: - goto st_case_8 - case 9: - goto st_case_9 - case 10: - goto st_case_10 - case 50: - goto st_case_50 - case 51: - goto st_case_51 - case 52: - goto st_case_52 - case 53: - goto st_case_53 - case 54: - goto st_case_54 - case 55: - goto st_case_55 - case 56: - goto st_case_56 - case 57: - goto st_case_57 - case 58: - goto st_case_58 - case 59: - goto st_case_59 - case 60: - goto st_case_60 - case 61: - goto st_case_61 - case 62: - goto st_case_62 - case 63: - goto st_case_63 - case 64: - goto st_case_64 - case 65: - goto st_case_65 - case 66: - goto st_case_66 - case 67: - goto st_case_67 - case 68: - goto st_case_68 - case 69: - goto st_case_69 - case 11: - goto st_case_11 - case 12: - goto st_case_12 - case 13: - goto st_case_13 - case 14: - goto st_case_14 - case 15: - goto st_case_15 - case 70: - goto st_case_70 - case 16: - goto st_case_16 - case 17: - goto st_case_17 - case 71: - goto st_case_71 - case 72: - goto st_case_72 - case 73: - goto st_case_73 - case 74: - goto st_case_74 - case 75: - goto st_case_75 - case 76: - goto st_case_76 - case 77: - goto st_case_77 - case 78: - goto st_case_78 - case 79: - goto st_case_79 - case 18: - goto st_case_18 - case 19: - goto st_case_19 - case 20: - goto st_case_20 - case 80: - goto st_case_80 - case 21: - goto st_case_21 - case 22: - goto st_case_22 - case 23: - goto st_case_23 - case 81: - goto st_case_81 - case 24: - goto st_case_24 - case 25: - goto st_case_25 - case 82: - goto st_case_82 - case 83: - goto st_case_83 - case 26: - goto st_case_26 - case 27: - goto st_case_27 - case 28: - goto st_case_28 - case 29: - goto st_case_29 - case 30: - goto st_case_30 - case 31: - goto st_case_31 - case 32: - goto st_case_32 - case 33: - goto st_case_33 - case 34: - goto st_case_34 - case 84: - goto st_case_84 - case 37: - goto st_case_37 - case 86: - goto st_case_86 - case 87: - goto st_case_87 - case 38: - goto st_case_38 - case 39: - goto st_case_39 - case 40: - goto st_case_40 - case 41: - goto st_case_41 - case 88: - goto st_case_88 - case 42: - goto st_case_42 - case 89: - goto st_case_89 - case 43: - goto st_case_43 - case 44: - goto st_case_44 - case 45: - goto st_case_45 - case 85: - goto st_case_85 - case 35: - goto st_case_35 - case 36: - goto st_case_36 - } - goto st_out + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof + } + _resume: + switch m.cs { + case 46: + goto st_case_46 + case 1: + goto st_case_1 + case 2: + goto st_case_2 + case 3: + goto st_case_3 + case 0: + goto st_case_0 + case 4: + goto st_case_4 + case 5: + goto st_case_5 + case 6: + goto st_case_6 + case 47: + goto st_case_47 + case 48: + goto st_case_48 + case 49: + goto st_case_49 + case 7: + goto st_case_7 + case 8: + goto st_case_8 + case 9: + goto st_case_9 + case 10: + goto st_case_10 + case 50: + goto st_case_50 + case 51: + goto st_case_51 + case 52: + goto st_case_52 + case 53: + goto st_case_53 + case 54: + goto st_case_54 + case 55: + goto st_case_55 + case 56: + goto st_case_56 + case 57: + goto st_case_57 + case 58: + goto st_case_58 + case 59: + goto st_case_59 + case 60: + goto st_case_60 + case 61: + goto st_case_61 + case 62: + goto st_case_62 + case 63: + goto st_case_63 + case 64: + goto st_case_64 + case 65: + goto st_case_65 + case 66: + goto st_case_66 + case 67: + goto st_case_67 + case 68: + goto st_case_68 + case 69: + goto st_case_69 + case 11: + goto st_case_11 + case 12: + goto st_case_12 + case 13: + goto st_case_13 + case 14: + goto st_case_14 + case 15: + goto st_case_15 + case 70: + goto st_case_70 + case 16: + goto st_case_16 + case 17: + goto st_case_17 + case 71: + goto st_case_71 + case 72: + goto st_case_72 + case 73: + goto st_case_73 + case 74: + goto st_case_74 + case 75: + goto st_case_75 + case 76: + goto st_case_76 + case 77: + goto st_case_77 + case 78: + goto st_case_78 + case 79: + goto st_case_79 + case 18: + goto st_case_18 + case 19: + goto st_case_19 + case 20: + goto st_case_20 + case 80: + goto st_case_80 + case 21: + goto st_case_21 + case 22: + goto st_case_22 + case 23: + goto st_case_23 + case 81: + goto st_case_81 + case 24: + goto st_case_24 + case 25: + goto st_case_25 + case 82: + goto st_case_82 + case 83: + goto st_case_83 + case 26: + goto st_case_26 + case 27: + goto st_case_27 + case 28: + goto st_case_28 + case 29: + goto st_case_29 + case 30: + goto st_case_30 + case 31: + goto st_case_31 + case 32: + goto st_case_32 + case 33: + goto st_case_33 + case 34: + goto st_case_34 + case 84: + goto st_case_84 + case 37: + goto st_case_37 + case 86: + goto st_case_86 + case 87: + goto st_case_87 + case 38: + goto st_case_38 + case 39: + goto st_case_39 + case 40: + goto st_case_40 + case 41: + goto st_case_41 + case 88: + goto st_case_88 + case 42: + goto st_case_42 + case 89: + goto st_case_89 + case 43: + goto st_case_43 + case 44: + goto st_case_44 + case 45: + goto st_case_45 + case 85: + goto st_case_85 + case 35: + goto st_case_35 + case 36: + goto st_case_36 + } + goto st_out st46: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof46 } st_case_46: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr31 case 13: @@ -561,33 +553,33 @@ _resume: case 92: goto tr81 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr80 } goto tr79 -tr29: + tr29: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st1 -tr79: + goto st1 + tr79: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st1 + goto st1 st1: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof1 } st_case_1: //line plugins/parsers/influx/machine.go:590 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr2 case 13: @@ -599,43 +591,49 @@ tr79: case 92: goto st8 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr1 } goto st1 -tr1: - ( m.cs) = 2 + tr1: + (m.cs) = 2 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr56: - ( m.cs) = 2 + goto _again + tr56: + (m.cs) = 2 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st2: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof2 } st_case_2: //line plugins/parsers/influx/machine.go:638 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr7 case 13: @@ -649,23 +647,23 @@ tr56: case 92: goto tr8 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st2 } goto tr5 -tr5: + tr5: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st3 + goto st3 st3: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof3 } st_case_3: //line plugins/parsers/influx/machine.go:668 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr7 case 44: @@ -675,99 +673,119 @@ tr5: case 92: goto st12 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr7 } goto st3 -tr2: - ( m.cs) = 0 + tr2: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr7: - ( m.cs) = 0 + goto _again + tr7: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:39 - err = ErrFieldParse - ( m.p)-- + err = ErrFieldParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr31: - ( m.cs) = 0 + goto _again + tr31: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:32 - err = ErrNameParse - ( m.p)-- + err = ErrNameParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr35: - ( m.cs) = 0 + goto _again + tr35: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:53 - err = ErrTimestampParse - ( m.p)-- + err = ErrTimestampParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr82: - ( m.cs) = 0 + goto _again + tr82: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:39 - err = ErrFieldParse - ( m.p)-- + err = ErrFieldParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } //line plugins/parsers/influx/machine.go.rl:53 - err = ErrTimestampParse - ( m.p)-- + err = ErrTimestampParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr135: + goto _again + tr135: //line plugins/parsers/influx/machine.go.rl:73 - ( m.p)-- + (m.p)-- - {goto st46 } + { + goto st46 + } - goto st0 + goto st0 //line plugins/parsers/influx/machine.go:754 -st_case_0: + st_case_0: st0: - ( m.cs) = 0 + (m.cs) = 0 goto _out -tr10: + tr10: //line plugins/parsers/influx/machine.go.rl:108 - m.key = m.text() + m.key = m.text() - goto st4 + goto st4 st4: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof4 } st_case_4: //line plugins/parsers/influx/machine.go:770 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 34: goto st5 case 45: @@ -785,16 +803,16 @@ tr10: case 116: goto tr20 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr16 } goto tr7 st5: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof5 } st_case_5: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr22 case 34: @@ -803,39 +821,39 @@ tr10: goto tr24 } goto tr21 -tr21: + tr21: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st6 -tr22: + goto st6 + tr22: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st6 -tr26: + goto st6 + tr26: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st6 + goto st6 st6: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof6 } st_case_6: //line plugins/parsers/influx/machine.go:838 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr26 case 34: @@ -844,43 +862,49 @@ tr26: goto st13 } goto st6 -tr23: - ( m.cs) = 47 + tr23: + (m.cs) = 47 //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p //line plugins/parsers/influx/machine.go.rl:148 - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddString(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr27: - ( m.cs) = 47 + goto _again + tr27: + (m.cs) = 47 //line plugins/parsers/influx/machine.go.rl:148 - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddString(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st47: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof47 } st_case_47: //line plugins/parsers/influx/machine.go:883 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr34 case 13: @@ -890,69 +914,81 @@ tr27: case 44: goto st11 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st48 } goto tr82 -tr110: - ( m.cs) = 48 + tr110: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr117: - ( m.cs) = 48 + goto _again + tr117: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr122: - ( m.cs) = 48 + goto _again + tr122: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr127: - ( m.cs) = 48 + goto _again + tr127: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st48: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof48 } st_case_48: //line plugins/parsers/influx/machine.go:955 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr34 case 13: @@ -963,130 +999,148 @@ tr127: goto tr86 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr87 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto st48 } goto tr35 -tr34: + tr34: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st49 -tr89: - ( m.cs) = 49 + goto st49 + tr89: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr111: - ( m.cs) = 49 + goto _again + tr111: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr118: - ( m.cs) = 49 + goto _again + tr118: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr123: - ( m.cs) = 49 + goto _again + tr123: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr128: - ( m.cs) = 49 + goto _again + tr128: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again + goto _again st49: //line plugins/parsers/influx/machine.go.rl:172 - m.finishMetric = true - ( m.cs) = 85; - {( m.p)++; goto _out } + m.finishMetric = true + (m.cs) = 85 + { + (m.p)++ + goto _out + } - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof49 } st_case_49: //line plugins/parsers/influx/machine.go:1089 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr31 case 13: @@ -1100,23 +1154,23 @@ tr128: case 92: goto tr32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st7 } goto tr29 -tr80: + tr80: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true - goto st7 + goto st7 st7: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof7 } st_case_7: //line plugins/parsers/influx/machine.go:1119 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr31 case 13: @@ -1130,140 +1184,155 @@ tr80: case 92: goto tr32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st7 } goto tr29 -tr32: + tr32: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st8 -tr81: + goto st8 + tr81: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st8 + goto st8 st8: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof8 } st_case_8: //line plugins/parsers/influx/machine.go:1159 - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto st0 } goto st1 -tr90: - ( m.cs) = 9 + tr90: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr112: - ( m.cs) = 9 + goto _again + tr112: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr119: - ( m.cs) = 9 + goto _again + tr119: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr124: - ( m.cs) = 9 + goto _again + tr124: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr129: - ( m.cs) = 9 + goto _again + tr129: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st9: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof9 } st_case_9: //line plugins/parsers/influx/machine.go:1234 - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr34 } goto st0 -tr86: + tr86: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st10 + goto st10 st10: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof10 } st_case_10: //line plugins/parsers/influx/machine.go:1250 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st50 } goto tr35 -tr87: + tr87: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st50 + goto st50 st50: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof50 } st_case_50: //line plugins/parsers/influx/machine.go:1266 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1272,34 +1341,37 @@ tr87: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st52 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 -tr88: - ( m.cs) = 51 + tr88: + (m.cs) = 51 //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st51: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof51 } st_case_51: //line plugins/parsers/influx/machine.go:1302 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr34 case 13: @@ -1307,16 +1379,16 @@ tr88: case 32: goto st51 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st51 } goto st0 st52: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof52 } st_case_52: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1325,20 +1397,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st53 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st53: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof53 } st_case_53: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1347,20 +1419,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st54 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st54: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof54 } st_case_54: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1369,20 +1441,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st55 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st55: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof55 } st_case_55: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1391,20 +1463,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st56 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st56: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof56 } st_case_56: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1413,20 +1485,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st57 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st57: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof57 } st_case_57: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1435,20 +1507,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st58 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st58: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof58 } st_case_58: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1457,20 +1529,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st59 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st59: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof59 } st_case_59: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1479,20 +1551,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st60 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st60: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof60 } st_case_60: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1501,20 +1573,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st61 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st61: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof61 } st_case_61: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1523,20 +1595,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st62 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st62: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof62 } st_case_62: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1545,20 +1617,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st63 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st63: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof63 } st_case_63: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1567,20 +1639,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st64 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st64: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof64 } st_case_64: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1589,20 +1661,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st65 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st65: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof65 } st_case_65: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1611,20 +1683,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st66 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st66: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof66 } st_case_66: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1633,20 +1705,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st67 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st67: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof67 } st_case_67: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1655,20 +1727,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st68 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st68: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof68 } st_case_68: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1677,20 +1749,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st69 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st69: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof69 } st_case_69: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1698,69 +1770,81 @@ tr88: case 32: goto tr88 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr88 } goto tr35 -tr113: - ( m.cs) = 11 + tr113: + (m.cs) = 11 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr120: - ( m.cs) = 11 + goto _again + tr120: + (m.cs) = 11 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr125: - ( m.cs) = 11 + goto _again + tr125: + (m.cs) = 11 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr130: - ( m.cs) = 11 + goto _again + tr130: + (m.cs) = 11 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st11: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof11 } st_case_11: //line plugins/parsers/influx/machine.go:1763 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr7 case 44: @@ -1770,89 +1854,89 @@ tr130: case 92: goto tr8 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr7 } goto tr5 -tr8: + tr8: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st12 + goto st12 st12: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof12 } st_case_12: //line plugins/parsers/influx/machine.go:1789 - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr7 } goto st3 -tr24: + tr24: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st13 + goto st13 st13: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof13 } st_case_13: //line plugins/parsers/influx/machine.go:1805 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 34: goto st6 case 92: goto st6 } goto tr7 -tr13: + tr13: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st14 + goto st14 st14: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof14 } st_case_14: //line plugins/parsers/influx/machine.go:1824 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 46: goto st15 case 48: goto st72 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st75 } goto tr7 -tr14: + tr14: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st15 + goto st15 st15: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof15 } st_case_15: //line plugins/parsers/influx/machine.go:1846 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st70 } goto tr7 st70: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof70 } st_case_70: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -1867,20 +1951,20 @@ tr14: goto st16 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st70 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st16: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof16 } st_case_16: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 34: goto st17 case 43: @@ -1888,25 +1972,25 @@ tr14: case 45: goto st17 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st71 } goto tr7 st17: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof17 } st_case_17: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st71 } goto tr7 st71: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof71 } st_case_71: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -1917,20 +2001,20 @@ tr14: goto tr113 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st71 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st72: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof72 } st_case_72: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -1949,20 +2033,20 @@ tr14: goto st74 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st73 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st73: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof73 } st_case_73: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -1979,20 +2063,20 @@ tr14: goto st16 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st73 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st74: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof74 } st_case_74: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr118 case 13: @@ -2002,16 +2086,16 @@ tr14: case 44: goto tr120 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr117 } goto tr82 st75: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof75 } st_case_75: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -2030,27 +2114,27 @@ tr14: goto st74 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st75 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 -tr15: + tr15: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st76 + goto st76 st76: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof76 } st_case_76: //line plugins/parsers/influx/machine.go:2053 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -2071,20 +2155,20 @@ tr15: goto st77 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st73 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st77: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof77 } st_case_77: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr123 case 13: @@ -2094,23 +2178,23 @@ tr15: case 44: goto tr125 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr122 } goto tr82 -tr16: + tr16: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st78 + goto st78 st78: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof78 } st_case_78: //line plugins/parsers/influx/machine.go:2113 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -2131,27 +2215,27 @@ tr16: goto st77 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st78 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 -tr17: + tr17: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st79 + goto st79 st79: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof79 } st_case_79: //line plugins/parsers/influx/machine.go:2154 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2165,43 +2249,43 @@ tr17: case 97: goto st21 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 st18: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof18 } st_case_18: - if ( m.data)[( m.p)] == 76 { + if (m.data)[(m.p)] == 76 { goto st19 } goto tr7 st19: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof19 } st_case_19: - if ( m.data)[( m.p)] == 83 { + if (m.data)[(m.p)] == 83 { goto st20 } goto tr7 st20: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof20 } st_case_20: - if ( m.data)[( m.p)] == 69 { + if (m.data)[(m.p)] == 69 { goto st80 } goto tr7 st80: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof80 } st_case_80: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2211,50 +2295,50 @@ tr17: case 44: goto tr130 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 st21: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof21 } st_case_21: - if ( m.data)[( m.p)] == 108 { + if (m.data)[(m.p)] == 108 { goto st22 } goto tr7 st22: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof22 } st_case_22: - if ( m.data)[( m.p)] == 115 { + if (m.data)[(m.p)] == 115 { goto st23 } goto tr7 st23: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof23 } st_case_23: - if ( m.data)[( m.p)] == 101 { + if (m.data)[(m.p)] == 101 { goto st80 } goto tr7 -tr18: + tr18: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st81 + goto st81 st81: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof81 } st_case_81: //line plugins/parsers/influx/machine.go:2257 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2268,41 +2352,41 @@ tr18: case 114: goto st25 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 st24: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof24 } st_case_24: - if ( m.data)[( m.p)] == 85 { + if (m.data)[(m.p)] == 85 { goto st20 } goto tr7 st25: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof25 } st_case_25: - if ( m.data)[( m.p)] == 117 { + if (m.data)[(m.p)] == 117 { goto st23 } goto tr7 -tr19: + tr19: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st82 + goto st82 st82: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof82 } st_case_82: //line plugins/parsers/influx/machine.go:2305 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2314,23 +2398,23 @@ tr19: case 97: goto st21 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 -tr20: + tr20: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st83 + goto st83 st83: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof83 } st_case_83: //line plugins/parsers/influx/machine.go:2333 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2342,43 +2426,49 @@ tr20: case 114: goto st25 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 -tr3: - ( m.cs) = 26 + tr3: + (m.cs) = 26 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr57: - ( m.cs) = 26 + goto _again + tr57: + (m.cs) = 26 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st26: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof26 } st_case_26: //line plugins/parsers/influx/machine.go:2381 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2388,23 +2478,23 @@ tr57: case 92: goto tr49 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto tr48 -tr48: + tr48: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st27 + goto st27 st27: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof27 } st_case_27: //line plugins/parsers/influx/machine.go:2407 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2414,23 +2504,23 @@ tr48: case 92: goto st32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st27 -tr51: + tr51: //line plugins/parsers/influx/machine.go.rl:95 - m.key = m.text() + m.key = m.text() - goto st28 + goto st28 st28: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof28 } st_case_28: //line plugins/parsers/influx/machine.go:2433 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2440,23 +2530,23 @@ tr51: case 92: goto tr54 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto tr53 -tr53: + tr53: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st29 + goto st29 st29: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof29 } st_case_29: //line plugins/parsers/influx/machine.go:2459 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr2 case 13: @@ -2470,39 +2560,39 @@ tr53: case 92: goto st30 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr56 } goto st29 -tr54: + tr54: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st30 + goto st30 st30: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof30 } st_case_30: //line plugins/parsers/influx/machine.go:2489 - if ( m.data)[( m.p)] == 92 { + if (m.data)[(m.p)] == 92 { goto st31 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st29 st31: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { goto _test_eof31 } st_case_31: //line plugins/parsers/influx/machine.go:2505 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr2 case 13: @@ -2516,39 +2606,39 @@ tr54: case 92: goto st30 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr56 } goto st29 -tr49: + tr49: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st32 + goto st32 st32: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof32 } st_case_32: //line plugins/parsers/influx/machine.go:2535 - if ( m.data)[( m.p)] == 92 { + if (m.data)[(m.p)] == 92 { goto st33 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st27 st33: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { goto _test_eof33 } st_case_33: //line plugins/parsers/influx/machine.go:2551 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2558,44 +2648,46 @@ tr49: case 92: goto st32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st27 st34: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof34 } st_case_34: - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr62 } goto st34 -tr62: + tr62: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line //line plugins/parsers/influx/machine.go.rl:78 - {goto st85 } + { + goto st85 + } - goto st84 + goto st84 st84: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof84 } st_case_84: //line plugins/parsers/influx/machine.go:2592 goto st0 st37: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof37 } st_case_37: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr31 case 35: @@ -2605,27 +2697,27 @@ tr62: case 92: goto tr66 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr31 } goto tr65 -tr65: + tr65: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st86 + goto st86 st86: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof86 } st_case_86: //line plugins/parsers/influx/machine.go:2628 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr138 case 13: @@ -2637,138 +2729,159 @@ tr65: case 92: goto st45 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr2 } goto st86 -tr67: + tr67: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st87 -tr138: - ( m.cs) = 87 + goto st87 + tr138: + (m.cs) = 87 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr142: - ( m.cs) = 87 + goto _again + tr142: + (m.cs) = 87 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again + goto _again st87: //line plugins/parsers/influx/machine.go.rl:172 - m.finishMetric = true - ( m.cs) = 85; - {( m.p)++; goto _out } + m.finishMetric = true + (m.cs) = 85 + { + (m.p)++ + goto _out + } - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof87 } st_case_87: //line plugins/parsers/influx/machine.go:2702 goto st0 -tr139: - ( m.cs) = 38 + tr139: + (m.cs) = 38 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr143: - ( m.cs) = 38 + goto _again + tr143: + (m.cs) = 38 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st38: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof38 } st_case_38: //line plugins/parsers/influx/machine.go:2735 - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr67 } goto st0 -tr140: - ( m.cs) = 39 + tr140: + (m.cs) = 39 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr144: - ( m.cs) = 39 + goto _again + tr144: + (m.cs) = 39 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st39: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof39 } st_case_39: //line plugins/parsers/influx/machine.go:2771 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2778,23 +2891,23 @@ tr144: case 92: goto tr69 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto tr68 -tr68: + tr68: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st40 + goto st40 st40: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof40 } st_case_40: //line plugins/parsers/influx/machine.go:2797 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2804,23 +2917,23 @@ tr68: case 92: goto st43 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st40 -tr71: + tr71: //line plugins/parsers/influx/machine.go.rl:95 - m.key = m.text() + m.key = m.text() - goto st41 + goto st41 st41: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof41 } st_case_41: //line plugins/parsers/influx/machine.go:2823 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2830,23 +2943,23 @@ tr71: case 92: goto tr74 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto tr73 -tr73: + tr73: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st88 + goto st88 st88: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof88 } st_case_88: //line plugins/parsers/influx/machine.go:2849 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr142 case 13: @@ -2860,39 +2973,39 @@ tr73: case 92: goto st42 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr2 } goto st88 -tr74: + tr74: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st42 + goto st42 st42: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof42 } st_case_42: //line plugins/parsers/influx/machine.go:2879 - if ( m.data)[( m.p)] == 92 { + if (m.data)[(m.p)] == 92 { goto st89 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st88 st89: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { goto _test_eof89 } st_case_89: //line plugins/parsers/influx/machine.go:2895 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr142 case 13: @@ -2906,39 +3019,39 @@ tr74: case 92: goto st42 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr2 } goto st88 -tr69: + tr69: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st43 + goto st43 st43: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof43 } st_case_43: //line plugins/parsers/influx/machine.go:2925 - if ( m.data)[( m.p)] == 92 { + if (m.data)[(m.p)] == 92 { goto st44 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st40 st44: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { goto _test_eof44 } st_case_44: //line plugins/parsers/influx/machine.go:2941 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2948,45 +3061,45 @@ tr69: case 92: goto st43 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st40 -tr66: + tr66: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st45 + goto st45 st45: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof45 } st_case_45: //line plugins/parsers/influx/machine.go:2971 - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto st0 } goto st86 -tr63: + tr63: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st85 + goto st85 st85: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof85 } st_case_85: //line plugins/parsers/influx/machine.go:2989 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr63 case 13: @@ -2996,312 +3109,554 @@ tr63: case 35: goto st36 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st85 } goto tr135 st35: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof35 } st_case_35: - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr63 } goto st0 st36: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof36 } st_case_36: - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr63 } goto st36 st_out: - _test_eof46: ( m.cs) = 46; goto _test_eof - _test_eof1: ( m.cs) = 1; goto _test_eof - _test_eof2: ( m.cs) = 2; goto _test_eof - _test_eof3: ( m.cs) = 3; goto _test_eof - _test_eof4: ( m.cs) = 4; goto _test_eof - _test_eof5: ( m.cs) = 5; goto _test_eof - _test_eof6: ( m.cs) = 6; goto _test_eof - _test_eof47: ( m.cs) = 47; goto _test_eof - _test_eof48: ( m.cs) = 48; goto _test_eof - _test_eof49: ( m.cs) = 49; goto _test_eof - _test_eof7: ( m.cs) = 7; goto _test_eof - _test_eof8: ( m.cs) = 8; goto _test_eof - _test_eof9: ( m.cs) = 9; goto _test_eof - _test_eof10: ( m.cs) = 10; goto _test_eof - _test_eof50: ( m.cs) = 50; goto _test_eof - _test_eof51: ( m.cs) = 51; goto _test_eof - _test_eof52: ( m.cs) = 52; goto _test_eof - _test_eof53: ( m.cs) = 53; goto _test_eof - _test_eof54: ( m.cs) = 54; goto _test_eof - _test_eof55: ( m.cs) = 55; goto _test_eof - _test_eof56: ( m.cs) = 56; goto _test_eof - _test_eof57: ( m.cs) = 57; goto _test_eof - _test_eof58: ( m.cs) = 58; goto _test_eof - _test_eof59: ( m.cs) = 59; goto _test_eof - _test_eof60: ( m.cs) = 60; goto _test_eof - _test_eof61: ( m.cs) = 61; goto _test_eof - _test_eof62: ( m.cs) = 62; goto _test_eof - _test_eof63: ( m.cs) = 63; goto _test_eof - _test_eof64: ( m.cs) = 64; goto _test_eof - _test_eof65: ( m.cs) = 65; goto _test_eof - _test_eof66: ( m.cs) = 66; goto _test_eof - _test_eof67: ( m.cs) = 67; goto _test_eof - _test_eof68: ( m.cs) = 68; goto _test_eof - _test_eof69: ( m.cs) = 69; goto _test_eof - _test_eof11: ( m.cs) = 11; goto _test_eof - _test_eof12: ( m.cs) = 12; goto _test_eof - _test_eof13: ( m.cs) = 13; goto _test_eof - _test_eof14: ( m.cs) = 14; goto _test_eof - _test_eof15: ( m.cs) = 15; goto _test_eof - _test_eof70: ( m.cs) = 70; goto _test_eof - _test_eof16: ( m.cs) = 16; goto _test_eof - _test_eof17: ( m.cs) = 17; goto _test_eof - _test_eof71: ( m.cs) = 71; goto _test_eof - _test_eof72: ( m.cs) = 72; goto _test_eof - _test_eof73: ( m.cs) = 73; goto _test_eof - _test_eof74: ( m.cs) = 74; goto _test_eof - _test_eof75: ( m.cs) = 75; goto _test_eof - _test_eof76: ( m.cs) = 76; goto _test_eof - _test_eof77: ( m.cs) = 77; goto _test_eof - _test_eof78: ( m.cs) = 78; goto _test_eof - _test_eof79: ( m.cs) = 79; goto _test_eof - _test_eof18: ( m.cs) = 18; goto _test_eof - _test_eof19: ( m.cs) = 19; goto _test_eof - _test_eof20: ( m.cs) = 20; goto _test_eof - _test_eof80: ( m.cs) = 80; goto _test_eof - _test_eof21: ( m.cs) = 21; goto _test_eof - _test_eof22: ( m.cs) = 22; goto _test_eof - _test_eof23: ( m.cs) = 23; goto _test_eof - _test_eof81: ( m.cs) = 81; goto _test_eof - _test_eof24: ( m.cs) = 24; goto _test_eof - _test_eof25: ( m.cs) = 25; goto _test_eof - _test_eof82: ( m.cs) = 82; goto _test_eof - _test_eof83: ( m.cs) = 83; goto _test_eof - _test_eof26: ( m.cs) = 26; goto _test_eof - _test_eof27: ( m.cs) = 27; goto _test_eof - _test_eof28: ( m.cs) = 28; goto _test_eof - _test_eof29: ( m.cs) = 29; goto _test_eof - _test_eof30: ( m.cs) = 30; goto _test_eof - _test_eof31: ( m.cs) = 31; goto _test_eof - _test_eof32: ( m.cs) = 32; goto _test_eof - _test_eof33: ( m.cs) = 33; goto _test_eof - _test_eof34: ( m.cs) = 34; goto _test_eof - _test_eof84: ( m.cs) = 84; goto _test_eof - _test_eof37: ( m.cs) = 37; goto _test_eof - _test_eof86: ( m.cs) = 86; goto _test_eof - _test_eof87: ( m.cs) = 87; goto _test_eof - _test_eof38: ( m.cs) = 38; goto _test_eof - _test_eof39: ( m.cs) = 39; goto _test_eof - _test_eof40: ( m.cs) = 40; goto _test_eof - _test_eof41: ( m.cs) = 41; goto _test_eof - _test_eof88: ( m.cs) = 88; goto _test_eof - _test_eof42: ( m.cs) = 42; goto _test_eof - _test_eof89: ( m.cs) = 89; goto _test_eof - _test_eof43: ( m.cs) = 43; goto _test_eof - _test_eof44: ( m.cs) = 44; goto _test_eof - _test_eof45: ( m.cs) = 45; goto _test_eof - _test_eof85: ( m.cs) = 85; goto _test_eof - _test_eof35: ( m.cs) = 35; goto _test_eof - _test_eof36: ( m.cs) = 36; goto _test_eof - - _test_eof: {} - if ( m.p) == ( m.eof) { - switch ( m.cs) { - case 7, 37: + _test_eof46: + (m.cs) = 46 + goto _test_eof + _test_eof1: + (m.cs) = 1 + goto _test_eof + _test_eof2: + (m.cs) = 2 + goto _test_eof + _test_eof3: + (m.cs) = 3 + goto _test_eof + _test_eof4: + (m.cs) = 4 + goto _test_eof + _test_eof5: + (m.cs) = 5 + goto _test_eof + _test_eof6: + (m.cs) = 6 + goto _test_eof + _test_eof47: + (m.cs) = 47 + goto _test_eof + _test_eof48: + (m.cs) = 48 + goto _test_eof + _test_eof49: + (m.cs) = 49 + goto _test_eof + _test_eof7: + (m.cs) = 7 + goto _test_eof + _test_eof8: + (m.cs) = 8 + goto _test_eof + _test_eof9: + (m.cs) = 9 + goto _test_eof + _test_eof10: + (m.cs) = 10 + goto _test_eof + _test_eof50: + (m.cs) = 50 + goto _test_eof + _test_eof51: + (m.cs) = 51 + goto _test_eof + _test_eof52: + (m.cs) = 52 + goto _test_eof + _test_eof53: + (m.cs) = 53 + goto _test_eof + _test_eof54: + (m.cs) = 54 + goto _test_eof + _test_eof55: + (m.cs) = 55 + goto _test_eof + _test_eof56: + (m.cs) = 56 + goto _test_eof + _test_eof57: + (m.cs) = 57 + goto _test_eof + _test_eof58: + (m.cs) = 58 + goto _test_eof + _test_eof59: + (m.cs) = 59 + goto _test_eof + _test_eof60: + (m.cs) = 60 + goto _test_eof + _test_eof61: + (m.cs) = 61 + goto _test_eof + _test_eof62: + (m.cs) = 62 + goto _test_eof + _test_eof63: + (m.cs) = 63 + goto _test_eof + _test_eof64: + (m.cs) = 64 + goto _test_eof + _test_eof65: + (m.cs) = 65 + goto _test_eof + _test_eof66: + (m.cs) = 66 + goto _test_eof + _test_eof67: + (m.cs) = 67 + goto _test_eof + _test_eof68: + (m.cs) = 68 + goto _test_eof + _test_eof69: + (m.cs) = 69 + goto _test_eof + _test_eof11: + (m.cs) = 11 + goto _test_eof + _test_eof12: + (m.cs) = 12 + goto _test_eof + _test_eof13: + (m.cs) = 13 + goto _test_eof + _test_eof14: + (m.cs) = 14 + goto _test_eof + _test_eof15: + (m.cs) = 15 + goto _test_eof + _test_eof70: + (m.cs) = 70 + goto _test_eof + _test_eof16: + (m.cs) = 16 + goto _test_eof + _test_eof17: + (m.cs) = 17 + goto _test_eof + _test_eof71: + (m.cs) = 71 + goto _test_eof + _test_eof72: + (m.cs) = 72 + goto _test_eof + _test_eof73: + (m.cs) = 73 + goto _test_eof + _test_eof74: + (m.cs) = 74 + goto _test_eof + _test_eof75: + (m.cs) = 75 + goto _test_eof + _test_eof76: + (m.cs) = 76 + goto _test_eof + _test_eof77: + (m.cs) = 77 + goto _test_eof + _test_eof78: + (m.cs) = 78 + goto _test_eof + _test_eof79: + (m.cs) = 79 + goto _test_eof + _test_eof18: + (m.cs) = 18 + goto _test_eof + _test_eof19: + (m.cs) = 19 + goto _test_eof + _test_eof20: + (m.cs) = 20 + goto _test_eof + _test_eof80: + (m.cs) = 80 + goto _test_eof + _test_eof21: + (m.cs) = 21 + goto _test_eof + _test_eof22: + (m.cs) = 22 + goto _test_eof + _test_eof23: + (m.cs) = 23 + goto _test_eof + _test_eof81: + (m.cs) = 81 + goto _test_eof + _test_eof24: + (m.cs) = 24 + goto _test_eof + _test_eof25: + (m.cs) = 25 + goto _test_eof + _test_eof82: + (m.cs) = 82 + goto _test_eof + _test_eof83: + (m.cs) = 83 + goto _test_eof + _test_eof26: + (m.cs) = 26 + goto _test_eof + _test_eof27: + (m.cs) = 27 + goto _test_eof + _test_eof28: + (m.cs) = 28 + goto _test_eof + _test_eof29: + (m.cs) = 29 + goto _test_eof + _test_eof30: + (m.cs) = 30 + goto _test_eof + _test_eof31: + (m.cs) = 31 + goto _test_eof + _test_eof32: + (m.cs) = 32 + goto _test_eof + _test_eof33: + (m.cs) = 33 + goto _test_eof + _test_eof34: + (m.cs) = 34 + goto _test_eof + _test_eof84: + (m.cs) = 84 + goto _test_eof + _test_eof37: + (m.cs) = 37 + goto _test_eof + _test_eof86: + (m.cs) = 86 + goto _test_eof + _test_eof87: + (m.cs) = 87 + goto _test_eof + _test_eof38: + (m.cs) = 38 + goto _test_eof + _test_eof39: + (m.cs) = 39 + goto _test_eof + _test_eof40: + (m.cs) = 40 + goto _test_eof + _test_eof41: + (m.cs) = 41 + goto _test_eof + _test_eof88: + (m.cs) = 88 + goto _test_eof + _test_eof42: + (m.cs) = 42 + goto _test_eof + _test_eof89: + (m.cs) = 89 + goto _test_eof + _test_eof43: + (m.cs) = 43 + goto _test_eof + _test_eof44: + (m.cs) = 44 + goto _test_eof + _test_eof45: + (m.cs) = 45 + goto _test_eof + _test_eof85: + (m.cs) = 85 + goto _test_eof + _test_eof35: + (m.cs) = 35 + goto _test_eof + _test_eof36: + (m.cs) = 36 + goto _test_eof + + _test_eof: + { + } + if (m.p) == (m.eof) { + switch m.cs { + case 7, 37: //line plugins/parsers/influx/machine.go.rl:32 - err = ErrNameParse - ( m.p)-- + err = ErrNameParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25: + case 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25: //line plugins/parsers/influx/machine.go.rl:39 - err = ErrFieldParse - ( m.p)-- + err = ErrFieldParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 26, 27, 28, 30, 32, 33, 39, 40, 41, 42, 43, 44: + case 26, 27, 28, 30, 32, 33, 39, 40, 41, 42, 43, 44: //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 10: + case 10: //line plugins/parsers/influx/machine.go.rl:53 - err = ErrTimestampParse - ( m.p)-- + err = ErrTimestampParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 86: + case 86: //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } - case 88, 89: + case 88, 89: //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } - case 47, 48, 49, 51: + case 47, 48, 49, 51: //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 46: + case 46: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 1: + case 1: //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 29, 31: + case 29, 31: //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 74: + case 74: //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 77: + case 77: //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 70, 71, 72, 73, 75, 76, 78: + case 70, 71, 72, 73, 75, 76, 78: //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 79, 80, 81, 82, 83: + case 79, 80, 81, 82, 83: //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69: + case 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69: //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true //line plugins/parsers/influx/machine.go:3301 + } } - } - _out: {} + _out: + { + } } //line plugins/parsers/influx/machine.go.rl:415 @@ -3364,7 +3719,7 @@ type streamMachine struct { func NewStreamMachine(r io.Reader, handler Handler) *streamMachine { m := &streamMachine{ machine: NewMachine(handler), - reader: r, + reader: r, } m.machine.SetData(make([]byte, 1024)) @@ -3394,7 +3749,7 @@ func (m *streamMachine) Next() error { for { // Expand the buffer if it is full if m.machine.pe == len(m.machine.data) { - expanded := make([]byte, 2 * len(m.machine.data)) + expanded := make([]byte, 2*len(m.machine.data)) copy(expanded, m.machine.data) m.machine.data = expanded } diff --git a/plugins/processors/filepath/filepath_test.go b/plugins/processors/filepath/filepath_test.go index a305c4c5c2f29..c6a3262921407 100644 --- a/plugins/processors/filepath/filepath_test.go +++ b/plugins/processors/filepath/filepath_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package filepath diff --git a/plugins/processors/port_name/services_path.go b/plugins/processors/port_name/services_path.go index c8cf73d14157c..3b9a4ce579c9a 100644 --- a/plugins/processors/port_name/services_path.go +++ b/plugins/processors/port_name/services_path.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package portname diff --git a/plugins/processors/port_name/services_path_notwindows.go b/plugins/processors/port_name/services_path_notwindows.go index 5097bfa9c6140..5fd30eb59671d 100644 --- a/plugins/processors/port_name/services_path_notwindows.go +++ b/plugins/processors/port_name/services_path_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package portname diff --git a/scripts/alpine.docker b/scripts/alpine.docker index d5b8b85f6abb7..8c2418083ef8c 100644 --- a/scripts/alpine.docker +++ b/scripts/alpine.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.6 as builder +FROM golang:1.17.0 as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/buster.docker b/scripts/buster.docker index 685d30067e0ef..fbb18eee24f17 100644 --- a/scripts/buster.docker +++ b/scripts/buster.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.6-buster as builder +FROM golang:1.17.0-buster as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/ci-1.16.docker b/scripts/ci-1.16.docker index f0b2badafd521..ab1683329e633 100644 --- a/scripts/ci-1.16.docker +++ b/scripts/ci-1.16.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.6 +FROM golang:1.16.7 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/ci-1.15.docker b/scripts/ci-1.17.docker similarity index 95% rename from scripts/ci-1.15.docker rename to scripts/ci-1.17.docker index 2b87f29be4e3e..574ab7be7a896 100644 --- a/scripts/ci-1.15.docker +++ b/scripts/ci-1.17.docker @@ -1,4 +1,4 @@ -FROM golang:1.15.8 +FROM golang:1.17.0 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/mac_installgo.sh b/scripts/mac_installgo.sh index aab4731c22f30..cb41ee5f666cd 100644 --- a/scripts/mac_installgo.sh +++ b/scripts/mac_installgo.sh @@ -3,8 +3,8 @@ set -eux GO_ARCH="darwin-amd64" -GO_VERSION="1.16.6" -GO_VERSION_SHA="e4e83e7c6891baa00062ed37273ce95835f0be77ad8203a29ec56dbf3d87508a" # from https://golang.org/dl +GO_VERSION="1.17" +GO_VERSION_SHA="355bd544ce08d7d484d9d7de05a71b5c6f5bc10aa4b316688c2192aeb3dacfd1" # from https://golang.org/dl # This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.) path="/usr/local/Cellar" From 9ac5ae72d2e68f55ed6032dea937addad05a74d9 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 24 Aug 2021 13:18:08 -0700 Subject: [PATCH 576/761] docs: update links (#9632) --- README.md | 8 ++++++++ docs/DATA_FORMATS_INPUT.md | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 763ed861e0533..2097ea5c37e2b 100644 --- a/README.md +++ b/README.md @@ -383,10 +383,14 @@ For documentation on the latest development code see the [documentation index][d - [Graphite](/plugins/parsers/graphite) - [Grok](/plugins/parsers/grok) - [JSON](/plugins/parsers/json) +- [JSON v2](/plugins/parsers/json_v2) - [Logfmt](/plugins/parsers/logfmt) - [Nagios](/plugins/parsers/nagios) +- [Prometheus](/plugins/parsers/prometheus) +- [Prometheus Remote Write](/plugins/parsers/prometheusremotewrite) - [Value](/plugins/parsers/value), ie: 45 or "booyah" - [Wavefront](/plugins/parsers/wavefront) +- [XPath](/plugins/parsers/xpath) (supports XML, JSON, MessagePack, Protocol Buffers) ## Serializers @@ -395,6 +399,8 @@ For documentation on the latest development code see the [documentation index][d - [Graphite](/plugins/serializers/graphite) - [JSON](/plugins/serializers/json) - [MessagePack](/plugins/serializers/msgpack) +- [Prometheus](/plugins/serializers/prometheus) +- [Prometheus Remote Write](/plugins/serializers/prometheusremotewrite) - [ServiceNow](/plugins/serializers/nowmetric) - [SplunkMetric](/plugins/serializers/splunkmetric) - [Wavefront](/plugins/serializers/wavefront) @@ -429,10 +435,12 @@ For documentation on the latest development code see the [documentation index][d ## Aggregator Plugins * [basicstats](./plugins/aggregators/basicstats) +* [derivative](./plugins/aggregators/derivative) * [final](./plugins/aggregators/final) * [histogram](./plugins/aggregators/histogram) * [merge](./plugins/aggregators/merge) * [minmax](./plugins/aggregators/minmax) +* [quantile](./plugins/aggregators/quantile) * [valuecounter](./plugins/aggregators/valuecounter) ## Output Plugins diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 2550e7e1044cc..cb04d3e009030 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -12,13 +12,14 @@ Protocol or in JSON format. - [Grok](/plugins/parsers/grok) - [InfluxDB Line Protocol](/plugins/parsers/influx) - [JSON](/plugins/parsers/json) +- [JSON v2](/plugins/parsers/json_v2) - [Logfmt](/plugins/parsers/logfmt) - [Nagios](/plugins/parsers/nagios) - [Prometheus](/plugins/parsers/prometheus) - [PrometheusRemoteWrite](/plugins/parsers/prometheusremotewrite) - [Value](/plugins/parsers/value), ie: 45 or "booyah" - [Wavefront](/plugins/parsers/wavefront) -- [XML](/plugins/parsers/xml) +- [XPath](/plugins/parsers/xpath) (supports XML, JSON, MessagePack, Protocol Buffers) Any input plugin containing the `data_format` option can use it to select the desired parser: From 8e8074e47b7d00335e9b9aecc67e417351a4e82d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Aug 2021 16:08:15 -0600 Subject: [PATCH 577/761] fix: bump github.com/tinylib/msgp from 1.1.5 to 1.1.6 (#9652) --- go.mod | 2 +- go.sum | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index f09d594111d42..7509cf6ac9b0a 100644 --- a/go.mod +++ b/go.mod @@ -247,7 +247,7 @@ require ( github.com/tidwall/gjson v1.8.0 github.com/tidwall/match v1.0.3 // indirect github.com/tidwall/pretty v1.1.0 // indirect - github.com/tinylib/msgp v1.1.5 + github.com/tinylib/msgp v1.1.6 github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 diff --git a/go.sum b/go.sum index d17f8209df7da..8e2fbee60fd3f 100644 --- a/go.sum +++ b/go.sum @@ -1473,15 +1473,14 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8= github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0= -github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= +github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= From 8daba8aa19e2c504ad62bc76cc08f0c47d6f30f7 Mon Sep 17 00:00:00 2001 From: Phil Bracikowski <13472206+philjb@users.noreply.github.com> Date: Wed, 25 Aug 2021 15:43:06 -0700 Subject: [PATCH 578/761] chore(influxv2plugin): Increase accepted retry-after header values. (#9619) --- plugins/outputs/influxdb_v2/http.go | 15 ++++---- .../outputs/influxdb_v2/http_internal_test.go | 36 ++++++++++++++++--- 2 files changed, 40 insertions(+), 11 deletions(-) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index e62919cf43b13..e8df4da7d2041 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -36,8 +36,9 @@ func (e APIError) Error() string { } const ( - defaultRequestTimeout = time.Second * 5 - defaultMaxWait = 60 // seconds + defaultRequestTimeout = time.Second * 5 + defaultMaxWaitSeconds = 60 + defaultMaxWaitRetryAfterSeconds = 10 * 60 ) type HTTPConfig struct { @@ -306,8 +307,9 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te // retryDuration takes the longer of the Retry-After header and our own back-off calculation func (c *httpClient) getRetryDuration(headers http.Header) time.Duration { // basic exponential backoff (x^2)/40 (denominator to widen the slope) - // at 40 denominator, it'll take 35 retries to hit the max defaultMaxWait of 30s + // at 40 denominator, it'll take 49 retries to hit the max defaultMaxWait of 60s backoff := math.Pow(float64(c.retryCount), 2) / 40 + backoff = math.Min(backoff, defaultMaxWaitSeconds) // get any value from the header, if available retryAfterHeader := float64(0) @@ -319,11 +321,12 @@ func (c *httpClient) getRetryDuration(headers http.Header) time.Duration { // there was a value but we couldn't parse it? guess minimum 10 sec retryAfterHeader = 10 } + // protect against excessively large retry-after + retryAfterHeader = math.Min(retryAfterHeader, defaultMaxWaitRetryAfterSeconds) } - // take the highest value from both, but not over the max wait. + // take the highest value of backoff and retry-after. retry := math.Max(backoff, retryAfterHeader) - retry = math.Min(retry, defaultMaxWait) - return time.Duration(retry) * time.Second + return time.Duration(retry*1000) * time.Millisecond } func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { diff --git a/plugins/outputs/influxdb_v2/http_internal_test.go b/plugins/outputs/influxdb_v2/http_internal_test.go index 2ff4990fa8a3b..10e2a4e133eeb 100644 --- a/plugins/outputs/influxdb_v2/http_internal_test.go +++ b/plugins/outputs/influxdb_v2/http_internal_test.go @@ -56,12 +56,12 @@ func TestExponentialBackoffCalculation(t *testing.T) { expected time.Duration }{ {retryCount: 0, expected: 0}, - {retryCount: 1, expected: 0}, - {retryCount: 5, expected: 0}, - {retryCount: 10, expected: 2 * time.Second}, - {retryCount: 30, expected: 22 * time.Second}, + {retryCount: 1, expected: 25 * time.Millisecond}, + {retryCount: 5, expected: 625 * time.Millisecond}, + {retryCount: 10, expected: 2500 * time.Millisecond}, + {retryCount: 30, expected: 22500 * time.Millisecond}, {retryCount: 40, expected: 40 * time.Second}, - {retryCount: 50, expected: 60 * time.Second}, + {retryCount: 50, expected: 60 * time.Second}, // max hit {retryCount: 100, expected: 60 * time.Second}, {retryCount: 1000, expected: 60 * time.Second}, } @@ -72,3 +72,29 @@ func TestExponentialBackoffCalculation(t *testing.T) { }) } } + +func TestExponentialBackoffCalculationWithRetryAfter(t *testing.T) { + c := &httpClient{} + tests := []struct { + retryCount int + retryAfter string + expected time.Duration + }{ + {retryCount: 0, retryAfter: "0", expected: 0}, + {retryCount: 0, retryAfter: "10", expected: 10 * time.Second}, + {retryCount: 0, retryAfter: "60", expected: 60 * time.Second}, + {retryCount: 0, retryAfter: "600", expected: 600 * time.Second}, + {retryCount: 0, retryAfter: "601", expected: 600 * time.Second}, // max hit + {retryCount: 40, retryAfter: "39", expected: 40 * time.Second}, // retryCount wins + {retryCount: 40, retryAfter: "41", expected: 41 * time.Second}, // retryAfter wins + {retryCount: 100, retryAfter: "100", expected: 100 * time.Second}, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_retries", test.retryCount), func(t *testing.T) { + c.retryCount = test.retryCount + hdr := http.Header{} + hdr.Add("Retry-After", test.retryAfter) + require.EqualValues(t, test.expected, c.getRetryDuration(hdr)) + }) + } +} From 0ce9c2e9f6c1339c53e4f763a4eb2fd03779ab06 Mon Sep 17 00:00:00 2001 From: Daniel Dyla Date: Thu, 26 Aug 2021 12:57:22 -0400 Subject: [PATCH 579/761] fix(dt-output): remove hardcoded int value (#9676) --- plugins/outputs/dynatrace/dynatrace.go | 2 +- plugins/outputs/dynatrace/dynatrace_test.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index c66bc8da2171e..470eb0e2cd0c6 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -287,7 +287,7 @@ func getTypeOption(metricType telegraf.ValueType, field *telegraf.Field) dtMetri case uint64: return dtMetric.WithIntGaugeValue(int64(v)) case int64: - return dtMetric.WithIntGaugeValue(32) + return dtMetric.WithIntGaugeValue(v) case bool: if v { return dtMetric.WithIntGaugeValue(1) diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index d9076906c1020..65cd3d2a86f0a 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -353,11 +353,11 @@ func TestSendMetricWithDefaultDimensions(t *testing.T) { require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed - require.Equal(t, len(bodyString), 79) + require.Equal(t, len(bodyString), 78) require.Regexp(t, regexp.MustCompile("^mymeasurement.value"), bodyString) require.Regexp(t, regexp.MustCompile("dt.metrics.source=telegraf"), bodyString) require.Regexp(t, regexp.MustCompile("dim=value"), bodyString) - require.Regexp(t, regexp.MustCompile("gauge,32 1289430000000$"), bodyString) + require.Regexp(t, regexp.MustCompile("gauge,2 1289430000000$"), bodyString) err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) require.NoError(t, err) })) @@ -378,7 +378,7 @@ func TestSendMetricWithDefaultDimensions(t *testing.T) { m1 := metric.New( "mymeasurement", map[string]string{}, - map[string]interface{}{"value": 32}, + map[string]interface{}{"value": 2}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) From 1a59157b91b35c5b8ddf7dc1d06b31846248d362 Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Thu, 26 Aug 2021 13:32:48 -0700 Subject: [PATCH 580/761] fix(mongodb): change command based on server version (#9674) --- plugins/inputs/mongodb/README.md | 2 + plugins/inputs/mongodb/mongodb_server.go | 27 ++++++++++-- plugins/inputs/mongodb/mongodb_server_test.go | 42 +++++++++++++++++++ 3 files changed, 67 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index ddcb1971f9667..15a474e6bb66a 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -1,5 +1,7 @@ # MongoDB Input Plugin +All MongoDB server versions from 2.6 and higher are supported. + ### Configuration: ```toml diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 723b0698b9ac8..79d3d36c6c038 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "go.mongodb.org/mongo-driver/bson/primitive" + "strconv" "strings" "time" @@ -126,11 +127,29 @@ func (s *Server) gatherClusterStatus() (*ClusterStatus, error) { }, nil } -func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) { +func poolStatsCommand(version string) (string, error) { + majorPart := string(version[0]) + major, err := strconv.ParseInt(majorPart, 10, 64) + if err != nil { + return "", err + } + + if major == 5 { + return "connPoolStats", nil + } + return "shardConnPoolStats", nil +} + +func (s *Server) gatherShardConnPoolStats(version string) (*ShardStats, error) { + command, err := poolStatsCommand(version) + if err != nil { + return nil, err + } + shardStats := &ShardStats{} - err := s.runCommand("admin", bson.D{ + err = s.runCommand("admin", bson.D{ { - Key: "shardConnPoolStats", + Key: command, Value: 1, }, }, &shardStats) @@ -272,7 +291,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, clusterStatus = status } - shardStats, err := s.gatherShardConnPoolStats() + shardStats, err := s.gatherShardConnPoolStats(serverStatus.Version) if err != nil { s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %s", err.Error())) } diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index 64fb191639105..c8fd9f7c15284 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -40,3 +40,45 @@ func TestAddDefaultStats(t *testing.T) { assert.True(t, acc.HasInt64Field("mongodb", key)) } } + +func TestPoolStatsVersionCompatibility(t *testing.T) { + tests := []struct { + name string + version string + expectedCommand string + err bool + }{ + { + name: "mongodb v3", + version: "3.0.0", + expectedCommand: "shardConnPoolStats", + }, + { + name: "mongodb v4", + version: "4.0.0", + expectedCommand: "shardConnPoolStats", + }, + { + name: "mongodb v5", + version: "5.0.0", + expectedCommand: "connPoolStats", + }, + { + name: "invalid version", + version: "v4", + err: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + command, err := poolStatsCommand(test.version) + require.Equal(t, test.expectedCommand, command) + if test.err { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} From 4dc2967e34f36a8da9d500620711334b9ff3b8d3 Mon Sep 17 00:00:00 2001 From: David B <36965011+DavidBuettner@users.noreply.github.com> Date: Thu, 26 Aug 2021 22:34:52 +0200 Subject: [PATCH 581/761] feat(plugins/inputs/systemd_units): add pattern support (#9665) --- plugins/inputs/systemd_units/README.md | 9 ++++- .../systemd_units/systemd_units_linux.go | 36 ++++++++++++++----- .../systemd_units/systemd_units_linux_test.go | 2 +- 3 files changed, 37 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/systemd_units/README.md b/plugins/inputs/systemd_units/README.md index 7fe09e224c564..f9d47d7df1252 100644 --- a/plugins/inputs/systemd_units/README.md +++ b/plugins/inputs/systemd_units/README.md @@ -1,7 +1,7 @@ # systemd Units Input Plugin The systemd_units plugin gathers systemd unit status on Linux. It relies on -`systemctl list-units --all --plain --type=service` to collect data on service status. +`systemctl list-units [PATTERN] --all --plain --type=service` to collect data on service status. The results are tagged with the unit name and provide enumerated fields for loaded, active and running fields, indicating the unit health. @@ -22,6 +22,13 @@ see `systemctl list-units --all --type help` for possible options. ## values are "socket", "target", "device", "mount", "automount", "swap", ## "timer", "path", "slice" and "scope ": # unittype = "service" + # + ## Filter for a specific pattern, default is "" (i.e. all), other possible + ## values are valid pattern for systemctl, e.g. "a*" for all units with + ## names starting with "a" + # pattern = "" + ## pattern = "telegraf* influxdb*" + ## pattern = "a*" ``` ### Metrics diff --git a/plugins/inputs/systemd_units/systemd_units_linux.go b/plugins/inputs/systemd_units/systemd_units_linux.go index e94b9432136e4..e41c64752977e 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux.go +++ b/plugins/inputs/systemd_units/systemd_units_linux.go @@ -18,10 +18,11 @@ import ( type SystemdUnits struct { Timeout config.Duration UnitType string `toml:"unittype"` + Pattern string `toml:"pattern"` systemctl systemctl } -type systemctl func(timeout config.Duration, unitType string) (*bytes.Buffer, error) +type systemctl func(timeout config.Duration, unitType string, pattern string) (*bytes.Buffer, error) const measurement = "systemd_units" @@ -115,6 +116,7 @@ var subMap = map[string]int{ var ( defaultTimeout = config.Duration(time.Second) defaultUnitType = "service" + defaultPattern = "" ) // Description returns a short description of the plugin @@ -132,12 +134,19 @@ func (s *SystemdUnits) SampleConfig() string { ## values are "socket", "target", "device", "mount", "automount", "swap", ## "timer", "path", "slice" and "scope ": # unittype = "service" + # + ## Filter for a specific pattern, default is "" (i.e. all), other possible + ## values are valid pattern for systemctl, e.g. "a*" for all units with + ## names starting with "a" + # pattern = "" + ## pattern = "telegraf* influxdb*" + ## pattern = "a*" ` } // Gather parses systemctl outputs and adds counters to the Accumulator func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { - out, err := s.systemctl(s.Timeout, s.UnitType) + out, err := s.systemctl(s.Timeout, s.UnitType, s.Pattern) if err != nil { return err } @@ -192,22 +201,32 @@ func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { return nil } -func setSystemctl(timeout config.Duration, unitType string) (*bytes.Buffer, error) { +func setSystemctl(timeout config.Duration, unitType string, pattern string) (*bytes.Buffer, error) { // is systemctl available ? systemctlPath, err := exec.LookPath("systemctl") if err != nil { return nil, err } - - cmd := exec.Command(systemctlPath, "list-units", "--all", "--plain", fmt.Sprintf("--type=%s", unitType), "--no-legend") - + // build parameters for systemctl call + params := []string{"list-units"} + // create patterns parameters if provided in config + if pattern != "" { + psplit := strings.SplitN(pattern, " ", -1) + for v := range psplit { + params = append(params, psplit[v]) + } + } + params = append(params, "--all", "--plain") + // add type as configured in config + params = append(params, fmt.Sprintf("--type=%s", unitType)) + params = append(params, "--no-legend") + cmd := exec.Command(systemctlPath, params...) var out bytes.Buffer cmd.Stdout = &out err = internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { - return &out, fmt.Errorf("error running systemctl list-units --all --plain --type=%s --no-legend: %s", unitType, err) + return &out, fmt.Errorf("error running systemctl %s: %s", strings.Join(params, " "), err) } - return &out, nil } @@ -217,6 +236,7 @@ func init() { systemctl: setSystemctl, Timeout: defaultTimeout, UnitType: defaultUnitType, + Pattern: defaultPattern, } }) } diff --git a/plugins/inputs/systemd_units/systemd_units_linux_test.go b/plugins/inputs/systemd_units/systemd_units_linux_test.go index a6cfbd6552771..05070c6ff5e94 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux_test.go +++ b/plugins/inputs/systemd_units/systemd_units_linux_test.go @@ -74,7 +74,7 @@ func TestSystemdUnits(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { systemdUnits := &SystemdUnits{ - systemctl: func(timeout config.Duration, unitType string) (*bytes.Buffer, error) { + systemctl: func(timeout config.Duration, unitType string, pattern string) (*bytes.Buffer, error) { return bytes.NewBufferString(tt.line), nil }, } From 31178e1cf3c7e93e657626d5d35f07a6c5481f29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 26 Aug 2021 15:36:53 -0500 Subject: [PATCH 582/761] fix: bump cloud.google.com/go/pubsub from 1.2.0 to 1.15.0 (#9655) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sebastian Spaink --- go.mod | 25 ++++---- go.sum | 177 ++++++++++++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 174 insertions(+), 28 deletions(-) diff --git a/go.mod b/go.mod index 7509cf6ac9b0a..ff441b60264d1 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,9 @@ module github.com/influxdata/telegraf go 1.17 require ( - cloud.google.com/go v0.56.0 - cloud.google.com/go/bigquery v1.4.0 - cloud.google.com/go/pubsub v1.2.0 + cloud.google.com/go v0.90.0 + cloud.google.com/go/bigquery v1.8.0 + cloud.google.com/go/pubsub v1.15.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.5.0 github.com/Azure/azure-amqp-common-go/v3 v3.0.0 // indirect @@ -267,7 +267,7 @@ require ( github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.mongodb.org/mongo-driver v1.5.3 - go.opencensus.io v0.22.3 // indirect + go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/atomic v1.7.0 // indirect @@ -276,20 +276,20 @@ require ( golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20210610132358-84b48f89b13b - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 + golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect golang.org/x/text v0.3.6 - golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect - golang.org/x/tools v0.1.2 + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + golang.org/x/tools v0.1.5 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect golang.zx2c4.com/wireguard v0.0.20200121 // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 - google.golang.org/api v0.29.0 - google.golang.org/appengine v1.6.6 // indirect - google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 - google.golang.org/grpc v1.39.0 + google.golang.org/api v0.54.0 + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20210813162853-db860fec028c + google.golang.org/grpc v1.39.1 google.golang.org/protobuf v1.27.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/djherbis/times.v1 v1.2.0 @@ -310,7 +310,6 @@ require ( gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gotest.tools v2.2.0+incompatible - honnef.co/go/tools v0.0.1-2020.1.4 // indirect k8s.io/api v0.20.4 k8s.io/apimachinery v0.21.1 k8s.io/client-go v0.20.4 diff --git a/go.sum b/go.sum index 8e2fbee60fd3f..d797edf56c569 100644 --- a/go.sum +++ b/go.sum @@ -12,24 +12,42 @@ cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gc cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0 h1:MjvSkUq8RuAb+2JLDi5VQmmExRJPUQ3JLCWpRB6fmdw= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.15.0 h1:6KI/wDVYLtNvzIPJ8ObuJcq5bBtAWQ6Suo8osHPvYn4= +cloud.google.com/go/pubsub v1.15.0/go.mod h1:DnEUPGZlp+N9MElp/6uVqCKiknQixvVLcrgrqT62O6A= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= @@ -317,6 +335,7 @@ github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= @@ -509,6 +528,7 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= @@ -704,6 +724,8 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -721,6 +743,7 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -755,12 +778,24 @@ github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -868,6 +903,7 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= @@ -1564,8 +1600,11 @@ go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c h1:3s2a2cav7u4W1b0cOYxmlj1y1NcVuDZwgUaAQ6wfImo= go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c/go.mod h1:PcHNnM+RUl0uD8VkSn93PO78N7kQYhfqpI/eki57pl4= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -1649,6 +1688,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= @@ -1659,6 +1699,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1685,6 +1727,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1703,15 +1746,20 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -1719,17 +1767,28 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b h1:k+E048sYJHyVnsr1GDrRZWQ32D2C7lWs9JRc0bel53A= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a h1:4Kd8OPUx1xgUwrHDaviWZO8MsgoZTZYC3g+8m16RBww= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1812,15 +1871,21 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200821140526-fda516888d29/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1833,21 +1898,32 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1866,8 +1942,9 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1923,18 +2000,37 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1965,17 +2061,33 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0 h1:ECJUVngj71QI6XEm7b1sAf8BljU5inEhMbKPR8Lxhhk= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1999,15 +2111,42 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 h1:pc16UedxnxXXtGxHCSUhafAoVHQZ0yXl8ZelMH4EETc= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c h1:iLQakcwWG3k/++1q/46apVb1sUQ3IqIdn9yUE6eh/xA= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -2026,13 +2165,22 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1 h1:f37vZbBVTiJ6jKG5mWz8ySOBxNqy6ViPgyhSdVnxF3E= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2135,7 +2283,6 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= From 2370d39e8905529de585758c8ccdfe11943d7dd4 Mon Sep 17 00:00:00 2001 From: reimda Date: Mon, 30 Aug 2021 09:13:08 -0600 Subject: [PATCH 583/761] fix: output timestamp with fractional seconds (#9625) --- plugins/outputs/graylog/graylog.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index 05feafe9effc1..cf5dc6dc5ac3b 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -214,7 +214,7 @@ func (g *Graylog) serialize(metric telegraf.Metric) ([]string, error) { m := make(map[string]interface{}) m["version"] = "1.1" - m["timestamp"] = metric.Time().UnixNano() / 1000000000 + m["timestamp"] = float64(metric.Time().UnixNano()) / 1_000_000_000 m["short_message"] = "telegraf" m["name"] = metric.Name() From 435c2a6e3399c08fcecf26b0e294ea7051d1312e Mon Sep 17 00:00:00 2001 From: John Seekins Date: Tue, 31 Aug 2021 16:04:32 -0600 Subject: [PATCH 584/761] feat: add inputs.mdstat to gather from /proc/mdstat collection (#9101) --- README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/mdstat/README.md | 49 ++++ plugins/inputs/mdstat/mdstat.go | 313 +++++++++++++++++++++++ plugins/inputs/mdstat/mdstat_notlinux.go | 3 + plugins/inputs/mdstat/mdstat_test.go | 148 +++++++++++ 6 files changed, 515 insertions(+) create mode 100644 plugins/inputs/mdstat/README.md create mode 100644 plugins/inputs/mdstat/mdstat.go create mode 100644 plugins/inputs/mdstat/mdstat_notlinux.go create mode 100644 plugins/inputs/mdstat/mdstat_test.go diff --git a/README.md b/README.md index 2097ea5c37e2b..c4a89b751c5d2 100644 --- a/README.md +++ b/README.md @@ -265,6 +265,7 @@ For documentation on the latest development code see the [documentation index][d * [mailchimp](./plugins/inputs/mailchimp) * [marklogic](./plugins/inputs/marklogic) * [mcrouter](./plugins/inputs/mcrouter) +* [mdstat](./plugins/inputs/mdstat) * [memcached](./plugins/inputs/memcached) * [mem](./plugins/inputs/mem) * [mesos](./plugins/inputs/mesos) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 95cfcf6626444..350a8cca08cdb 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -101,6 +101,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" _ "github.com/influxdata/telegraf/plugins/inputs/marklogic" _ "github.com/influxdata/telegraf/plugins/inputs/mcrouter" + _ "github.com/influxdata/telegraf/plugins/inputs/mdstat" _ "github.com/influxdata/telegraf/plugins/inputs/mem" _ "github.com/influxdata/telegraf/plugins/inputs/memcached" _ "github.com/influxdata/telegraf/plugins/inputs/mesos" diff --git a/plugins/inputs/mdstat/README.md b/plugins/inputs/mdstat/README.md new file mode 100644 index 0000000000000..6180833b69ade --- /dev/null +++ b/plugins/inputs/mdstat/README.md @@ -0,0 +1,49 @@ +# mdstat Input Plugin + +The mdstat plugin gathers statistics about any Linux MD RAID arrays configured on the host +by reading /proc/mdstat. For a full list of available fields see the +/proc/mdstat section of the [proc man page](http://man7.org/linux/man-pages/man5/proc.5.html). +For a better idea of what each field represents, see the +[mdstat man page](https://raid.wiki.kernel.org/index.php/Mdstat). + +Stat collection based on Prometheus' mdstat collection library at https://github.com/prometheus/procfs/blob/master/mdstat.go + + +### Configuration: + +```toml +# Get kernel statistics from /proc/mdstat +[[inputs.mdstat]] + ## Sets file path + ## If not specified, then default is /proc/mdstat + # file_name = "/proc/mdstat" +``` + +### Measurements & Fields: + +- mdstat + - BlocksSynced (if the array is rebuilding/checking, this is the count of blocks that have been scanned) + - BlocksSyncedFinishTime (the expected finish time of the rebuild scan, listed in minutes remaining) + - BlocksSyncedPct (the percentage of the rebuild scan left) + - BlocksSyncedSpeed (the current speed the rebuild is running at, listed in K/sec) + - BlocksTotal (the total count of blocks in the array) + - DisksActive (the number of disks that are currently considered healthy in the array) + - DisksFailed (the current count of failed disks in the array) + - DisksSpare (the current count of "spare" disks in the array) + - DisksTotal (total count of disks in the array) + +### Tags: + +- mdstat + - ActivityState (`active` or `inactive`) + - Devices (comma separated list of devices that make up the array) + - Name (name of the array) + +### Example Output: + +``` +$ telegraf --config ~/ws/telegraf.conf --input-filter mdstat --test +* Plugin: mdstat, Collection 1 +> mdstat,ActivityState=active,Devices=sdm1\,sdn1,Name=md1 BlocksSynced=231299072i,BlocksSyncedFinishTime=0,BlocksSyncedPct=0,BlocksSyncedSpeed=0,BlocksTotal=231299072i,DisksActive=2i,DisksFailed=0i,DisksSpare=0i,DisksTotal=2i,DisksDown=0i 1617814276000000000 +> mdstat,ActivityState=active,Devices=sdm5\,sdn5,Name=md2 BlocksSynced=2996224i,BlocksSyncedFinishTime=0,BlocksSyncedPct=0,BlocksSyncedSpeed=0,BlocksTotal=2996224i,DisksActive=2i,DisksFailed=0i,DisksSpare=0i,DisksTotal=2i,DisksDown=0i 1617814276000000000 +``` diff --git a/plugins/inputs/mdstat/mdstat.go b/plugins/inputs/mdstat/mdstat.go new file mode 100644 index 0000000000000..0f18379c4c092 --- /dev/null +++ b/plugins/inputs/mdstat/mdstat.go @@ -0,0 +1,313 @@ +// +build linux + +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code has been changed since initial import. + +package mdstat + +import ( + "fmt" + "io/ioutil" + "os" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const ( + defaultHostProc = "/proc" + envProc = "HOST_PROC" +) + +var ( + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) + recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) + recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) + componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) +) + +type statusLine struct { + active int64 + total int64 + size int64 + down int64 +} + +type recoveryLine struct { + syncedBlocks int64 + pct float64 + finish float64 + speed float64 +} + +type MdstatConf struct { + FileName string `toml:"file_name"` +} + +func (k *MdstatConf) Description() string { + return "Get md array statistics from /proc/mdstat" +} + +var mdSampleConfig = ` + ## Sets file path + ## If not specified, then default is /proc/mdstat + # file_name = "/proc/mdstat" +` + +func (k *MdstatConf) SampleConfig() string { + return mdSampleConfig +} + +func evalStatusLine(deviceLine, statusLineStr string) (statusLine, error) { + sizeFields := strings.Fields(statusLineStr) + if len(sizeFields) < 1 { + return statusLine{active: 0, total: 0, down: 0, size: 0}, + fmt.Errorf("statusLine empty? %q", statusLineStr) + } + sizeStr := sizeFields[0] + size, err := strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return statusLine{active: 0, total: 0, down: 0, size: 0}, + fmt.Errorf("unexpected statusLine %q: %w", statusLineStr, err) + } + + if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { + // In the device deviceLine, only disks have a number associated with them in []. + total := int64(strings.Count(deviceLine, "[")) + return statusLine{active: total, total: total, down: 0, size: size}, nil + } + + if strings.Contains(deviceLine, "inactive") { + return statusLine{active: 0, total: 0, down: 0, size: size}, nil + } + + matches := statusLineRE.FindStringSubmatch(statusLineStr) + if len(matches) != 5 { + return statusLine{active: 0, total: 0, down: 0, size: size}, + fmt.Errorf("couldn't find all the substring matches: %s", statusLineStr) + } + total, err := strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return statusLine{active: 0, total: 0, down: 0, size: size}, + fmt.Errorf("unexpected statusLine %q: %w", statusLineStr, err) + } + active, err := strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return statusLine{active: 0, total: total, down: 0, size: size}, + fmt.Errorf("unexpected statusLine %q: %w", statusLineStr, err) + } + down := int64(strings.Count(matches[4], "_")) + + return statusLine{active: active, total: total, size: size, down: down}, nil +} + +func evalRecoveryLine(recoveryLineStr string) (recoveryLine, error) { + // Get count of completed vs. total blocks + matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: 0, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching syncedBlocks: %s", recoveryLineStr) + } + syncedBlocks, err := strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return recoveryLine{syncedBlocks: 0, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLineStr, err) + } + + // Get percentage complete + matches = recoveryLinePctRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: syncedBlocks, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLineStr) + } + pct, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return recoveryLine{syncedBlocks: syncedBlocks, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLineStr, err) + } + + // Get time expected left to complete + matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: 0, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLineStr) + } + finish, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: 0, speed: 0}, + fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLineStr, err) + } + + // Get recovery speed + matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: finish, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLineStr) + } + speed, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: finish, speed: 0}, + fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLineStr, err) + } + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: finish, speed: speed}, nil +} + +func evalComponentDevices(deviceFields []string) string { + mdComponentDevices := make([]string, 0) + if len(deviceFields) > 3 { + for _, field := range deviceFields[4:] { + match := componentDeviceRE.FindStringSubmatch(field) + if match == nil { + continue + } + mdComponentDevices = append(mdComponentDevices, match[1]) + } + } + + // Ensure no churn on tag ordering change + sort.Strings(mdComponentDevices) + return strings.Join(mdComponentDevices, ",") +} + +func (k *MdstatConf) Gather(acc telegraf.Accumulator) error { + data, err := k.getProcMdstat() + if err != nil { + return err + } + lines := strings.Split(string(data), "\n") + // empty file should return nothing + if len(lines) < 3 { + return nil + } + for i, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' || strings.HasPrefix(line, "Personalities") || strings.HasPrefix(line, "unused") { + continue + } + deviceFields := strings.Fields(line) + if len(deviceFields) < 3 || len(lines) <= i+3 { + return fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + } + mdName := deviceFields[0] // mdx + state := deviceFields[2] // active or inactive + + /* + Failed disks have the suffix (F) & Spare disks have the suffix (S). + Failed disks may also not be marked separately... + */ + fail := int64(strings.Count(line, "(F)")) + spare := int64(strings.Count(line, "(S)")) + + sts, err := evalStatusLine(lines[i], lines[i+1]) + if err != nil { + return fmt.Errorf("error parsing md device lines: %w", err) + } + + syncLineIdx := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + syncLineIdx++ + } + + var rcvry recoveryLine + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. + rcvry.syncedBlocks = sts.size + recovering := strings.Contains(lines[syncLineIdx], "recovery") + resyncing := strings.Contains(lines[syncLineIdx], "resync") + checking := strings.Contains(lines[syncLineIdx], "check") + + // Append recovery and resyncing state info. + if recovering || resyncing || checking { + if recovering { + state = "recovering" + } else if checking { + state = "checking" + } else { + state = "resyncing" + } + + // Handle case when resync=PENDING or resync=DELAYED. + if strings.Contains(lines[syncLineIdx], "PENDING") || strings.Contains(lines[syncLineIdx], "DELAYED") { + rcvry.syncedBlocks = 0 + } else { + var err error + rcvry, err = evalRecoveryLine(lines[syncLineIdx]) + if err != nil { + return fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) + } + } + } + fields := map[string]interface{}{ + "DisksActive": sts.active, + "DisksFailed": fail, + "DisksSpare": spare, + "DisksTotal": sts.total, + "DisksDown": sts.down, + "BlocksTotal": sts.size, + "BlocksSynced": rcvry.syncedBlocks, + "BlocksSyncedPct": rcvry.pct, + "BlocksSyncedFinishTime": rcvry.finish, + "BlocksSyncedSpeed": rcvry.speed, + } + tags := map[string]string{ + "Name": mdName, + "ActivityState": state, + "Devices": evalComponentDevices(deviceFields), + } + acc.AddFields("mdstat", fields, tags) + } + + return nil +} + +func (k *MdstatConf) getProcMdstat() ([]byte, error) { + var mdStatFile string + if k.FileName == "" { + mdStatFile = proc(envProc, defaultHostProc) + "/mdstat" + } else { + mdStatFile = k.FileName + } + if _, err := os.Stat(mdStatFile); os.IsNotExist(err) { + return nil, fmt.Errorf("mdstat: %s does not exist", mdStatFile) + } else if err != nil { + return nil, err + } + + data, err := ioutil.ReadFile(mdStatFile) + if err != nil { + return nil, err + } + + return data, nil +} + +func init() { + inputs.Add("mdstat", func() telegraf.Input { return &MdstatConf{} }) +} + +// proc can be used to read file paths from env +func proc(env, path string) string { + // try to read full file path + if p := os.Getenv(env); p != "" { + return p + } + // return default path + return path +} diff --git a/plugins/inputs/mdstat/mdstat_notlinux.go b/plugins/inputs/mdstat/mdstat_notlinux.go new file mode 100644 index 0000000000000..f0fe87e66ba91 --- /dev/null +++ b/plugins/inputs/mdstat/mdstat_notlinux.go @@ -0,0 +1,3 @@ +// +build !linux + +package mdstat diff --git a/plugins/inputs/mdstat/mdstat_test.go b/plugins/inputs/mdstat/mdstat_test.go new file mode 100644 index 0000000000000..030ac2cb55f6f --- /dev/null +++ b/plugins/inputs/mdstat/mdstat_test.go @@ -0,0 +1,148 @@ +// +build linux + +package mdstat + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func TestFullMdstatProcFile(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileFull)) + defer os.Remove(filename) + k := MdstatConf{ + FileName: filename, + } + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.NoError(t, err) + + fields := map[string]interface{}{ + "BlocksSynced": int64(10620027200), + "BlocksSyncedFinishTime": float64(101.6), + "BlocksSyncedPct": float64(94.3), + "BlocksSyncedSpeed": float64(103517), + "BlocksTotal": int64(11251451904), + "DisksActive": int64(12), + "DisksFailed": int64(0), + "DisksSpare": int64(0), + "DisksTotal": int64(12), + "DisksDown": int64(0), + } + acc.AssertContainsFields(t, "mdstat", fields) +} + +func TestFailedDiskMdStatProcFile1(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileFailedDisk)) + defer os.Remove(filename) + + k := MdstatConf{ + FileName: filename, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.NoError(t, err) + + fields := map[string]interface{}{ + "BlocksSynced": int64(5860144128), + "BlocksSyncedFinishTime": float64(0), + "BlocksSyncedPct": float64(0), + "BlocksSyncedSpeed": float64(0), + "BlocksTotal": int64(5860144128), + "DisksActive": int64(3), + "DisksFailed": int64(0), + "DisksSpare": int64(0), + "DisksTotal": int64(4), + "DisksDown": int64(1), + } + acc.AssertContainsFields(t, "mdstat", fields) +} + +func TestEmptyMdStatProcFile1(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileEmpty)) + defer os.Remove(filename) + + k := MdstatConf{ + FileName: filename, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.NoError(t, err) +} + +func TestInvalidMdStatProcFile1(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileInvalid)) + defer os.Remove(filename) + + k := MdstatConf{ + FileName: filename, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.Error(t, err) +} + +const mdStatFileFull = ` +Personalities : [raid1] [raid10] [linear] [multipath] [raid0] [raid6] [raid5] [raid4] +md2 : active raid10 sde[2] sdl[9] sdf[3] sdk[8] sdh[5] sdd[1] sdg[4] sdn[11] sdm[10] sdj[7] sdc[0] sdi[6] + 11251451904 blocks super 1.2 512K chunks 2 near-copies [12/12] [UUUUUUUUUUUU] + [==================>..] check = 94.3% (10620027200/11251451904) finish=101.6min speed=103517K/sec + bitmap: 35/84 pages [140KB], 65536KB chunk + +md1 : active raid1 sdb2[2] sda2[0] + 5909504 blocks super 1.2 [2/2] [UU] + +md0 : active raid1 sdb1[2] sda1[0] + 244005888 blocks super 1.2 [2/2] [UU] + bitmap: 1/2 pages [4KB], 65536KB chunk + +unused devices: +` + +const mdStatFileFailedDisk = ` +Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] +md0 : active raid5 sdd1[3] sdb1[1] sda1[0] + 5860144128 blocks super 1.2 level 5, 64k chunk, algorithm 2 [4/3] [UUU_] + bitmap: 8/15 pages [32KB], 65536KB chunk + +unused devices: +` + +const mdStatFileEmpty = ` +Personalities : +unused devices: +` + +const mdStatFileInvalid = ` +Personalities : + +mdf1: testman actve + +md0 : active raid1 sdb1[2] sda1[0] + 244005888 blocks super 1.2 [2/2] [UU] + bitmap: 1/2 pages [4KB], 65536KB chunk + +unused devices: +` + +func makeFakeMDStatFile(content []byte) (filename string) { + fileobj, err := ioutil.TempFile("", "mdstat") + if err != nil { + panic(err) + } + + if _, err = fileobj.Write(content); err != nil { + panic(err) + } + if err := fileobj.Close(); err != nil { + panic(err) + } + return fileobj.Name() +} From b8ff3e9c56686f2239013e272f395657909ae94c Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 1 Sep 2021 09:35:10 -0700 Subject: [PATCH 585/761] fix: run go fmt on inputs.mdstat with go1.17 (#9702) --- plugins/inputs/mdstat/mdstat.go | 1 + plugins/inputs/mdstat/mdstat_notlinux.go | 1 + plugins/inputs/mdstat/mdstat_test.go | 1 + 3 files changed, 3 insertions(+) diff --git a/plugins/inputs/mdstat/mdstat.go b/plugins/inputs/mdstat/mdstat.go index 0f18379c4c092..81e3f36e7c767 100644 --- a/plugins/inputs/mdstat/mdstat.go +++ b/plugins/inputs/mdstat/mdstat.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux // Copyright 2018 The Prometheus Authors diff --git a/plugins/inputs/mdstat/mdstat_notlinux.go b/plugins/inputs/mdstat/mdstat_notlinux.go index f0fe87e66ba91..409ae776102b0 100644 --- a/plugins/inputs/mdstat/mdstat_notlinux.go +++ b/plugins/inputs/mdstat/mdstat_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package mdstat diff --git a/plugins/inputs/mdstat/mdstat_test.go b/plugins/inputs/mdstat/mdstat_test.go index 030ac2cb55f6f..fe6041abec353 100644 --- a/plugins/inputs/mdstat/mdstat_test.go +++ b/plugins/inputs/mdstat/mdstat_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package mdstat From 167b6e0075b5da04fd3c33a86e68c95b3727e485 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 1 Sep 2021 22:21:53 -0700 Subject: [PATCH 586/761] fix: race condition in cookie test (#9659) --- plugins/common/cookie/cookie.go | 44 ++++++--- plugins/common/cookie/cookie_test.go | 128 +++++++++++---------------- 2 files changed, 80 insertions(+), 92 deletions(-) diff --git a/plugins/common/cookie/cookie.go b/plugins/common/cookie/cookie.go index 10213f78d9b37..e452a50a4b0a9 100644 --- a/plugins/common/cookie/cookie.go +++ b/plugins/common/cookie/cookie.go @@ -1,12 +1,14 @@ package cookie import ( + "context" "fmt" "io" "io/ioutil" "net/http" "net/http/cookiejar" "strings" + "sync" "time" clockutil "github.com/benbjohnson/clock" @@ -26,9 +28,25 @@ type CookieAuthConfig struct { Renewal config.Duration `toml:"cookie_auth_renewal"` client *http.Client + wg sync.WaitGroup } func (c *CookieAuthConfig) Start(client *http.Client, log telegraf.Logger, clock clockutil.Clock) (err error) { + if err = c.initializeClient(client); err != nil { + return err + } + + // continual auth renewal if set + if c.Renewal > 0 { + ticker := clock.Ticker(time.Duration(c.Renewal)) + // this context is used in the tests only, it is to cancel the goroutine + go c.authRenewal(context.Background(), ticker, log) + } + + return nil +} + +func (c *CookieAuthConfig) initializeClient(client *http.Client) (err error) { c.client = client if c.Method == "" { @@ -40,23 +58,21 @@ func (c *CookieAuthConfig) Start(client *http.Client, log telegraf.Logger, clock return err } - if err = c.auth(); err != nil { - return err - } + return c.auth() +} - // continual auth renewal if set - if c.Renewal > 0 { - ticker := clock.Ticker(time.Duration(c.Renewal)) - go func() { - for range ticker.C { - if err := c.auth(); err != nil && log != nil { - log.Errorf("renewal failed for %q: %v", c.URL, err) - } +func (c *CookieAuthConfig) authRenewal(ctx context.Context, ticker *clockutil.Ticker, log telegraf.Logger) { + for { + select { + case <-ctx.Done(): + c.wg.Done() + return + case <-ticker.C: + if err := c.auth(); err != nil && log != nil { + log.Errorf("renewal failed for %q: %v", c.URL, err) } - }() + } } - - return nil } func (c *CookieAuthConfig) auth() error { diff --git a/plugins/common/cookie/cookie_test.go b/plugins/common/cookie/cookie_test.go index 036ca2b5bb5a7..99269c27cd339 100644 --- a/plugins/common/cookie/cookie_test.go +++ b/plugins/common/cookie/cookie_test.go @@ -1,6 +1,7 @@ -package cookie_test +package cookie import ( + "context" "fmt" "io/ioutil" "net/http" @@ -12,7 +13,6 @@ import ( clockutil "github.com/benbjohnson/clock" "github.com/google/go-cmp/cmp" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/plugins/common/cookie" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -118,44 +118,25 @@ func TestAuthConfig_Start(t *testing.T) { endpoint string } tests := []struct { - name string - fields fields - args args - wantErr error - assert func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) + name string + fields fields + args args + wantErr error + firstAuthCount int32 + lastAuthCount int32 + firstHTTPResponse int + lastHTTPResponse int }{ - { - name: "zero renewal does not renew", - args: args{ - renewal: 0, - endpoint: authEndpointNoCreds, - }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { - // should have Cookie Authed once - srv.checkAuthCount(t, 1) - srv.checkResp(t, http.StatusOK) - mock.Add(renewalCheck) - srv.checkAuthCount(t, 1) - srv.checkResp(t, http.StatusOK) - }, - }, { name: "success no creds, no body, default method", args: args{ renewal: renewal, endpoint: authEndpointNoCreds, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { - // should have Cookie Authed once - srv.checkAuthCount(t, 1) - // default method set - require.Equal(t, http.MethodPost, c.Method) - srv.checkResp(t, http.StatusOK) - mock.Add(renewalCheck) - // should have Cookie Authed at least twice more - srv.checkAuthCount(t, 3) - srv.checkResp(t, http.StatusOK) - }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, }, { name: "success with creds, no body", @@ -168,15 +149,10 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointWithBasicAuth, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { - // should have Cookie Authed once - srv.checkAuthCount(t, 1) - srv.checkResp(t, http.StatusOK) - mock.Add(renewalCheck) - // should have Cookie Authed at least twice more - srv.checkAuthCount(t, 3) - srv.checkResp(t, http.StatusOK) - }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, }, { name: "failure with bad creds", @@ -189,16 +165,11 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointWithBasicAuth, }, - wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { - // should have never Cookie Authed - srv.checkAuthCount(t, 0) - srv.checkResp(t, http.StatusForbidden) - mock.Add(renewalCheck) - // should have still never Cookie Authed - srv.checkAuthCount(t, 0) - srv.checkResp(t, http.StatusForbidden) - }, + wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), + firstAuthCount: 0, + lastAuthCount: 0, + firstHTTPResponse: http.StatusForbidden, + lastHTTPResponse: http.StatusForbidden, }, { name: "success with no creds, with good body", @@ -210,15 +181,10 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointWithBody, }, - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { - // should have Cookie Authed once - srv.checkAuthCount(t, 1) - srv.checkResp(t, http.StatusOK) - mock.Add(renewalCheck) - // should have Cookie Authed at least twice more - srv.checkAuthCount(t, 3) - srv.checkResp(t, http.StatusOK) - }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, }, { name: "failure with bad body", @@ -230,23 +196,18 @@ func TestAuthConfig_Start(t *testing.T) { renewal: renewal, endpoint: authEndpointWithBody, }, - wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), - assert: func(t *testing.T, c *cookie.CookieAuthConfig, srv fakeServer, mock *clockutil.Mock) { - // should have never Cookie Authed - srv.checkAuthCount(t, 0) - srv.checkResp(t, http.StatusForbidden) - mock.Add(renewalCheck) - // should have still never Cookie Authed - srv.checkAuthCount(t, 0) - srv.checkResp(t, http.StatusForbidden) - }, + wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), + firstAuthCount: 0, + lastAuthCount: 0, + firstHTTPResponse: http.StatusForbidden, + lastHTTPResponse: http.StatusForbidden, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { srv := newFakeServer(t) - c := &cookie.CookieAuthConfig{ + c := &CookieAuthConfig{ URL: srv.URL + tt.args.endpoint, Method: tt.fields.Method, Username: tt.fields.Username, @@ -254,17 +215,28 @@ func TestAuthConfig_Start(t *testing.T) { Body: tt.fields.Body, Renewal: config.Duration(tt.args.renewal), } - - mock := clockutil.NewMock() - if err := c.Start(srv.Client(), testutil.Logger{Name: "cookie_auth"}, mock); tt.wantErr != nil { + if err := c.initializeClient(srv.Client()); tt.wantErr != nil { require.EqualError(t, err, tt.wantErr.Error()) } else { require.NoError(t, err) } + mock := clockutil.NewMock() + ticker := mock.Ticker(time.Duration(c.Renewal)) + defer ticker.Stop() + + c.wg.Add(1) + ctx, cancel := context.WithCancel(context.Background()) + go c.authRenewal(ctx, ticker, testutil.Logger{Name: "cookie_auth"}) + + srv.checkAuthCount(t, tt.firstAuthCount) + srv.checkResp(t, tt.firstHTTPResponse) + mock.Add(renewalCheck) + // Ensure that the auth renewal goroutine has completed + cancel() + c.wg.Wait() + srv.checkAuthCount(t, tt.lastAuthCount) + srv.checkResp(t, tt.lastHTTPResponse) - if tt.assert != nil { - tt.assert(t, c, srv, mock) - } srv.Close() }) } From 514a942a6ceb0a6972582a62e157aed6849eb05d Mon Sep 17 00:00:00 2001 From: Jake McCrary Date: Thu, 2 Sep 2021 09:56:45 -0500 Subject: [PATCH 587/761] Make prometheus serializer update timestamps and expiration time as new data arrives (#9139) --- plugins/serializers/prometheus/README.md | 6 +- plugins/serializers/prometheus/collection.go | 6 + .../serializers/prometheus/collection_test.go | 417 ++++++++++++++++++ 3 files changed, 428 insertions(+), 1 deletion(-) diff --git a/plugins/serializers/prometheus/README.md b/plugins/serializers/prometheus/README.md index 19c869ffbccb3..446def0b46d77 100644 --- a/plugins/serializers/prometheus/README.md +++ b/plugins/serializers/prometheus/README.md @@ -8,7 +8,11 @@ use the `metric_version = 2` option in order to properly round trip metrics. not be correct if the metric spans multiple batches. This issue can be somewhat, but not fully, mitigated by using outputs that support writing in "batch format". When using histogram and summary types, it is recommended to -use only the `prometheus_client` output. +use only the `prometheus_client` output. Histogram and Summary types +also update their expiration time based on the most recently received data. +If incoming metrics stop updating specific buckets or quantiles but continue +reporting others every bucket/quantile will continue to exist. + ### Configuration diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go index ed442e23c85fd..caa8a7334d91d 100644 --- a/plugins/serializers/prometheus/collection.go +++ b/plugins/serializers/prometheus/collection.go @@ -241,6 +241,9 @@ func (c *Collection) Add(metric telegraf.Metric, now time.Time) { AddTime: now, Histogram: &Histogram{}, } + } else { + m.Time = metric.Time() + m.AddTime = now } switch { case strings.HasSuffix(field.Key, "_bucket"): @@ -289,6 +292,9 @@ func (c *Collection) Add(metric telegraf.Metric, now time.Time) { AddTime: now, Summary: &Summary{}, } + } else { + m.Time = metric.Time() + m.AddTime = now } switch { case strings.HasSuffix(field.Key, "_sum"): diff --git a/plugins/serializers/prometheus/collection_test.go b/plugins/serializers/prometheus/collection_test.go index d2c5f5d098162..deb400ba2d899 100644 --- a/plugins/serializers/prometheus/collection_test.go +++ b/plugins/serializers/prometheus/collection_test.go @@ -302,6 +302,117 @@ func TestCollectionExpire(t *testing.T) { }, }, }, + { + name: "entire histogram expires", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{}, + }, + { + name: "histogram does not expire because of addtime from bucket", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(15, 0), // More recent addtime causes entire metric to stay valid + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("http_request_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2), + SampleSum: proto.Float64(10.0), + Bucket: []*dto.Bucket{ + { + UpperBound: proto.Float64(math.Inf(1)), + CumulativeCount: proto.Uint64(1), + }, + { + UpperBound: proto.Float64(0.05), + CumulativeCount: proto.Uint64(1), + }, + }, + }, + }, + }, + }, + }, + }, { name: "summary quantile updates", now: time.Unix(0, 0), @@ -379,6 +490,106 @@ func TestCollectionExpire(t *testing.T) { }, }, }, + { + name: "Entire summary expires", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{}, + }, + { + name: "summary does not expire because of quantile addtime", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.5"}, + map[string]interface{}{ + "rpc_duration_seconds": 10.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(15, 0), // Recent addtime keeps entire metric around + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("rpc_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Summary: &dto.Summary{ + SampleSum: proto.Float64(1), + SampleCount: proto.Uint64(1), + Quantile: []*dto.Quantile{ + { + Quantile: proto.Float64(0.5), + Value: proto.Float64(10), + }, + { + Quantile: proto.Float64(0.01), + Value: proto.Float64(1), + }, + }, + }, + }, + }, + }, + }, + }, { name: "expire based on add time", now: time.Unix(20, 0), @@ -425,3 +636,209 @@ func TestCollectionExpire(t *testing.T) { }) } } + +func TestExportTimestamps(t *testing.T) { + tests := []struct { + name string + now time.Time + age time.Duration + input []Input + expected []*dto.MetricFamily + }{ + { + name: "histogram bucket updates", + now: time.Unix(23, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(15, 0), + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(15, 0), + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(15, 0), + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + // Next interval + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 20.0, + "http_request_duration_seconds_count": 4, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("http_request_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + TimestampMs: proto.Int64(time.Unix(20, 0).UnixNano() / int64(time.Millisecond)), + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(4), + SampleSum: proto.Float64(20.0), + Bucket: []*dto.Bucket{ + { + UpperBound: proto.Float64(0.05), + CumulativeCount: proto.Uint64(2), + }, + { + UpperBound: proto.Float64(math.Inf(1)), + CumulativeCount: proto.Uint64(2), + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "summary quantile updates", + now: time.Unix(23, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(15, 0), + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(15, 0), + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, { + // Updated Summary + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 2.0, + "rpc_duration_seconds_count": 2, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 2.0, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("rpc_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + TimestampMs: proto.Int64(time.Unix(20, 0).UnixNano() / int64(time.Millisecond)), + Summary: &dto.Summary{ + SampleCount: proto.Uint64(2), + SampleSum: proto.Float64(2.0), + Quantile: []*dto.Quantile{ + { + Quantile: proto.Float64(0.01), + Value: proto.Float64(2), + }, + }, + }, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := NewCollection(FormatConfig{TimestampExport: ExportTimestamp}) + for _, item := range tt.input { + c.Add(item.metric, item.addtime) + } + c.Expire(tt.now, tt.age) + + actual := c.GetProto() + + require.Equal(t, tt.expected, actual) + }) + } +} From 04c3e9bb24feb36f24e8da75f6b764e44d6e58cf Mon Sep 17 00:00:00 2001 From: Matteo Concas Date: Thu, 2 Sep 2021 16:57:17 +0200 Subject: [PATCH 588/761] feat: Add rocm_smi input to monitor AMD GPUs (#9602) --- etc/telegraf.conf | 15 +- plugins/inputs/all/all.go | 1 + plugins/inputs/amd_rocm_smi/README.md | 58 ++++ plugins/inputs/amd_rocm_smi/amd_rocm_smi.go | 294 ++++++++++++++++++ .../inputs/amd_rocm_smi/amd_rocm_smi_test.go | 90 ++++++ .../amd_rocm_smi/testdata/vega-10-XT.json | 77 +++++ .../testdata/vega-20-WKS-GL-XE.json | 165 ++++++++++ 7 files changed, 696 insertions(+), 4 deletions(-) create mode 100644 plugins/inputs/amd_rocm_smi/README.md create mode 100644 plugins/inputs/amd_rocm_smi/amd_rocm_smi.go create mode 100644 plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go create mode 100644 plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json create mode 100644 plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json diff --git a/etc/telegraf.conf b/etc/telegraf.conf index c49761c947bc4..43b1f8f3ade45 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1870,7 +1870,7 @@ # ## Print Warp 10 error body # # print_error_body = false # -# ## Max string error size +# ## Max string error size # # max_string_error_size = 511 # # ## Optional TLS Config @@ -4343,19 +4343,19 @@ # ## List of metrics collected on above servers # ## Each metric consists in a name, a jmx path and either # ## a pass or drop slice attribute. -# ## This collect all heap memory usage metrics. +# ## This collect all heap memory usage metrics. # [[inputs.jolokia.metrics]] # name = "heap_memory_usage" # mbean = "java.lang:type=Memory" # attribute = "HeapMemoryUsage" # -# ## This collect thread counts metrics. +# ## This collect thread counts metrics. # [[inputs.jolokia.metrics]] # name = "thread_count" # mbean = "java.lang:type=Threading" # attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" # -# ## This collect number of class loaded/unloaded counts metrics. +# ## This collect number of class loaded/unloaded counts metrics. # [[inputs.jolokia.metrics]] # name = "class_count" # mbean = "java.lang:type=ClassLoading" @@ -5785,6 +5785,13 @@ # # Specify a list of one or more riak http servers # servers = ["http://localhost:8098"] +# # Query statistics from AMD Graphics cards using rocm-smi binary +# [[inputs.amd_rocm_smi]] +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" # # Read API usage and limits for a Salesforce organisation # [[inputs.salesforce]] diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 350a8cca08cdb..781e04e60928b 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -5,6 +5,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/activemq" _ "github.com/influxdata/telegraf/plugins/inputs/aerospike" _ "github.com/influxdata/telegraf/plugins/inputs/aliyuncms" + _ "github.com/influxdata/telegraf/plugins/inputs/amd_rocm_smi" _ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/apache" _ "github.com/influxdata/telegraf/plugins/inputs/apcupsd" diff --git a/plugins/inputs/amd_rocm_smi/README.md b/plugins/inputs/amd_rocm_smi/README.md new file mode 100644 index 0000000000000..89a5b063065d7 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/README.md @@ -0,0 +1,58 @@ +# ROCm System Management Interface (SMI) Input Plugin + +This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenCompute/rocm_smi_lib/tree/master/python_smi_tools) binary to pull GPU stats including memory and GPU usage, temperatures and other. + +### Configuration + +```toml +# Pulls statistics from nvidia GPUs attached to the host +[[inputs.amd_rocm_smi]] + ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath + # bin_path = "/opt/rocm/bin/rocm-smi" + + ## Optional: timeout for GPU polling + # timeout = "5s" +``` + +### Metrics +- measurement: `amd_rocm_smi` + - tags + - `name` (entry name assigned by rocm-smi executable) + - `gpu_id` (id of the GPU according to rocm-smi) + - `gpu_unique_id` (unique id of the GPU) + + - fields + - `driver_version` (integer) + - `fan_speed`(integer) + - `memory_total`(integer B) + - `memory_used`(integer B) + - `memory_free`(integer B) + - `temperature_sensor_edge` (float, Celsius) + - `temperature_sensor_junction` (float, Celsius) + - `temperature_sensor_memory` (float, Celsius) + - `utilization_gpu` (integer, percentage) + - `utilization_memory` (integer, percentage) + - `clocks_current_sm` (integer, Mhz) + - `clocks_current_memory` (integer, Mhz) + - `power_draw` (float, Watt) + +### Troubleshooting +Check the full output by running `rocm-smi` binary manually. + +Linux: +```sh +rocm-smi rocm-smi -o -l -m -M -g -c -t -u -i -f -p -P -s -S -v --showreplaycount --showpids --showdriverversion --showmemvendor --showfwinfo --showproductname --showserial --showuniqueid --showbus --showpendingpages --showpagesinfo --showretiredpages --showunreservablepages --showmemuse --showvoltage --showtopo --showtopoweight --showtopohops --showtopotype --showtoponuma --showmeminfo all --json +``` +Please include the output of this command if opening a GitHub issue, together with ROCm version. +### Example Output +``` +amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=28,temperature_sensor_junction=29,temperature_sensor_memory=92,utilization_gpu=0i 1630572551000000000 +amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=29,temperature_sensor_junction=30,temperature_sensor_memory=91,utilization_gpu=0i 1630572701000000000 +amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=29,temperature_sensor_junction=29,temperature_sensor_memory=92,utilization_gpu=0i 1630572749000000000 +``` +### Limitations and notices +Please notice that this plugin has been developed and tested on a limited number of versions and small set of GPUs. Currently the latest ROCm version tested is 4.3.0. +Notice that depending on the device and driver versions the amount of information provided by `rocm-smi` can vary so that some fields would start/stop appearing in the metrics upon updates. +The `rocm-smi` JSON output is not perfectly homogeneous and is possibly changing in the future, hence parsing and unmarshaling can start failing upon updating ROCm. + +Inspired by the current state of the art of the `nvidia-smi` plugin. diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go new file mode 100644 index 0000000000000..7fdd32f466b73 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go @@ -0,0 +1,294 @@ +package amd_rocm_smi + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const measurement = "amd_rocm_smi" + +type ROCmSMI struct { + BinPath string + Timeout config.Duration +} + +// Description returns the description of the ROCmSMI plugin +func (rsmi *ROCmSMI) Description() string { + return "Query statistics from AMD Graphics cards using rocm-smi binary" +} + +var ROCmSMIConfig = ` +## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# bin_path = "/opt/rocm/bin/rocm-smi" + +## Optional: timeout for GPU polling +# timeout = "5s" +` + +// SampleConfig returns the sample configuration for the ROCmSMI plugin +func (rsmi *ROCmSMI) SampleConfig() string { + return ROCmSMIConfig +} + +// Gather implements the telegraf interface +func (rsmi *ROCmSMI) Gather(acc telegraf.Accumulator) error { + if _, err := os.Stat(rsmi.BinPath); os.IsNotExist(err) { + return fmt.Errorf("rocm-smi binary not found in path %s, cannot query GPUs statistics", rsmi.BinPath) + } + + data, err := rsmi.pollROCmSMI() + if err != nil { + return err + } + + err = gatherROCmSMI(data, acc) + if err != nil { + return err + } + + return nil +} + +func init() { + inputs.Add("amd_rocm_smi", func() telegraf.Input { + return &ROCmSMI{ + BinPath: "/opt/rocm/bin/rocm-smi", + Timeout: config.Duration(5 * time.Second), + } + }) +} + +func (rsmi *ROCmSMI) pollROCmSMI() ([]byte, error) { + // Construct and execute metrics query, there currently exist (ROCm v4.3.x) a "-a" option + // that does not provide all the information, so each needed parameter is set manually + cmd := exec.Command(rsmi.BinPath, + "-o", + "-l", + "-m", + "-M", + "-g", + "-c", + "-t", + "-u", + "-i", + "-f", + "-p", + "-P", + "-s", + "-S", + "-v", + "--showreplaycount", + "--showpids", + "--showdriverversion", + "--showmemvendor", + "--showfwinfo", + "--showproductname", + "--showserial", + "--showuniqueid", + "--showbus", + "--showpendingpages", + "--showpagesinfo", + "--showmeminfo", + "all", + "--showretiredpages", + "--showunreservablepages", + "--showmemuse", + "--showvoltage", + "--showtopo", + "--showtopoweight", + "--showtopohops", + "--showtopotype", + "--showtoponuma", + "--json") + + ret, _ := internal.StdOutputTimeout(cmd, + time.Duration(rsmi.Timeout)) + return ret, nil +} + +func gatherROCmSMI(ret []byte, acc telegraf.Accumulator) error { + var gpus map[string]GPU + var sys map[string]sysInfo + + err1 := json.Unmarshal(ret, &gpus) + if err1 != nil { + return err1 + } + + err2 := json.Unmarshal(ret, &sys) + if err2 != nil { + return err2 + } + + metrics := genTagsFields(gpus, sys) + for _, metric := range metrics { + acc.AddFields(measurement, metric.fields, metric.tags) + } + + return nil +} + +type metric struct { + tags map[string]string + fields map[string]interface{} +} + +func genTagsFields(gpus map[string]GPU, system map[string]sysInfo) []metric { + metrics := []metric{} + for cardID, payload := range gpus { + if strings.Contains(cardID, "card") { + tags := map[string]string{ + "name": cardID, + } + fields := map[string]interface{}{} + + totVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalMemory, 10, 64) + usdVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalUsedMemory, 10, 64) + strFree := strconv.FormatInt(totVRAM-usdVRAM, 10) + + setTagIfUsed(tags, "gpu_id", payload.GpuID) + setTagIfUsed(tags, "gpu_unique_id", payload.GpuUniqueID) + + setIfUsed("int", fields, "driver_version", strings.Replace(system["system"].DriverVersion, ".", "", -1)) + setIfUsed("int", fields, "fan_speed", payload.GpuFanSpeedPercentage) + setIfUsed("int64", fields, "memory_total", payload.GpuVRAMTotalMemory) + setIfUsed("int64", fields, "memory_used", payload.GpuVRAMTotalUsedMemory) + setIfUsed("int64", fields, "memory_free", strFree) + setIfUsed("float", fields, "temperature_sensor_edge", payload.GpuTemperatureSensorEdge) + setIfUsed("float", fields, "temperature_sensor_junction", payload.GpuTemperatureSensorJunction) + setIfUsed("float", fields, "temperature_sensor_memory", payload.GpuTemperatureSensorMemory) + setIfUsed("int", fields, "utilization_gpu", payload.GpuUsePercentage) + setIfUsed("int", fields, "utilization_memory", payload.GpuMemoryUsePercentage) + setIfUsed("int", fields, "clocks_current_sm", strings.Trim(payload.GpuSclkClockSpeed, "(Mhz)")) + setIfUsed("int", fields, "clocks_current_memory", strings.Trim(payload.GpuMclkClockSpeed, "(Mhz)")) + setIfUsed("float", fields, "power_draw", payload.GpuAveragePower) + + metrics = append(metrics, metric{tags, fields}) + } + } + return metrics +} + +func setTagIfUsed(m map[string]string, k, v string) { + if v != "" { + m[k] = v + } +} + +func setIfUsed(t string, m map[string]interface{}, k, v string) { + vals := strings.Fields(v) + if len(vals) < 1 { + return + } + + val := vals[0] + + switch t { + case "float": + if val != "" { + f, err := strconv.ParseFloat(val, 64) + if err == nil { + m[k] = f + } + } + case "int": + if val != "" { + i, err := strconv.Atoi(val) + if err == nil { + m[k] = i + } + } + case "int64": + if val != "" { + i, err := strconv.ParseInt(val, 10, 64) + if err == nil { + m[k] = i + } + } + case "str": + if val != "" { + m[k] = val + } + } +} + +type sysInfo struct { + DriverVersion string `json:"Driver version"` +} + +type GPU struct { + GpuID string `json:"GPU ID"` + GpuUniqueID string `json:"Unique ID"` + GpuVBIOSVersion string `json:"VBIOS version"` + GpuTemperatureSensorEdge string `json:"Temperature (Sensor edge) (C)"` + GpuTemperatureSensorJunction string `json:"Temperature (Sensor junction) (C)"` + GpuTemperatureSensorMemory string `json:"Temperature (Sensor memory) (C)"` + GpuDcefClkClockSpeed string `json:"dcefclk clock speed"` + GpuDcefClkClockLevel string `json:"dcefclk clock level"` + GpuFclkClockSpeed string `json:"fclk clock speed"` + GpuFclkClockLevel string `json:"fclk clock level"` + GpuMclkClockSpeed string `json:"mclk clock speed:"` + GpuMclkClockLevel string `json:"mclk clock level:"` + GpuSclkClockSpeed string `json:"sclk clock speed:"` + GpuSclkClockLevel string `json:"sclk clock level:"` + GpuSocclkClockSpeed string `json:"socclk clock speed"` + GpuSocclkClockLevel string `json:"socclk clock level"` + GpuPcieClock string `json:"pcie clock level"` + GpuFanSpeedLevel string `json:"Fan speed (level)"` + GpuFanSpeedPercentage string `json:"Fan speed (%)"` + GpuFanRPM string `json:"Fan RPM"` + GpuPerformanceLevel string `json:"Performance Level"` + GpuOverdrive string `json:"GPU OverDrive value (%)"` + GpuMaxPower string `json:"Max Graphics Package Power (W)"` + GpuAveragePower string `json:"Average Graphics Package Power (W)"` + GpuUsePercentage string `json:"GPU use (%)"` + GpuMemoryUsePercentage string `json:"GPU memory use (%)"` + GpuMemoryVendor string `json:"GPU memory vendor"` + GpuPCIeReplay string `json:"PCIe Replay Count"` + GpuSerialNumber string `json:"Serial Number"` + GpuVoltagemV string `json:"Voltage (mV)"` + GpuPCIBus string `json:"PCI Bus"` + GpuASDDirmware string `json:"ASD firmware version"` + GpuCEFirmware string `json:"CE firmware version"` + GpuDMCUFirmware string `json:"DMCU firmware version"` + GpuMCFirmware string `json:"MC firmware version"` + GpuMEFirmware string `json:"ME firmware version"` + GpuMECFirmware string `json:"MEC firmware version"` + GpuMEC2Firmware string `json:"MEC2 firmware version"` + GpuPFPFirmware string `json:"PFP firmware version"` + GpuRLCFirmware string `json:"RLC firmware version"` + GpuRLCSRLC string `json:"RLC SRLC firmware version"` + GpuRLCSRLG string `json:"RLC SRLG firmware version"` + GpuRLCSRLS string `json:"RLC SRLS firmware version"` + GpuSDMAFirmware string `json:"SDMA firmware version"` + GpuSDMA2Firmware string `json:"SDMA2 firmware version"` + GpuSMCFirmware string `json:"SMC firmware version"` + GpuSOSFirmware string `json:"SOS firmware version"` + GpuTARAS string `json:"TA RAS firmware version"` + GpuTAXGMI string `json:"TA XGMI firmware version"` + GpuUVDFirmware string `json:"UVD firmware version"` + GpuVCEFirmware string `json:"VCE firmware version"` + GpuVCNFirmware string `json:"VCN firmware version"` + GpuCardSeries string `json:"Card series"` + GpuCardModel string `json:"Card model"` + GpuCardVendor string `json:"Card vendor"` + GpuCardSKU string `json:"Card SKU"` + GpuNUMANode string `json:"(Topology) Numa Node"` + GpuNUMAAffinity string `json:"(Topology) Numa Affinity"` + GpuVisVRAMTotalMemory string `json:"VIS_VRAM Total Memory (B)"` + GpuVisVRAMTotalUsedMemory string `json:"VIS_VRAM Total Used Memory (B)"` + GpuVRAMTotalMemory string `json:"VRAM Total Memory (B)"` + GpuVRAMTotalUsedMemory string `json:"VRAM Total Used Memory (B)"` + GpuGTTTotalMemory string `json:"GTT Total Memory (B)"` + GpuGTTTotalUsedMemory string `json:"GTT Total Used Memory (B)"` +} diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go new file mode 100644 index 0000000000000..7893760bdf952 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go @@ -0,0 +1,90 @@ +package amd_rocm_smi + +import ( + "io/ioutil" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGatherValidJSON(t *testing.T) { + tests := []struct { + name string + filename string + expected []telegraf.Metric + }{ + { + name: "Vega 10 XT", + filename: "vega-10-XT.json", + expected: []telegraf.Metric{ + testutil.MustMetric( + "amd_rocm_smi", + map[string]string{ + "gpu_id": "0x6861", + "gpu_unique_id": "0x2150e7d042a1124", + "name": "card0", + }, + map[string]interface{}{ + "driver_version": 5925, + "fan_speed": 13, + "memory_total": int64(17163091968), + "memory_used": int64(17776640), + "memory_free": int64(17145315328), + "temperature_sensor_edge": 39.0, + "temperature_sensor_junction": 40.0, + "temperature_sensor_memory": 92.0, + "utilization_gpu": 0, + "clocks_current_sm": 1269, + "clocks_current_memory": 167, + "power_draw": 15.0, + }, + time.Unix(0, 0)), + }, + }, + { + name: "Vega 20 WKS GL-XE [Radeon Pro VII]", + filename: "vega-20-WKS-GL-XE.json", + expected: []telegraf.Metric{ + testutil.MustMetric( + "amd_rocm_smi", + map[string]string{ + "gpu_id": "0x66a1", + "gpu_unique_id": "0x2f048617326b1ea", + "name": "card0", + }, + map[string]interface{}{ + "driver_version": 5917, + "fan_speed": 0, + "memory_total": int64(34342961152), + "memory_used": int64(10850304), + "memory_free": int64(34332110848), + "temperature_sensor_edge": 36.0, + "temperature_sensor_junction": 38.0, + "temperature_sensor_memory": 35.0, + "utilization_gpu": 0, + "utilization_memory": 0, + "clocks_current_sm": 1725, + "clocks_current_memory": 1000, + "power_draw": 26.0, + }, + time.Unix(0, 0)), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + require.NoError(t, err) + + err = gatherROCmSMI(octets, &acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json b/plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json new file mode 100644 index 0000000000000..c4d51f5253a51 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json @@ -0,0 +1,77 @@ +{ + "card0": { + "GPU ID": "0x6861", + "Unique ID": "0x2150e7d042a1124", + "VBIOS version": "113-D0510100-106", + "Temperature (Sensor edge) (C)": "39.0", + "Temperature (Sensor junction) (C)": "40.0", + "Temperature (Sensor memory) (C)": "92.0", + "dcefclk clock speed:": "(600Mhz)", + "dcefclk clock level:": "0", + "mclk clock speed:": "(167Mhz)", + "mclk clock level:": "0", + "sclk clock speed:": "(1269Mhz)", + "sclk clock level:": "3", + "socclk clock speed:": "(960Mhz)", + "socclk clock level:": "3", + "pcie clock level": "1 (8.0GT/s x16)", + "sclk clock level": "3 (1269Mhz)", + "Fan speed (level)": "33", + "Fan speed (%)": "13", + "Fan RPM": "682", + "Performance Level": "auto", + "GPU OverDrive value (%)": "0", + "GPU Memory OverDrive value (%)": "0", + "Max Graphics Package Power (W)": "170.0", + "Average Graphics Package Power (W)": "15.0", + "0": "8.0GT/s x16", + "1": "8.0GT/s x16 *", + "2": "847Mhz", + "3": "960Mhz *", + "4": "1028Mhz", + "5": "1107Mhz", + "6": "1440Mhz", + "7": "1500Mhz", + "GPU use (%)": "0", + "GPU memory vendor": "samsung", + "PCIe Replay Count": "0", + "Serial Number": "N/A", + "Voltage (mV)": "906", + "PCI Bus": "0000:04:00.0", + "VRAM Total Memory (B)": "17163091968", + "VRAM Total Used Memory (B)": "17776640", + "VIS_VRAM Total Memory (B)": "268435456", + "VIS_VRAM Total Used Memory (B)": "13557760", + "GTT Total Memory (B)": "17163091968", + "GTT Total Used Memory (B)": "25608192", + "ASD firmware version": "553648152", + "CE firmware version": "79", + "DMCU firmware version": "0", + "MC firmware version": "0", + "ME firmware version": "163", + "MEC firmware version": "432", + "MEC2 firmware version": "432", + "PFP firmware version": "186", + "RLC firmware version": "93", + "RLC SRLC firmware version": "0", + "RLC SRLG firmware version": "0", + "RLC SRLS firmware version": "0", + "SDMA firmware version": "430", + "SDMA2 firmware version": "430", + "SMC firmware version": "00.28.54.00", + "SOS firmware version": "0x0008015d", + "TA RAS firmware version": "00.00.00.00", + "TA XGMI firmware version": "00.00.00.00", + "UVD firmware version": "0x422b1100", + "VCE firmware version": "0x39060400", + "VCN firmware version": "0x00000000", + "Card model": "0xc1e", + "Card vendor": "Advanced Micro Devices, Inc. [AMD/ATI]", + "Card SKU": "D05101", + "(Topology) Numa Node": "0", + "(Topology) Numa Affinity": "0" + }, + "system": { + "Driver version": "5.9.25" + } +} \ No newline at end of file diff --git a/plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json b/plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json new file mode 100644 index 0000000000000..771565a607bd5 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json @@ -0,0 +1,165 @@ +{ + "card0": { + "GPU ID": "0x66a1", + "Unique ID": "0x2f048617326b1ea", + "VBIOS version": "113-D1631700-111", + "Temperature (Sensor edge) (C)": "36.0", + "Temperature (Sensor junction) (C)": "38.0", + "Temperature (Sensor memory) (C)": "35.0", + "dcefclk clock speed:": "(357Mhz)", + "dcefclk clock level:": "0", + "fclk clock speed:": "(1080Mhz)", + "fclk clock level:": "6", + "mclk clock speed:": "(1000Mhz)", + "mclk clock level:": "2", + "sclk clock speed:": "(1725Mhz)", + "sclk clock level:": "8", + "socclk clock speed:": "(971Mhz)", + "socclk clock level:": "7", + "pcie clock level": "1 (16.0GT/s x16)", + "sclk clock level": "8 (1725Mhz)", + "Fan speed (level)": "0", + "Fan speed (%)": "0", + "Fan RPM": "0", + "Performance Level": "high", + "GPU OverDrive value (%)": "0", + "Max Graphics Package Power (W)": "225.0", + "Average Graphics Package Power (W)": "26.0", + "0": "2.5GT/s x16", + "1": "16.0GT/s x16 *", + "2": "566Mhz", + "3": "618Mhz", + "4": "680Mhz", + "5": "755Mhz", + "6": "850Mhz", + "7": "971Mhz *", + "8": "1725Mhz *", + "GPU use (%)": "0", + "GPU memory use (%)": "0", + "GPU memory vendor": "samsung", + "PCIe Replay Count": "0", + "Serial Number": "692024000810", + "Voltage (mV)": "1000", + "PCI Bus": "0000:63:00.0", + "VRAM Total Memory (B)": "34342961152", + "VRAM Total Used Memory (B)": "10850304", + "VIS_VRAM Total Memory (B)": "34342961152", + "VIS_VRAM Total Used Memory (B)": "10850304", + "GTT Total Memory (B)": "54974742528", + "GTT Total Used Memory (B)": "11591680", + "ASD firmware version": "553648199", + "CE firmware version": "79", + "DMCU firmware version": "0", + "MC firmware version": "0", + "ME firmware version": "164", + "MEC firmware version": "448", + "MEC2 firmware version": "448", + "PFP firmware version": "188", + "RLC firmware version": "50", + "RLC SRLC firmware version": "1", + "RLC SRLG firmware version": "1", + "RLC SRLS firmware version": "1", + "SDMA firmware version": "144", + "SDMA2 firmware version": "144", + "SMC firmware version": "00.40.59.00", + "SOS firmware version": "0x00080b67", + "TA RAS firmware version": "27.00.01.36", + "TA XGMI firmware version": "32.00.00.02", + "UVD firmware version": "0x42002b13", + "VCE firmware version": "0x39060400", + "VCN firmware version": "0x00000000", + "Card series": "Radeon Instinct MI50 32GB", + "Card model": "0x834", + "Card vendor": "Advanced Micro Devices, Inc. [AMD/ATI]", + "Card SKU": "D16317", + "(Topology) Numa Node": "0", + "(Topology) Numa Affinity": "0" + }, + "system": { + "Driver version": "5.9.17", + "(Topology) Weight between DRM devices 0 and 1": "40", + "(Topology) Weight between DRM devices 0 and 2": "40", + "(Topology) Weight between DRM devices 0 and 3": "40", + "(Topology) Weight between DRM devices 0 and 4": "72", + "(Topology) Weight between DRM devices 0 and 5": "72", + "(Topology) Weight between DRM devices 0 and 6": "72", + "(Topology) Weight between DRM devices 0 and 7": "72", + "(Topology) Weight between DRM devices 1 and 2": "40", + "(Topology) Weight between DRM devices 1 and 3": "40", + "(Topology) Weight between DRM devices 1 and 4": "72", + "(Topology) Weight between DRM devices 1 and 5": "72", + "(Topology) Weight between DRM devices 1 and 6": "72", + "(Topology) Weight between DRM devices 1 and 7": "72", + "(Topology) Weight between DRM devices 2 and 3": "40", + "(Topology) Weight between DRM devices 2 and 4": "72", + "(Topology) Weight between DRM devices 2 and 5": "72", + "(Topology) Weight between DRM devices 2 and 6": "72", + "(Topology) Weight between DRM devices 2 and 7": "72", + "(Topology) Weight between DRM devices 3 and 4": "72", + "(Topology) Weight between DRM devices 3 and 5": "72", + "(Topology) Weight between DRM devices 3 and 6": "72", + "(Topology) Weight between DRM devices 3 and 7": "72", + "(Topology) Weight between DRM devices 4 and 5": "40", + "(Topology) Weight between DRM devices 4 and 6": "40", + "(Topology) Weight between DRM devices 4 and 7": "40", + "(Topology) Weight between DRM devices 5 and 6": "40", + "(Topology) Weight between DRM devices 5 and 7": "40", + "(Topology) Weight between DRM devices 6 and 7": "40", + "(Topology) Hops between DRM devices 0 and 1": "2", + "(Topology) Hops between DRM devices 0 and 2": "2", + "(Topology) Hops between DRM devices 0 and 3": "2", + "(Topology) Hops between DRM devices 0 and 4": "3", + "(Topology) Hops between DRM devices 0 and 5": "3", + "(Topology) Hops between DRM devices 0 and 6": "3", + "(Topology) Hops between DRM devices 0 and 7": "3", + "(Topology) Hops between DRM devices 1 and 2": "2", + "(Topology) Hops between DRM devices 1 and 3": "2", + "(Topology) Hops between DRM devices 1 and 4": "3", + "(Topology) Hops between DRM devices 1 and 5": "3", + "(Topology) Hops between DRM devices 1 and 6": "3", + "(Topology) Hops between DRM devices 1 and 7": "3", + "(Topology) Hops between DRM devices 2 and 3": "2", + "(Topology) Hops between DRM devices 2 and 4": "3", + "(Topology) Hops between DRM devices 2 and 5": "3", + "(Topology) Hops between DRM devices 2 and 6": "3", + "(Topology) Hops between DRM devices 2 and 7": "3", + "(Topology) Hops between DRM devices 3 and 4": "3", + "(Topology) Hops between DRM devices 3 and 5": "3", + "(Topology) Hops between DRM devices 3 and 6": "3", + "(Topology) Hops between DRM devices 3 and 7": "3", + "(Topology) Hops between DRM devices 4 and 5": "2", + "(Topology) Hops between DRM devices 4 and 6": "2", + "(Topology) Hops between DRM devices 4 and 7": "2", + "(Topology) Hops between DRM devices 5 and 6": "2", + "(Topology) Hops between DRM devices 5 and 7": "2", + "(Topology) Hops between DRM devices 6 and 7": "2", + "(Topology) Link type between DRM devices 0 and 1": "PCIE", + "(Topology) Link type between DRM devices 0 and 2": "PCIE", + "(Topology) Link type between DRM devices 0 and 3": "PCIE", + "(Topology) Link type between DRM devices 0 and 4": "PCIE", + "(Topology) Link type between DRM devices 0 and 5": "PCIE", + "(Topology) Link type between DRM devices 0 and 6": "PCIE", + "(Topology) Link type between DRM devices 0 and 7": "PCIE", + "(Topology) Link type between DRM devices 1 and 2": "PCIE", + "(Topology) Link type between DRM devices 1 and 3": "PCIE", + "(Topology) Link type between DRM devices 1 and 4": "PCIE", + "(Topology) Link type between DRM devices 1 and 5": "PCIE", + "(Topology) Link type between DRM devices 1 and 6": "PCIE", + "(Topology) Link type between DRM devices 1 and 7": "PCIE", + "(Topology) Link type between DRM devices 2 and 3": "PCIE", + "(Topology) Link type between DRM devices 2 and 4": "PCIE", + "(Topology) Link type between DRM devices 2 and 5": "PCIE", + "(Topology) Link type between DRM devices 2 and 6": "PCIE", + "(Topology) Link type between DRM devices 2 and 7": "PCIE", + "(Topology) Link type between DRM devices 3 and 4": "PCIE", + "(Topology) Link type between DRM devices 3 and 5": "PCIE", + "(Topology) Link type between DRM devices 3 and 6": "PCIE", + "(Topology) Link type between DRM devices 3 and 7": "PCIE", + "(Topology) Link type between DRM devices 4 and 5": "PCIE", + "(Topology) Link type between DRM devices 4 and 6": "PCIE", + "(Topology) Link type between DRM devices 4 and 7": "PCIE", + "(Topology) Link type between DRM devices 5 and 6": "PCIE", + "(Topology) Link type between DRM devices 5 and 7": "PCIE", + "(Topology) Link type between DRM devices 6 and 7": "PCIE" + } +} \ No newline at end of file From a86f5997b93b09feaaf9d9697a99445073aa4eed Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 2 Sep 2021 08:08:24 -0700 Subject: [PATCH 589/761] fix: migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 (#9699) --- docs/LICENSE_OF_DEPENDENCIES.md | 2 +- go.mod | 2 +- go.sum | 4 ++-- plugins/inputs/dcos/client.go | 4 ++-- plugins/inputs/dcos/client_test.go | 2 +- plugins/inputs/dcos/dcos.go | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 1ec09fe87f486..a0c62d7f47767 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -60,7 +60,6 @@ following works: - github.com/davecgh/go-spew [ISC License](https://github.com/davecgh/go-spew/blob/master/LICENSE) - github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt) - github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE) -- github.com/dgrijalva/jwt-go [MIT License](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) - github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE) - github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE) - github.com/docker/docker [Apache License 2.0](https://github.com/docker/docker/blob/master/LICENSE) @@ -86,6 +85,7 @@ following works: - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) - github.com/gogo/googleapis [Apache License 2.0](https://github.com/gogo/googleapis/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang-jwt/jwt [MIT License](https://github.com/golang-jwt/jwt/blob/master/LICENSE) - github.com/golang-sql/civil [Apache License 2.0](https://github.com/golang-sql/civil/blob/master/LICENSE) - github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE) - github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE) diff --git a/go.mod b/go.mod index ff441b60264d1..1d819d87bedae 100644 --- a/go.mod +++ b/go.mod @@ -79,7 +79,6 @@ require ( github.com/denisenkom/go-mssqldb v0.10.0 github.com/devigned/tab v0.1.1 // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect - github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dimchansky/utfbom v1.1.1 github.com/docker/distribution v2.7.1+incompatible // indirect github.com/docker/docker v20.10.6+incompatible @@ -108,6 +107,7 @@ require ( github.com/gofrs/uuid v3.3.0+incompatible github.com/gogo/googleapis v1.4.0 // indirect github.com/gogo/protobuf v1.3.2 + github.com/golang-jwt/jwt/v4 v4.0.0 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect diff --git a/go.sum b/go.sum index d797edf56c569..93110f3928db3 100644 --- a/go.sum +++ b/go.sum @@ -460,8 +460,6 @@ github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mz github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU= -github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= @@ -706,6 +704,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index fcb976e311ccf..08943d13db0f9 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -10,7 +10,7 @@ import ( "net/url" "time" - jwt "github.com/dgrijalva/jwt-go/v4" + jwt "github.com/golang-jwt/jwt/v4" ) const ( @@ -329,7 +329,7 @@ func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) { UID: sa.AccountID, StandardClaims: jwt.StandardClaims{ // How long we have to login with this token - ExpiresAt: jwt.At(time.Now().Add(5 * time.Minute)), + ExpiresAt: time.Now().Add(time.Minute * 5).Unix(), }, }) return token.SignedString(sa.PrivateKey) diff --git a/plugins/inputs/dcos/client_test.go b/plugins/inputs/dcos/client_test.go index ece4b178f4556..70cf9ce7cfccd 100644 --- a/plugins/inputs/dcos/client_test.go +++ b/plugins/inputs/dcos/client_test.go @@ -8,7 +8,7 @@ import ( "net/url" "testing" - jwt "github.com/dgrijalva/jwt-go/v4" + jwt "github.com/golang-jwt/jwt/v4" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index 8fcb321ff36cf..35822f30b074f 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -9,7 +9,7 @@ import ( "sync" "time" - jwt "github.com/dgrijalva/jwt-go/v4" + jwt "github.com/golang-jwt/jwt/v4" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" From 7af4c5fa1537091bc47e7ad3286a70e5c0756bc1 Mon Sep 17 00:00:00 2001 From: reimda Date: Thu, 2 Sep 2021 09:38:43 -0600 Subject: [PATCH 590/761] fix: bump runc to v1.0.0-rc95 to address CVE-2021-30465 (#9713) --- go.mod | 2 +- go.sum | 14 +++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 1d819d87bedae..eb900304fdd9a 100644 --- a/go.mod +++ b/go.mod @@ -203,7 +203,7 @@ require ( github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/opencontainers/runc v1.0.0-rc93 // indirect + github.com/opencontainers/runc v1.0.0-rc95 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go-opentracing v0.3.4 diff --git a/go.sum b/go.sum index 93110f3928db3..369a49f8412b6 100644 --- a/go.sum +++ b/go.sum @@ -322,12 +322,14 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= @@ -360,6 +362,7 @@ github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -428,6 +431,7 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/go-couchbase v0.1.0 h1:g4bCvDwRL+ZL6HLhYeRlXxEYP31Wpy0VFxnFw6efEp8= @@ -549,8 +553,9 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk= github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -685,6 +690,7 @@ github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= @@ -1255,10 +1261,10 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc93 h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM= -github.com/opencontainers/runc v1.0.0-rc93 h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.0-rc95 h1:RMuWVfY3E1ILlVsC3RhIq38n4sJtlOFwU9gfFZSqrd0= +github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1269,6 +1275,7 @@ github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= @@ -1914,6 +1921,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From 5a71f761dcd641491c6092c11024dccd863eedf6 Mon Sep 17 00:00:00 2001 From: alrex Date: Thu, 2 Sep 2021 09:09:05 -0700 Subject: [PATCH 591/761] fix: outputs.opentelemetry use headers config in grpc requests (#9587) --- plugins/outputs/opentelemetry/opentelemetry.go | 5 +++++ plugins/outputs/opentelemetry/opentelemetry_test.go | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/plugins/outputs/opentelemetry/opentelemetry.go b/plugins/outputs/opentelemetry/opentelemetry.go index ea68fbae6323a..a25fe2ff8dae8 100644 --- a/plugins/outputs/opentelemetry/opentelemetry.go +++ b/plugins/outputs/opentelemetry/opentelemetry.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/model/otlpgrpc" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" ) type OpenTelemetry struct { @@ -160,6 +161,10 @@ func (o *OpenTelemetry) Write(metrics []telegraf.Metric) error { } ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.Timeout)) + + if len(o.Headers) > 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(o.Headers)) + } defer cancel() _, err := o.metricsServiceClient.Export(ctx, md, o.callOptions...) return err diff --git a/plugins/outputs/opentelemetry/opentelemetry_test.go b/plugins/outputs/opentelemetry/opentelemetry_test.go index b61f480978ee4..cfafcd47fc10a 100644 --- a/plugins/outputs/opentelemetry/opentelemetry_test.go +++ b/plugins/outputs/opentelemetry/opentelemetry_test.go @@ -18,6 +18,7 @@ import ( "go.opentelemetry.io/collector/model/otlpgrpc" "go.opentelemetry.io/collector/model/pdata" "google.golang.org/grpc" + "google.golang.org/grpc/metadata" ) func TestOpenTelemetry(t *testing.T) { @@ -43,6 +44,7 @@ func TestOpenTelemetry(t *testing.T) { plugin := &OpenTelemetry{ ServiceAddress: m.Address(), Timeout: config.Duration(time.Second), + Headers: map[string]string{"test": "header1"}, metricsConverter: metricsConverter, grpcClientConn: m.GrpcClient(), metricsServiceClient: otlpgrpc.NewMetricsClient(m.GrpcClient()), @@ -131,5 +133,8 @@ func (m *mockOtelService) Address() string { func (m *mockOtelService) Export(ctx context.Context, request pdata.Metrics) (otlpgrpc.MetricsResponse, error) { m.metrics = request.Clone() + ctxMetadata, ok := metadata.FromIncomingContext(ctx) + assert.Equal(m.t, []string{"header1"}, ctxMetadata.Get("test")) + assert.True(m.t, ok) return otlpgrpc.MetricsResponse{}, nil } From 7de9c5ff279e10edf7fe3fdd596f3b33902c912b Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Thu, 2 Sep 2021 09:23:30 -0700 Subject: [PATCH 592/761] fix: bump thrift to 0.14.2 and zipkin-go-opentracing 0.4.5 (#9700) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 - go.mod | 7 +- go.sum | 8 +- .../stress_test_write/stress_test_write.go | 28 +- .../cmd/thrift_serialize/thrift_serialize.go | 19 +- plugins/inputs/zipkin/codec/codec.go | 2 +- plugins/inputs/zipkin/codec/jsonV1/jsonV1.go | 2 +- .../gen-go/zipkincore/GoUnusedProtection__.go | 5 + .../gen-go/zipkincore/zipkinCore-consts.go | 47 + .../thrift/gen-go/zipkincore/zipkinCore.go | 1556 +++++++++++++++++ plugins/inputs/zipkin/codec/thrift/thrift.go | 14 +- .../inputs/zipkin/codec/thrift/thrift_test.go | 2 +- 12 files changed, 1654 insertions(+), 37 deletions(-) create mode 100644 plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go create mode 100644 plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go create mode 100644 plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index a0c62d7f47767..46f8e5ff32793 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -173,7 +173,6 @@ following works: - github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE) - github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) - github.com/opentracing/opentracing-go [Apache License 2.0](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) -- github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) - github.com/philhofer/fwd [MIT License](https://github.com/philhofer/fwd/blob/master/LICENSE.md) - github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE) - github.com/pion/dtls [MIT License](https://github.com/pion/dtls/blob/master/LICENSE) diff --git a/go.mod b/go.mod index eb900304fdd9a..8dd6c8f7a6fc4 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/antchfx/xmlquery v1.3.5 github.com/antchfx/xpath v1.1.11 github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 // indirect - github.com/apache/thrift v0.13.0 + github.com/apache/thrift v0.14.2 github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-metrics v0.3.3 // indirect @@ -205,8 +205,9 @@ require ( github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opencontainers/runc v1.0.0-rc95 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/openzipkin/zipkin-go-opentracing v0.3.4 + github.com/opentracing/opentracing-go v1.2.0 + github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 + github.com/openzipkin/zipkin-go v0.2.5 github.com/philhofer/fwd v1.1.1 // indirect github.com/pierrec/lz4 v2.5.2+incompatible // indirect github.com/pion/dtls/v2 v2.0.9 diff --git a/go.sum b/go.sum index 369a49f8412b6..1d373bad3ce34 100644 --- a/go.sum +++ b/go.sum @@ -221,8 +221,9 @@ github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VT github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 h1:5ultmol0yeX75oh1hY78uAFn3dupBQ/QUNxERCkiaUQ= github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.14.2 h1:hY4rAyg7Eqbb27GB6gkhUKrRAuc8xRjlNtJq+LseKeY= +github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY= @@ -1291,12 +1292,13 @@ github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mo github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 h1:ZCnq+JUrvXcDVhX/xRolRBZifmabN1HcS1wrPSvxhrU= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g= -github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= +github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYEJTQzU= +github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= diff --git a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go index 3889e2f2cd9ea..a1abccc420ad9 100644 --- a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go +++ b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go @@ -24,7 +24,10 @@ import ( "log" "time" - zipkin "github.com/openzipkin/zipkin-go-opentracing" + otlog "github.com/opentracing/opentracing-go/log" + zipkinot "github.com/openzipkin-contrib/zipkin-go-opentracing" + "github.com/openzipkin/zipkin-go" + zipkinhttp "github.com/openzipkin/zipkin-go/reporter/http" ) var ( @@ -46,27 +49,30 @@ func init() { func main() { flag.Parse() var hostname = fmt.Sprintf("http://%s:9411/api/v1/spans", ZipkinServerHost) - collector, err := zipkin.NewHTTPCollector( + reporter := zipkinhttp.NewReporter( hostname, - zipkin.HTTPBatchSize(BatchSize), - zipkin.HTTPMaxBacklog(MaxBackLog), - zipkin.HTTPBatchInterval(time.Duration(BatchTimeInterval)*time.Second)) + zipkinhttp.BatchSize(BatchSize), + zipkinhttp.MaxBacklog(MaxBackLog), + zipkinhttp.BatchInterval(time.Duration(BatchTimeInterval)*time.Second), + ) + defer reporter.Close() + + endpoint, err := zipkin.NewEndpoint("Trivial", "127.0.0.1:0") if err != nil { - log.Fatalf("Error initializing zipkin http collector: %v\n", err) + log.Fatalf("Error: %v\n", err) } - defer collector.Close() - - tracer, err := zipkin.NewTracer( - zipkin.NewRecorder(collector, false, "127.0.0.1:0", "Trivial")) + nativeTracer, err := zipkin.NewTracer(reporter, zipkin.WithLocalEndpoint(endpoint)) if err != nil { log.Fatalf("Error: %v\n", err) } + tracer := zipkinot.Wrap(nativeTracer) + log.Printf("Writing %d spans to zipkin server at %s\n", SpanCount, hostname) for i := 0; i < SpanCount; i++ { parent := tracer.StartSpan("Parent") - parent.LogEvent(fmt.Sprintf("Trace%d", i)) + parent.LogFields(otlog.Message(fmt.Sprintf("Trace%d", i))) parent.Finish() } log.Println("Done. Flushing remaining spans...") diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go index b26e3d73fa3fd..9bf1f3261d9f6 100644 --- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go +++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go @@ -24,6 +24,7 @@ Otherwise, the input file will be interpreted as json, and the output will be en package main import ( + "context" "encoding/json" "errors" "flag" @@ -32,7 +33,7 @@ import ( "log" "github.com/apache/thrift/lib/go/thrift" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) var ( @@ -100,20 +101,20 @@ func jsonToZipkinThrift(jsonRaw []byte) ([]byte, error) { zspans = append(zspans, spans...) buf := thrift.NewTMemoryBuffer() - transport := thrift.NewTBinaryProtocolTransport(buf) + transport := thrift.NewTBinaryProtocolConf(buf, nil) - if err = transport.WriteListBegin(thrift.STRUCT, len(spans)); err != nil { + if err = transport.WriteListBegin(context.Background(), thrift.STRUCT, len(spans)); err != nil { return nil, fmt.Errorf("error in beginning thrift write: %v", err) } for _, span := range zspans { - err = span.Write(transport) + err = span.Write(context.Background(), transport) if err != nil { return nil, fmt.Errorf("error converting zipkin struct to thrift: %v", err) } } - if err = transport.WriteListEnd(); err != nil { + if err = transport.WriteListEnd(context.Background()); err != nil { return nil, fmt.Errorf("error finishing thrift write: %v", err) } @@ -127,8 +128,8 @@ func thriftToJSONSpans(thriftData []byte) ([]byte, error) { return nil, err } - transport := thrift.NewTBinaryProtocolTransport(buffer) - _, size, err := transport.ReadListBegin() + transport := thrift.NewTBinaryProtocolConf(buffer, nil) + _, size, err := transport.ReadListBegin(context.Background()) if err != nil { err = fmt.Errorf("error in ReadListBegin: %v", err) return nil, err @@ -137,14 +138,14 @@ func thriftToJSONSpans(thriftData []byte) ([]byte, error) { var spans []*zipkincore.Span for i := 0; i < size; i++ { zs := &zipkincore.Span{} - if err = zs.Read(transport); err != nil { + if err = zs.Read(context.Background(), transport); err != nil { err = fmt.Errorf("Error reading into zipkin struct: %v", err) return nil, err } spans = append(spans, zs) } - err = transport.ReadListEnd() + err = transport.ReadListEnd(context.Background()) if err != nil { err = fmt.Errorf("error ending thrift read: %v", err) return nil, err diff --git a/plugins/inputs/zipkin/codec/codec.go b/plugins/inputs/zipkin/codec/codec.go index 167b8ec24f1a3..2754e13d969e7 100644 --- a/plugins/inputs/zipkin/codec/codec.go +++ b/plugins/inputs/zipkin/codec/codec.go @@ -3,8 +3,8 @@ package codec import ( "time" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" "github.com/influxdata/telegraf/plugins/inputs/zipkin/trace" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" ) //now is a mockable time for now diff --git a/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go b/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go index 1803486742301..4c054126fa95e 100644 --- a/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go +++ b/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go @@ -7,7 +7,7 @@ import ( "time" "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) // JSON decodes spans from bodies `POST`ed to the spans endpoint diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go new file mode 100644 index 0000000000000..be7b2034832d4 --- /dev/null +++ b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go @@ -0,0 +1,5 @@ +// Code generated by Thrift Compiler (0.14.2). DO NOT EDIT. + +package zipkincore + +var GoUnusedProtection__ int diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go new file mode 100644 index 0000000000000..7c5b5825acaa6 --- /dev/null +++ b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go @@ -0,0 +1,47 @@ +// Code generated by Thrift Compiler (0.14.2). DO NOT EDIT. + +package zipkincore + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "time" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +const CLIENT_SEND = "cs" +const CLIENT_RECV = "cr" +const SERVER_SEND = "ss" +const SERVER_RECV = "sr" +const MESSAGE_SEND = "ms" +const MESSAGE_RECV = "mr" +const WIRE_SEND = "ws" +const WIRE_RECV = "wr" +const CLIENT_SEND_FRAGMENT = "csf" +const CLIENT_RECV_FRAGMENT = "crf" +const SERVER_SEND_FRAGMENT = "ssf" +const SERVER_RECV_FRAGMENT = "srf" +const HTTP_HOST = "http.host" +const HTTP_METHOD = "http.method" +const HTTP_PATH = "http.path" +const HTTP_ROUTE = "http.route" +const HTTP_URL = "http.url" +const HTTP_STATUS_CODE = "http.status_code" +const HTTP_REQUEST_SIZE = "http.request.size" +const HTTP_RESPONSE_SIZE = "http.response.size" +const LOCAL_COMPONENT = "lc" +const ERROR = "error" +const CLIENT_ADDR = "ca" +const SERVER_ADDR = "sa" +const MESSAGE_ADDR = "ma" + +func init() { +} diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go new file mode 100644 index 0000000000000..258fd4d1a0afc --- /dev/null +++ b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go @@ -0,0 +1,1556 @@ +// Code generated by Thrift Compiler (0.14.2). DO NOT EDIT. + +package zipkincore + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "time" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +//A subset of thrift base types, except BYTES. +type AnnotationType int64 + +const ( + AnnotationType_BOOL AnnotationType = 0 + AnnotationType_BYTES AnnotationType = 1 + AnnotationType_I16 AnnotationType = 2 + AnnotationType_I32 AnnotationType = 3 + AnnotationType_I64 AnnotationType = 4 + AnnotationType_DOUBLE AnnotationType = 5 + AnnotationType_STRING AnnotationType = 6 +) + +func (p AnnotationType) String() string { + switch p { + case AnnotationType_BOOL: + return "BOOL" + case AnnotationType_BYTES: + return "BYTES" + case AnnotationType_I16: + return "I16" + case AnnotationType_I32: + return "I32" + case AnnotationType_I64: + return "I64" + case AnnotationType_DOUBLE: + return "DOUBLE" + case AnnotationType_STRING: + return "STRING" + } + return "" +} + +func AnnotationTypeFromString(s string) (AnnotationType, error) { + switch s { + case "BOOL": + return AnnotationType_BOOL, nil + case "BYTES": + return AnnotationType_BYTES, nil + case "I16": + return AnnotationType_I16, nil + case "I32": + return AnnotationType_I32, nil + case "I64": + return AnnotationType_I64, nil + case "DOUBLE": + return AnnotationType_DOUBLE, nil + case "STRING": + return AnnotationType_STRING, nil + } + return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") +} + +func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } + +func (p AnnotationType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *AnnotationType) UnmarshalText(text []byte) error { + q, err := AnnotationTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *AnnotationType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = AnnotationType(v) + return nil +} + +func (p *AnnotationType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Indicates the network context of a service recording an annotation with two +// exceptions. +// +// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, +// the endpoint indicates the source or destination of an RPC. This exception +// allows zipkin to display network context of uninstrumented services, or +// clients such as web browsers. +// +// Attributes: +// - Ipv4: IPv4 host address packed into 4 bytes. +// +// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 +// - Port: IPv4 port or 0, if unknown. +// +// Note: this is to be treated as an unsigned integer, so watch for negatives. +// - ServiceName: Classifier of a source or destination in lowercase, such as "zipkin-web". +// +// This is the primary parameter for trace lookup, so should be intuitive as +// possible, for example, matching names in service discovery. +// +// Conventionally, when the service name isn't known, service_name = "unknown". +// However, it is also permissible to set service_name = "" (empty string). +// The difference in the latter usage is that the span will not be queryable +// by service name unless more information is added to the span with non-empty +// service name, e.g. an additional annotation from the server. +// +// Particularly clients may not have a reliable service name at ingest. One +// approach is to set service_name to "" at ingest, and later assign a +// better label based on binary annotations, such as user agent. +// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes() +type Endpoint struct { + Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` + Port int16 `thrift:"port,2" db:"port" json:"port"` + ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` + Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"` +} + +func NewEndpoint() *Endpoint { + return &Endpoint{} +} + +func (p *Endpoint) GetIpv4() int32 { + return p.Ipv4 +} + +func (p *Endpoint) GetPort() int16 { + return p.Port +} + +func (p *Endpoint) GetServiceName() string { + return p.ServiceName +} + +var Endpoint_Ipv6_DEFAULT []byte + +func (p *Endpoint) GetIpv6() []byte { + return p.Ipv6 +} +func (p *Endpoint) IsSetIpv6() bool { + return p.Ipv6 != nil +} + +func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I16 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ipv4 = v + } + return nil +} + +func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Port = v + } + return nil +} + +func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ServiceName = v + } + return nil +} + +func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Ipv6 = v + } + return nil +} + +func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) + } + return err +} + +func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) + } + if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) + } + return err +} + +func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) + } + return err +} + +func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIpv6() { + if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) + } + } + return err +} + +func (p *Endpoint) Equals(other *Endpoint) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ipv4 != other.Ipv4 { + return false + } + if p.Port != other.Port { + return false + } + if p.ServiceName != other.ServiceName { + return false + } + if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { + return false + } + return true +} + +func (p *Endpoint) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Endpoint(%+v)", *p) +} + +// Associates an event that explains latency with a timestamp. +// +// Unlike log statements, annotations are often codes: for example "sr". +// +// Attributes: +// - Timestamp: Microseconds from epoch. +// +// This value should use the most precise value possible. For example, +// gettimeofday or multiplying currentTimeMillis by 1000. +// - Value: Usually a short tag indicating an event, like "sr" or "finagle.retry". +// - Host: The host that recorded the value, primarily for query by service name. +type Annotation struct { + Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` + Value string `thrift:"value,2" db:"value" json:"value"` + Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` +} + +func NewAnnotation() *Annotation { + return &Annotation{} +} + +func (p *Annotation) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Annotation) GetValue() string { + return p.Value +} + +var Annotation_Host_DEFAULT *Endpoint + +func (p *Annotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return Annotation_Host_DEFAULT + } + return p.Host +} +func (p *Annotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Timestamp = v + } + return nil +} + +func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) + } + return err +} + +func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) + } + } + return err +} + +func (p *Annotation) Equals(other *Annotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Timestamp != other.Timestamp { + return false + } + if p.Value != other.Value { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *Annotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Annotation(%+v)", *p) +} + +// Binary annotations are tags applied to a Span to give it context. For +// example, a binary annotation of HTTP_PATH ("http.path") could the path +// to a resource in a RPC call. +// +// Binary annotations of type STRING are always queryable, though more a +// historical implementation detail than a structural concern. +// +// Binary annotations can repeat, and vary on the host. Similar to Annotation, +// the host indicates who logged the event. This allows you to tell the +// difference between the client and server side of the same key. For example, +// the key "http.path" might be different on the client and server side due to +// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, +// you can see the different points of view, which often help in debugging. +// +// Attributes: +// - Key: Name used to lookup spans, such as "http.path" or "finagle.version". +// - Value: Serialized thrift bytes, in TBinaryProtocol format. +// +// For legacy reasons, byte order is big-endian. See THRIFT-3217. +// - AnnotationType: The thrift type of value, most often STRING. +// +// annotation_type shouldn't vary for the same key. +// - Host: The host that recorded value, allowing query by service name or address. +// +// There are two exceptions: when key is "ca" or "sa", this is the source or +// destination of an RPC. This exception allows zipkin to display network +// context of uninstrumented services, such as browsers or databases. +type BinaryAnnotation struct { + Key string `thrift:"key,1" db:"key" json:"key"` + Value []byte `thrift:"value,2" db:"value" json:"value"` + AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` + Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` +} + +func NewBinaryAnnotation() *BinaryAnnotation { + return &BinaryAnnotation{} +} + +func (p *BinaryAnnotation) GetKey() string { + return p.Key +} + +func (p *BinaryAnnotation) GetValue() []byte { + return p.Value +} + +func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { + return p.AnnotationType +} + +var BinaryAnnotation_Host_DEFAULT *Endpoint + +func (p *BinaryAnnotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return BinaryAnnotation_Host_DEFAULT + } + return p.Host +} +func (p *BinaryAnnotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Key = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := AnnotationType(v) + p.AnnotationType = temp + } + return nil +} + +func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Key)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Value); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) + } + } + return err +} + +func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Key != other.Key { + return false + } + if bytes.Compare(p.Value, other.Value) != 0 { + return false + } + if p.AnnotationType != other.AnnotationType { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *BinaryAnnotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BinaryAnnotation(%+v)", *p) +} + +// A trace is a series of spans (often RPC calls) which form a latency tree. +// +// Spans are usually created by instrumentation in RPC clients or servers, but +// can also represent in-process activity. Annotations in spans are similar to +// log statements, and are sometimes created directly by application developers +// to indicate events of interest, such as a cache miss. +// +// The root span is where parent_id = Nil; it usually has the longest duration +// in the trace. +// +// Span identifiers are packed into i64s, but should be treated opaquely. +// String encoding is fixed-width lower-hex, to avoid signed interpretation. +// +// Attributes: +// - TraceID: Unique 8-byte identifier for a trace, set on all spans within it. +// - Name: Span name in lowercase, rpc method for example. Conventionally, when the +// span name isn't known, name = "unknown". +// - ID: Unique 8-byte identifier of this span within a trace. A span is uniquely +// identified in storage by (trace_id, id). +// - ParentID: The parent's Span.id; absent if this the root span in a trace. +// - Annotations: Associates events that explain latency with a timestamp. Unlike log +// statements, annotations are often codes: for example SERVER_RECV("sr"). +// Annotations are sorted ascending by timestamp. +// - BinaryAnnotations: Tags a span with context, usually to support query or aggregation. For +// example, a binary annotation key could be "http.path". +// - Debug: True is a request to store this span even if it overrides sampling policy. +// - Timestamp: Epoch microseconds of the start of this span, absent if this an incomplete +// span. +// +// This value should be set directly by instrumentation, using the most +// precise value possible. For example, gettimeofday or syncing nanoTime +// against a tick of currentTimeMillis. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this via Annotation.timestamp. +// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. +// +// Timestamp is nullable for input only. Spans without a timestamp cannot be +// presented in a timeline: Span stores should not output spans missing a +// timestamp. +// +// There are two known edge-cases where this could be absent: both cases +// exist when a collector receives a span in parts and a binary annotation +// precedes a timestamp. This is possible when.. +// - The span is in-flight (ex not yet received a timestamp) +// - The span's start event was lost +// - Duration: Measurement in microseconds of the critical path, if known. Durations of +// less than one microsecond must be rounded up to 1 microsecond. +// +// This value should be set directly, as opposed to implicitly via annotation +// timestamps. Doing so encourages precision decoupled from problems of +// clocks, such as skew or NTP updates causing time to move backwards. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this by subtracting Annotation.timestamp. +// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. +// +// If this field is persisted as unset, zipkin will continue to work, except +// duration query support will be implementation-specific. Similarly, setting +// this field non-atomically is implementation-specific. +// +// This field is i64 vs i32 to support spans longer than 35 minutes. +// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this +// means the trace uses 128 bit traceIds instead of 64 bit. +type Span struct { + TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` + // unused field # 2 + Name string `thrift:"name,3" db:"name" json:"name"` + ID int64 `thrift:"id,4" db:"id" json:"id"` + ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` + Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` + // unused field # 7 + BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` + Debug bool `thrift:"debug,9" db:"debug" json:"debug"` + Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` + Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` + TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + +func (p *Span) GetTraceID() int64 { + return p.TraceID +} + +func (p *Span) GetName() string { + return p.Name +} + +func (p *Span) GetID() int64 { + return p.ID +} + +var Span_ParentID_DEFAULT int64 + +func (p *Span) GetParentID() int64 { + if !p.IsSetParentID() { + return Span_ParentID_DEFAULT + } + return *p.ParentID +} + +func (p *Span) GetAnnotations() []*Annotation { + return p.Annotations +} + +func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { + return p.BinaryAnnotations +} + +var Span_Debug_DEFAULT bool = false + +func (p *Span) GetDebug() bool { + return p.Debug +} + +var Span_Timestamp_DEFAULT int64 + +func (p *Span) GetTimestamp() int64 { + if !p.IsSetTimestamp() { + return Span_Timestamp_DEFAULT + } + return *p.Timestamp +} + +var Span_Duration_DEFAULT int64 + +func (p *Span) GetDuration() int64 { + if !p.IsSetDuration() { + return Span_Duration_DEFAULT + } + return *p.Duration +} + +var Span_TraceIDHigh_DEFAULT int64 + +func (p *Span) GetTraceIDHigh() int64 { + if !p.IsSetTraceIDHigh() { + return Span_TraceIDHigh_DEFAULT + } + return *p.TraceIDHigh +} +func (p *Span) IsSetParentID() bool { + return p.ParentID != nil +} + +func (p *Span) IsSetDebug() bool { + return p.Debug != Span_Debug_DEFAULT +} + +func (p *Span) IsSetTimestamp() bool { + return p.Timestamp != nil +} + +func (p *Span) IsSetDuration() bool { + return p.Duration != nil +} + +func (p *Span) IsSetTraceIDHigh() bool { + return p.TraceIDHigh != nil +} + +func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.LIST { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.I64 { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.I64 { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.I64 { + if err := p.ReadField12(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.TraceID = v + } + return nil +} + +func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Name = v + } + return nil +} + +func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ID = v + } + return nil +} + +func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ParentID = &v + } + return nil +} + +func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Annotation, 0, size) + p.Annotations = tSlice + for i := 0; i < size; i++ { + _elem0 := &Annotation{} + if err := _elem0.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Annotations = append(p.Annotations, _elem0) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BinaryAnnotation, 0, size) + p.BinaryAnnotations = tSlice + for i := 0; i < size; i++ { + _elem1 := &BinaryAnnotation{} + if err := _elem1.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.Debug = v + } + return nil +} + +func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + p.Timestamp = &v + } + return nil +} + +func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 11: ", err) + } else { + p.Duration = &v + } + return nil +} + +func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 12: ", err) + } else { + p.TraceIDHigh = &v + } + return nil +} + +func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + if err := p.writeField5(ctx, oprot); err != nil { + return err + } + if err := p.writeField6(ctx, oprot); err != nil { + return err + } + if err := p.writeField8(ctx, oprot); err != nil { + return err + } + if err := p.writeField9(ctx, oprot); err != nil { + return err + } + if err := p.writeField10(ctx, oprot); err != nil { + return err + } + if err := p.writeField11(ctx, oprot); err != nil { + return err + } + if err := p.writeField12(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) + } + return err +} + +func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) + } + return err +} + +func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) + } + return err +} + +func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetParentID() { + if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) + } + } + return err +} + +func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Annotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) + } + return err +} + +func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BinaryAnnotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) + } + return err +} + +func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDebug() { + if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) + } + } + return err +} + +func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTimestamp() { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) + } + } + return err +} + +func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDuration() { + if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) + } + } + return err +} + +func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTraceIDHigh() { + if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) + } + } + return err +} + +func (p *Span) Equals(other *Span) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TraceID != other.TraceID { + return false + } + if p.Name != other.Name { + return false + } + if p.ID != other.ID { + return false + } + if p.ParentID != other.ParentID { + if p.ParentID == nil || other.ParentID == nil { + return false + } + if (*p.ParentID) != (*other.ParentID) { + return false + } + } + if len(p.Annotations) != len(other.Annotations) { + return false + } + for i, _tgt := range p.Annotations { + _src2 := other.Annotations[i] + if !_tgt.Equals(_src2) { + return false + } + } + if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { + return false + } + for i, _tgt := range p.BinaryAnnotations { + _src3 := other.BinaryAnnotations[i] + if !_tgt.Equals(_src3) { + return false + } + } + if p.Debug != other.Debug { + return false + } + if p.Timestamp != other.Timestamp { + if p.Timestamp == nil || other.Timestamp == nil { + return false + } + if (*p.Timestamp) != (*other.Timestamp) { + return false + } + } + if p.Duration != other.Duration { + if p.Duration == nil || other.Duration == nil { + return false + } + if (*p.Duration) != (*other.Duration) { + return false + } + } + if p.TraceIDHigh != other.TraceIDHigh { + if p.TraceIDHigh == nil || other.TraceIDHigh == nil { + return false + } + if (*p.TraceIDHigh) != (*other.TraceIDHigh) { + return false + } + } + return true +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Span(%+v)", *p) +} diff --git a/plugins/inputs/zipkin/codec/thrift/thrift.go b/plugins/inputs/zipkin/codec/thrift/thrift.go index 65a9e1488c2c4..c2c60a3395d2d 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift.go @@ -1,16 +1,16 @@ package thrift import ( + "context" "encoding/binary" "fmt" "net" "strconv" "time" - "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec" - "github.com/apache/thrift/lib/go/thrift" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) // UnmarshalThrift converts raw bytes in thrift format to a slice of spans @@ -20,8 +20,8 @@ func UnmarshalThrift(body []byte) ([]*zipkincore.Span, error) { return nil, err } - transport := thrift.NewTBinaryProtocolTransport(buffer) - _, size, err := transport.ReadListBegin() + transport := thrift.NewTBinaryProtocolConf(buffer, nil) + _, size, err := transport.ReadListBegin(context.Background()) if err != nil { return nil, err } @@ -29,13 +29,13 @@ func UnmarshalThrift(body []byte) ([]*zipkincore.Span, error) { spans := make([]*zipkincore.Span, size) for i := 0; i < size; i++ { zs := &zipkincore.Span{} - if err = zs.Read(transport); err != nil { + if err = zs.Read(context.Background(), transport); err != nil { return nil, err } spans[i] = zs } - if err = transport.ReadListEnd(); err != nil { + if err = transport.ReadListEnd(context.Background()); err != nil { return nil, err } return spans, nil diff --git a/plugins/inputs/zipkin/codec/thrift/thrift_test.go b/plugins/inputs/zipkin/codec/thrift/thrift_test.go index 798fc269edf86..d4bbc1d54df20 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift_test.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift_test.go @@ -6,7 +6,7 @@ import ( "github.com/google/go-cmp/cmp" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) func Test_endpointHost(t *testing.T) { From e9872741674c054b58a4236d3c3ea98f18515c65 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 2 Sep 2021 14:17:05 -0600 Subject: [PATCH 593/761] Update changelog --- CHANGELOG.md | 53 +++++ etc/telegraf.conf | 569 +++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 561 insertions(+), 61 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 053e9ee59bbf7..42ca26772a37b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,56 @@ +## v1.20.0-rc0 [2021-09-02] + +#### Release Notes + + - [#9642](https://github.com/influxdata/telegraf/pull/9642) Build with Golang 1.17 + +#### Bugfixes + + - [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing 0.4.5 + - [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in grpc requests + - [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465 + - [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 + - [#9139](https://github.com/influxdata/telegraf/pull/9139) `serializers.prometheus` Update timestamps and expiration time as new data arrives + - [#9625](https://github.com/influxdata/telegraf/pull/9625) `outputs.graylog` Output timestamp with fractional seconds + - [#9655](https://github.com/influxdata/telegraf/pull/9655) Update cloud.google.com/go/pubsub module from 1.2.0 to 1.15.0 + - [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version + - [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value + - [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values. + - [#9652](https://github.com/influxdata/telegraf/pull/9652) Update github.com/tinylib/msgp module from 1.1.5 to 1.1.6 + - [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to single query + +#### Features + + - [#9665](https://github.com/influxdata/telegraf/pull/9665) `inputs.systemd_units` feat(plugins/inputs/systemd_units): add pattern support + - [#9598](https://github.com/influxdata/telegraf/pull/9598) `outputs.sql` Add bool datatype + - [#9386](https://github.com/influxdata/telegraf/pull/9386) `inputs.cloudwatch` Pull metrics from multiple AWS CloudWatch namespaces + - [#9411](https://github.com/influxdata/telegraf/pull/9411) `inputs.cloudwatch` Support AWS Web Identity Provider + - [#9570](https://github.com/influxdata/telegraf/pull/9570) `inputs.modbus` Add support for RTU over TCP + - [#9488](https://github.com/influxdata/telegraf/pull/9488) `inputs.procstat` Support cgroup globs and include systemd unit children + - [#9322](https://github.com/influxdata/telegraf/pull/9322) `inputs.suricata` Support alert event type + - [#5464](https://github.com/influxdata/telegraf/pull/5464) `inputs.prometheus` Add ability to query Consul Service catalog + - [#8641](https://github.com/influxdata/telegraf/pull/8641) `outputs.prometheus_client` Add Landing page + - [#9529](https://github.com/influxdata/telegraf/pull/9529) `inputs.http_listener_v2` Allows multiple paths and add path_tag + - [#9395](https://github.com/influxdata/telegraf/pull/9395) Add cookie authentication to HTTP input and output plugins + - [#8454](https://github.com/influxdata/telegraf/pull/8454) `inputs.syslog` Add RFC3164 support + - [#9351](https://github.com/influxdata/telegraf/pull/9351) `inputs.jenkins` Add option to include nodes by name + - [#9277](https://github.com/influxdata/telegraf/pull/9277) Add JSON, MessagePack, and Protocol-buffers format support to the XPath parser + - [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup performance + - [#9342](https://github.com/influxdata/telegraf/pull/9342) `outputs.newrelic` Add option to override metric_url + - [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status + +#### New Input Plugins + + - [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs + - [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather from /proc/mdstat collection + - [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input + +#### New Output Plugins + + - [#9228](https://github.com/influxdata/telegraf/pull/9228) Add OpenTelemetry output + - [#9426](https://github.com/influxdata/telegraf/pull/9426) Add Azure Data Explorer(ADX) output + + ## v1.19.3 [2021-08-18] #### Bugfixes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 43b1f8f3ade45..fabd2616141fb 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -315,6 +315,30 @@ # # "ai.cloud.roleInstance" = "kubernetes_pod_name" +# # Sends metrics to Azure Data Explorer +# [[outputs.azure_data_explorer]] +# ## Azure Data Exlorer cluster endpoint +# ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" +# endpoint_url = "" +# +# ## The Azure Data Explorer database that the metrics will be ingested into. +# ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. +# ## ex: "exampledatabase" +# database = "" +# +# ## Timeout for Azure Data Explorer operations +# # timeout = "20s" +# +# ## Type of metrics grouping used when pushing to Azure Data Explorer. +# ## Default is "TablePerMetric" for one table per different metric. +# ## For more information, please check the plugin README. +# # metrics_grouping_type = "TablePerMetric" +# +# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). +# # table_name = "" +# + + # # Send aggregate metrics to Azure Monitor # [[outputs.azure_monitor]] # ## Timeout for HTTP writes. @@ -404,16 +428,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # #access_key = "" # #secret_key = "" # #token = "" # #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" # #profile = "" # #shared_credential_file = "" # @@ -452,16 +479,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # #access_key = "" # #secret_key = "" # #token = "" # #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" # #profile = "" # #shared_credential_file = "" # @@ -809,6 +839,15 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# # ## Data format to output. # ## Each data format has it's own unique set of configuration options, read # ## more about them here: @@ -1049,16 +1088,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # #access_key = "" # #secret_key = "" # #token = "" # #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" # #profile = "" # #shared_credential_file = "" # @@ -1268,6 +1310,10 @@ # ## HTTP Proxy override. If unset use values from the standard # ## proxy environment variables to determine proxy, if any. # # http_proxy = "http://corporate.proxy:3128" +# +# ## Metric URL override to enable geographic location endpoints. +# # If not set use values from the standard +# # metric_url = "https://metric-api.newrelic.com/metric/v1" # # Send telegraf measurements to NSQD @@ -1284,6 +1330,41 @@ # data_format = "influx" +# # Send OpenTelemetry metrics over gRPC +# [[outputs.opentelemetry]] +# ## Override the default (localhost:4317) OpenTelemetry gRPC service +# ## address:port +# # service_address = "localhost:4317" +# +# ## Override the default (5s) request timeout +# # timeout = "5s" +# +# ## Optional TLS Config. +# ## +# ## Root certificates for verifying server certificates encoded in PEM format. +# # tls_ca = "/etc/telegraf/ca.pem" +# ## The public and private keypairs for the client encoded in PEM format. +# ## May contain intermediate certificates. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS, but skip TLS chain and host verification. +# # insecure_skip_verify = false +# ## Send the specified TLS server name via SNI. +# # tls_server_name = "foo.example.com" +# +# ## Override the default (gzip) compression used to send data. +# ## Supports: "gzip", "none" +# # compression = "gzip" +# +# ## Additional OpenTelemetry resource attributes +# # [outputs.opentelemetry.attributes] +# # "service.name" = "demo" +# +# ## Additional gRPC request metadata +# # [outputs.opentelemetry.headers] +# # key1 = "value1" + + # # Configuration for OpenTSDB server to send metrics to # [[outputs.opentsdb]] # ## prefix for metrics keys @@ -1748,16 +1829,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order: -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) Explicit credentials from 'access_key' and 'secret_key' -# ## 3) Shared profile from 'profile' -# ## 4) Environment variables -# ## 5) Shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # #access_key = "" # #secret_key = "" # #token = "" # #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" # #profile = "" # #shared_credential_file = "" # @@ -1870,7 +1954,7 @@ # ## Print Warp 10 error body # # print_error_body = false # -# ## Max string error size +# ## Max string error size # # max_string_error_size = 511 # # ## Optional TLS Config @@ -2896,6 +2980,15 @@ # # num_histogram_buckets = 100 # default: 10 +# # Query statistics from AMD Graphics cards using rocm-smi binary +# [[inputs.amd_rocm_smi]] +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + # # Read Apache status information (mod_status) # [[inputs.apache]] # ## An array of URLs to gather from, must be directed at the machine @@ -3163,16 +3256,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # # access_key = "" # # secret_key = "" # # token = "" # # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" # # profile = "" # # shared_credential_file = "" # @@ -3212,8 +3308,10 @@ # ## Configure the TTL for the internal cache of metrics. # # cache_ttl = "1h" # -# ## Metric Statistic Namespace (required) -# namespace = "AWS/ELB" +# ## Metric Statistic Namespaces (required) +# namespaces = ["AWS/ELB"] +# # A single metric statistic namespace that will be appended to namespaces on startup +# # namespace = "AWS/ELB" # # ## Maximum requests per second. Note that the global default AWS rate limit is # ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a @@ -3632,6 +3730,72 @@ # # num_most_recent_indices = 0 +# # Derive metrics from aggregating Elasticsearch query results +# [[inputs.elasticsearch_query]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# +# ## Elasticsearch client timeout, defaults to "5s". +# # timeout = "5s" +# +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option +# # enable_sniffer = false +# +# ## Set the interval to check if the Elasticsearch nodes are available +# ## This option is only used if enable_sniffer is also set (0s to disable it) +# # health_check_interval = "10s" +# +# ## HTTP basic authentication details (eg. when using x-pack) +# # username = "telegraf" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# [[inputs.elasticsearch_query.aggregation]] +# ## measurement name for the results of the aggregation query +# measurement_name = "measurement" +# +# ## Elasticsearch indexes to query (accept wildcards). +# index = "index-*" +# +# ## The date/time field in the Elasticsearch index (mandatory). +# date_field = "@timestamp" +# +# ## Time window to query (eg. "1m" to query documents from last minute). +# ## Normally should be set to same as collection interval +# query_period = "1m" +# +# ## Lucene query to filter results +# # filter_query = "*" +# +# ## Fields to aggregate values (must be numeric fields) +# # metric_fields = ["metric"] +# +# ## Aggregation function to use on the metric fields +# ## Must be set if 'metric_fields' is set +# ## Valid values are: avg, sum, min, max, sum +# # metric_function = "avg" +# +# ## Fields to be used as tags +# ## Must be text, non-analyzed fields. Metric aggregations are performed per tag +# # tags = ["field.keyword", "field2.keyword"] +# +# ## Set to true to not ignore documents when the tag(s) above are missing +# # include_missing_tag = false +# +# ## String value of the tag when the tag does not exist +# ## Used when include_missing_tag is true +# # missing_tag_value = "null" + + # # Returns ethtool statistics for given interfaces # [[inputs.ethtool]] # ## List of interfaces to pull metrics for @@ -3944,6 +4108,15 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# # ## Amount of time allowed to complete the HTTP request # # timeout = "5s" # @@ -4286,7 +4459,9 @@ # # job_include = [ "*" ] # # job_exclude = [ ] # -# ## Nodes to exclude from gathering +# ## Nodes to include or exclude from gathering +# ## When using both lists, node_exclude has priority. +# # node_include = [ "*" ] # # node_exclude = [ ] # # ## Worker pool for jenkins plugin only @@ -4343,19 +4518,19 @@ # ## List of metrics collected on above servers # ## Each metric consists in a name, a jmx path and either # ## a pass or drop slice attribute. -# ## This collect all heap memory usage metrics. +# ## This collect all heap memory usage metrics. # [[inputs.jolokia.metrics]] # name = "heap_memory_usage" # mbean = "java.lang:type=Memory" # attribute = "HeapMemoryUsage" # -# ## This collect thread counts metrics. +# ## This collect thread counts metrics. # [[inputs.jolokia.metrics]] # name = "thread_count" # mbean = "java.lang:type=Threading" # attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" # -# ## This collect number of class loaded/unloaded counts metrics. +# ## This collect number of class loaded/unloaded counts metrics. # [[inputs.jolokia.metrics]] # name = "class_count" # mbean = "java.lang:type=ClassLoading" @@ -4637,6 +4812,13 @@ # # timeout = "5s" +# # Get md array statistics from /proc/mdstat +# [[inputs.mdstat]] +# ## Sets file path +# ## If not specified, then default is /proc/mdstat +# # file_name = "/proc/mdstat" + + # # Read metrics from one or many memcached servers # [[inputs.memcached]] # ## An array of address to gather stats about. Specify an ip on hostname @@ -4708,7 +4890,7 @@ # [[inputs.modbus]] # ## Connection Configuration # ## -# ## The plugin supports connections to PLCs via MODBUS/TCP or +# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or # ## via serial line communication in binary (RTU) or readable (ASCII) encoding # ## # ## Device name @@ -4735,8 +4917,11 @@ # # data_bits = 8 # # parity = "N" # # stop_bits = 1 -# # transmission_mode = "RTU" # +# ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" +# ## default behaviour is "TCP" if the controller is TCP +# ## For Serial you can choose between "RTU" and "ASCII" +# # transmission_mode = "RTU" # # ## Measurements # ## @@ -5543,9 +5728,10 @@ # # pattern = "nginx" # ## user as argument for pgrep (ie, pgrep -u ) # # user = "nginx" -# ## Systemd unit name +# ## Systemd unit name, supports globs when include_systemd_children is set to true # # systemd_unit = "nginx.service" -# ## CGroup name or path +# # include_systemd_children = false +# ## CGroup name or path, supports globs # # cgroup = "systemd/system.slice/nginx.service" # # ## Windows service name @@ -5785,13 +5971,6 @@ # # Specify a list of one or more riak http servers # servers = ["http://localhost:8098"] -# # Query statistics from AMD Graphics cards using rocm-smi binary -# [[inputs.amd_rocm_smi]] -# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath -# # bin_path = "/opt/rocm/bin/rocm-smi" -# -# ## Optional: timeout for GPU polling -# # timeout = "5s" # # Read API usage and limits for a Salesforce organisation # [[inputs.salesforce]] @@ -6186,6 +6365,13 @@ # ## values are "socket", "target", "device", "mount", "automount", "swap", # ## "timer", "path", "slice" and "scope ": # # unittype = "service" +# # +# ## Filter for a specific pattern, default is "" (i.e. all), other possible +# ## values are valid pattern for systemctl, e.g. "a*" for all units with +# ## names starting with "a" +# # pattern = "" +# ## pattern = "telegraf* influxdb*" +# ## pattern = "a*" # # Reads metrics from a Teamspeak 3 Server via ServerQuery @@ -6293,6 +6479,219 @@ # ## General connection timeout # # timeout = "5s" +# # Input plugin to collect Windows Event Log messages +# [[inputs.win_eventlog]] +# ## Telegraf should have Administrator permissions to subscribe for some Windows Events channels +# ## (System log, for example) +# +# ## LCID (Locale ID) for event rendering +# ## 1033 to force English language +# ## 0 to use default Windows locale +# # locale = 0 +# +# ## Name of eventlog, used only if xpath_query is empty +# ## Example: "Application" +# # eventlog_name = "" +# +# ## xpath_query can be in defined short form like "Event/System[EventID=999]" +# ## or you can form a XML Query. Refer to the Consuming Events article: +# ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events +# ## XML query is the recommended form, because it is most flexible +# ## You can create or debug XML Query by creating Custom View in Windows Event Viewer +# ## and then copying resulting XML here +# xpath_query = ''' +# +# +# +# *[System[( (EventID >= 5152 and EventID <= 5158) or EventID=5379 or EventID=4672)]] +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# ''' +# +# ## System field names: +# ## "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", "TimeCreated", +# ## "EventRecordID", "ActivityID", "RelatedActivityID", "ProcessID", "ThreadID", "ProcessName", +# ## "Channel", "Computer", "UserID", "UserName", "Message", "LevelText", "TaskText", "OpcodeText" +# +# ## In addition to System, Data fields can be unrolled from additional XML nodes in event. +# ## Human-readable representation of those nodes is formatted into event Message field, +# ## but XML is more machine-parsable +# +# # Process UserData XML to fields, if this node exists in Event XML +# process_userdata = true +# +# # Process EventData XML to fields, if this node exists in Event XML +# process_eventdata = true +# +# ## Separator character to use for unrolled XML Data field names +# separator = "_" +# +# ## Get only first line of Message field. For most events first line is usually more than enough +# only_first_line_of_message = true +# +# ## Parse timestamp from TimeCreated.SystemTime event field. +# ## Will default to current time of telegraf processing on parsing error or if set to false +# timestamp_from_event = true +# +# ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText") +# event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] +# +# ## Default list of fields to send. All fields are sent by default. Globbing supported +# event_fields = ["*"] +# +# ## Fields to exclude. Also applied to data fields. Globbing supported +# exclude_fields = ["TimeCreated", "Binary", "Data_Address*"] +# +# ## Skip those tags or fields if their value is empty or equals to zero. Globbing supported +# exclude_empty = ["*ActivityID", "UserID"] + + +# # Input plugin to counterPath Performance Counters on Windows operating systems +# [[inputs.win_perf_counters]] +# ## By default this plugin returns basic CPU and Disk statistics. +# ## See the README file for more examples. +# ## Uncomment examples below or write your own as you see fit. If the system +# ## being polled for data does not have the Object at startup of the Telegraf +# ## agent, it will not be gathered. +# ## Settings: +# # PrintValid = false # Print All matching performance counters +# # Whether request a timestamp along with the PerfCounter data or just use current time +# # UsePerfCounterTime=true +# # If UseWildcardsExpansion params is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded +# # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. +# # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names. +# #UseWildcardsExpansion = false +# # Period after which counters will be reread from configuration and wildcards in counter paths expanded +# CountersRefreshInterval="1m" +# +# [[inputs.win_perf_counters.object]] +# # Processor usage, alternative to native, reports on a per core. +# ObjectName = "Processor" +# Instances = ["*"] +# Counters = [ +# "% Idle Time", +# "% Interrupt Time", +# "% Privileged Time", +# "% User Time", +# "% Processor Time", +# "% DPC Time", +# ] +# Measurement = "win_cpu" +# # Set to true to include _Total instance when querying for all (*). +# # IncludeTotal=false +# # Print out when the performance counter is missing from object, counter or instance. +# # WarnOnMissing = false +# +# [[inputs.win_perf_counters.object]] +# # Disk times and queues +# ObjectName = "LogicalDisk" +# Instances = ["*"] +# Counters = [ +# "% Idle Time", +# "% Disk Time", +# "% Disk Read Time", +# "% Disk Write Time", +# "% User Time", +# "% Free Space", +# "Current Disk Queue Length", +# "Free Megabytes", +# ] +# Measurement = "win_disk" +# +# [[inputs.win_perf_counters.object]] +# ObjectName = "PhysicalDisk" +# Instances = ["*"] +# Counters = [ +# "Disk Read Bytes/sec", +# "Disk Write Bytes/sec", +# "Current Disk Queue Length", +# "Disk Reads/sec", +# "Disk Writes/sec", +# "% Disk Time", +# "% Disk Read Time", +# "% Disk Write Time", +# ] +# Measurement = "win_diskio" +# +# [[inputs.win_perf_counters.object]] +# ObjectName = "Network Interface" +# Instances = ["*"] +# Counters = [ +# "Bytes Received/sec", +# "Bytes Sent/sec", +# "Packets Received/sec", +# "Packets Sent/sec", +# "Packets Received Discarded", +# "Packets Outbound Discarded", +# "Packets Received Errors", +# "Packets Outbound Errors", +# ] +# Measurement = "win_net" +# +# +# [[inputs.win_perf_counters.object]] +# ObjectName = "System" +# Counters = [ +# "Context Switches/sec", +# "System Calls/sec", +# "Processor Queue Length", +# "System Up Time", +# ] +# Instances = ["------"] +# Measurement = "win_system" +# +# [[inputs.win_perf_counters.object]] +# # Example counterPath where the Instance portion must be removed to get data back, +# # such as from the Memory object. +# ObjectName = "Memory" +# Counters = [ +# "Available Bytes", +# "Cache Faults/sec", +# "Demand Zero Faults/sec", +# "Page Faults/sec", +# "Pages/sec", +# "Transition Faults/sec", +# "Pool Nonpaged Bytes", +# "Pool Paged Bytes", +# "Standby Cache Reserve Bytes", +# "Standby Cache Normal Priority Bytes", +# "Standby Cache Core Bytes", +# ] +# Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. +# Measurement = "win_mem" +# +# [[inputs.win_perf_counters.object]] +# # Example query where the Instance portion must be removed to get data back, +# # such as from the Paging File object. +# ObjectName = "Paging File" +# Counters = [ +# "% Usage", +# ] +# Instances = ["_Total"] +# Measurement = "win_swap" + + +# # Input plugin to report Windows services info. +# [[inputs.win_services]] +# ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. +# service_names = [ +# "LanmanServer", +# "TermService", +# "Win*", +# ] + # # A plugin to collect stats from Varnish HTTP Cache # [[inputs.varnish]] @@ -7138,7 +7537,14 @@ # service_address = ":8080" # # ## Path to listen to. -# # path = "/telegraf" +# ## This option is deprecated and only available for backward-compatibility. Please use paths instead. +# # path = "" +# +# ## Paths to listen to. +# # paths = ["/telegraf"] +# +# ## Save path as http_listener_v2_path tag if set to true +# # path_tag = false # # ## HTTP methods to accept. # # methods = ["POST", "PUT"] @@ -7149,7 +7555,7 @@ # # write_timeout = "10s" # # ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) # # max_body_size = "500MB" # # ## Part of the request to consume. Available options are "body" and @@ -7450,16 +7856,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # # access_key = "" # # secret_key = "" # # token = "" # # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" # # profile = "" # # shared_credential_file = "" # @@ -7723,16 +8132,28 @@ # # Receive OpenTelemetry traces, metrics, and logs over gRPC # [[inputs.opentelemetry]] -# ## Override the OpenTelemetry gRPC service address:port +# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service +# ## address:port # # service_address = "0.0.0.0:4317" # -# ## Override the default request timeout +# ## Override the default (5s) new connection timeout # # timeout = "5s" # -# ## Select a schema for metrics: prometheus-v1 or prometheus-v2 +# ## Override the default (prometheus-v1) metrics schema. +# ## Supports: "prometheus-v1", "prometheus-v2" # ## For more information about the alternatives, read the Prometheus input # ## plugin notes. # # metrics_schema = "prometheus-v1" +# +# ## Optional TLS Config. +# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md +# ## +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# ## Add service certificate and key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # # Read metrics from one or many pgbouncer servers @@ -7909,6 +8330,19 @@ # # eg. To scrape pods on a specific node # # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" # +# ## Scrape Services available in Consul Catalog +# # [inputs.prometheus.consul] +# # enabled = true +# # agent = "http://localhost:8500" +# # query_interval = "5m" +# +# # [[inputs.prometheus.consul.query]] +# # name = "a service name" +# # tag = "a service tag" +# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' +# # [inputs.prometheus.consul.query.tags] +# # host = "{{.Node}}" +# # ## Use bearer token for authorization. ('bearer_token' takes priority) # # bearer_token = "/path/to/bearer/token" # ## OR @@ -7989,6 +8423,10 @@ # ## 1024. See README.md for details # ## # # service_address = "udp://:162" +# ## +# ## Path to mib files +# # path = ["/usr/share/snmp/mibs"] +# ## # ## Timeout running snmptranslate command # # timeout = "5s" # ## Snmp version, defaults to 2c @@ -8082,6 +8520,7 @@ # dsn = "username:password@mysqlserver:3307/dbname?param=value" # # ## Timeout for any operation +# ## Note that the timeout for queries is per query not per gather. # # timeout = "5s" # # ## Connection time limits @@ -8282,9 +8721,9 @@ # #max_ttl = "1000h" -# # Suricata stats plugin +# # Suricata stats and alerts plugin # [[inputs.suricata]] -# ## Data sink for Suricata stats log +# ## Data sink for Suricata stats and alerts logs # # This is expected to be a filename of a # # unix socket to be created for listening. # source = "/var/run/suricata-stats.sock" @@ -8292,6 +8731,9 @@ # # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" # # becomes "detect_alert" when delimiter is "_". # delimiter = "_" +# +# ## Detect alert logs +# # alerts = false # # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587 @@ -8336,6 +8778,11 @@ # ## By default best effort parsing is off. # # best_effort = false # +# ## The RFC standard to use for message parsing +# ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support) +# ## Must be one of "RFC5424", or "RFC3164". +# # syslog_standard = "RFC5424" +# # ## Character to prepend to SD-PARAMs (default = "_"). # ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. # ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] From 7c527f5a3facdb422c9605a254754780f307aad4 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 2 Sep 2021 16:15:04 -0600 Subject: [PATCH 594/761] disable scripts/check-file-changes.sh --- scripts/check-file-changes.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/check-file-changes.sh b/scripts/check-file-changes.sh index fa141afc4a23e..ad5848275216e 100755 --- a/scripts/check-file-changes.sh +++ b/scripts/check-file-changes.sh @@ -1,6 +1,8 @@ #!/bin/bash # To prevent the tests/builds to run for only a doc change, this script checks what files have changed in a pull request. +exit 0 + BRANCH="$(git rev-parse --abbrev-ref HEAD)" echo $BRANCH if [[ "$BRANCH" != "master" ]] && [[ "$BRANCH" != release* ]]; then # This should never skip for master and release branches From a1962dd9f1f8b1de4bc9f31f00b86f4c7bb8ff29 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 2 Sep 2021 16:17:41 -0600 Subject: [PATCH 595/761] Telegraf v1.20.0-rc0 From 38f38bd7941cc65478ed941cc54ec346d821f3f5 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 2 Sep 2021 14:24:26 -0700 Subject: [PATCH 596/761] docs: fix influxdb output readme (#9708) (cherry picked from commit 890508431c2692db8e5389461dc88c7575ea0873) --- plugins/outputs/influxdb/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index cd1b36a723aeb..36fde827e176a 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -85,8 +85,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser ``` ### Metrics - Reference the [influx serializer][] for details about metric production. - + [InfluxDB v1.x]: https://github.com/influxdata/influxdb [influx serializer]: /plugins/serializers/influx/README.md#Metrics From c01d9d692626081cbd49d4c2799c7fa85336b49e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20Bia=C5=82o=C5=84czyk?= Date: Tue, 7 Sep 2021 19:41:07 +0200 Subject: [PATCH 597/761] fix (inputs.ping): change edit to restart in README.md (#9728) (cherry picked from commit 0317d7c3db0d6f8da5f041dc1a4f83fa772bdb41) --- plugins/inputs/ping/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 5829d6bd07283..10744a9b15e99 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -100,7 +100,7 @@ LimitNOFILE=8192 Restart Telegraf: ```sh -$ systemctl edit telegraf +$ systemctl restart telegraf ``` #### Linux Permissions From b2dc1b3a4bd756d846fc8cfd6e3714062edf3ebb Mon Sep 17 00:00:00 2001 From: alrex Date: Tue, 7 Sep 2021 14:15:13 -0700 Subject: [PATCH 598/761] fix: outputs.opentelemetry use attributes setting (#9588) (cherry picked from commit 147e3d13891070015812029e7523a76be0a4c113) --- plugins/outputs/opentelemetry/opentelemetry.go | 8 ++++++++ plugins/outputs/opentelemetry/opentelemetry_test.go | 2 ++ 2 files changed, 10 insertions(+) diff --git a/plugins/outputs/opentelemetry/opentelemetry.go b/plugins/outputs/opentelemetry/opentelemetry.go index a25fe2ff8dae8..874eaba819418 100644 --- a/plugins/outputs/opentelemetry/opentelemetry.go +++ b/plugins/outputs/opentelemetry/opentelemetry.go @@ -160,6 +160,14 @@ func (o *OpenTelemetry) Write(metrics []telegraf.Metric) error { return nil } + if len(o.Attributes) > 0 { + for i := 0; i < md.ResourceMetrics().Len(); i++ { + for k, v := range o.Attributes { + md.ResourceMetrics().At(i).Resource().Attributes().UpsertString(k, v) + } + } + } + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.Timeout)) if len(o.Headers) > 0 { diff --git a/plugins/outputs/opentelemetry/opentelemetry_test.go b/plugins/outputs/opentelemetry/opentelemetry_test.go index cfafcd47fc10a..4ba3adbbb07d0 100644 --- a/plugins/outputs/opentelemetry/opentelemetry_test.go +++ b/plugins/outputs/opentelemetry/opentelemetry_test.go @@ -26,6 +26,7 @@ func TestOpenTelemetry(t *testing.T) { { rm := expect.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().InsertString("host.name", "potato") + rm.Resource().Attributes().InsertString("attr-key", "attr-val") ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() ilm.InstrumentationLibrary().SetName("My Library Name") m := ilm.Metrics().AppendEmpty() @@ -45,6 +46,7 @@ func TestOpenTelemetry(t *testing.T) { ServiceAddress: m.Address(), Timeout: config.Duration(time.Second), Headers: map[string]string{"test": "header1"}, + Attributes: map[string]string{"attr-key": "attr-val"}, metricsConverter: metricsConverter, grpcClientConn: m.GrpcClient(), metricsServiceClient: otlpgrpc.NewMetricsClient(m.GrpcClient()), From 208b8de0a7c1e6692f2ed292a43dc65fb1d6ba6c Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Tue, 7 Sep 2021 14:25:30 -0700 Subject: [PATCH 599/761] Fix memory leak in couchbase input (#9544) (cherry picked from commit ba1484cb75c9d0d820d843aab917bb9d349a84b0) --- plugins/inputs/couchbase/couchbase.go | 36 ++++++++++---------- plugins/inputs/couchbase/couchbase_test.go | 38 +++++++++------------- 2 files changed, 33 insertions(+), 41 deletions(-) diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index b62a7e970305d..7b99c76e6982c 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -53,7 +53,7 @@ func (cb *Couchbase) Description() string { // Returns one of the errors encountered while gathering stats (if any). func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { if len(cb.Servers) == 0 { - return cb.gatherServer(acc, "http://localhost:8091/", nil) + return cb.gatherServer(acc, "http://localhost:8091/") } var wg sync.WaitGroup @@ -61,7 +61,7 @@ func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(serv string) { defer wg.Done() - acc.AddError(cb.gatherServer(acc, serv, nil)) + acc.AddError(cb.gatherServer(acc, serv)) }(serv) } @@ -70,26 +70,26 @@ func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { return nil } -func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string, pool *couchbaseClient.Pool) error { - if pool == nil { - client, err := couchbaseClient.Connect(addr) - if err != nil { - return err - } +func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string) error { + escapedAddr := regexpURI.ReplaceAllString(addr, "${1}") - // `default` is the only possible pool name. It's a - // placeholder for a possible future Couchbase feature. See - // http://stackoverflow.com/a/16990911/17498. - p, err := client.GetPool("default") - if err != nil { - return err - } - pool = &p + client, err := couchbaseClient.Connect(addr) + if err != nil { + return err + } + + // `default` is the only possible pool name. It's a + // placeholder for a possible future Couchbase feature. See + // http://stackoverflow.com/a/16990911/17498. + pool, err := client.GetPool("default") + if err != nil { + return err } + defer pool.Close() for i := 0; i < len(pool.Nodes); i++ { node := pool.Nodes[i] - tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "hostname": node.Hostname} + tags := map[string]string{"cluster": escapedAddr, "hostname": node.Hostname} fields := make(map[string]interface{}) fields["memory_free"] = node.MemoryFree fields["memory_total"] = node.MemoryTotal @@ -97,7 +97,7 @@ func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string, pool *c } for bucketName := range pool.BucketMap { - tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "bucket": bucketName} + tags := map[string]string{"cluster": escapedAddr, "bucket": bucketName} bs := pool.BucketMap[bucketName].BasicStats fields := make(map[string]interface{}) cb.addBucketField(fields, "quota_percent_used", bs["quotaPercentUsed"]) diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index d8f6aa3ac3ad1..a739732458a51 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -8,42 +8,31 @@ import ( "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" - - "github.com/couchbase/go-couchbase" ) func TestGatherServer(t *testing.T) { bucket := "blastro-df" fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" { + if r.URL.Path == "/pools" { + _, _ = w.Write([]byte(poolsResponse)) + } else if r.URL.Path == "/pools/default" { + _, _ = w.Write([]byte(poolsDefaultResponse)) + } else if r.URL.Path == "/pools/default/buckets" { + _, _ = w.Write([]byte(bucketsResponse)) + } else if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" { _, _ = w.Write([]byte(bucketStatsResponse)) } else { w.WriteHeader(http.StatusNotFound) } })) - var pool couchbase.Pool - var err error - if err := json.Unmarshal([]byte(poolsDefaultResponse), &pool); err != nil { - t.Fatal("parse poolsDefaultResponse", err) - } - - if err := json.Unmarshal([]byte(bucketResponse), &pool.BucketMap); err != nil { - t.Fatal("parse bucketResponse", err) - } - - bucketStats := &BucketStats{} - if err := json.Unmarshal([]byte(bucketStatsResponse), bucketStats); err != nil { - t.Fatal("parse bucketResponse", err) - } - var cb Couchbase cb.BucketStatsIncluded = []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"} - err = cb.Init() + err := cb.Init() require.NoError(t, err) var acc testutil.Accumulator - err = cb.gatherServer(&acc, fakeServer.URL, &pool) + err = cb.gatherServer(&acc, fakeServer.URL) require.NoError(t, err) acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23181365248.0, "memory_total": 64424656896.0}, @@ -137,11 +126,14 @@ func TestGatherDetailedBucketMetrics(t *testing.T) { } } +// From `/pools` +const poolsResponse string = `{"pools":[{"name":"default","uri":"/pools/default"}]}` + // From `/pools/default` on a real cluster -const poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups?v=98656394","name":"default","alerts":["Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts?token=2814&uuid=2bec87861652b990cf6aa5c7ee58c253","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.878878878879,"vb_replica_curr_items":140153845},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_data_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":140170150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.31034482758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets?v=74117050&uuid=2bec87861652b990cf6aa5c7ee58c253","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/pools/default/remoteClusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"rebalance":{"uri":"/controller/rebalance?uuid=2bec87861652b990cf6aa5c7ee58c253"},"failOver":{"uri":"/controller/failOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reAddNode":{"uri":"/controller/reAddNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reFailOver":{"uri":"/controller/reFailOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"ejectNode":{"uri":"/controller/ejectNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setRecoveryType":{"uri":"/controller/setRecoveryType?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253","cancelURI":"/controller/cancelLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253"},"replication":{"createURI":"/controller/createReplication?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalance?uuid=2bec87861652b990cf6aa5c7ee58c253","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks?v=97479372"},"visualSettingsUri":"/internalSettings/visual?v=7111573","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` +const poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups","name":"default","alerts":["Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.878878878879,"vb_replica_curr_items":140153845},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_data_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":140170150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.31034482758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters","validateURI":"/pools/default/remoteClusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode"},"rebalance":{"uri":"/controller/rebalance"},"failOver":{"uri":"/controller/failOver"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover"},"reAddNode":{"uri":"/controller/reAddNode"},"reFailOver":{"uri":"/controller/reFailOver"},"ejectNode":{"uri":"/controller/ejectNode"},"setRecoveryType":{"uri":"/controller/setRecoveryType"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection","cancelURI":"/controller/cancelLogsCollection"},"replication":{"createURI":"/controller/createReplication","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalance","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks"},"visualSettingsUri":"/internalSettings/visual","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` -// From `/pools/default/buckets/blastro-df` on a real cluster -const bucketResponse string = `{"blastro-df": {"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}}` +// From `/pools/default/buckets` on a real cluster +const bucketsResponse string = `[{"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}]` const bucketStatsResponse string = `{"op":{"samples":{"couch_total_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341],"couch_docs_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"hit_ratio":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_cache_miss_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_resident_items_rate":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_avg_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_replica_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_pending_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"avg_disk_update_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_disk_commit_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_bg_wait_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_active_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_replica_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bytes_read":[118.1818181818182,142.2805247225025,180.8080808080808,197.7800201816347,141.9939577039275,118.5410334346505,142.4242424242424,148.4848484848485,197.3816717019134,202.4291497975709,118.0625630676085,142.4242424242424,179.6165489404642,197.979797979798,142.4242424242424,118.1818181818182,142.2805247225025,148.4848484848485,197.979797979798,201.816347124117,118.1818181818182,142.4242424242424,148.4848484848485,197.7800201816347,142.4242424242424,118.1818181818182,142.2805247225025,179.7979797979798,197.1830985915493,202.6342451874367,118.1818181818182,142.2805247225025,180.4435483870968,198.3805668016194,142.2805247225025,118.1818181818182,142.2805247225025,148.4848484848485,197.979797979798,202.020202020202,118.0625630676085,118.1818181818182,204.040404040404,197.7800201816347,142.1370967741935,118.4210526315789,118.1818181818182,172.5529767911201,197.5806451612903,202.4291497975709,118.0625630676085,118.1818181818182,172.7272727272727,197.7800201816347,142.4242424242424,118.0625630676085,118.1818181818182,204.040404040404,197.979797979798,201.816347124117],"bytes_written":[36420.20202020202,37762.86579212916,37225.25252525252,50460.14127144299,37686.80765357502,36530.90172239109,37801.0101010101,37111.11111111111,50358.50956696878,60511.13360323886,36383.45105953582,37801.0101010101,37393.54187689203,50511.11111111111,37801.0101010101,36420.20202020202,37762.86579212916,37111.11111111111,50511.11111111111,60327.95156407669,36420.20202020202,37801.0101010101,37111.11111111111,50460.14127144299,37801.0101010101,36420.20202020202,37762.86579212916,37431.31313131313,50307.84708249497,60572.44174265451,36420.20202020202,37762.86579212916,37150.20161290323,50613.36032388664,37762.86579212916,36420.20202020202,37762.86579212916,37111.11111111111,50511.11111111111,60388.88888888889,36383.45105953582,36420.20202020202,38812.12121212122,50460.14127144299,37724.79838709677,36493.92712550607,36420.20202020202,38453.07769929364,50409.27419354839,60511.13360323886,36383.45105953582,36420.20202020202,38491.91919191919,50460.14127144299,37801.0101010101,36383.45105953582,36420.20202020202,38812.12121212122,50511.11111111111,60327.95156407669],"cas_badval":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_get":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_lookup":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_set":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_docs_actual_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341],"couch_docs_data_size":[531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373],"couch_docs_disk_size":[531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373],"couch_spatial_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_actual_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"curr_connections":[14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14],"curr_items":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"curr_items_tot":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"decr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"decr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_write_queue":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_bg_fetched":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_clock_cas_drift_threshold_exceeded":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_read_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_write_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_flusher_todo":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_item_commit_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_kv_size":[10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340],"ep_max_size":[8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032],"ep_mem_high_wat":[7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627],"ep_mem_low_wat":[6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024],"ep_meta_data_memory":[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68],"ep_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_get_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_value_ejects":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_overhead":[403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824],"ep_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_tmp_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_vb_total":[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64],"evictions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_used":[4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016],"misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"timestamp":[1615918120012,1615918121003,1615918121993,1615918122984,1615918123977,1615918124964,1615918125954,1615918126944,1615918127937,1615918128925,1615918129916,1615918130906,1615918131897,1615918132887,1615918133877,1615918134867,1615918135858,1615918136848,1615918137838,1615918138829,1615918139819,1615918140809,1615918141799,1615918142790,1615918143780,1615918144770,1615918145761,1615918146751,1615918147745,1615918148732,1615918149722,1615918150713,1615918151705,1615918152693,1615918153684,1615918154674,1615918155665,1615918156655,1615918157645,1615918158635,1615918159626,1615918160616,1615918161606,1615918162597,1615918163589,1615918164577,1615918165567,1615918166558,1615918167550,1615918168538,1615918169529,1615918170519,1615918171509,1615918172500,1615918173490,1615918174481,1615918175471,1615918176461,1615918177451,1615918178442],"vb_active_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_itm_memory":[88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88],"vb_active_meta_data_memory":[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68],"vb_active_num":[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64],"vb_active_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_aborted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_accepted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_committed_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"xdc_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"allocstall":[18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615],"cpu_cores_available":[12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12],"cpu_irq_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_stolen_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_sys_rate":[4.942965779467681,5.243268776570619,6.823027718550106,4.815073272854153,4.853128991060026,5.068836045056321,4.983108108108108,4.110738255033557,3.201347935973041,3.959561920808762,3.610411418975651,3.459915611814346,3.691275167785235,4.553119730185498,6.470588235294118,4.545454545454546,5.046257359125315,5.976430976430977,5.564924114671164,3.703703703703704,3.529411764705882,3.544303797468354,3.826787512588117,5.118961788031723,7.166947723440135,5.87248322147651,4.289318755256518,5.485232067510548,4.765886287625418,4.672897196261682,4.184100418410042,4.560810810810811,7.02928870292887,6.081081081081081,5.378151260504202,6.239460370994941,8.984047019311502,6.896551724137931,9.636517328825022,9.335576114381833,7.64063811922754,8.684654300168635,6.543624161073826,6.465155331654072,5.961376994122586,3.807106598984772,3.36417157275021,3.700588730025231,3.775167785234899,9.45945945945946,3.114478114478115,3.451178451178451,4.465037910699242,3.852596314907873,3.462837837837838,5.205709487825357,5.218855218855219,6.532663316582915,5.885057471264368,4.030226700251889],"cpu_user_rate":[15.20912547528517,9.58904109589041,10.76759061833689,8.443824145150035,8.301404853128991,10.95118898623279,9.797297297297296,6.879194630872483,6.823925863521483,6.908171861836562,6.54911838790932,6.835443037974684,7.382550335570469,10.28667790893761,16.97478991596639,11.53198653198653,9.75609756097561,11.11111111111111,12.05733558178752,7.154882154882155,6.890756302521009,6.666666666666667,7.150050352467271,10.23792357606345,12.7318718381113,9.479865771812081,7.905803195962994,8.016877637130802,9.19732441471572,9.600679694137638,7.364016736401673,8.108108108108109,15.31380753138075,13.85135135135135,10.58823529411765,12.64755480607083,18.47187237615449,13.28847771236333,19.8647506339814,21.86711522287637,23.5936188077246,22.17537942664418,12.08053691275168,16.96053736356003,32.49370277078086,8.20642978003384,10.17661900756939,7.653490328006728,10.82214765100671,14.27364864864865,6.986531986531986,7.407407407407407,10.02527379949452,11.55778894472362,8.192567567567568,12.34256926952141,14.05723905723906,28.64321608040201,13.14942528735632,7.388748950461797],"cpu_utilization_rate":[20.15209125475285,14.83230987246103,17.59061833688699,13.25889741800419,13.15453384418902,16.02002503128911,14.78040540540541,10.98993288590604,10.02527379949452,10.86773378264532,10.15952980688497,10.29535864978903,11.0738255033557,14.8397976391231,23.4453781512605,16.07744107744108,14.80235492010092,17.08754208754209,17.62225969645868,10.85858585858586,10.42016806722689,10.21097046413502,10.97683786505539,15.35688536409517,19.89881956155143,15.35234899328859,12.19512195121951,13.50210970464135,13.96321070234114,14.27357689039932,11.54811715481171,12.66891891891892,22.34309623430962,19.93243243243243,15.96638655462185,18.88701517706577,27.45591939546599,20.18502943650126,29.50126796280642,31.2026913372582,31.23425692695214,30.86003372681282,18.6241610738255,23.42569269521411,38.45507976490345,12.01353637901861,13.5407905803196,11.35407905803196,14.59731543624161,23.73310810810811,10.1010101010101,10.85858585858586,14.49031171019377,15.41038525963149,11.65540540540541,17.54827875734677,19.27609427609428,35.17587939698493,19.03448275862069,11.41897565071369],"hibernated_requests":[7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7],"hibernated_waked":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_actual_free":[7004864512,6998364160,7056683008,7055605760,7059243008,7078457344,7079067648,7079514112,7078977536,7088099328,7091081216,7091773440,7091589120,7080108032,7073554432,7073914880,7080144896,7065124864,7063183360,7072677888,7073767424,7073542144,7073542144,7074902016,7053836288,7050895360,7055720448,7056822272,7057281024,7053025280,7052763136,7051984896,7049113600,7040618496,7045636096,7050907648,7021027328,7001329664,6985895936,6985895936,6955642880,7059750912,7058616320,7050067968,7047163904,7045873664,7050272768,7068528640,7073677312,7079116800,7078252544,7075880960,7065079808,7066251264,7065726976,7063486464,7064797184,7066206208,7068819456,7071809536],"mem_actual_used":[10175004672,10181505024,10123186176,10124263424,10120626176,10101411840,10100801536,10100355072,10100891648,10091769856,10088787968,10088095744,10088280064,10099761152,10106314752,10105954304,10099724288,10114744320,10116685824,10107191296,10106101760,10106327040,10106327040,10104967168,10126032896,10128973824,10124148736,10123046912,10122588160,10126843904,10127106048,10127884288,10130755584,10139250688,10134233088,10128961536,10158841856,10178539520,10193973248,10193973248,10224226304,10120118272,10121252864,10129801216,10132705280,10133995520,10129596416,10111340544,10106191872,10100752384,10101616640,10103988224,10114789376,10113617920,10114142208,10116382720,10115072000,10113662976,10111049728,10108059648],"mem_free":[7004864512,6998364160,7056683008,7055605760,7059243008,7078457344,7079067648,7079514112,7078977536,7088099328,7091081216,7091773440,7091589120,7080108032,7073554432,7073914880,7080144896,7065124864,7063183360,7072677888,7073767424,7073542144,7073542144,7074902016,7053836288,7050895360,7055720448,7056822272,7057281024,7053025280,7052763136,7051984896,7049113600,7040618496,7045636096,7050907648,7021027328,7001329664,6985895936,6985895936,6955642880,7059750912,7058616320,7050067968,7047163904,7045873664,7050272768,7068528640,7073677312,7079116800,7078252544,7075880960,7065079808,7066251264,7065726976,7063486464,7064797184,7066206208,7068819456,7071809536],"mem_limit":[17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184],"mem_total":[17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184],"mem_used_sys":[16694517760,16707862528,16608030720,16610041856,16604663808,16553811968,16553463808,16553369600,16553861120,16539238400,16536092672,16535760896,16535707648,16553418752,16559439872,16558895104,16554569728,16580468736,16582680576,16565084160,16564649984,16565272576,16565272576,16562823168,16599863296,16602157056,16597528576,16596774912,16595107840,16593002496,16593485824,16596668416,16598691840,16607469568,16599904256,16590753792,16644947968,16684613632,16714768384,16714768384,16781234176,16573353984,16575979520,16593072128,16603037696,16605077504,16599199744,16581554176,16570187776,16560140288,16561221632,16565153792,16577990656,16577200128,16578031616,16582909952,16569671680,16565702656,16560218112,16554315776],"odp_report_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"rest_requests":[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,8,2,2,2,2,2,2,2,2,3,2,2,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,2,2,2,2,2],"swap_total":[1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824],"swap_used":[122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392]},"samplesCount":60,"isPersistent":true,"lastTStamp":1615918178442,"interval":1000},"hot_keys":[{"name":"first-duck","ops":6.003482019571351e-05}]}` const bucketStatsResponseWithMissing string = `{"op":{"samples":{"couch_total_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341]},"samplesCount":60,"isPersistent":true,"lastTStamp":1615918178442,"interval":1000},"hot_keys":[{"name":"first-duck","ops":6.003482019571351e-05}]}` From 0083cc5ea8799299baa7f1cbc24f5df0fe6d248b Mon Sep 17 00:00:00 2001 From: Daniel Dyla Date: Wed, 8 Sep 2021 14:31:42 -0400 Subject: [PATCH 600/761] feat(dynatrace-output): remove special handling from counters (#9675) Co-authored-by: Armin Ruech (cherry picked from commit 95ef67445668010841a6ed70140fded0b472cd94) --- go.mod | 2 +- go.sum | 4 +- plugins/outputs/dynatrace/README.md | 8 +- plugins/outputs/dynatrace/dynatrace.go | 24 +++-- plugins/outputs/dynatrace/dynatrace_test.go | 102 +++++++++----------- 5 files changed, 67 insertions(+), 73 deletions(-) diff --git a/go.mod b/go.mod index 8dd6c8f7a6fc4..c133b72dda361 100644 --- a/go.mod +++ b/go.mod @@ -85,7 +85,7 @@ require ( github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 - github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0 + github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0 github.com/eapache/go-resiliency v1.2.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect diff --git a/go.sum b/go.sum index 1d373bad3ce34..01266f3e9cbf7 100644 --- a/go.sum +++ b/go.sum @@ -507,8 +507,8 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0 h1:TEG5Jj7RYM2JBCUH3nLqCmSZy6srnaefvXxjUTCuHyA= -github.com/dynatrace-oss/dynatrace-metric-utils-go v0.2.0/go.mod h1:qw0E9EJ0PnSlhWawDNuqE0zhc1hqOBUCFIAj3dd9DNw= +github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0 h1:q2Ayh9s6Cr75bS5URiOUAoyFXemgKQaBJphbhAaJHCY= +github.com/dynatrace-oss/dynatrace-metric-utils-go v0.3.0/go.mod h1:qw0E9EJ0PnSlhWawDNuqE0zhc1hqOBUCFIAj3dd9DNw= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= diff --git a/plugins/outputs/dynatrace/README.md b/plugins/outputs/dynatrace/README.md index 666f821f6356c..f25b8708942d6 100644 --- a/plugins/outputs/dynatrace/README.md +++ b/plugins/outputs/dynatrace/README.md @@ -2,10 +2,12 @@ This plugin sends Telegraf metrics to [Dynatrace](https://www.dynatrace.com) via the [Dynatrace Metrics API V2](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/metric-v2/). It may be run alongside the Dynatrace OneAgent for automatic authentication or it may be run standalone on a host without a OneAgent by specifying a URL and API Token. More information on the plugin can be found in the [Dynatrace documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/ingestion-methods/telegraf/). +All metrics are reported as gauges, unless they are specified to be delta counters using the `additional_counters` config option (see below). +See the [Dynatrace Metrics ingestion protocol documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol) for details on the types defined there. ## Requirements -You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf; or a Dynatrace environment with version 1.202 or higher. Monotonic counters (e.g. `diskio.reads`, `system.uptime`) require Dynatrace 208 or later. +You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf; or a Dynatrace environment with version 1.202 or higher. - Telegraf minimum version: Telegraf 1.16 @@ -65,7 +67,7 @@ You can learn more about how to use the Dynatrace API [here](https://www.dynatra prefix = "telegraf" ## Flag for skipping the tls certificate check, just for testing purposes, should be false by default insecure_skip_verify = false - ## If you want to convert values represented as gauges to counters, add the metric names here + ## If you want metrics to be treated and reported as delta counters, add the metric names here additional_counters = [ ] ## Optional dimensions to be added to every metric @@ -119,7 +121,7 @@ insecure_skip_verify = false *required*: `false` -If you want to convert values represented as gauges to counters, add the metric names here. +If you want a metric to be treated and reported as a delta counter, add its name to this list. ```toml additional_counters = [ ] diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 470eb0e2cd0c6..11796e8e12994 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -69,7 +69,7 @@ const sampleConfig = ` ## Connection timeout, defaults to "5s" if not set. timeout = "5s" - ## If you want to convert values represented as gauges to counters, add the metric names here + ## If you want metrics to be treated and reported as delta counters, add the metric names here additional_counters = [ ] ## Optional dimensions to be added to every metric @@ -122,16 +122,10 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { dims = append(dims, dimensions.NewDimension(tag.Key, tag.Value)) } - metricType := tm.Type() for _, field := range tm.FieldList() { metricName := tm.Name() + "." + field.Key - for _, i := range d.AddCounterMetrics { - if metricName == i { - metricType = telegraf.Counter - } - } - typeOpt := getTypeOption(metricType, field) + typeOpt := d.getTypeOption(tm, field) if typeOpt == nil { // Unsupported type. Log only once per unsupported metric name @@ -267,15 +261,19 @@ func init() { }) } -func getTypeOption(metricType telegraf.ValueType, field *telegraf.Field) dtMetric.MetricOption { - if metricType == telegraf.Counter { +func (d *Dynatrace) getTypeOption(metric telegraf.Metric, field *telegraf.Field) dtMetric.MetricOption { + metricName := metric.Name() + "." + field.Key + for _, i := range d.AddCounterMetrics { + if metricName != i { + continue + } switch v := field.Value.(type) { case float64: - return dtMetric.WithFloatCounterValueTotal(v) + return dtMetric.WithFloatCounterValueDelta(v) case uint64: - return dtMetric.WithIntCounterValueTotal(int64(v)) + return dtMetric.WithIntCounterValueDelta(int64(v)) case int64: - return dtMetric.WithIntCounterValueTotal(v) + return dtMetric.WithIntCounterValueDelta(v) default: return nil } diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index 65cd3d2a86f0a..c3cb091cbf549 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -2,10 +2,13 @@ package dynatrace import ( "encoding/json" + "fmt" "io/ioutil" "net/http" "net/http/httptest" "regexp" + "sort" + "strings" "testing" "time" @@ -123,26 +126,37 @@ func TestMissingAPIToken(t *testing.T) { } func TestSendMetrics(t *testing.T) { + expected := []string{} + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result bodyBytes, err := ioutil.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield,dt.metrics.source=telegraf gauge,3.14 1289430000000\nmymeasurement.value,dt.metrics.source=telegraf count,3.14 1289430000000" - if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expected, bodyString) + + lines := strings.Split(bodyString, "\n") + + sort.Strings(lines) + sort.Strings(expected) + + expectedString := strings.Join(expected, "\n") + foundString := strings.Join(lines, "\n") + if foundString != expectedString { + t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expectedString, foundString) } w.WriteHeader(http.StatusOK) - err = json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + err = json.NewEncoder(w).Encode(fmt.Sprintf(`{"linesOk":%d,"linesInvalid":0,"error":null}`, len(lines))) require.NoError(t, err) })) defer ts.Close() - d := &Dynatrace{} + d := &Dynatrace{ + URL: ts.URL, + APIToken: "123", + Log: testutil.Logger{}, + AddCounterMetrics: []string{}, + } - d.URL = ts.URL - d.APIToken = "123" - d.Log = testutil.Logger{} err := d.Init() require.NoError(t, err) err = d.Connect() @@ -150,22 +164,43 @@ func TestSendMetrics(t *testing.T) { // Init metrics + // Simple metrics are exported as a gauge unless in additional_counters + expected = append(expected, "simple_metric.value,dt.metrics.source=telegraf gauge,3.14 1289430000000") + expected = append(expected, "simple_metric.counter,dt.metrics.source=telegraf count,delta=5 1289430000000") + d.AddCounterMetrics = append(d.AddCounterMetrics, "simple_metric.counter") m1 := metric.New( - "mymeasurement", + "simple_metric", map[string]string{}, - map[string]interface{}{"myfield": float64(3.14)}, + map[string]interface{}{"value": float64(3.14), "counter": 5}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) + // Even if Type() returns counter, all metrics are treated as a gauge unless explicitly added to additional_counters + expected = append(expected, "counter_type.value,dt.metrics.source=telegraf gauge,3.14 1289430000000") + expected = append(expected, "counter_type.counter,dt.metrics.source=telegraf count,delta=5 1289430000000") + d.AddCounterMetrics = append(d.AddCounterMetrics, "counter_type.counter") m2 := metric.New( - "mymeasurement", + "counter_type", map[string]string{}, - map[string]interface{}{"value": float64(3.14)}, + map[string]interface{}{"value": float64(3.14), "counter": 5}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), telegraf.Counter, ) - metrics := []telegraf.Metric{m1, m2} + expected = append(expected, "complex_metric.int,dt.metrics.source=telegraf gauge,1 1289430000000") + expected = append(expected, "complex_metric.int64,dt.metrics.source=telegraf gauge,2 1289430000000") + expected = append(expected, "complex_metric.float,dt.metrics.source=telegraf gauge,3 1289430000000") + expected = append(expected, "complex_metric.float64,dt.metrics.source=telegraf gauge,4 1289430000000") + expected = append(expected, "complex_metric.true,dt.metrics.source=telegraf gauge,1 1289430000000") + expected = append(expected, "complex_metric.false,dt.metrics.source=telegraf gauge,0 1289430000000") + m3 := metric.New( + "complex_metric", + map[string]string{}, + map[string]interface{}{"int": 1, "int64": int64(2), "float": 3.0, "float64": float64(4.0), "true": true, "false": false}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1, m2, m3} err = d.Write(metrics) require.NoError(t, err) @@ -475,47 +510,6 @@ func TestStaticDimensionsOverrideMetric(t *testing.T) { require.NoError(t, err) } -func TestSendCounterMetricWithoutTags(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) - require.NoError(t, err) - bodyString := string(bodyBytes) - expected := "mymeasurement.value,dt.metrics.source=telegraf gauge,32 1289430000000" - if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expected, bodyString) - } - err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) - require.NoError(t, err) - })) - defer ts.Close() - - d := &Dynatrace{} - - d.URL = ts.URL - d.APIToken = "123" - d.Log = testutil.Logger{} - err := d.Init() - require.NoError(t, err) - err = d.Connect() - require.NoError(t, err) - - // Init metrics - - m1 := metric.New( - "mymeasurement", - map[string]string{}, - map[string]interface{}{"value": 32}, - time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), - ) - - metrics := []telegraf.Metric{m1} - - err = d.Write(metrics) - require.NoError(t, err) -} - var warnfCalledTimes int type loggerStub struct { From c87e8555f086734d6eddd3be149c28515620272d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Sep 2021 12:38:59 -0500 Subject: [PATCH 601/761] fix: bump github.com/Azure/go-autorest/autorest/adal from 0.9.10->0.9.15 (cherry picked from commit 317ee71c325385872a475eac9330d336f13a6378) --- docs/LICENSE_OF_DEPENDENCIES.md | 2 +- go.mod | 4 ++-- go.sum | 6 ++++-- plugins/outputs/azure_monitor/README.md | 6 +++--- plugins/outputs/azure_monitor/azure_monitor_test.go | 8 +++++++- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 46f8e5ff32793..c50b0ea3f3f9f 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -85,7 +85,7 @@ following works: - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) - github.com/gogo/googleapis [Apache License 2.0](https://github.com/gogo/googleapis/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) -- github.com/golang-jwt/jwt [MIT License](https://github.com/golang-jwt/jwt/blob/master/LICENSE) +- github.com/golang-jwt/jwt [MIT License](https://github.com/golang-jwt/jwt/blob/main/LICENSE) - github.com/golang-sql/civil [Apache License 2.0](https://github.com/golang-sql/civil/blob/master/LICENSE) - github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE) - github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE) diff --git a/go.mod b/go.mod index c133b72dda361..54d8eea5e8782 100644 --- a/go.mod +++ b/go.mod @@ -19,13 +19,13 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.17 - github.com/Azure/go-autorest/autorest/adal v0.9.10 + github.com/Azure/go-autorest/autorest/adal v0.9.15 github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect - github.com/Azure/go-autorest/logger v0.2.0 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/BurntSushi/toml v0.3.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee diff --git a/go.sum b/go.sum index 01266f3e9cbf7..1cc02dab3cf01 100644 --- a/go.sum +++ b/go.sum @@ -98,8 +98,9 @@ github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMl github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.10 h1:r6fZHMaHD8B6LDCn0o5vyBFHIHrM6Ywwx7mb49lPItI= github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= +github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 h1:cgiBtUxatlt/e3qY6fQJioqbocWHr5osz259MomF5M0= github.com/Azure/go-autorest/autorest/azure/auth v0.5.6/go.mod h1:nYlP+G+n8MhD5CjIi6W8nFTIJn/PnTHes5nUbK6BxD0= @@ -121,8 +122,9 @@ github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsI github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= diff --git a/plugins/outputs/azure_monitor/README.md b/plugins/outputs/azure_monitor/README.md index 6f2abb97ec3ed..9d835c1eb6f4b 100644 --- a/plugins/outputs/azure_monitor/README.md +++ b/plugins/outputs/azure_monitor/README.md @@ -40,7 +40,7 @@ written as a dimension on each Azure Monitor metric. ## The Azure Resource ID against which metric will be logged, e.g. ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" # resource_id = "" - + ## Optionally, if in Azure US Government, China, or other sovereign ## cloud environment, set the appropriate REST endpoint for receiving ## metrics. (Note: region may be unused in this context) @@ -76,7 +76,7 @@ preferred authentication methods are different from the *order* in which each authentication is checked. Here are the preferred authentication methods: 1. Managed Service Identity (MSI) token - - This is the prefered authentication method. Telegraf will automatically + - This is the preferred authentication method. Telegraf will automatically authenticate using this method when running on Azure VMs. 2. AAD Application Tokens (Service Principals) - Primarily useful if Telegraf is writing metrics for other resources. @@ -132,7 +132,7 @@ authenticate when running Telegraf on Azure VMs. Azure Monitor only accepts values with a numeric type. The plugin will drop fields with a string type by default. The plugin can set all string type fields as extra dimensions in the Azure Monitor custom metric by setting the -configuration option `strings_as_dimensions` to `true`. +configuration option `strings_as_dimensions` to `true`. Keep in mind, Azure Monitor allows a maximum of 10 dimensions per metric. The plugin will deterministically dropped any dimensions that exceed the 10 diff --git a/plugins/outputs/azure_monitor/azure_monitor_test.go b/plugins/outputs/azure_monitor/azure_monitor_test.go index c702f46b0e0b5..803b0441af207 100644 --- a/plugins/outputs/azure_monitor/azure_monitor_test.go +++ b/plugins/outputs/azure_monitor/azure_monitor_test.go @@ -6,10 +6,12 @@ import ( "encoding/json" "net/http" "net/http/httptest" + "os" "testing" "time" "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -209,7 +211,11 @@ func TestAggregate(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.plugin.Connect() + msiEndpoint, err := adal.GetMSIVMEndpoint() + require.NoError(t, err) + + os.Setenv("MSI_ENDPOINT", msiEndpoint) + err = tt.plugin.Connect() require.NoError(t, err) // Reset globals From 239d43bb36d3443aa2221f84899e41c7709f9366 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Tue, 14 Sep 2021 15:31:44 +0200 Subject: [PATCH 602/761] fix: Update gopcua library to latest version (#9560) (cherry picked from commit 3c27f598bb909adf6d12ba9cbd6a135d263e9615) --- go.mod | 2 +- go.sum | 4 ++-- plugins/inputs/opcua/opcua_client.go | 8 +++++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 54d8eea5e8782..a05a8f5faeeaa 100644 --- a/go.mod +++ b/go.mod @@ -121,7 +121,7 @@ require ( github.com/google/uuid v1.2.0 // indirect github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/googleapis/gnostic v0.4.1 // indirect - github.com/gopcua/opcua v0.1.13 + github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 github.com/gorilla/mux v1.7.3 github.com/gorilla/websocket v1.4.2 github.com/gosnmp/gosnmp v1.32.0 diff --git a/go.sum b/go.sum index 1cc02dab3cf01..b8002a55b2b33 100644 --- a/go.sum +++ b/go.sum @@ -819,8 +819,8 @@ github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTV github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gopcua/opcua v0.1.13 h1:UP746MKRFNbv+CQGfrPwgH7rGxOlSGzVu9ieZdcox4E= -github.com/gopcua/opcua v0.1.13/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= +github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 h1:OtFKr0Kwe1oLpMR+uNMh/DPgC5fxAq4xRe6HBv8LDqQ= +github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index 53454ba8816a7..8dec41eb343e3 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -406,10 +406,10 @@ func Connect(o *OpcUA) error { o.state = Connecting if o.client != nil { - if err := o.client.CloseSession(); err != nil { + if err := o.client.Close(); err != nil { // Only log the error but to not bail-out here as this prevents // reconnections for multiple parties (see e.g. #9523). - o.Log.Errorf("Closing session failed: %v", err) + o.Log.Errorf("Closing connection failed: %v", err) } } @@ -445,8 +445,10 @@ func Connect(o *OpcUA) error { } func (o *OpcUA) setupOptions() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.ConnectTimeout)) + defer cancel() // Get a list of the endpoints for our target server - endpoints, err := opcua.GetEndpoints(o.Endpoint) + endpoints, err := opcua.GetEndpoints(ctx, o.Endpoint) if err != nil { return err } From e1de0042f1df2aa50f51dba70d8f8618d340973c Mon Sep 17 00:00:00 2001 From: Jacob Marble Date: Tue, 14 Sep 2021 11:07:28 -0700 Subject: [PATCH 603/761] chore: update influxdb-observability for OpenTelemetry plugins (#9718) (cherry picked from commit a3454be2d884dce2b1a2eabc019c53a7df2a6ffa) --- go.mod | 10 +++++----- go.sum | 19 ++++++++++--------- .../outputs/opentelemetry/opentelemetry.go | 2 ++ .../opentelemetry/opentelemetry_test.go | 4 ++-- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index a05a8f5faeeaa..1558135cd9a3f 100644 --- a/go.mod +++ b/go.mod @@ -140,9 +140,9 @@ require ( github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/serf v0.9.5 // indirect github.com/influxdata/go-syslog/v3 v3.0.0 - github.com/influxdata/influxdb-observability/common v0.2.4 - github.com/influxdata/influxdb-observability/influx2otel v0.2.4 - github.com/influxdata/influxdb-observability/otel2influx v0.2.4 + github.com/influxdata/influxdb-observability/common v0.2.7 + github.com/influxdata/influxdb-observability/influx2otel v0.2.7 + github.com/influxdata/influxdb-observability/otel2influx v0.2.7 github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 @@ -269,7 +269,7 @@ require ( go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.mongodb.org/mongo-driver v1.5.3 go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c + go.opentelemetry.io/collector/model v0.35.0 go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect @@ -290,7 +290,7 @@ require ( google.golang.org/api v0.54.0 google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20210813162853-db860fec028c - google.golang.org/grpc v1.39.1 + google.golang.org/grpc v1.40.0 google.golang.org/protobuf v1.27.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/djherbis/times.v1 v1.2.0 diff --git a/go.sum b/go.sum index b8002a55b2b33..a0e2e9e2bdd7d 100644 --- a/go.sum +++ b/go.sum @@ -924,12 +924,12 @@ github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmc github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I= github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q= github.com/influxdata/influxdb v1.8.2/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= -github.com/influxdata/influxdb-observability/common v0.2.4 h1:GMycMMB0IMLzStLVgWIRJ4UFP5x5JOfITTRryL1dpgQ= -github.com/influxdata/influxdb-observability/common v0.2.4/go.mod h1:xbwEYfQLQIHnmcLQL8vniaZ1aEHI5D0K5Y6afiV5Wmo= -github.com/influxdata/influxdb-observability/influx2otel v0.2.4 h1:23qw/xv9ke6LIYo0/pNLhiS9bqlrkx2YiU3SNUKLxts= -github.com/influxdata/influxdb-observability/influx2otel v0.2.4/go.mod h1:WnBBHlTEB/orMD3io5TX8EZEnKryNviUbdlLhWwcqo0= -github.com/influxdata/influxdb-observability/otel2influx v0.2.4 h1:wDLEz/JxGXRJdmU9wT7YwslEaU6la27/Qs4f3a9VPhI= -github.com/influxdata/influxdb-observability/otel2influx v0.2.4/go.mod h1:HniEElFGVVs0KgHCjU/iIv6PFFvpicaLKd72PlCqn1o= +github.com/influxdata/influxdb-observability/common v0.2.7 h1:C+oDh8Kbw+Ykx9yog/uJXL27rwMN3hgTLQfAFg1eQO0= +github.com/influxdata/influxdb-observability/common v0.2.7/go.mod h1:+8VMGrfWZnXjc1c/oP+N4O/sHoneWgN3ojAHwgYgV4A= +github.com/influxdata/influxdb-observability/influx2otel v0.2.7 h1:YIXH+qNQgAtTA5U3s/wxDxxh5Vz+ylhZhyuRxtfTBqs= +github.com/influxdata/influxdb-observability/influx2otel v0.2.7/go.mod h1:ASyDMoPChvIgbEOvghwc5NxngOgXThp9MFKs7efNLtQ= +github.com/influxdata/influxdb-observability/otel2influx v0.2.7 h1:FACov3tcGCKfEGXsyUbgUOQx3zXffXaCFbN3ntAzh1E= +github.com/influxdata/influxdb-observability/otel2influx v0.2.7/go.mod h1:tE3OSy4RyAHIjxYlFZBsWorEM3aqaUeqSx3mbacm8KI= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= @@ -1616,8 +1616,8 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c h1:3s2a2cav7u4W1b0cOYxmlj1y1NcVuDZwgUaAQ6wfImo= -go.opentelemetry.io/collector/model v0.0.0-20210723184018-3b7d6ce4830c/go.mod h1:PcHNnM+RUl0uD8VkSn93PO78N7kQYhfqpI/eki57pl4= +go.opentelemetry.io/collector/model v0.35.0 h1:NpKjghiqlei4ecwjOYOMhD6tj4gY8yiWHPJmbFs/ArI= +go.opentelemetry.io/collector/model v0.35.0/go.mod h1:+7YCSjJG+MqiIFjauzt7oM2qkqBsaJWh5hcsO4fwsAc= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.starlark.net v0.0.0-20210406145628-7a1108eaa012 h1:4RGobP/iq7S22H0Bb92OEt+M8/cfBQnW+T+a2MC0sQo= go.starlark.net v0.0.0-20210406145628-7a1108eaa012/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= @@ -2190,8 +2190,9 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1 h1:f37vZbBVTiJ6jKG5mWz8ySOBxNqy6ViPgyhSdVnxF3E= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/plugins/outputs/opentelemetry/opentelemetry.go b/plugins/outputs/opentelemetry/opentelemetry.go index 874eaba819418..e1bbc9322e759 100644 --- a/plugins/outputs/opentelemetry/opentelemetry.go +++ b/plugins/outputs/opentelemetry/opentelemetry.go @@ -13,6 +13,8 @@ import ( "go.opentelemetry.io/collector/model/otlpgrpc" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + // This causes the gRPC library to register gzip compression. + _ "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/metadata" ) diff --git a/plugins/outputs/opentelemetry/opentelemetry_test.go b/plugins/outputs/opentelemetry/opentelemetry_test.go index 4ba3adbbb07d0..6ebf1829bd540 100644 --- a/plugins/outputs/opentelemetry/opentelemetry_test.go +++ b/plugins/outputs/opentelemetry/opentelemetry_test.go @@ -33,9 +33,9 @@ func TestOpenTelemetry(t *testing.T) { m.SetName("cpu_temp") m.SetDataType(pdata.MetricDataTypeGauge) dp := m.Gauge().DataPoints().AppendEmpty() - dp.LabelsMap().Insert("foo", "bar") + dp.Attributes().InsertString("foo", "bar") dp.SetTimestamp(pdata.Timestamp(1622848686000000000)) - dp.SetValue(87.332) + dp.SetDoubleVal(87.332) } m := newMockOtelService(t) t.Cleanup(m.Cleanup) From d0dd23620496a0715b7b025a8170984bc28bd0fb Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 14 Sep 2021 13:56:49 -0700 Subject: [PATCH 604/761] fix(inputs.tail): change test default watch method to poll when Win (cherry picked from commit cfd50de57c20f499a5b964f95374466951e3bcbe) --- plugins/inputs/tail/tail_test.go | 107 +++++++++++++++---------------- 1 file changed, 52 insertions(+), 55 deletions(-) diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index b855691e6f1ab..16c38519a83b6 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -450,89 +450,86 @@ func TestCharacterEncoding(t *testing.T) { ), } + watchMethod := defaultWatchMethod + if runtime.GOOS == "windows" { + watchMethod = "poll" + } + tests := []struct { - name string - plugin *Tail - offset int64 - expected []telegraf.Metric + name string + testfiles string + fromBeginning bool + characterEncoding string + offset int64 + expected []telegraf.Metric }{ { - name: "utf-8", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-8.influx")}, - FromBeginning: true, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-8", - }, - expected: full, + name: "utf-8", + testfiles: "cpu-utf-8.influx", + fromBeginning: true, + characterEncoding: "utf-8", + expected: full, }, { - name: "utf-8 seek", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-8.influx")}, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-8", - }, - offset: 0x33, - expected: full[1:], + name: "utf-8 seek", + testfiles: "cpu-utf-8.influx", + characterEncoding: "utf-8", + offset: 0x33, + expected: full[1:], }, { - name: "utf-16le", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-16le.influx")}, - FromBeginning: true, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-16le", - }, - expected: full, + name: "utf-16le", + testfiles: "cpu-utf-16le.influx", + fromBeginning: true, + characterEncoding: "utf-16le", + expected: full, }, { - name: "utf-16le seek", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-16le.influx")}, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-16le", - }, - offset: 0x68, - expected: full[1:], + name: "utf-16le seek", + testfiles: "cpu-utf-16le.influx", + characterEncoding: "utf-16le", + offset: 0x68, + expected: full[1:], }, { - name: "utf-16be", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-16be.influx")}, - FromBeginning: true, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-16be", - }, - expected: full, + name: "utf-16be", + testfiles: "cpu-utf-16be.influx", + fromBeginning: true, + characterEncoding: "utf-16be", + expected: full, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tt.plugin.SetParserFunc(func() (parsers.Parser, error) { + + plugin := &Tail{ + Files: []string{filepath.Join(testdataDir, tt.testfiles)}, + FromBeginning: tt.fromBeginning, + MaxUndeliveredLines: 1000, + Log: testutil.Logger{}, + CharacterEncoding: tt.characterEncoding, + WatchMethod: watchMethod, + } + + plugin.SetParserFunc(func() (parsers.Parser, error) { handler := influx.NewMetricHandler() return influx.NewParser(handler), nil }) if tt.offset != 0 { - tt.plugin.offsets = map[string]int64{ - tt.plugin.Files[0]: tt.offset, + plugin.offsets = map[string]int64{ + plugin.Files[0]: tt.offset, } } - err := tt.plugin.Init() + err := plugin.Init() require.NoError(t, err) var acc testutil.Accumulator - err = tt.plugin.Start(&acc) + err = plugin.Start(&acc) require.NoError(t, err) acc.Wait(len(tt.expected)) - tt.plugin.Stop() + plugin.Stop() actual := acc.GetTelegrafMetrics() for _, m := range actual { From 3f5867c1722f1521e8f8e1a3aca0514b0ad608e3 Mon Sep 17 00:00:00 2001 From: Goutham Veeramachaneni Date: Tue, 14 Sep 2021 23:04:34 +0200 Subject: [PATCH 605/761] fix: Add error message logging to outputs.http (#9727) (cherry picked from commit 357959f0876985c3b2e19c9fec19fb7d26b1c734) --- plugins/outputs/http/http.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 83faef0dae241..edaae3f6ec07d 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -1,6 +1,7 @@ package http import ( + "bufio" "bytes" "context" "fmt" @@ -18,7 +19,8 @@ import ( ) const ( - defaultURL = "http://127.0.0.1:8080/telegraf" + maxErrMsgLen = 1024 + defaultURL = "http://127.0.0.1:8080/telegraf" ) var sampleConfig = ` @@ -182,11 +184,18 @@ func (h *HTTP) write(reqBody []byte) error { return err } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return fmt.Errorf("when writing to [%s] received status code: %d", h.URL, resp.StatusCode) + errorLine := "" + scanner := bufio.NewScanner(io.LimitReader(resp.Body, maxErrMsgLen)) + if scanner.Scan() { + errorLine = scanner.Text() + } + + return fmt.Errorf("when writing to [%s] received status code: %d. body: %s", h.URL, resp.StatusCode, errorLine) } + + _, err = ioutil.ReadAll(resp.Body) if err != nil { return fmt.Errorf("when writing to [%s] received error: %v", h.URL, err) } From b80f73bc1e1a9571a02b7717535d8f11f509e550 Mon Sep 17 00:00:00 2001 From: Doron-Bargo <62555360+Doron-Bargo@users.noreply.github.com> Date: Wed, 15 Sep 2021 00:06:11 +0300 Subject: [PATCH 606/761] fix: pagination error on cloudwatch plugin (#9693) (cherry picked from commit 646273abe0c61bd9d9bb7ca04deadfe698c03f73) --- plugins/inputs/cloudwatch/cloudwatch.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 1cd7958301611..7dbd3c7faa7be 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -410,18 +410,21 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]*cwClient.Metric, error) { default: recentlyActive = nil } - params = &cwClient.ListMetricsInput{ - Dimensions: []*cwClient.DimensionFilter{}, - NextToken: token, - MetricName: nil, - RecentlyActive: recentlyActive, - } + for _, namespace := range c.Namespaces { - params.Namespace = aws.String(namespace) + + params = &cwClient.ListMetricsInput{ + Dimensions: []*cwClient.DimensionFilter{}, + NextToken: token, + MetricName: nil, + RecentlyActive: recentlyActive, + Namespace: aws.String(namespace), + } + for { resp, err := c.client.ListMetrics(params) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to list metrics with params per namespace: %v", err) } metrics = append(metrics, resp.Metrics...) @@ -432,7 +435,6 @@ func (c *CloudWatch) fetchNamespaceMetrics() ([]*cwClient.Metric, error) { params.NextToken = resp.NextToken } } - return metrics, nil } From 353f579d181939ce3e0c5ab36cedda658de2acf1 Mon Sep 17 00:00:00 2001 From: Sanyam Arya Date: Tue, 14 Sep 2021 23:26:49 +0200 Subject: [PATCH 607/761] feat: Internet Speed Monitor Input Plugin (#9623) (cherry picked from commit 40fa10ba0b66cc941ed202a23c5fb952da06aeee) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 3 +- go.sum | 5 +- plugins/inputs/all/all.go | 1 + plugins/inputs/internet_speed/README.md | 30 +++++++ .../inputs/internet_speed/internet_speed.go | 82 +++++++++++++++++++ .../internet_speed/internet_speed_test.go | 44 ++++++++++ 7 files changed, 164 insertions(+), 2 deletions(-) create mode 100644 plugins/inputs/internet_speed/README.md create mode 100644 plugins/inputs/internet_speed/internet_speed.go create mode 100644 plugins/inputs/internet_speed/internet_speed_test.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index c50b0ea3f3f9f..b36594a1faea2 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -193,6 +193,7 @@ following works: - github.com/safchain/ethtool [Apache License 2.0](https://github.com/safchain/ethtool/blob/master/LICENSE) - github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) - github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE) +- github.com/showwin/speedtest-go [MIT License](https://github.com/showwin/speedtest-go/blob/master/LICENSE) - github.com/signalfx/com_signalfx_metrics_protobuf [Apache License 2.0](https://github.com/signalfx/com_signalfx_metrics_protobuf/blob/master/LICENSE) - github.com/signalfx/gohistogram [MIT License](https://github.com/signalfx/gohistogram/blob/master/LICENSE) - github.com/signalfx/golib [Apache License 2.0](https://github.com/signalfx/golib/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 1558135cd9a3f..f115d32594712 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect github.com/aerospike/aerospike-client-go v1.27.0 github.com/alecthomas/participle v0.4.1 // indirect - github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d + github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 github.com/antchfx/jsonquery v1.1.4 @@ -233,6 +233,7 @@ require ( github.com/sensu/sensu-go/api/core/v2 v2.9.0 github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect + github.com/showwin/speedtest-go v1.1.4 github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect github.com/signalfx/golib/v3 v3.3.34 diff --git a/go.sum b/go.sum index a0e2e9e2bdd7d..1d2611560b766 100644 --- a/go.sum +++ b/go.sum @@ -202,8 +202,9 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 h1:YtaYjXmemIMyySUbs0VGFPqsLpsNHf4TW/L6yqpJQ9s= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= @@ -1434,6 +1435,8 @@ github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/showwin/speedtest-go v1.1.4 h1:pcY1W5LYZu44lH6Fuu80nu/Pj67n//VArlZudbAgR6E= +github.com/showwin/speedtest-go v1.1.4/go.mod h1:dJugxvC/AQDt4HQQKZ9lKNa2+b1c8nzj9IL0a/F8l1U= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 781e04e60928b..60a52903ef079 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -74,6 +74,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/intel_powerstat" _ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt" _ "github.com/influxdata/telegraf/plugins/inputs/internal" + _ "github.com/influxdata/telegraf/plugins/inputs/internet_speed" _ "github.com/influxdata/telegraf/plugins/inputs/interrupts" _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" _ "github.com/influxdata/telegraf/plugins/inputs/ipset" diff --git a/plugins/inputs/internet_speed/README.md b/plugins/inputs/internet_speed/README.md new file mode 100644 index 0000000000000..f9a71446f4979 --- /dev/null +++ b/plugins/inputs/internet_speed/README.md @@ -0,0 +1,30 @@ +# Internet Speed Monitor + +The `Internet Speed Monitor` collects data about the internet speed on the system. + +## Configuration + +```toml +# Monitors internet speed in the network +[[inputs.internet_speed]] + ## Sets if runs file download test + ## Default: false + enable_file_download = false +``` + +## Metrics + +It collects latency, download speed and upload speed + + +| Name | filed name | type | Unit | +| -------------- | ---------- | ------- | ---- | +| Download Speed | download | float64 | Mbps | +| Upload Speed | upload | float64 | Mbps | +| Latency | latency | float64 | ms | + +## Example Output + +```sh +internet_speed,host=Sanyam-Ubuntu download=41.791,latency=28.518,upload=59.798 1631031183000000000 +``` \ No newline at end of file diff --git a/plugins/inputs/internet_speed/internet_speed.go b/plugins/inputs/internet_speed/internet_speed.go new file mode 100644 index 0000000000000..cf0c5cfb13117 --- /dev/null +++ b/plugins/inputs/internet_speed/internet_speed.go @@ -0,0 +1,82 @@ +package internet_speed + +import ( + "fmt" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/showwin/speedtest-go/speedtest" +) + +// InternetSpeed is used to store configuration values. +type InternetSpeed struct { + EnableFileDownload bool `toml:"enable_file_download"` + Log telegraf.Logger `toml:"-"` +} + +const sampleConfig = ` + ## Sets if runs file download test + ## Default: false + enable_file_download = false +` + +// Description returns information about the plugin. +func (is *InternetSpeed) Description() string { + return "Monitors internet speed using speedtest.net service" +} + +// SampleConfig displays configuration instructions. +func (is *InternetSpeed) SampleConfig() string { + return sampleConfig +} + +const measurement = "internet_speed" + +func (is *InternetSpeed) Gather(acc telegraf.Accumulator) error { + user, err := speedtest.FetchUserInfo() + if err != nil { + return fmt.Errorf("fetching user info failed: %v", err) + } + serverList, err := speedtest.FetchServerList(user) + if err != nil { + return fmt.Errorf("fetching server list failed: %v", err) + } + + if len(serverList.Servers) < 1 { + return fmt.Errorf("no servers found") + } + s := serverList.Servers[0] + is.Log.Debug("Starting Speed Test") + is.Log.Debug("Running Ping...") + err = s.PingTest() + if err != nil { + return fmt.Errorf("ping test failed: %v", err) + } + is.Log.Debug("Running Download...") + err = s.DownloadTest(is.EnableFileDownload) + if err != nil { + return fmt.Errorf("download test failed: %v", err) + } + is.Log.Debug("Running Upload...") + err = s.UploadTest(is.EnableFileDownload) + if err != nil { + return fmt.Errorf("upload test failed failed: %v", err) + } + + is.Log.Debug("Test finished.") + + fields := make(map[string]interface{}) + fields["download"] = s.DLSpeed + fields["upload"] = s.ULSpeed + fields["latency"] = s.Latency + + tags := make(map[string]string) + + acc.AddFields(measurement, fields, tags) + return nil +} +func init() { + inputs.Add("internet_speed", func() telegraf.Input { + return &InternetSpeed{} + }) +} diff --git a/plugins/inputs/internet_speed/internet_speed_test.go b/plugins/inputs/internet_speed/internet_speed_test.go new file mode 100644 index 0000000000000..669426ff683ad --- /dev/null +++ b/plugins/inputs/internet_speed/internet_speed_test.go @@ -0,0 +1,44 @@ +package internet_speed + +import ( + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGathering(t *testing.T) { + if testing.Short() { + t.Skip("Skipping network-dependent test in short mode.") + } + internetSpeed := &InternetSpeed{ + EnableFileDownload: false, + Log: testutil.Logger{}, + } + + acc := &testutil.Accumulator{} + + require.NoError(t, internetSpeed.Gather(acc)) +} + +func TestDataGen(t *testing.T) { + if testing.Short() { + t.Skip("Skipping network-dependent test in short mode.") + } + internetSpeed := &InternetSpeed{ + EnableFileDownload: false, + Log: testutil.Logger{}, + } + + acc := &testutil.Accumulator{} + require.NoError(t, internetSpeed.Gather(acc)) + + metric, ok := acc.Get("internet_speed") + require.True(t, ok) + + tags := metric.Tags + + fields := metric.Fields + + acc.AssertContainsTaggedFields(t, "internet_speed", fields, tags) +} From 6d9f16824a6900bf38eb7dcc0204cba64d71e446 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Sep 2021 22:36:14 +0100 Subject: [PATCH 608/761] fix: bump github.com/prometheus/client_golang from 1.7.1 to 1.11.0 (#9653) (cherry picked from commit 96773387ae1a57068367041c2e57c4b05030f114) --- go.mod | 6 +++--- go.sum | 12 ++++++++---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index f115d32594712..5981bc49d62b1 100644 --- a/go.mod +++ b/go.mod @@ -160,7 +160,7 @@ require ( github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca github.com/jmespath/go-jmespath v0.4.0 github.com/jpillora/backoff v1.0.0 // indirect - github.com/json-iterator/go v1.1.10 // indirect + github.com/json-iterator/go v1.1.11 // indirect github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 @@ -217,9 +217,9 @@ require ( github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 // indirect github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.7.1 + github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.15.0 + github.com/prometheus/common v0.26.0 github.com/prometheus/procfs v0.6.0 github.com/prometheus/prometheus v1.8.2-0.20200911110723-e83ef207b6c2 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect diff --git a/go.sum b/go.sum index 1d2611560b766..2622f28fff755 100644 --- a/go.sum +++ b/go.sum @@ -581,6 +581,7 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= @@ -1018,8 +1019,9 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -1353,8 +1355,9 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= -github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1371,8 +1374,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.13.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1932,6 +1935,7 @@ golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From 0202994f2ac0b9b32e4a6f8f7cc7c4f747fa76dc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Sep 2021 22:36:58 +0100 Subject: [PATCH 609/761] fix: bump github.com/Azure/azure-event-hubs-go/v3 from 3.2.0 to 3.3.13 (#9677) (cherry picked from commit c331669f2eab1bba58db8c16b21e8577910d604f) --- go.mod | 14 +++++++------- go.sum | 30 +++++++++++++++++++----------- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 5981bc49d62b1..4dcb4550d4764 100644 --- a/go.mod +++ b/go.mod @@ -8,23 +8,23 @@ require ( cloud.google.com/go/pubsub v1.15.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.5.0 - github.com/Azure/azure-amqp-common-go/v3 v3.0.0 // indirect - github.com/Azure/azure-event-hubs-go/v3 v3.2.0 + github.com/Azure/azure-amqp-common-go/v3 v3.0.1 // indirect + github.com/Azure/azure-event-hubs-go/v3 v3.3.13 github.com/Azure/azure-kusto-go v0.3.2 github.com/Azure/azure-pipeline-go v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go v45.1.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go v51.1.0+incompatible // indirect github.com/Azure/azure-storage-blob-go v0.13.0 // indirect github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd - github.com/Azure/go-amqp v0.12.6 // indirect + github.com/Azure/go-amqp v0.13.12 // indirect github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.17 + github.com/Azure/go-autorest/autorest v0.11.18 github.com/Azure/go-autorest/autorest/adal v0.9.15 github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect - github.com/Azure/go-autorest/autorest/validation v0.2.0 // indirect + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect + github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/BurntSushi/toml v0.3.1 diff --git a/go.sum b/go.sum index 2622f28fff755..c5a0778443420 100644 --- a/go.sum +++ b/go.sum @@ -55,10 +55,10 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= collectd.org v0.5.0 h1:y4uFSAuOmeVhG3GCRa3/oH+ysePfO/+eGJNfd0Qa3d8= collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc= -github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= -github.com/Azure/azure-event-hubs-go/v3 v3.2.0 h1:CQlxKH5a4NX1ZmbdqXUPRwuNGh2XvtgmhkZvkEuWzhs= -github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc= +github.com/Azure/azure-amqp-common-go/v3 v3.0.1 h1:mXh+eyOxGLBfqDtfmbtby0l7XfG/6b2NkuZ3B7i6zHA= +github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= +github.com/Azure/azure-event-hubs-go/v3 v3.3.13 h1:aiI2RLjp0MzLCuFUXzR8b3h3bdPIc2c3vBYXRK8jX3E= +github.com/Azure/azure-event-hubs-go/v3 v3.3.13/go.mod h1:dJ/WqDn0KEJkNznL9UT/UbXzfmkffCjSNl9x2Y8JI28= github.com/Azure/azure-kusto-go v0.3.2 h1:XpS9co6GvEDl2oICF9HsjEsQVwEpRK6wbNWb9Z+uqsY= github.com/Azure/azure-kusto-go v0.3.2/go.mod h1:wd50n4qlsSxh+G4f80t+Fnl2ShK9AcXD+lMOstiKuYo= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= @@ -67,18 +67,19 @@ github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiU github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v44.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v45.1.0+incompatible h1:kxtaPD8n2z5Za+9e3sKsYG2IX6PG2R6VXtgS7gAbh3A= github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v51.1.0+incompatible h1:7uk6GWtUqKg6weLv2dbKnzwb0ml1Qn70AdtRccZ543w= +github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= -github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY= -github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= +github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= +github.com/Azure/go-amqp v0.13.12 h1:u/m0QvBgNVlcMqj4bPHxtEyANOzS+cXXndVMYGsC29A= +github.com/Azure/go-amqp v0.13.12/go.mod h1:D5ZrjQqB1dyp1A+G73xeL/kNn7D5qHJIIsNNps7YNmk= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= @@ -88,9 +89,11 @@ github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+B github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.17 h1:2zCdHwNgRH+St1J+ZMf66xI8aLr/5KMy+wWLH97zwYM= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= @@ -99,6 +102,7 @@ github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQW github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= @@ -117,10 +121,12 @@ github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= @@ -463,8 +469,10 @@ github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVz github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/devigned/tab v0.0.1/go.mod h1:oVYrfgGyond090gxCvvbjZji79+peOiSV6vhZhKJM0Y= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/devigned/tab/opencensus v0.1.2/go.mod h1:U6xXMXnNwXJpdaK0mnT3zdng4WTi+vCfqn7YHofEv2A= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= From d80eba4390908ab9405887cd389e348266eae98a Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 14 Sep 2021 14:42:59 -0700 Subject: [PATCH 610/761] chore: run ci tests in parallel and get test insights (#9686) (cherry picked from commit 779ed5ec42c09de94130435143bd86151e52666c) --- .circleci/config.yml | 169 +++++++++++------- Makefile | 4 - scripts/install_gotestsum.sh | 46 +++++ .../{mac_installgo.sh => installgo_mac.sh} | 10 +- scripts/installgo_windows.sh | 25 +++ 5 files changed, 185 insertions(+), 69 deletions(-) create mode 100755 scripts/install_gotestsum.sh rename scripts/{mac_installgo.sh => installgo_mac.sh} (78%) create mode 100644 scripts/installgo_windows.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 3daec86da98b4..b2043e1fa291c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,6 @@ version: 2.1 orbs: - win: circleci/windows@2.4.0 + win: circleci/windows@2.4.0 aws-cli: circleci/aws-cli@1.4.0 executors: @@ -26,27 +26,84 @@ executors: commands: check-changed-files-or-halt: - steps: - - run: ./scripts/check-file-changes.sh - check-changed-files-or-halt-windows: - steps: - - run: - command: ./scripts/check-file-changes.sh - shell: bash.exe + steps: + - run: ./scripts/check-file-changes.sh test-go: parameters: - goarch: + os: + type: string + default: "linux" + gotestsum: type: string - default: "amd64" + default: "gotestsum" + cache_version: + type: string + default: "v3" steps: - checkout - check-changed-files-or-halt - - attach_workspace: - at: '/go' - - run: 'GOARCH=<< parameters.goarch >> make' - - run: 'GOARCH=<< parameters.goarch >> make check' - - run: 'GOARCH=<< parameters.goarch >> make check-deps' - - run: 'GOARCH=<< parameters.goarch >> make test' + - when: + condition: + equal: [ linux, << parameters.os >> ] + steps: + - restore_cache: + key: linux-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + - attach_workspace: + at: '/go' + - when: + condition: + equal: [ darwin, << parameters.os >> ] + steps: + - restore_cache: + key: darwin-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + - run: 'sh ./scripts/installgo_mac.sh' + - when: + condition: + equal: [ windows, << parameters.os >> ] + steps: + - run: rm -rf /c/Go + - restore_cache: + key: windows-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + - run: 'sh ./scripts/installgo_windows.sh' + - run: mkdir -p test-results + - run: ./scripts/install_gotestsum.sh << parameters.os >> << parameters.gotestsum >> + - run: | + PACKAGE_NAMES=$(go list ./... | circleci tests split --split-by=timings --timings-type=classname) + ./<< parameters.gotestsum >> --junitfile test-results/gotestsum-report.xml -- -short $PACKAGE_NAMES + - store_test_results: + path: test-results + - when: + condition: + equal: [ linux, << parameters.os >> ] + steps: + - save_cache: + name: 'Saving cache' + key: linux-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + paths: + - '~/go/src/github.com/influxdata/telegraf/gotestsum' + - when: + condition: + equal: [ darwin, << parameters.os >> ] + steps: + - save_cache: + name: 'Saving cache' + key: darwin-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + paths: + - '/go/src/github.com/influxdata/telegraf/gotestsum' + - '/usr/local/Cellar/go' + - '/usr/local/bin/go' + - '/usr/local/bin/gofmt' + - when: + condition: + equal: [ windows, << parameters.os >> ] + steps: + - save_cache: + name: 'Saving cache' + key: windows-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + paths: + - 'C:\Go' + - 'C:\Users\circleci\project\gotestsum.exe' + package-build: parameters: release: @@ -65,11 +122,11 @@ commands: at: '/go' - when: condition: << parameters.release >> - steps: + steps: - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 make package' - when: condition: << parameters.nightly >> - steps: + steps: - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 NIGHTLY=1 make package' - run: 'make upload-nightly' - unless: @@ -96,6 +153,8 @@ jobs: - check-changed-files-or-halt - run: 'make deps' - run: 'make tidy' + - run: 'make check' + - run: 'make check-deps' - save_cache: name: 'go module cache' key: go-mod-v1-{{ checksum "go.sum" }} @@ -109,51 +168,37 @@ jobs: executor: go-1_16 steps: - test-go + parallelism: 4 test-go-1_16-386: executor: go-1_16 steps: - - test-go: - goarch: "386" + - test-go + parallelism: 4 test-go-1_17: executor: go-1_17 steps: - test-go + parallelism: 4 test-go-1_17-386: executor: go-1_17 steps: - - test-go: - goarch: "386" + - test-go + parallelism: 4 test-go-mac: executor: mac steps: - - checkout - - restore_cache: - key: mac-go-mod-v0-{{ checksum "go.sum" }} - - check-changed-files-or-halt - - run: 'sh ./scripts/mac_installgo.sh' - - save_cache: - name: 'Saving cache' - key: mac-go-mod-v0-{{ checksum "go.sum" }} - paths: - - '/usr/local/Cellar/go' - - '/usr/local/bin/go' - - '/usr/local/bin/gofmt' - - run: 'make deps' - - run: 'make tidy' - - run: 'make' - - run: 'make check' - - run: 'make test' + - test-go: + os: darwin + parallelism: 4 test-go-windows: executor: name: win/default - shell: powershell.exe + shell: bash.exe steps: - - checkout - - check-changed-files-or-halt-windows - - run: choco upgrade golang --version=1.17.0 - - run: choco install make - - run: git config --system core.longpaths true - - run: make test-windows + - test-go: + os: windows + gotestsum: "gotestsum.exe" + parallelism: 4 windows-package: executor: go-1_17 @@ -283,14 +328,14 @@ jobs: command: | echo "Go tests complete." share-artifacts: - executor: aws-cli/default + executor: aws-cli/default steps: - run: command: | PR=${CIRCLE_PULL_REQUEST##*/} printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" - curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" - + curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" + workflows: version: 2 check: @@ -331,47 +376,47 @@ workflows: filters: tags: only: /.*/ - - 'test-awaiter': + - 'test-awaiter': requires: - 'test-go-1_16' - 'test-go-1_16-386' - 'test-go-1_17' - 'test-go-1_17-386' - 'windows-package': - requires: + requires: - 'test-go-windows' - 'darwin-package': - requires: + requires: - 'test-go-mac' - 'i386-package': - requires: + requires: - 'test-awaiter' - 'ppc641e-package': - requires: + requires: - 'test-awaiter' - 's390x-package': - requires: + requires: - 'test-awaiter' - 'armel-package': - requires: + requires: - 'test-awaiter' - 'amd64-package': - requires: + requires: - 'test-awaiter' - 'arm64-package': - requires: + requires: - 'test-awaiter' - 'armhf-package': - requires: + requires: - 'test-awaiter' - 'static-package': requires: - 'test-awaiter' - 'mipsel-package': - requires: + requires: - 'test-awaiter' - 'mips-package': - requires: + requires: - 'test-awaiter' - 'share-artifacts': requires: @@ -412,7 +457,7 @@ workflows: only: /.*/ - 'package-sign-mac': requires: - - 'package-sign-windows' + - 'package-sign-windows' filters: tags: only: /.*/ diff --git a/Makefile b/Makefile index f0bb01dd2a35e..230eedf600f6f 100644 --- a/Makefile +++ b/Makefile @@ -119,10 +119,6 @@ fmtcheck: exit 1 ;\ fi -.PHONY: test-windows -test-windows: - go test -short ./... - .PHONY: vet vet: @echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)' diff --git a/scripts/install_gotestsum.sh b/scripts/install_gotestsum.sh new file mode 100755 index 0000000000000..0b813e20879fa --- /dev/null +++ b/scripts/install_gotestsum.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +set -eux + +OS=$1 +EXE=$2 +VERSION="1.7.0" + +WINDOWS_SHA="7ae12ddb171375f0c14d6a09dd27a5c1d1fc72edeea674e3d6e7489a533b40c1" +DARWIN_SHA="a8e2351604882af1a67601cbeeacdcfa9b17fc2f6fbac291cf5d434efdf2d85b" +LINUX_SHA="b5c98cc408c75e76a097354d9487dca114996e821b3af29a0442aa6c9159bd40" + +setup_gotestsum () { + echo "installing gotestsum" + curl -L "https://github.com/gotestyourself/gotestsum/releases/download/v${VERSION}/gotestsum_${VERSION}_${OS}_amd64.tar.gz" --output gotestsum.tar.gz + + if [ "$OS" = "windows" ]; then + SHA=$WINDOWS_SHA + SHATOOL="sha256sum" + elif [ "$OS" = "darwin" ]; then + SHA=$DARWIN_SHA + SHATOOL="shasum --algorithm 256" + elif [ "$OS" = "linux" ]; then + SHA=$LINUX_SHA + SHATOOL="sha256sum" + fi + + if ! echo "${SHA} gotestsum.tar.gz" | ${SHATOOL} --check -; then + echo "Checksum failed" >&2 + exit 1 + fi + + tar --extract --file=gotestsum.tar.gz "${EXE}" +} + +if test -f "${EXE}"; then + echo "gotestsum is already installed" + v=$(./"${EXE}" --version) + echo "$v is installed, required version is ${VERSION}" + if [ "$v" != "gotestsum version ${VERSION}" ]; then + setup_gotestsum + ${EXE} --version + fi +else + setup_gotestsum +fi diff --git a/scripts/mac_installgo.sh b/scripts/installgo_mac.sh similarity index 78% rename from scripts/mac_installgo.sh rename to scripts/installgo_mac.sh index cb41ee5f666cd..b839358136d98 100644 --- a/scripts/mac_installgo.sh +++ b/scripts/installgo_mac.sh @@ -14,16 +14,20 @@ path="/usr/local/Cellar" setup_go () { echo "installing go" curl -L https://golang.org/dl/go${GO_VERSION}.${GO_ARCH}.tar.gz --output go${GO_VERSION}.${GO_ARCH}.tar.gz - echo "${GO_VERSION_SHA} go${GO_VERSION}.${GO_ARCH}.tar.gz" | shasum -a 256 --check + if ! echo "${GO_VERSION_SHA} go${GO_VERSION}.${GO_ARCH}.tar.gz" | shasum --algorithm 256 --check -; then + echo "Checksum failed" >&2 + exit 1 + fi + sudo rm -rf ${path}/go sudo tar -C $path -xzf go${GO_VERSION}.${GO_ARCH}.tar.gz ln -sf ${path}/go/bin/go /usr/local/bin/go ln -sf ${path}/go/bin/gofmt /usr/local/bin/gofmt } -if command -v go &> /dev/null; then +if command -v go >/dev/null 2>&1; then echo "Go is already installed" - v=`go version | { read _ _ v _; echo ${v#go}; }` + v=$(go version | { read -r _ _ v _; echo "${v#go}"; }) echo "$v is installed, required version is ${GO_VERSION}" if [ "$v" != ${GO_VERSION} ]; then setup_go diff --git a/scripts/installgo_windows.sh b/scripts/installgo_windows.sh new file mode 100644 index 0000000000000..d035447570c8a --- /dev/null +++ b/scripts/installgo_windows.sh @@ -0,0 +1,25 @@ +#!/bin/sh + +set -eux + +GO_VERSION="1.17" + +setup_go () { + choco upgrade golang --version=${GO_VERSION} + choco install make + git config --system core.longpaths true + rm -rf /c/Go + cp -r /c/Program\ Files/Go /c/ +} + +if command -v go >/dev/null 2>&1; then + echo "Go is already installed" + v=$(go version | { read -r _ _ v _; echo "${v#go}"; }) + echo "$v is installed, required version is ${GO_VERSION}" + if [ "$v" != ${GO_VERSION} ]; then + setup_go + go version + fi +else + setup_go +fi From 87564b8fadd9b817a68aaf59067cb7f5be890a33 Mon Sep 17 00:00:00 2001 From: John Seekins Date: Wed, 15 Sep 2021 11:56:52 -0600 Subject: [PATCH 611/761] feat: add count of bonded slaves (for easier alerting) (#9762) (cherry picked from commit 0e9391d43fbef7857b197ee4f903f975d35d3bde) --- plugins/inputs/bond/README.md | 6 ++++++ plugins/inputs/bond/bond.go | 9 +++++++++ plugins/inputs/bond/bond_test.go | 1 + 3 files changed, 16 insertions(+) diff --git a/plugins/inputs/bond/README.md b/plugins/inputs/bond/README.md index abcf72c9193ca..d905038a9d533 100644 --- a/plugins/inputs/bond/README.md +++ b/plugins/inputs/bond/README.md @@ -27,6 +27,7 @@ The plugin collects these metrics from `/proc/net/bonding/*` files. - bond_slave - failures - status + - count ### Description: @@ -39,6 +40,9 @@ status failures Amount of failures for bond's slave interface. + +count + Number of slaves attached to bond ``` ### Tags: @@ -79,7 +83,9 @@ Output: > bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000 > bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000 > bond_slave,host=local,bond=bond1,interface=eth1 status=1i,failures=0i 1509704525000000000 +> bond_slave,host=local,bond=bond1 count=2i 1509704525000000000 > bond,bond=bond0,host=isvetlov-mac.local status=1i 1509704525000000000 > bond_slave,bond=bond0,interface=eth1,host=local status=1i,failures=0i 1509704525000000000 > bond_slave,bond=bond0,interface=eth2,host=local status=1i,failures=0i 1509704525000000000 +> bond_slave,bond=bond0,host=local count=2i 1509704525000000000 ``` diff --git a/plugins/inputs/bond/bond.go b/plugins/inputs/bond/bond.go index b71f36e629feb..dc9b083ec5af9 100644 --- a/plugins/inputs/bond/bond.go +++ b/plugins/inputs/bond/bond.go @@ -122,6 +122,7 @@ func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.A func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.Accumulator) error { var slave string var status int + var slaveCount int scanner := bufio.NewScanner(strings.NewReader(rawFile)) for scanner.Scan() { @@ -155,8 +156,16 @@ func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf. "interface": slave, } acc.AddFields("bond_slave", fields, tags) + slaveCount++ } } + fields := map[string]interface{}{ + "count": slaveCount, + } + tags := map[string]string{ + "bond": bondName, + } + acc.AddFields("bond_slave", fields, tags) return scanner.Err() } diff --git a/plugins/inputs/bond/bond_test.go b/plugins/inputs/bond/bond_test.go index 342a3f4eb831d..8dc24f4cafa45 100644 --- a/plugins/inputs/bond/bond_test.go +++ b/plugins/inputs/bond/bond_test.go @@ -75,4 +75,5 @@ func TestGatherBondInterface(t *testing.T) { acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"active_slave": "eth2", "status": 1}, map[string]string{"bond": "bondAB"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 0}, map[string]string{"bond": "bondAB", "interface": "eth3"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bondAB", "interface": "eth2"}) + acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"count": 2}, map[string]string{"bond": "bondAB"}) } From d035e3f5a5228c2037c2c4f31d024ffb42ef978a Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Wed, 15 Sep 2021 19:58:40 +0200 Subject: [PATCH 612/761] fix: Fix panic for non-existing metric names (#9757) (cherry picked from commit c076398440971c01f67eb326c434c1eab1c361b2) --- plugins/parsers/registry.go | 2 +- plugins/parsers/xpath/parser.go | 26 ++-- plugins/parsers/xpath/parser_test.go | 189 ++++++++++++++++++--------- 3 files changed, 141 insertions(+), 76 deletions(-) diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index cc2102c9532d2..f07c789a272f1 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -395,7 +395,7 @@ func NewXPathParserConfigs(metricName string, cfgs []XPathConfig) []xpath.Config configs := make([]xpath.Config, 0, len(cfgs)) for _, cfg := range cfgs { config := xpath.Config(cfg) - config.MetricName = metricName + config.MetricDefaultName = metricName configs = append(configs, config) } return configs diff --git a/plugins/parsers/xpath/parser.go b/plugins/parsers/xpath/parser.go index 52224530a9250..75ebfd92035c1 100644 --- a/plugins/parsers/xpath/parser.go +++ b/plugins/parsers/xpath/parser.go @@ -35,14 +35,14 @@ type Parser struct { } type Config struct { - MetricName string - MetricQuery string `toml:"metric_name"` - Selection string `toml:"metric_selection"` - Timestamp string `toml:"timestamp"` - TimestampFmt string `toml:"timestamp_format"` - Tags map[string]string `toml:"tags"` - Fields map[string]string `toml:"fields"` - FieldsInt map[string]string `toml:"fields_int"` + MetricDefaultName string `toml:"-"` + MetricQuery string `toml:"metric_name"` + Selection string `toml:"metric_selection"` + Timestamp string `toml:"timestamp"` + TimestampFmt string `toml:"timestamp_format"` + Tags map[string]string `toml:"tags"` + Fields map[string]string `toml:"fields"` + FieldsInt map[string]string `toml:"fields_int"` FieldSelection string `toml:"field_selection"` FieldNameQuery string `toml:"field_name"` @@ -160,13 +160,19 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config // Determine the metric name. If a query was specified, use the result of this query and the default metric name // otherwise. - metricname = config.MetricName + metricname = config.MetricDefaultName if len(config.MetricQuery) > 0 { v, err := p.executeQuery(doc, selected, config.MetricQuery) if err != nil { return nil, fmt.Errorf("failed to query metric name: %v", err) } - metricname = v.(string) + var ok bool + if metricname, ok = v.(string); !ok { + if v == nil { + p.Log.Infof("Hint: Empty metric-name-node. If you wanted to set a constant please use `metric_name = \"'name'\"`.") + } + return nil, fmt.Errorf("failed to query metric name: query result is of type %T not 'string'", v) + } } // By default take the time the parser was invoked and override the value diff --git a/plugins/parsers/xpath/parser_test.go b/plugins/parsers/xpath/parser_test.go index 46e4dba690102..8e7a3087c0888 100644 --- a/plugins/parsers/xpath/parser_test.go +++ b/plugins/parsers/xpath/parser_test.go @@ -148,8 +148,8 @@ func TestInvalidTypeQueriesFail(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", FieldsInt: map[string]string{ "a": "/Device_1/value_string", }, @@ -185,8 +185,8 @@ func TestInvalidTypeQueries(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "a": "number(/Device_1/value_string)", }, @@ -207,8 +207,8 @@ func TestInvalidTypeQueries(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "a": "boolean(/Device_1/value_string)", }, @@ -252,8 +252,8 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", }, }, defaultTags: map[string]string{}, @@ -269,9 +269,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", - TimestampFmt: "unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + TimestampFmt: "unix", }, }, defaultTags: map[string]string{}, @@ -287,9 +287,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix_ms", - TimestampFmt: "unix_ms", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix_ms", + TimestampFmt: "unix_ms", }, }, defaultTags: map[string]string{}, @@ -305,9 +305,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix_us", - TimestampFmt: "unix_us", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix_us", + TimestampFmt: "unix_us", }, }, defaultTags: map[string]string{}, @@ -323,9 +323,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix_ns", - TimestampFmt: "unix_ns", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix_ns", + TimestampFmt: "unix_ns", }, }, defaultTags: map[string]string{}, @@ -341,9 +341,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_iso", - TimestampFmt: "2006-01-02T15:04:05Z", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_iso", + TimestampFmt: "2006-01-02T15:04:05Z", }, }, defaultTags: map[string]string{}, @@ -382,8 +382,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "a": "/Device_1/value_int", "b": "/Device_1/value_float", @@ -410,8 +410,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "a": "number(Device_1/value_int)", "b": "number(/Device_1/value_float)", @@ -438,8 +438,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "b": "number(/Device_1/value_float)", "c": "boolean(/Device_1/value_bool)", @@ -468,8 +468,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "x": "substring-before(/Device_1/value_position, ';')", "y": "substring-after(/Device_1/value_position, ';')", @@ -492,8 +492,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "x": "number(substring-before(/Device_1/value_position, ';'))", "y": "number(substring-after(/Device_1/value_position, ';'))", @@ -516,8 +516,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", FieldsInt: map[string]string{ "x": "substring-before(/Device_1/value_position, ';')", "y": "substring-after(/Device_1/value_position, ';')", @@ -540,8 +540,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Tags: map[string]string{ "state": "/Device_1/State", "name": "substring-after(/Device_1/Name, ' ')", @@ -587,8 +587,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", }, }, defaultTags: map[string]string{}, @@ -604,9 +604,9 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_iso/@value", - TimestampFmt: "2006-01-02T15:04:05Z", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_iso/@value", + TimestampFmt: "2006-01-02T15:04:05Z", }, }, defaultTags: map[string]string{}, @@ -622,8 +622,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "a": "/Device_1/attr_int/@_", "b": "/Device_1/attr_float/@_", @@ -650,8 +650,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "a": "number(/Device_1/attr_int/@_)", "b": "number(/Device_1/attr_float/@_)", @@ -678,8 +678,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "b": "number(/Device_1/attr_float/@_)", "c": "boolean(/Device_1/attr_bool/@_)", @@ -708,8 +708,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "name": "substring-after(/Device_1/Name/@value, ' ')", }, @@ -730,8 +730,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Tags: map[string]string{ "state": "/Device_1/State/@_", "name": "substring-after(/Device_1/Name/@value, ' ')", @@ -754,8 +754,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "a": "/Device_1/attr_bool_numeric/@_ = 1", }, @@ -799,8 +799,8 @@ func TestParseMultiValues(t *testing.T) { input: singleMetricMultiValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Timestamp/@value", + MetricDefaultName: "test", + Timestamp: "/Timestamp/@value", Fields: map[string]string{ "a": "number(/Device/Value[1])", "b": "number(/Device/Value[2])", @@ -831,8 +831,8 @@ func TestParseMultiValues(t *testing.T) { input: singleMetricMultiValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Timestamp/@value", + MetricDefaultName: "test", + Timestamp: "/Timestamp/@value", FieldsInt: map[string]string{ "a": "/Device/Value[1]", "b": "/Device/Value[2]", @@ -886,9 +886,9 @@ func TestParseMultiNodes(t *testing.T) { input: multipleNodesXML, configs: []Config{ { - MetricName: "test", - Selection: "/Device", - Timestamp: "/Timestamp/@value", + MetricDefaultName: "test", + Selection: "/Device", + Timestamp: "/Timestamp/@value", Fields: map[string]string{ "value": "number(Value)", "active": "Active = 1", @@ -999,9 +999,9 @@ func TestParseMetricQuery(t *testing.T) { input: metricNameQueryXML, configs: []Config{ { - MetricName: "test", - MetricQuery: "name(/Device_1/Metric/@*[1])", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + MetricQuery: "name(/Device_1/Metric/@*[1])", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "value": "/Device_1/Metric/@*[1]", }, @@ -1017,6 +1017,29 @@ func TestParseMetricQuery(t *testing.T) { time.Unix(1577923199, 0), ), }, + { + name: "parse metric name constant", + input: metricNameQueryXML, + configs: []Config{ + { + MetricDefaultName: "test", + MetricQuery: "'the_metric'", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "value": "/Device_1/Metric/@*[1]", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "the_metric", + map[string]string{}, + map[string]interface{}{ + "value": "ok", + }, + time.Unix(1577923199, 0), + ), + }, } for _, tt := range tests { @@ -1032,6 +1055,42 @@ func TestParseMetricQuery(t *testing.T) { } } +func TestParseErrors(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + expected string + }{ + { + name: "string metric name query", + input: metricNameQueryXML, + configs: []Config{ + { + MetricDefaultName: "test", + MetricQuery: "arbitrary", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "value": "/Device_1/Metric/@*[1]", + }, + }, + }, + expected: "failed to query metric name: query result is of type not 'string'", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: map[string]string{}, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + _, err := parser.ParseLine(tt.input) + require.Error(t, err) + require.Equal(t, tt.expected, err.Error()) + }) + } +} + func TestEmptySelection(t *testing.T) { var tests = []struct { name string @@ -1146,7 +1205,7 @@ func TestTestCases(t *testing.T) { filename := filepath.FromSlash(tt.filename) cfg, header, err := loadTestConfiguration(filename) require.NoError(t, err) - cfg.MetricName = "xml" + cfg.MetricDefaultName = "xml" // Load the xml-content input, err := testutil.ParseRawLinesFrom(header, "File:") From 24bf987d85f4fac9fa8bf4abc0414edfaa5c8eca Mon Sep 17 00:00:00 2001 From: reimda Date: Wed, 15 Sep 2021 15:35:23 -0600 Subject: [PATCH 613/761] Update release.sh to include new builds on website (#9765) (cherry picked from commit 783945e55d03d36059ac1bd2da9551ee29c5ebb0) --- scripts/release.sh | 38 +++++++++++++++++++++++++++++++++++--- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/scripts/release.sh b/scripts/release.sh index b445efc0494b3..22cac0a09cf53 100644 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -103,6 +103,16 @@ do done < manifest echo "" +package="$(grep *_darwin_amd64.dmg manifest | cut -f2 -d' ')" +cat -< Date: Thu, 16 Sep 2021 12:01:34 -0400 Subject: [PATCH 614/761] fix: bump github.com/antchfx/xmlquery from 1.3.5 to 1.3.6 (#9750) (cherry picked from commit c4050aed948dd65ed33a2b9d2125712c8159901f) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4dcb4550d4764..24b6f664b2756 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/aliyun/alibaba-cloud-sdk-go v1.61.1004 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 github.com/antchfx/jsonquery v1.1.4 - github.com/antchfx/xmlquery v1.3.5 + github.com/antchfx/xmlquery v1.3.6 github.com/antchfx/xpath v1.1.11 github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230 // indirect github.com/apache/thrift v0.14.2 diff --git a/go.sum b/go.sum index c5a0778443420..e09c7b36318ff 100644 --- a/go.sum +++ b/go.sum @@ -219,8 +219,8 @@ github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RD github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antchfx/jsonquery v1.1.4 h1:+OlFO3QS9wjU0MKx9MgHm5f6o6hdd4e9mUTp0wTjxlM= github.com/antchfx/jsonquery v1.1.4/go.mod h1:cHs8r6Bymd8j6HI6Ej1IJbjahKvLBcIEh54dfmo+E9A= -github.com/antchfx/xmlquery v1.3.5 h1:I7TuBRqsnfFuL11ruavGm911Awx9IqSdiU6W/ztSmVw= -github.com/antchfx/xmlquery v1.3.5/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= +github.com/antchfx/xmlquery v1.3.6 h1:kaEVzH1mNo/2AJZrhZjAaAUTy2Nn2zxGfYYU8jWfXOo= +github.com/antchfx/xmlquery v1.3.6/go.mod h1:64w0Xesg2sTaawIdNqMB+7qaW/bSqkQm+ssPaCMWNnc= github.com/antchfx/xpath v1.1.7/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= github.com/antchfx/xpath v1.1.11 h1:WOFtK8TVAjLm3lbgqeP0arlHpvCEeTANeWZ/csPpJkQ= From ca8d9eea8da2f371c00bc80bf95d296dcf70e0b8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Sep 2021 10:02:47 -0600 Subject: [PATCH 615/761] fix: bump github.com/miekg/dns from 1.1.31 to 1.1.43 (#9656) (cherry picked from commit a02f49c6ff5b43955e117f0a1290f2c4b6543d45) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 24b6f664b2756..65a894974bea6 100644 --- a/go.mod +++ b/go.mod @@ -180,7 +180,7 @@ require ( github.com/mdlayher/genetlink v1.0.0 // indirect github.com/mdlayher/netlink v1.1.0 // indirect github.com/microsoft/ApplicationInsights-Go v0.4.4 - github.com/miekg/dns v1.1.31 + github.com/miekg/dns v1.1.43 github.com/minio/highwayhash v1.0.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.2.2 // indirect diff --git a/go.sum b/go.sum index e09c7b36318ff..5ff7799dc902b 100644 --- a/go.sum +++ b/go.sum @@ -1146,8 +1146,9 @@ github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81T github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= From 3ba27ea1bb680e4d46c73d400ed721327ba7e8fa Mon Sep 17 00:00:00 2001 From: Alan Pope Date: Thu, 16 Sep 2021 18:22:24 +0100 Subject: [PATCH 616/761] docs: Move nightly builds (#9747) (cherry picked from commit 81eed8d436ace8b896f23c4fdc2a08a568a686aa) --- README.md | 41 +---------------------------------------- docs/NIGHTLIES.md | 42 ++++++++++++++++++++++++++++++++++++++++++ docs/README.md | 3 +++ 3 files changed, 46 insertions(+), 40 deletions(-) create mode 100644 docs/NIGHTLIES.md diff --git a/README.md b/README.md index c4a89b751c5d2..57b2d4e8cc33d 100644 --- a/README.md +++ b/README.md @@ -80,46 +80,7 @@ version. ### Nightly Builds -These builds are generated from the master branch: - -FreeBSD - .tar.gz -- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) -- [telegraf-nightly_freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) -- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) - -Linux - .rpm -- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) -- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) -- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) -- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) -- [telegraf-nightly.ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) -- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) -- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) - -Linux - .deb -- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) -- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) -- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) -- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) -- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) -- [telegraf_nightly_ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) -- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) - -Linux - .tar.gz -- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) -- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) -- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) -- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) -- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) -- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) -- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz) - -OSX - .tar.gz -- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) - -Windows - .zip -- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) -- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) +[Nightly](/docs/NIGHTLIES.md) builds are available, generated from the master branch. ## How to use it: diff --git a/docs/NIGHTLIES.md b/docs/NIGHTLIES.md new file mode 100644 index 0000000000000..63cdc2d82cfdc --- /dev/null +++ b/docs/NIGHTLIES.md @@ -0,0 +1,42 @@ +### Nightly Builds + +These builds are generated from the master branch: + +FreeBSD - .tar.gz +- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) +- [telegraf-nightly_freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) +- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) + +Linux - .rpm +- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) +- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) +- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) +- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) +- [telegraf-nightly.ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) +- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) +- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) + +Linux - .deb +- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) +- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) +- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) +- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) +- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) +- [telegraf_nightly_ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) +- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) + +Linux - .tar.gz +- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) +- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) +- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) +- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) +- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) +- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) +- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz) + +OSX - .tar.gz +- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) + +Windows - .zip +- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) +- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) diff --git a/docs/README.md b/docs/README.md index b7b55336c5a04..99320dee95588 100644 --- a/docs/README.md +++ b/docs/README.md @@ -10,6 +10,8 @@ - [Profiling][profiling] - [Windows Service][winsvc] - [FAQ][faq] +- Developer Builds + - [Nightlies](nightlies) [conf]: /docs/CONFIGURATION.md [metrics]: /docs/METRICS.md @@ -19,3 +21,4 @@ [profiling]: /docs/PROFILING.md [winsvc]: /docs/WINDOWS_SERVICE.md [faq]: /docs/FAQ.md +[nightlies]: /docs/NIGHTLIES.md \ No newline at end of file From b406fac7e64a68afee198a6daac9a022a60858e9 Mon Sep 17 00:00:00 2001 From: Michael Hall Date: Thu, 16 Sep 2021 14:14:41 -0400 Subject: [PATCH 617/761] docs: Add list of 3rd party builds to the README (#8576) (cherry picked from commit b806ad88488b057b9864d7365e24b1651726caa3) --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index 57b2d4e8cc33d..2b49842789db6 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,17 @@ version. [Nightly](/docs/NIGHTLIES.md) builds are available, generated from the master branch. +### 3rd Party Builds + +Builds for other platforms or package formats are provided by members of the Telegraf community. These packages are not built, tested or supported by the Telegraf project or InfluxData, we make no guarantees that they will work. Please get in touch with the package author if you need support. + +* Windows + * [Chocolatey](https://chocolatey.org/packages/telegraf) by [ripclawffb](https://chocolatey.org/profiles/ripclawffb) + * [Scoop](https://github.com/ScoopInstaller/Main/blob/master/bucket/telegraf.json) +* Linux + * [Snap](https://snapcraft.io/telegraf) by Laurent Sesquès (sajoupa) + + ## How to use it: See usage with: From 58abd00c2f2cec15d4f697053f7cfe638eb67657 Mon Sep 17 00:00:00 2001 From: John Seekins Date: Thu, 16 Sep 2021 15:19:51 -0600 Subject: [PATCH 618/761] fix: add additional logstash output plugin stats (#9707) (cherry picked from commit f5a3df429ad969302c765da5dce92f4c63042f37) --- plugins/inputs/logstash/README.md | 6 ++ plugins/inputs/logstash/logstash.go | 65 +++++++++++++++++++- plugins/inputs/logstash/logstash_test.go | 58 +++++++++++++++++ plugins/inputs/logstash/samples_logstash7.go | 7 ++- 4 files changed, 131 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/logstash/README.md b/plugins/inputs/logstash/README.md index 9571de5fd8873..95ec3e6feae66 100644 --- a/plugins/inputs/logstash/README.md +++ b/plugins/inputs/logstash/README.md @@ -42,6 +42,8 @@ Logstash 5 and later is supported. ### Metrics +Additional plugin stats may be collected (because logstash doesn't consistently expose all stats) + - logstash_jvm - tags: - node_id @@ -125,6 +127,10 @@ Logstash 5 and later is supported. - duration_in_millis - in - out + - bulk_requests_failures (for Logstash 7+) + - bulk_requests_with_errors (for Logstash 7+) + - documents_successes (for logstash 7+) + - documents_retryable_failures (for logstash 7+) - logstash_queue - tags: diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index 76f75bc63a6a0..10a3e7b6b8dd0 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -126,9 +126,11 @@ type Pipeline struct { } type Plugin struct { - ID string `json:"id"` - Events interface{} `json:"events"` - Name string `json:"name"` + ID string `json:"id"` + Events interface{} `json:"events"` + Name string `json:"name"` + BulkRequests map[string]interface{} `json:"bulk_requests"` + Documents map[string]interface{} `json:"documents"` } type PipelinePlugins struct { @@ -290,6 +292,63 @@ func (logstash *Logstash) gatherPluginsStats( return err } accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + /* + The elasticsearch output produces additional stats around + bulk requests and document writes (that are elasticsearch specific). + Collect those here + */ + if pluginType == "output" && plugin.Name == "elasticsearch" { + /* + The "bulk_requests" section has details about batch writes + into Elasticsearch + + "bulk_requests" : { + "successes" : 2870, + "responses" : { + "200" : 2870 + }, + "failures": 262, + "with_errors": 9089 + }, + */ + flattener := jsonParser.JSONFlattener{} + err := flattener.FlattenJSON("", plugin.BulkRequests) + if err != nil { + return err + } + for k, v := range flattener.Fields { + if strings.HasPrefix(k, "bulk_requests") { + continue + } + newKey := fmt.Sprintf("bulk_requests_%s", k) + flattener.Fields[newKey] = v + delete(flattener.Fields, k) + } + accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + + /* + The "documents" section has counts of individual documents + written/retried/etc. + "documents" : { + "successes" : 2665549, + "retryable_failures": 13733 + } + */ + flattener = jsonParser.JSONFlattener{} + err = flattener.FlattenJSON("", plugin.Documents) + if err != nil { + return err + } + for k, v := range flattener.Fields { + if strings.HasPrefix(k, "documents") { + continue + } + newKey := fmt.Sprintf("documents_%s", k) + flattener.Fields[newKey] = v + delete(flattener.Fields, k) + } + accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + } } return nil diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go index d8db3475a1e95..089824c58767f 100644 --- a/plugins/inputs/logstash/logstash_test.go +++ b/plugins/inputs/logstash/logstash_test.go @@ -708,6 +708,64 @@ func Test_Logstash7GatherPipelinesQueueStats(test *testing.T) { }, ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(2802177.0), + "in": float64(2665549.0), + "out": float64(2665549.0), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "bulk_requests_successes": float64(2870), + "bulk_requests_responses_200": float64(2870), + "bulk_requests_failures": float64(262), + "bulk_requests_with_errors": float64(9089), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "documents_successes": float64(2665549), + "documents_retryable_failures": float64(13733), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( test, "logstash_queue", diff --git a/plugins/inputs/logstash/samples_logstash7.go b/plugins/inputs/logstash/samples_logstash7.go index fe05712909c81..e04bb4319a27a 100644 --- a/plugins/inputs/logstash/samples_logstash7.go +++ b/plugins/inputs/logstash/samples_logstash7.go @@ -110,10 +110,13 @@ const logstash7PipelinesJSON = ` "successes" : 2870, "responses" : { "200" : 2870 - } + }, + "failures": 262, + "with_errors": 9089 }, "documents" : { - "successes" : 2665549 + "successes" : 2665549, + "retryable_failures": 13733 } } ] }, From 2e08eab494fe58b7a143ab430c8fb13f97244cbe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Sep 2021 15:57:45 -0600 Subject: [PATCH 619/761] fix: bump github.com/shirou/gopsutil (#9760) (cherry picked from commit d441b03b57599257142b7949af8711782209f269) --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 65a894974bea6..72990ab8394bb 100644 --- a/go.mod +++ b/go.mod @@ -231,7 +231,7 @@ require ( github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/sensu/sensu-go/api/core/v2 v2.9.0 - github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible + github.com/shirou/gopsutil v3.21.8+incompatible github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/showwin/speedtest-go v1.1.4 github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect diff --git a/go.sum b/go.sum index 5ff7799dc902b..79046bf42b4ab 100644 --- a/go.sum +++ b/go.sum @@ -1443,6 +1443,8 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible h1:Rucj22V2P6ktUBqN5auqjyxRHLXqNX6CteXBXifRrgY= github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU= +github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= From 6fe91865acc8f3e222cfffa6ff78b8a34caa5b45 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Thu, 16 Sep 2021 16:26:09 -0600 Subject: [PATCH 620/761] go mod tidy (cherry picked from commit 3b20b93a3346a132854f783a148f3c020b375bb9) --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 79046bf42b4ab..e85b07043c3f2 100644 --- a/go.sum +++ b/go.sum @@ -1441,8 +1441,6 @@ github.com/sensu/sensu-go/api/core/v2 v2.9.0 h1:NanHMIWbrHP/L4Ge0V1x2+0G9bxFHpvh github.com/sensu/sensu-go/api/core/v2 v2.9.0/go.mod h1:QcgxKxydmScE66hLBTzbFhhiPSR/JHqUjNi/+Lelh6E= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible h1:Rucj22V2P6ktUBqN5auqjyxRHLXqNX6CteXBXifRrgY= -github.com/shirou/gopsutil v3.21.6-0.20210624221800-cb512c850043+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU= github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= From a9924dea7a9bc642120b23db5ef39d757bff9103 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Fri, 17 Sep 2021 16:31:42 -0600 Subject: [PATCH 621/761] Update changelog --- CHANGELOG.md | 22 +++++++++++++++++++--- etc/telegraf.conf | 9 ++++++++- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42ca26772a37b..2ebccd4849220 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## v1.20.0-rc0 [2021-09-02] +## v1.20.0 [2021-09-17] #### Release Notes @@ -6,7 +6,7 @@ #### Bugfixes - - [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing 0.4.5 + - [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing to 0.4.5 - [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in grpc requests - [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465 - [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 @@ -16,8 +16,21 @@ - [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version - [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value - [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values. - - [#9652](https://github.com/influxdata/telegraf/pull/9652) Update github.com/tinylib/msgp module from 1.1.5 to 1.1.6 + - [#9652](https://github.com/influxdata/telegraf/pull/9652) Update tinylib/msgp module from 1.1.5 to 1.1.6 - [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to single query + - [#9760](https://github.com/influxdata/telegraf/pull/9760) Update shirou/gopsutil module to 3.21.8 + - [#9707](https://github.com/influxdata/telegraf/pull/9707) `inputs.logstash` Add additional logstash output plugin stats + - [#9656](https://github.com/influxdata/telegraf/pull/9656) Update miekg/dns module from 1.1.31 to 1.1.43 + - [#9750](https://github.com/influxdata/telegraf/pull/9750) Update antchfx/xmlquery module from 1.3.5 to 1.3.6 + - [#9757](https://github.com/influxdata/telegraf/pull/9757) `parsers.registry.go` Fix panic for non-existing metric names + - [#9677](https://github.com/influxdata/telegraf/pull/9677) Update Azure/azure-event-hubs-go/v3 module from 3.2.0 to 3.3.13 + - [#9653](https://github.com/influxdata/telegraf/pull/9653) Update prometheus/client_golang module from 1.7.1 to 1.11.0 + - [#9693](https://github.com/influxdata/telegraf/pull/9693) `inputs.cloudwatch` Fix pagination error + - [#9727](https://github.com/influxdata/telegraf/pull/9727) `outputs.http` Add error message logging + - [#9718](https://github.com/influxdata/telegraf/pull/9718) Update influxdata/influxdb-observability module from 0.2.4 to 0.2.7 + - [#9560](https://github.com/influxdata/telegraf/pull/9560) Update gopcua/opcua module + - [#9544](https://github.com/influxdata/telegraf/pull/9544) `inputs.couchbase` Fix memory leak + - [#9588](https://github.com/influxdata/telegraf/pull/9588) `outputs.opentelemetry` Use attributes setting #### Features @@ -38,12 +51,15 @@ - [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup performance - [#9342](https://github.com/influxdata/telegraf/pull/9342) `outputs.newrelic` Add option to override metric_url - [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status + - [#9762](https://github.com/influxdata/telegraf/pull/9762) `inputs.bond` Add count of bonded slaves (for easier alerting) + - [#9675](https://github.com/influxdata/telegraf/pull/9675) `outputs.dynatrace` Remove special handling from counters and update dynatrace-oss/dynatrace-metric-utils-go module to 0.3.0 #### New Input Plugins - [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs - [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather from /proc/mdstat collection - [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input + - [#9623](https://github.com/influxdata/telegraf/pull/9623) Add internet Speed Monitor Input Plugin #### New Output Plugins diff --git a/etc/telegraf.conf b/etc/telegraf.conf index fabd2616141fb..beb22821464d9 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -592,7 +592,7 @@ # ## Connection timeout, defaults to "5s" if not set. # timeout = "5s" # -# ## If you want to convert values represented as gauges to counters, add the metric names here +# ## If you want metrics to be treated and reported as delta counters, add the metric names here # additional_counters = [ ] # # ## Optional dimensions to be added to every metric @@ -4324,6 +4324,13 @@ # # collect_memstats = true +# # Monitors internet speed using speedtest.net service +# [[inputs.internet_speed]] +# ## Sets if runs file download test +# ## Default: false +# enable_file_download = false + + # # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. # [[inputs.interrupts]] # ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is From f489cff3eadd150123c894121fe45424b0828f38 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Fri, 17 Sep 2021 16:32:26 -0600 Subject: [PATCH 622/761] Telegraf v1.20.0 From c033f0fe5abf847d4ff06ad495de5f18bf27ed03 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Sep 2021 16:06:13 -0600 Subject: [PATCH 623/761] fix: bump github.com/aws/smithy-go from 1.3.1 to 1.8.0 (#9770) (cherry picked from commit 8014a508e5fdde3b56936e678a9a72502e219b07) --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 72990ab8394bb..6f16bb0fb0f83 100644 --- a/go.mod +++ b/go.mod @@ -59,7 +59,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 // indirect - github.com/aws/smithy-go v1.3.1 + github.com/aws/smithy-go v1.8.0 github.com/benbjohnson/clock v1.0.3 github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect diff --git a/go.sum b/go.sum index e85b07043c3f2..19b3febf462ff 100644 --- a/go.sum +++ b/go.sum @@ -282,8 +282,9 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/ github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 h1:fKw6QSGcFlvZCBPYx3fo4sL0HfTmaT06ZtMHJfQQNQQ= github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= -github.com/aws/smithy-go v1.3.1 h1:xJFO4pK0y9J8fCl34uGsSJX5KNnGbdARDlA5BPhXnwE= github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= From 0ed38c773e74ff88b81c6377fa662390b4b92083 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Mon, 20 Sep 2021 08:13:39 -0700 Subject: [PATCH 624/761] docs: document telegraf commands and flags (#9635) (cherry picked from commit 9ecf6040afd97488365739c423ffc5ed83a15479) --- docs/COMMANDS_AND_FLAGS.md | 67 ++++++++++++++++++++++++++++++++++++++ docs/CONFIGURATION.md | 3 ++ 2 files changed, 70 insertions(+) create mode 100644 docs/COMMANDS_AND_FLAGS.md diff --git a/docs/COMMANDS_AND_FLAGS.md b/docs/COMMANDS_AND_FLAGS.md new file mode 100644 index 0000000000000..cb0c31268c9a4 --- /dev/null +++ b/docs/COMMANDS_AND_FLAGS.md @@ -0,0 +1,67 @@ +# Telegraf Commands & Flags + +### Usage + +``` +telegraf [commands] +telegraf [flags] +``` + +### Commands + +|command|description| +|--------|-----------------------------------------------| +|`config` |print out full sample configuration to stdout| +|`version`|print the version to stdout| + +### Flags + +|flag|description| +|-------------------|------------| +|`--aggregator-filter ` |filter the aggregators to enable, separator is `:`| +|`--config ` |configuration file to load| +|`--config-directory ` |directory containing additional *.conf files| +|`--watch-config` |Telegraf will restart on local config changes.
Monitor changes using either fs notifications or polling. Valid values: `inotify` or `poll`.
Monitoring is off by default.| +|`--plugin-directory` |directory containing *.so files, this directory will be searched recursively. Any Plugin found will be loaded and namespaced.| +|`--debug` |turn on debug logging| +|`--input-filter ` |filter the inputs to enable, separator is `:`| +|`--input-list` |print available input plugins.| +|`--output-filter ` |filter the outputs to enable, separator is `:`| +|`--output-list` |print available output plugins.| +|`--pidfile ` |file to write our pid to| +|`--pprof-addr

` |pprof address to listen on, don't activate pprof if empty| +|`--processor-filter ` |filter the processors to enable, separator is `:`| +|`--quiet` |run in quiet mode| +|`--section-filter` |filter config sections to output, separator is `:`
Valid values are `agent`, `global_tags`, `outputs`, `processors`, `aggregators` and `inputs`| +|`--sample-config` |print out full sample configuration| +|`--once` |enable once mode: gather metrics once, write them, and exit| +|`--test` |enable test mode: gather metrics once and print them| +|`--test-wait` |wait up to this many seconds for service inputs to complete in test or once mode| +|`--usage ` |print usage for a plugin, ie, `telegraf --usage mysql`| +|`--version` |display the version and exit| + +### Examples + +**Generate a telegraf config file:** + +`telegraf config > telegraf.conf` + +**Generate config with only cpu input & influxdb output plugins defined:** + +`telegraf --input-filter cpu --output-filter influxdb config` + +**Run a single telegraf collection, outputting metrics to stdout:** + +`telegraf --config telegraf.conf --test` + +**Run telegraf with all plugins defined in config file:** + +`telegraf --config telegraf.conf` + +**Run telegraf, enabling the cpu & memory input, and influxdb output plugins:** + +`telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb` + +**Run telegraf with pprof:** + +`telegraf --config telegraf.conf --pprof-addr localhost:6060` diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 70e7981c9450b..9af88b669ea9f 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -19,6 +19,8 @@ To generate a file with specific inputs and outputs, you can use the telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config ``` +[View the full list][flags] of Telegraf commands and flags or by running `telegraf --help`. + ### Configuration Loading The location of the configuration file can be set via the `--config` command @@ -671,3 +673,4 @@ Reference the detailed [TLS][] documentation. [telegraf.conf]: /etc/telegraf.conf [TLS]: /docs/TLS.md [glob pattern]: https://github.com/gobwas/glob#syntax +[flags]: /docs/COMMANDS_AND_FLAGS.md From 0b3417620ee9e4ebd9e7845c463331fe537459e5 Mon Sep 17 00:00:00 2001 From: Sean Molenaar Date: Mon, 20 Sep 2021 19:10:36 +0300 Subject: [PATCH 625/761] docs: fix jenkins plugin documentation (#9714) (cherry picked from commit b93f20068a9b35905137517bb93448bdc5351539) --- plugins/inputs/jenkins/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index 4d82f4e90ba31..e12326031b9ef 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -57,7 +57,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API ### Metrics: -- jenkins_node +- jenkins - tags: - source - port From dac7802b74399940ded38c40a2a693e1f38be2f0 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 20 Sep 2021 10:26:10 -0700 Subject: [PATCH 626/761] chore: "makefile help" output, but still support building based on arch for CI (#9579) (cherry picked from commit 82bdbce498ad2b1f558145fa9f4cc7cac2bbf1c4) --- .circleci/config.yml | 16 +- Makefile | 355 ++++++++++++++++------------------- docs/developers/PACKAGING.md | 25 ++- 3 files changed, 179 insertions(+), 217 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index b2043e1fa291c..1f644a7b9d20b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -123,11 +123,11 @@ commands: - when: condition: << parameters.release >> steps: - - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 make package' + - run: 'make package' - when: condition: << parameters.nightly >> steps: - - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 NIGHTLY=1 make package' + - run: 'make package' - run: 'make upload-nightly' - unless: condition: @@ -135,7 +135,7 @@ commands: - << parameters.nightly >> - << parameters.release >> steps: - - run: '<< parameters.type >>=1 make package' + - run: 'make package include_packages="$(make << parameters.type >>)"' - store_artifacts: path: './build/dist' destination: 'build/dist' @@ -215,11 +215,11 @@ jobs: steps: - package-build: type: i386 - ppc641e-package: - executor: go-1_17 + ppc64le-package: + executor: go-1_16 steps: - package-build: - type: ppc641e + type: ppc64le s390x-package: executor: go-1_17 steps: @@ -391,7 +391,7 @@ workflows: - 'i386-package': requires: - 'test-awaiter' - - 'ppc641e-package': + - 'ppc64le-package': requires: - 'test-awaiter' - 's390x-package': @@ -421,7 +421,7 @@ workflows: - 'share-artifacts': requires: - 'i386-package' - - 'ppc641e-package' + - 'ppc64le-package' - 's390x-package' - 'armel-package' - 'amd64-package' diff --git a/Makefile b/Makefile index 230eedf600f6f..a7797a0e8ce5f 100644 --- a/Makefile +++ b/Makefile @@ -80,9 +80,18 @@ help: @echo ' lint-install - install linter' @echo ' check-deps - check docs/LICENSE_OF_DEPENDENCIES.md' @echo ' clean - delete build artifacts' + @echo ' package - build all supported packages, override include_packages to only build a subset' + @echo ' e.g.: make package include_packages="amd64.deb"' @echo '' - @echo 'Package Targets:' - @$(foreach dist,$(dists),echo " $(dist)";) + @echo 'Possible values for include_packages variable' + @$(foreach package,$(include_packages),echo " $(package)";) + @echo '' + @echo 'Resulting package name format (where arch will be the arch of the package):' + @echo ' telegraf_$(deb_version)_arch.deb' + @echo ' telegraf-$(rpm_version).arch.rpm' + @echo ' telegraf-$(tar_version)_arch.tar.gz' + @echo ' telegraf-$(tar_version)_arch.zip' + .PHONY: deps deps: @@ -224,164 +233,118 @@ $(buildbin): @mkdir -pv $(dir $@) go build -o $(dir $@) -ldflags "$(LDFLAGS)" ./cmd/telegraf -ifdef mips -debs += telegraf_$(deb_version)_mips.deb -tars += telegraf-$(tar_version)_linux_mips.tar.gz -endif - -ifdef mipsel -debs += telegraf_$(deb_version)_mipsel.deb -tars += telegraf-$(tar_version)_linux_mipsel.tar.gz -endif - -ifdef arm64 -tars += telegraf-$(tar_version)_linux_arm64.tar.gz -debs += telegraf_$(deb_version)_arm64.deb -rpms += telegraf-$(rpm_version).aarch64.rpm -endif - -ifdef amd64 -tars += telegraf-$(tar_version)_freebsd_amd64.tar.gz -tars += telegraf-$(tar_version)_linux_amd64.tar.gz -debs += telegraf_$(deb_version)_amd64.deb -rpms += telegraf-$(rpm_version).x86_64.rpm -endif - -ifdef static -tars += telegraf-$(tar_version)_static_linux_amd64.tar.gz -endif - -ifdef armel -tars += telegraf-$(tar_version)_linux_armel.tar.gz -rpms += telegraf-$(rpm_version).armel.rpm -debs += telegraf_$(deb_version)_armel.deb -endif - -ifdef armhf -tars += telegraf-$(tar_version)_linux_armhf.tar.gz -tars += telegraf-$(tar_version)_freebsd_armv7.tar.gz -debs += telegraf_$(deb_version)_armhf.deb -rpms += telegraf-$(rpm_version).armv6hl.rpm -endif - -ifdef s390x -tars += telegraf-$(tar_version)_linux_s390x.tar.gz -debs += telegraf_$(deb_version)_s390x.deb -rpms += telegraf-$(rpm_version).s390x.rpm -endif - -ifdef ppc641e -tars += telegraf-$(tar_version)_linux_ppc64le.tar.gz -rpms += telegraf-$(rpm_version).ppc64le.rpm -debs += telegraf_$(deb_version)_ppc64el.deb -endif - -ifdef i386 -tars += telegraf-$(tar_version)_freebsd_i386.tar.gz -debs += telegraf_$(deb_version)_i386.deb -tars += telegraf-$(tar_version)_linux_i386.tar.gz -rpms += telegraf-$(rpm_version).i386.rpm -endif - -ifdef windows -zips += telegraf-$(tar_version)_windows_i386.zip -zips += telegraf-$(tar_version)_windows_amd64.zip -endif - -ifdef darwin -tars += telegraf-$(tar_version)_darwin_amd64.tar.gz -endif - -dists := $(debs) $(rpms) $(tars) $(zips) +# Define packages Telegraf supports, organized by architecture with a rule to echo the list to limit include_packages +# e.g. make package include_packages="$(make amd64)" +mips += linux_mips.tar.gz mips.deb +.PHONY: mips +mips: + @ echo $(mips) +mipsel += mipsel.deb linux_mipsel.tar.gz +.PHONY: mipsel +mipsel: + @ echo $(mipsel) +arm64 += linux_arm64.tar.gz arm64.deb aarch64.rpm +.PHONY: arm64 +arm64: + @ echo $(arm64) +amd64 += freebsd_amd64.tar.gz linux_amd64.tar.gz amd64.deb x86_64.rpm +.PHONY: amd64 +amd64: + @ echo $(amd64) +static += static_linux_amd64.tar.gz +.PHONY: static +static: + @ echo $(static) +armel += linux_armel.tar.gz armel.rpm armel.deb +.PHONY: armel +armel: + @ echo $(armel) +armhf += linux_armhf.tar.gz freebsd_armv7.tar.gz armhf.deb armv6hl.rpm +.PHONY: armhf +armhf: + @ echo $(armhf) +s390x += linux_s390x.tar.gz s390x.deb s390x.rpm +.PHONY: s390x +s390x: + @ echo $(s390x) +ppc64le += linux_ppc64le.tar.gz ppc64le.rpm ppc64el.deb +.PHONY: ppc64le +ppc64le: + @ echo $(ppc64le) +i386 += freebsd_i386.tar.gz i386.deb linux_i386.tar.gzi386.rpm +.PHONY: i386 +i386: + @ echo $(i386) +windows += windows_i386.zip windows_amd64.zip +.PHONY: windows +windows: + @ echo $(windows) +darwin += darwin_amd64.tar.gz +.PHONY: darwin +darwin: + @ echo $(darwin) + +include_packages := $(mips) $(mipsel) $(arm64) $(amd64) $(static) $(armel) $(armhf) $(s390x) $(ppc64le) $(i386) $(windows) $(darwin) .PHONY: package -package: $(dists) - -rpm_amd64 := amd64 -rpm_386 := i386 -rpm_s390x := s390x -rpm_ppc64le := ppc64le -rpm_arm5 := armel -rpm_arm6 := armv6hl -rpm_arm647 := aarch64 -rpm_arch = $(rpm_$(GOARCH)$(GOARM)) - -.PHONY: $(rpms) -$(rpms): - @$(MAKE) install - @mkdir -p $(pkgdir) - fpm --force \ - --log info \ - --architecture $(rpm_arch) \ - --input-type dir \ - --output-type rpm \ - --vendor InfluxData \ - --url https://github.com/influxdata/telegraf \ - --license MIT \ - --maintainer support@influxdb.com \ - --config-files /etc/telegraf/telegraf.conf \ - --config-files /etc/logrotate.d/telegraf \ - --after-install scripts/rpm/post-install.sh \ - --before-install scripts/rpm/pre-install.sh \ - --after-remove scripts/rpm/post-remove.sh \ - --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ - --depends coreutils \ - --depends shadow-utils \ - --rpm-posttrans scripts/rpm/post-install.sh \ - --name telegraf \ - --version $(version) \ - --iteration $(rpm_iteration) \ - --chdir $(DESTDIR) \ - --package $(pkgdir)/$@ - -deb_amd64 := amd64 -deb_386 := i386 -deb_s390x := s390x -deb_ppc64le := ppc64el -deb_arm5 := armel -deb_arm6 := armhf -deb_arm647 := arm64 -deb_mips := mips -deb_mipsle := mipsel -deb_arch = $(deb_$(GOARCH)$(GOARM)) - -.PHONY: $(debs) -$(debs): - @$(MAKE) install - @mkdir -pv $(pkgdir) - fpm --force \ - --log info \ - --architecture $(deb_arch) \ - --input-type dir \ - --output-type deb \ - --vendor InfluxData \ - --url https://github.com/influxdata/telegraf \ - --license MIT \ - --maintainer support@influxdb.com \ - --config-files /etc/telegraf/telegraf.conf.sample \ - --config-files /etc/logrotate.d/telegraf \ - --after-install scripts/deb/post-install.sh \ - --before-install scripts/deb/pre-install.sh \ - --after-remove scripts/deb/post-remove.sh \ - --before-remove scripts/deb/pre-remove.sh \ - --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ - --name telegraf \ - --version $(version) \ - --iteration $(deb_iteration) \ - --chdir $(DESTDIR) \ - --package $(pkgdir)/$@ - -.PHONY: $(zips) -$(zips): - @$(MAKE) install - @mkdir -p $(pkgdir) - (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/$@ +package: $(include_packages) -.PHONY: $(tars) -$(tars): +.PHONY: $(include_packages) +$(include_packages): @$(MAKE) install @mkdir -p $(pkgdir) - tar --owner 0 --group 0 -czvf $(pkgdir)/$@ -C $(dir $(DESTDIR)) . + + @if [ "$(suffix $@)" = ".rpm" ]; then \ + fpm --force \ + --log info \ + --architecture $(basename $@) \ + --input-type dir \ + --output-type rpm \ + --vendor InfluxData \ + --url https://github.com/influxdata/telegraf \ + --license MIT \ + --maintainer support@influxdb.com \ + --config-files /etc/telegraf/telegraf.conf \ + --config-files /etc/logrotate.d/telegraf \ + --after-install scripts/rpm/post-install.sh \ + --before-install scripts/rpm/pre-install.sh \ + --after-remove scripts/rpm/post-remove.sh \ + --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ + --depends coreutils \ + --depends shadow-utils \ + --rpm-posttrans scripts/rpm/post-install.sh \ + --name telegraf \ + --version $(version) \ + --iteration $(rpm_iteration) \ + --chdir $(DESTDIR) \ + --package $(pkgdir)/telegraf-$(rpm_version).$@ ;\ + elif [ "$(suffix $@)" = ".deb" ]; then \ + fpm --force \ + --log info \ + --architecture $(basename $@) \ + --input-type dir \ + --output-type deb \ + --vendor InfluxData \ + --url https://github.com/influxdata/telegraf \ + --license MIT \ + --maintainer support@influxdb.com \ + --config-files /etc/telegraf/telegraf.conf.sample \ + --config-files /etc/logrotate.d/telegraf \ + --after-install scripts/deb/post-install.sh \ + --before-install scripts/deb/pre-install.sh \ + --after-remove scripts/deb/post-remove.sh \ + --before-remove scripts/deb/pre-remove.sh \ + --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ + --name telegraf \ + --version $(version) \ + --iteration $(deb_iteration) \ + --chdir $(DESTDIR) \ + --package $(pkgdir)/telegraf_$(deb_version)_$@ ;\ + elif [ "$(suffix $@)" = ".zip" ]; then \ + (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/telegraf-$(tar_version)_$@ ;\ + elif [ "$(suffix $@)" = ".gz" ]; then \ + tar --owner 0 --group 0 -czvf $(pkgdir)/telegraf-$(tar_version)_$@ -C $(dir $(DESTDIR)) . ;\ + fi .PHONY: upload-nightly upload-nightly: @@ -393,63 +356,63 @@ upload-nightly: --include "*.zip" \ --acl public-read -%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOOS := linux -%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOARCH := amd64 +amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOOS := linux +amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOARCH := amd64 -%static_linux_amd64.tar.gz: export cgo := -nocgo -%static_linux_amd64.tar.gz: export CGO_ENABLED := 0 +static_linux_amd64.tar.gz: export cgo := -nocgo +static_linux_amd64.tar.gz: export CGO_ENABLED := 0 -%i386.deb %i386.rpm %linux_i386.tar.gz: export GOOS := linux -%i386.deb %i386.rpm %linux_i386.tar.gz: export GOARCH := 386 +i386.deb i386.rpm linux_i386.tar.gz: export GOOS := linux +i386.deb i386.rpm linux_i386.tar.gz: export GOARCH := 386 -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOOS := linux -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARCH := arm -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARM := 5 +armel.deb armel.rpm linux_armel.tar.gz: export GOOS := linux +armel.deb armel.rpm linux_armel.tar.gz: export GOARCH := arm +armel.deb armel.rpm linux_armel.tar.gz: export GOARM := 5 -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOOS := linux -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARCH := arm -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARM := 6 +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOOS := linux +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARCH := arm +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARM := 6 -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOOS := linux -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARCH := arm64 -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARM := 7 +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOOS := linux +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARCH := arm64 +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARM := 7 -%mips.deb %linux_mips.tar.gz: export GOOS := linux -%mips.deb %linux_mips.tar.gz: export GOARCH := mips +mips.deb linux_mips.tar.gz: export GOOS := linux +mips.deb linux_mips.tar.gz: export GOARCH := mips -%mipsel.deb %linux_mipsel.tar.gz: export GOOS := linux -%mipsel.deb %linux_mipsel.tar.gz: export GOARCH := mipsle +mipsel.deb linux_mipsel.tar.gz: export GOOS := linux +mipsel.deb linux_mipsel.tar.gz: export GOARCH := mipsle -%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOOS := linux -%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOARCH := s390x +s390x.deb s390x.rpm linux_s390x.tar.gz: export GOOS := linux +s390x.deb s390x.rpm linux_s390x.tar.gz: export GOARCH := s390x -%ppc64el.deb %ppc64le.rpm %linux_ppc64le.tar.gz: export GOOS := linux -%ppc64el.deb %ppc64le.rpm %linux_ppc64le.tar.gz: export GOARCH := ppc64le +ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOOS := linux +ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOARCH := ppc64le -%freebsd_amd64.tar.gz: export GOOS := freebsd -%freebsd_amd64.tar.gz: export GOARCH := amd64 +freebsd_amd64.tar.gz: export GOOS := freebsd +freebsd_amd64.tar.gz: export GOARCH := amd64 -%freebsd_i386.tar.gz: export GOOS := freebsd -%freebsd_i386.tar.gz: export GOARCH := 386 +freebsd_i386.tar.gz: export GOOS := freebsd +freebsd_i386.tar.gz: export GOARCH := 386 -%freebsd_armv7.tar.gz: export GOOS := freebsd -%freebsd_armv7.tar.gz: export GOARCH := arm -%freebsd_armv7.tar.gz: export GOARM := 7 +freebsd_armv7.tar.gz: export GOOS := freebsd +freebsd_armv7.tar.gz: export GOARCH := arm +freebsd_armv7.tar.gz: export GOARM := 7 -%windows_amd64.zip: export GOOS := windows -%windows_amd64.zip: export GOARCH := amd64 +windows_amd64.zip: export GOOS := windows +windows_amd64.zip: export GOARCH := amd64 -%darwin_amd64.tar.gz: export GOOS := darwin -%darwin_amd64.tar.gz: export GOARCH := amd64 +darwin_amd64.tar.gz: export GOOS := darwin +darwin_amd64.tar.gz: export GOARCH := amd64 -%windows_i386.zip: export GOOS := windows -%windows_i386.zip: export GOARCH := 386 +windows_i386.zip: export GOOS := windows +windows_i386.zip: export GOARCH := 386 -%windows_i386.zip %windows_amd64.zip: export prefix = -%windows_i386.zip %windows_amd64.zip: export bindir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export sysconfdir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export localstatedir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export EXEEXT := .exe +windows_i386.zip windows_amd64.zip: export prefix = +windows_i386.zip windows_amd64.zip: export bindir = $(prefix) +windows_i386.zip windows_amd64.zip: export sysconfdir = $(prefix) +windows_i386.zip windows_amd64.zip: export localstatedir = $(prefix) +windows_i386.zip windows_amd64.zip: export EXEEXT := .exe %.deb: export pkg := deb %.deb: export prefix := /usr diff --git a/docs/developers/PACKAGING.md b/docs/developers/PACKAGING.md index f9708fb7164d0..cbdb61b05af01 100644 --- a/docs/developers/PACKAGING.md +++ b/docs/developers/PACKAGING.md @@ -1,5 +1,9 @@ # Packaging +Building the packages for Telegraf is automated using [Make](https://en.wikipedia.org/wiki/Make_(software)). Just running `make` will build a Telegraf binary for the operating system and architecture you are using (if it is supported). If you need to build a different package then you can run `make package` which will build all the supported packages. You will most likely only want a subset, you can define a subset of packages to be built by overriding the `include_packages` variable like so `make package include_packages="amd64.deb"`. You can also build all packages for a specific architecture like so `make package include_packages="$(make amd64)"`. + +The packaging steps require certain tools to be setup before hand to work. These dependencies are listed in the ci-1.16.docker file which you can find in the scripts directory. Therefore it is recommended to use Docker to build the artifacts, see more details below. + ## Package using Docker This packaging method uses the CI images, and is very similar to how the @@ -18,20 +22,15 @@ docker run -ti quay.io/influxdb/telegraf-ci:1.9.7 /bin/bash ``` From within the container: -``` -go get -d github.com/influxdata/telegraf -cd /go/src/github.com/influxdata/telegraf - -# Use tag of Telegraf version you would like to build -git checkout release-1.10 -git reset --hard 1.10.2 -make deps -# To build packages run: - -``` -make package amd64=1 -``` +1. `go get -d github.com/influxdata/telegraf` +2. `cd /go/src/github.com/influxdata/telegraf` +3. `git checkout release-1.10` + * Replace tag `release-1.10` with the version of Telegraf you would like to build +4. `git reset --hard 1.10.2` +5. `make deps` +6. `make package include_packages="amd64.deb"` + * Change `include_packages` to change what package you want, run `make help` to see possible values From the host system, copy the build artifacts out of the container: ``` From cf1aa5caaaf30e4d6564eaf67cc8bacaa269c3ab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Sep 2021 15:34:39 -0600 Subject: [PATCH 627/761] fix: bump cloud.google.com/go/pubsub from 1.15.0 to 1.17.0 (#9769) (cherry picked from commit 58d4e9a851c293608ede43b47dda8f8b347979dc) --- go.mod | 9 ++++----- go.sum | 18 ++++++++++++------ 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 6f16bb0fb0f83..d1d2dd094d749 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,10 @@ module github.com/influxdata/telegraf go 1.17 require ( - cloud.google.com/go v0.90.0 + cloud.google.com/go v0.93.3 // indirect cloud.google.com/go/bigquery v1.8.0 - cloud.google.com/go/pubsub v1.15.0 + cloud.google.com/go/monitoring v0.2.0 + cloud.google.com/go/pubsub v1.17.0 code.cloudfoundry.org/clock v1.0.0 // indirect collectd.org v0.5.0 github.com/Azure/azure-amqp-common-go/v3 v3.0.1 // indirect @@ -161,7 +162,6 @@ require ( github.com/jmespath/go-jmespath v0.4.0 github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.11 // indirect - github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 @@ -275,7 +275,6 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect - golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20210610132358-84b48f89b13b golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a @@ -290,7 +289,7 @@ require ( golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.54.0 google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20210813162853-db860fec028c + google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 google.golang.org/grpc v1.40.0 google.golang.org/protobuf v1.27.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect diff --git a/go.sum b/go.sum index 19b3febf462ff..9a8b98cea97ad 100644 --- a/go.sum +++ b/go.sum @@ -24,8 +24,10 @@ cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0 h1:MjvSkUq8RuAb+2JLDi5VQmmExRJPUQ3JLCWpRB6fmdw= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.92.2/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -36,12 +38,16 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/kms v0.1.0 h1:VXAb5OzejDcyhFzIDeZ5n5AUdlsFnCyexuascIwWMj0= +cloud.google.com/go/kms v0.1.0/go.mod h1:8Qp8PCAypHg4FdmlyW1QRAv09BGQ9Uzh7JnmIZxPk+c= +cloud.google.com/go/monitoring v0.2.0 h1:UFQB1+YbZjAOqAFFY4RlNiOrt19O5HzPeCdtYSlPvmk= +cloud.google.com/go/monitoring v0.2.0/go.mod h1:K/JoZWY3xszHf38AMkzZGx1n5eT1/57ilElGMpESsEE= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.15.0 h1:6KI/wDVYLtNvzIPJ8ObuJcq5bBtAWQ6Suo8osHPvYn4= -cloud.google.com/go/pubsub v1.15.0/go.mod h1:DnEUPGZlp+N9MElp/6uVqCKiknQixvVLcrgrqT62O6A= +cloud.google.com/go/pubsub v1.17.0 h1:uGzqGUGvaSJ3APz5BmLFw1LpSTnB9o+EzE5fI3rBbJI= +cloud.google.com/go/pubsub v1.17.0/go.mod h1:bBIeYx9ftf/hr7eoSUim6cRaOYZE/hHuigwdwLLByi8= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -1032,7 +1038,6 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -1716,7 +1721,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -2174,8 +2178,10 @@ google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c h1:iLQakcwWG3k/++1q/46apVb1sUQ3IqIdn9yUE6eh/xA= google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210824181836-a4879c3d0e89/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 h1:Ogdiaj9EMVKYHnDsESxwlTr/k5eqCdwoQVJEcdg0NbE= +google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= From cf185cba227c1c913e6c1ee8fd697a0a80207f65 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Sep 2021 15:53:39 -0600 Subject: [PATCH 628/761] fix: bump github.com/Azure/go-autorest/autorest/azure/auth from 0.5.6 to 0.5.8 (#9678) (cherry picked from commit 7eb6e88c859ea30a783f9bf1146a3d3c574113f4) --- go.mod | 2 +- go.sum | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index d1d2dd094d749..d56ac810d6026 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.18 github.com/Azure/go-autorest/autorest/adal v0.9.15 - github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 + github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect diff --git a/go.sum b/go.sum index 9a8b98cea97ad..c67071b110a16 100644 --- a/go.sum +++ b/go.sum @@ -107,13 +107,13 @@ github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMl github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.6 h1:cgiBtUxatlt/e3qY6fQJioqbocWHr5osz259MomF5M0= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.6/go.mod h1:nYlP+G+n8MhD5CjIi6W8nFTIJn/PnTHes5nUbK6BxD0= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= @@ -1687,7 +1687,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -1957,7 +1956,6 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 673c062de9a0119aa12cedc34a8749d976b52020 Mon Sep 17 00:00:00 2001 From: Alan Pope Date: Tue, 21 Sep 2021 12:02:13 +0100 Subject: [PATCH 629/761] Reduce README size/complexity (cherry picked from commit 8133fd83a8177866adff2028160bcf27e186464c) --- README.md | 367 +++--------------------------------------------------- 1 file changed, 19 insertions(+), 348 deletions(-) diff --git a/README.md b/README.md index 2b49842789db6..5f0861f4fa3cb 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,8 @@ Telegraf is an agent for collecting, processing, aggregating, and writing metrics. -Design goals are to have a minimal memory footprint with a plugin system so -that developers in the community can easily add support for collecting -metrics. +Design goal: +- Have a minimal memory footprint with a plugin system so that developers in the community can easily add support for collecting metrics. Telegraf is plugin-driven and has the concept of 4 distinct plugin types: @@ -19,25 +18,9 @@ Telegraf is plugin-driven and has the concept of 4 distinct plugin types: 3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) 4. [Output Plugins](#output-plugins) write metrics to various destinations -New plugins are designed to be easy to contribute, pull requests are welcomed -and we work to incorporate as many pull requests as possible. -If none of the internal plugins fit your needs, you could have a look at the +New plugins are designed to be easy to contribute, pull requests are welcomed and we work to incorporate as many pull requests as possible. If none of the internal plugins fit your needs, you could have a look at the [list of external plugins](EXTERNAL_PLUGINS.md). -## Try in Browser :rocket: - -You can try Telegraf right in your browser in the [Telegraf playground](https://rootnroll.com/d/telegraf/). - -## Contributing - -There are many ways to contribute: -- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new) -- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation) -- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) -- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) -- [Contribute plugins](CONTRIBUTING.md) -- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) - ## Minimum Requirements Telegraf shares the same [minimum requirements][] as Go: @@ -92,7 +75,6 @@ Builds for other platforms or package formats are provided by members of the Tel * Linux * [Snap](https://snapcraft.io/telegraf) by Laurent Sesquès (sajoupa) - ## How to use it: See usage with: @@ -138,330 +120,19 @@ telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb For documentation on the latest development code see the [documentation index][devel docs]. [release docs]: https://docs.influxdata.com/telegraf -[devel docs]: docs - -## Input Plugins - -* [activemq](./plugins/inputs/activemq) -* [aerospike](./plugins/inputs/aerospike) -* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq) -* [apache](./plugins/inputs/apache) -* [apcupsd](./plugins/inputs/apcupsd) -* [aurora](./plugins/inputs/aurora) -* [aws cloudwatch](./plugins/inputs/cloudwatch) (Amazon Cloudwatch) -* [azure_storage_queue](./plugins/inputs/azure_storage_queue) -* [bcache](./plugins/inputs/bcache) -* [beanstalkd](./plugins/inputs/beanstalkd) -* [bind](./plugins/inputs/bind) -* [bond](./plugins/inputs/bond) -* [burrow](./plugins/inputs/burrow) -* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) -* [ceph](./plugins/inputs/ceph) -* [cgroup](./plugins/inputs/cgroup) -* [chrony](./plugins/inputs/chrony) -* [cisco_telemetry_gnmi](./plugins/inputs/cisco_telemetry_gnmi) (deprecated, renamed to [gnmi](/plugins/inputs/gnmi)) -* [cisco_telemetry_mdt](./plugins/inputs/cisco_telemetry_mdt) -* [clickhouse](./plugins/inputs/clickhouse) -* [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub -* [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint -* [conntrack](./plugins/inputs/conntrack) -* [consul](./plugins/inputs/consul) -* [couchbase](./plugins/inputs/couchbase) -* [couchdb](./plugins/inputs/couchdb) -* [cpu](./plugins/inputs/cpu) -* [DC/OS](./plugins/inputs/dcos) -* [diskio](./plugins/inputs/diskio) -* [disk](./plugins/inputs/disk) -* [disque](./plugins/inputs/disque) -* [dmcache](./plugins/inputs/dmcache) -* [dns query time](./plugins/inputs/dns_query) -* [docker](./plugins/inputs/docker) -* [docker_log](./plugins/inputs/docker_log) -* [dovecot](./plugins/inputs/dovecot) -* [dpdk](./plugins/inputs/dpdk) -* [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate) -* [elasticsearch](./plugins/inputs/elasticsearch) -* [ethtool](./plugins/inputs/ethtool) -* [eventhub_consumer](./plugins/inputs/eventhub_consumer) (Azure Event Hubs \& Azure IoT Hub) -* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) -* [execd](./plugins/inputs/execd) (generic executable "daemon" processes) -* [fail2ban](./plugins/inputs/fail2ban) -* [fibaro](./plugins/inputs/fibaro) -* [file](./plugins/inputs/file) -* [filestat](./plugins/inputs/filestat) -* [filecount](./plugins/inputs/filecount) -* [fireboard](/plugins/inputs/fireboard) -* [fluentd](./plugins/inputs/fluentd) -* [github](./plugins/inputs/github) -* [gnmi](./plugins/inputs/gnmi) -* [graylog](./plugins/inputs/graylog) -* [haproxy](./plugins/inputs/haproxy) -* [hddtemp](./plugins/inputs/hddtemp) -* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin) -* [http_listener](./plugins/inputs/influxdb_listener) (deprecated, renamed to [influxdb_listener](/plugins/inputs/influxdb_listener)) -* [http_listener_v2](./plugins/inputs/http_listener_v2) -* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats) -* [http_response](./plugins/inputs/http_response) -* [icinga2](./plugins/inputs/icinga2) -* [infiniband](./plugins/inputs/infiniband) -* [influxdb](./plugins/inputs/influxdb) -* [influxdb_listener](./plugins/inputs/influxdb_listener) -* [influxdb_v2_listener](./plugins/inputs/influxdb_v2_listener) -* [intel_powerstat](plugins/inputs/intel_powerstat) -* [intel_rdt](./plugins/inputs/intel_rdt) -* [internal](./plugins/inputs/internal) -* [interrupts](./plugins/inputs/interrupts) -* [ipmi_sensor](./plugins/inputs/ipmi_sensor) -* [ipset](./plugins/inputs/ipset) -* [iptables](./plugins/inputs/iptables) -* [ipvs](./plugins/inputs/ipvs) -* [jenkins](./plugins/inputs/jenkins) -* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka) -* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) -* [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) -* [kafka_consumer](./plugins/inputs/kafka_consumer) -* [kapacitor](./plugins/inputs/kapacitor) -* [aws kinesis](./plugins/inputs/kinesis_consumer) (Amazon Kinesis) -* [kernel](./plugins/inputs/kernel) -* [kernel_vmstat](./plugins/inputs/kernel_vmstat) -* [kibana](./plugins/inputs/kibana) -* [knx_listener](./plugins/inputs/knx_listener) -* [kubernetes](./plugins/inputs/kubernetes) -* [kube_inventory](./plugins/inputs/kube_inventory) -* [lanz](./plugins/inputs/lanz) -* [leofs](./plugins/inputs/leofs) -* [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) -* [logparser](./plugins/inputs/logparser) (deprecated, use [tail](/plugins/inputs/tail)) -* [logstash](./plugins/inputs/logstash) -* [lustre2](./plugins/inputs/lustre2) -* [mailchimp](./plugins/inputs/mailchimp) -* [marklogic](./plugins/inputs/marklogic) -* [mcrouter](./plugins/inputs/mcrouter) -* [mdstat](./plugins/inputs/mdstat) -* [memcached](./plugins/inputs/memcached) -* [mem](./plugins/inputs/mem) -* [mesos](./plugins/inputs/mesos) -* [minecraft](./plugins/inputs/minecraft) -* [modbus](./plugins/inputs/modbus) -* [mongodb](./plugins/inputs/mongodb) -* [monit](./plugins/inputs/monit) -* [mqtt_consumer](./plugins/inputs/mqtt_consumer) -* [multifile](./plugins/inputs/multifile) -* [mysql](./plugins/inputs/mysql) -* [nats_consumer](./plugins/inputs/nats_consumer) -* [nats](./plugins/inputs/nats) -* [neptune_apex](./plugins/inputs/neptune_apex) -* [net](./plugins/inputs/net) -* [net_response](./plugins/inputs/net_response) -* [netstat](./plugins/inputs/net) -* [nfsclient](./plugins/inputs/nfsclient) -* [nginx](./plugins/inputs/nginx) -* [nginx_plus_api](./plugins/inputs/nginx_plus_api) -* [nginx_plus](./plugins/inputs/nginx_plus) -* [nginx_sts](./plugins/inputs/nginx_sts) -* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check) -* [nginx_vts](./plugins/inputs/nginx_vts) -* [nsd](./plugins/inputs/nsd) -* [nsq_consumer](./plugins/inputs/nsq_consumer) -* [nsq](./plugins/inputs/nsq) -* [nstat](./plugins/inputs/nstat) -* [ntpq](./plugins/inputs/ntpq) -* [nvidia_smi](./plugins/inputs/nvidia_smi) -* [opcua](./plugins/inputs/opcua) -* [openldap](./plugins/inputs/openldap) -* [openntpd](./plugins/inputs/openntpd) -* [opensmtpd](./plugins/inputs/opensmtpd) -* [opentelemetry](./plugins/inputs/opentelemetry) -* [openweathermap](./plugins/inputs/openweathermap) -* [pf](./plugins/inputs/pf) -* [pgbouncer](./plugins/inputs/pgbouncer) -* [phpfpm](./plugins/inputs/phpfpm) -* [phusion passenger](./plugins/inputs/passenger) -* [ping](./plugins/inputs/ping) -* [postfix](./plugins/inputs/postfix) -* [postgresql_extensible](./plugins/inputs/postgresql_extensible) -* [postgresql](./plugins/inputs/postgresql) -* [powerdns](./plugins/inputs/powerdns) -* [powerdns_recursor](./plugins/inputs/powerdns_recursor) -* [processes](./plugins/inputs/processes) -* [procstat](./plugins/inputs/procstat) -* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server)) -* [proxmox](./plugins/inputs/proxmox) -* [puppetagent](./plugins/inputs/puppetagent) -* [rabbitmq](./plugins/inputs/rabbitmq) -* [raindrops](./plugins/inputs/raindrops) -* [ras](./plugins/inputs/ras) -* [ravendb](./plugins/inputs/ravendb) -* [redfish](./plugins/inputs/redfish) -* [redis](./plugins/inputs/redis) -* [rethinkdb](./plugins/inputs/rethinkdb) -* [riak](./plugins/inputs/riak) -* [salesforce](./plugins/inputs/salesforce) -* [sensors](./plugins/inputs/sensors) -* [sflow](./plugins/inputs/sflow) -* [smart](./plugins/inputs/smart) -* [snmp_legacy](./plugins/inputs/snmp_legacy) -* [snmp](./plugins/inputs/snmp) -* [snmp_trap](./plugins/inputs/snmp_trap) -* [socket_listener](./plugins/inputs/socket_listener) -* [solr](./plugins/inputs/solr) -* [sql](./plugins/inputs/sql) (generic SQL query plugin) -* [sql server](./plugins/inputs/sqlserver) (microsoft) -* [stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring) -* [sql](./plugins/outputs/sql) (SQL generic output) -* [statsd](./plugins/inputs/statsd) -* [suricata](./plugins/inputs/suricata) -* [swap](./plugins/inputs/swap) -* [synproxy](./plugins/inputs/synproxy) -* [syslog](./plugins/inputs/syslog) -* [sysstat](./plugins/inputs/sysstat) -* [systemd_units](./plugins/inputs/systemd_units) -* [system](./plugins/inputs/system) -* [tail](./plugins/inputs/tail) -* [temp](./plugins/inputs/temp) -* [tcp_listener](./plugins/inputs/socket_listener) -* [teamspeak](./plugins/inputs/teamspeak) -* [tengine](./plugins/inputs/tengine) -* [tomcat](./plugins/inputs/tomcat) -* [twemproxy](./plugins/inputs/twemproxy) -* [udp_listener](./plugins/inputs/socket_listener) -* [unbound](./plugins/inputs/unbound) -* [uwsgi](./plugins/inputs/uwsgi) -* [varnish](./plugins/inputs/varnish) -* [vsphere](./plugins/inputs/vsphere) VMware vSphere -* [webhooks](./plugins/inputs/webhooks) - * [filestack](./plugins/inputs/webhooks/filestack) - * [github](./plugins/inputs/webhooks/github) - * [mandrill](./plugins/inputs/webhooks/mandrill) - * [papertrail](./plugins/inputs/webhooks/papertrail) - * [particle](./plugins/inputs/webhooks/particle) - * [rollbar](./plugins/inputs/webhooks/rollbar) -* [win_eventlog](./plugins/inputs/win_eventlog) -* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) -* [win_services](./plugins/inputs/win_services) -* [wireguard](./plugins/inputs/wireguard) -* [wireless](./plugins/inputs/wireless) -* [x509_cert](./plugins/inputs/x509_cert) -* [zfs](./plugins/inputs/zfs) -* [zipkin](./plugins/inputs/zipkin) -* [zookeeper](./plugins/inputs/zookeeper) - -## Parsers - -- [InfluxDB Line Protocol](/plugins/parsers/influx) -- [Collectd](/plugins/parsers/collectd) -- [CSV](/plugins/parsers/csv) -- [Dropwizard](/plugins/parsers/dropwizard) -- [FormUrlencoded](/plugins/parser/form_urlencoded) -- [Graphite](/plugins/parsers/graphite) -- [Grok](/plugins/parsers/grok) -- [JSON](/plugins/parsers/json) -- [JSON v2](/plugins/parsers/json_v2) -- [Logfmt](/plugins/parsers/logfmt) -- [Nagios](/plugins/parsers/nagios) -- [Prometheus](/plugins/parsers/prometheus) -- [Prometheus Remote Write](/plugins/parsers/prometheusremotewrite) -- [Value](/plugins/parsers/value), ie: 45 or "booyah" -- [Wavefront](/plugins/parsers/wavefront) -- [XPath](/plugins/parsers/xpath) (supports XML, JSON, MessagePack, Protocol Buffers) - -## Serializers - -- [InfluxDB Line Protocol](/plugins/serializers/influx) -- [Carbon2](/plugins/serializers/carbon2) -- [Graphite](/plugins/serializers/graphite) -- [JSON](/plugins/serializers/json) -- [MessagePack](/plugins/serializers/msgpack) -- [Prometheus](/plugins/serializers/prometheus) -- [Prometheus Remote Write](/plugins/serializers/prometheusremotewrite) -- [ServiceNow](/plugins/serializers/nowmetric) -- [SplunkMetric](/plugins/serializers/splunkmetric) -- [Wavefront](/plugins/serializers/wavefront) - -## Processor Plugins - -* [clone](/plugins/processors/clone) -* [converter](/plugins/processors/converter) -* [date](/plugins/processors/date) -* [dedup](/plugins/processors/dedup) -* [defaults](/plugins/processors/defaults) -* [enum](/plugins/processors/enum) -* [execd](/plugins/processors/execd) -* [ifname](/plugins/processors/ifname) -* [filepath](/plugins/processors/filepath) -* [override](/plugins/processors/override) -* [parser](/plugins/processors/parser) -* [pivot](/plugins/processors/pivot) -* [port_name](/plugins/processors/port_name) -* [printer](/plugins/processors/printer) -* [regex](/plugins/processors/regex) -* [rename](/plugins/processors/rename) -* [reverse_dns](/plugins/processors/reverse_dns) -* [s2geo](/plugins/processors/s2geo) -* [starlark](/plugins/processors/starlark) -* [strings](/plugins/processors/strings) -* [tag_limit](/plugins/processors/tag_limit) -* [template](/plugins/processors/template) -* [topk](/plugins/processors/topk) -* [unpivot](/plugins/processors/unpivot) - -## Aggregator Plugins - -* [basicstats](./plugins/aggregators/basicstats) -* [derivative](./plugins/aggregators/derivative) -* [final](./plugins/aggregators/final) -* [histogram](./plugins/aggregators/histogram) -* [merge](./plugins/aggregators/merge) -* [minmax](./plugins/aggregators/minmax) -* [quantile](./plugins/aggregators/quantile) -* [valuecounter](./plugins/aggregators/valuecounter) - -## Output Plugins - -* [influxdb](./plugins/outputs/influxdb) (InfluxDB 1.x) -* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/influxdb)) -* [amon](./plugins/outputs/amon) -* [amqp](./plugins/outputs/amqp) (rabbitmq) -* [application_insights](./plugins/outputs/application_insights) -* [aws kinesis](./plugins/outputs/kinesis) -* [aws cloudwatch](./plugins/outputs/cloudwatch) -* [azure_monitor](./plugins/outputs/azure_monitor) -* [bigquery](./plugins/outputs/bigquery) -* [cloud_pubsub](./plugins/outputs/cloud_pubsub) Google Cloud Pub/Sub -* [cratedb](./plugins/outputs/cratedb) -* [datadog](./plugins/outputs/datadog) -* [discard](./plugins/outputs/discard) -* [dynatrace](./plugins/outputs/dynatrace) -* [elasticsearch](./plugins/outputs/elasticsearch) -* [exec](./plugins/outputs/exec) -* [execd](./plugins/outputs/execd) -* [file](./plugins/outputs/file) -* [graphite](./plugins/outputs/graphite) -* [graylog](./plugins/outputs/graylog) -* [health](./plugins/outputs/health) -* [http](./plugins/outputs/http) -* [instrumental](./plugins/outputs/instrumental) -* [kafka](./plugins/outputs/kafka) -* [librato](./plugins/outputs/librato) -* [logz.io](./plugins/outputs/logzio) -* [mqtt](./plugins/outputs/mqtt) -* [nats](./plugins/outputs/nats) -* [newrelic](./plugins/outputs/newrelic) -* [nsq](./plugins/outputs/nsq) -* [opentelemetry](./plugins/outputs/opentelemetry) -* [opentsdb](./plugins/outputs/opentsdb) -* [prometheus](./plugins/outputs/prometheus_client) -* [riemann](./plugins/outputs/riemann) -* [riemann_legacy](./plugins/outputs/riemann_legacy) -* [sensu](./plugins/outputs/sensu) -* [signalfx](./plugins/outputs/signalfx) -* [socket_writer](./plugins/outputs/socket_writer) -* [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring) -* [syslog](./plugins/outputs/syslog) -* [tcp](./plugins/outputs/socket_writer) -* [udp](./plugins/outputs/socket_writer) -* [warp10](./plugins/outputs/warp10) -* [wavefront](./plugins/outputs/wavefront) -* [websocket](./plugins/outputs/websocket) -* [sumologic](./plugins/outputs/sumologic) -* [yandex_cloud_monitoring](./plugins/outputs/yandex_cloud_monitoring) +[developer docs]: docs +- [Input Plugins](/telegraf/docs/INPUTS.md) +- [Output Plugins](/telegraf/docs/OUTPUTS.md) +- [Processor Plugins](/telegraf/docs/PROCESSORS.md) +- [Aggregator Plugins](/telegraf/docs/AGGREGATORS.md) + + +## Contributing + +There are many ways to contribute: +- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new) +- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation) +- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) +- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) +- [Contribute plugins](CONTRIBUTING.md) +- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) \ No newline at end of file From 1dbddf5c534c0abff99f36925d54c1dfaa9606a6 Mon Sep 17 00:00:00 2001 From: Alan Pope Date: Tue, 21 Sep 2021 18:39:48 +0100 Subject: [PATCH 630/761] Update README.md Sorry, unbreaking the broken commit to master I did earlier. Won't do it again, promise! (cherry picked from commit 9e004623e03ba9f7003a36f0b5f9ffd259e7fafc) --- README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 5f0861f4fa3cb..9c75311e4e2cb 100644 --- a/README.md +++ b/README.md @@ -13,10 +13,10 @@ Design goal: Telegraf is plugin-driven and has the concept of 4 distinct plugin types: -1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs -2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics -3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) -4. [Output Plugins](#output-plugins) write metrics to various destinations +1. [Input Plugins](/docs/INPUTS.md) collect metrics from the system, services, or 3rd party APIs +2. [Processor Plugins](/docs/PROCESSORS.md) transform, decorate, and/or filter metrics +3. [Aggregator Plugins](/docs/AGGREGATORS.md) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) +4. [Output Plugins](/docs/OUTPUTS.md) write metrics to various destinations New plugins are designed to be easy to contribute, pull requests are welcomed and we work to incorporate as many pull requests as possible. If none of the internal plugins fit your needs, you could have a look at the [list of external plugins](EXTERNAL_PLUGINS.md). @@ -117,14 +117,14 @@ telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb [Latest Release Documentation][release docs]. -For documentation on the latest development code see the [documentation index][devel docs]. +For documentation on the latest development code see the [documentation index](/docs). [release docs]: https://docs.influxdata.com/telegraf [developer docs]: docs -- [Input Plugins](/telegraf/docs/INPUTS.md) -- [Output Plugins](/telegraf/docs/OUTPUTS.md) -- [Processor Plugins](/telegraf/docs/PROCESSORS.md) -- [Aggregator Plugins](/telegraf/docs/AGGREGATORS.md) +- [Input Plugins](/docs/INPUTS.md) +- [Output Plugins](/docs/OUTPUTS.md) +- [Processor Plugins](/docs/PROCESSORS.md) +- [Aggregator Plugins](/docs/AGGREGATORS.md) ## Contributing @@ -135,4 +135,4 @@ There are many ways to contribute: - [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) - Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) - [Contribute plugins](CONTRIBUTING.md) -- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) \ No newline at end of file +- [Contribute external plugins](docs/EXTERNAL_PLUGINS.md) From d56268c44ebd197e060e73c3dc25b9b89a519293 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 21 Sep 2021 12:03:41 -0700 Subject: [PATCH 631/761] docs: update caddy server instructions (#9698) (cherry picked from commit c4c3c8ade982c7935f013bc93107dd9c702541f5) --- plugins/inputs/prometheus/README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index c826fd0e015ab..955c6ab7d978b 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -158,20 +158,20 @@ Authorization header. ### Usage for Caddy HTTP server -If you want to monitor Caddy, you need to use Caddy with its Prometheus plugin: +Steps to monitor Caddy with Telegraf's Prometheus input plugin: -* Download Caddy+Prometheus plugin [here](https://caddyserver.com/download/linux/amd64?plugins=http.prometheus) -* Add the `prometheus` directive in your `CaddyFile` +* Download [Caddy](https://caddyserver.com/download) +* Download Prometheus and set up [monitoring Caddy with Prometheus metrics](https://caddyserver.com/docs/metrics#monitoring-caddy-with-prometheus-metrics) * Restart Caddy * Configure Telegraf to fetch metrics on it: ```toml [[inputs.prometheus]] # ## An array of urls to scrape metrics from. - urls = ["http://localhost:9180/metrics"] + urls = ["http://localhost:2019/metrics"] ``` -> This is the default URL where Caddy Prometheus plugin will send data. +> This is the default URL where Caddy will send data. > For more details, please read the [Caddy Prometheus documentation](https://github.com/miekg/caddy-prometheus/blob/master/README.md). ### Metrics: From 61647c0266c4b7030227df2950f573615afcd379 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 21 Sep 2021 12:19:45 -0700 Subject: [PATCH 632/761] chore: update nightly to package by arch (#9781) (cherry picked from commit 1c0b74eacded31b103a3467535166c39f6dffc7b) --- .circleci/config.yml | 156 ++++++++++++++++++++++++++----------------- Makefile | 12 +--- 2 files changed, 96 insertions(+), 72 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1f644a7b9d20b..01a4bce06952e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -109,9 +109,6 @@ commands: release: type: boolean default: false - nightly: - type: boolean - default: false type: type: string default: "" @@ -124,15 +121,9 @@ commands: condition: << parameters.release >> steps: - run: 'make package' - - when: - condition: << parameters.nightly >> - steps: - - run: 'make package' - - run: 'make upload-nightly' - unless: condition: or: - - << parameters.nightly >> - << parameters.release >> steps: - run: 'make package include_packages="$(make << parameters.type >>)"' @@ -269,8 +260,17 @@ jobs: nightly: executor: go-1_17 steps: - - package-build: - nightly: true + - attach_workspace: + at: '/build' + - run: + command: | + aws s3 sync /build/dist s3://dl.influxdata.com/telegraf/nightlies/ \ + --exclude "*" \ + --include "*.tar.gz" \ + --include "*.deb" \ + --include "*.rpm" \ + --include "*.zip" \ + --acl public-read package-consolidate: executor: name: win/default @@ -336,6 +336,63 @@ jobs: printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" +commonjobs: + - &test-awaiter + 'test-awaiter': + requires: + - 'test-go-1_16' + - 'test-go-1_16-386' + - 'test-go-1_17' + - 'test-go-1_17-386' + - &windows-package + 'windows-package': + requires: + - 'test-go-windows' + - &darwin-package + 'darwin-package': + requires: + - 'test-go-mac' + - &i386-package + 'i386-package': + requires: + - 'test-awaiter' + - &ppc64le-package + 'ppc64le-package': + requires: + - 'test-awaiter' + - &s390x-package + 's390x-package': + requires: + - 'test-awaiter' + - &armel-package + 'armel-package': + requires: + - 'test-awaiter' + - &amd64-package + 'amd64-package': + requires: + - 'test-awaiter' + - &arm64-package + 'arm64-package': + requires: + - 'test-awaiter' + - &armhf-package + 'armhf-package': + requires: + - 'test-awaiter' + - &static-package + 'static-package': + requires: + - 'test-awaiter' + - &mipsel-package + 'mipsel-package': + requires: + - 'test-awaiter' + - &mips-package + 'mips-package': + requires: + - 'test-awaiter' + workflows: version: 2 check: @@ -376,48 +433,19 @@ workflows: filters: tags: only: /.*/ - - 'test-awaiter': - requires: - - 'test-go-1_16' - - 'test-go-1_16-386' - - 'test-go-1_17' - - 'test-go-1_17-386' - - 'windows-package': - requires: - - 'test-go-windows' - - 'darwin-package': - requires: - - 'test-go-mac' - - 'i386-package': - requires: - - 'test-awaiter' - - 'ppc64le-package': - requires: - - 'test-awaiter' - - 's390x-package': - requires: - - 'test-awaiter' - - 'armel-package': - requires: - - 'test-awaiter' - - 'amd64-package': - requires: - - 'test-awaiter' - - 'arm64-package': - requires: - - 'test-awaiter' - - 'armhf-package': - requires: - - 'test-awaiter' - - 'static-package': - requires: - - 'test-awaiter' - - 'mipsel-package': - requires: - - 'test-awaiter' - - 'mips-package': - requires: - - 'test-awaiter' + - *test-awaiter + - *windows-package + - *darwin-package + - *i386-package + - *ppc64le-package + - *s390x-package + - *armel-package + - *amd64-package + - *arm64-package + - *armhf-package + - *static-package + - *mipsel-package + - *mips-package - 'share-artifacts': requires: - 'i386-package' @@ -479,14 +507,20 @@ workflows: - 'deps' - 'test-go-mac' - 'test-go-windows' - - 'nightly': - requires: - - 'test-go-windows' - - 'test-go-mac' - - 'test-go-1_16' - - 'test-go-1_16-386' - - 'test-go-1_17' - - 'test-go-1_17-386' + - *test-awaiter + - *windows-package + - *darwin-package + - *i386-package + - *ppc64le-package + - *s390x-package + - *armel-package + - *amd64-package + - *arm64-package + - *armhf-package + - *static-package + - *mipsel-package + - *mips-package + - nightly triggers: - schedule: cron: "0 7 * * *" diff --git a/Makefile b/Makefile index a7797a0e8ce5f..cbe0e2a2e5dbb 100644 --- a/Makefile +++ b/Makefile @@ -203,7 +203,7 @@ plugin-%: ci-1.16: docker build -t quay.io/influxdb/telegraf-ci:1.16.7 - < scripts/ci-1.16.docker docker push quay.io/influxdb/telegraf-ci:1.16.7 - + .PHONY: ci-1.17 ci-1.17: docker build -t quay.io/influxdb/telegraf-ci:1.17.0 - < scripts/ci-1.17.docker @@ -346,16 +346,6 @@ $(include_packages): tar --owner 0 --group 0 -czvf $(pkgdir)/telegraf-$(tar_version)_$@ -C $(dir $(DESTDIR)) . ;\ fi -.PHONY: upload-nightly -upload-nightly: - aws s3 sync $(pkgdir) s3://dl.influxdata.com/telegraf/nightlies/ \ - --exclude "*" \ - --include "*.tar.gz" \ - --include "*.deb" \ - --include "*.rpm" \ - --include "*.zip" \ - --acl public-read - amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOOS := linux amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOARCH := amd64 From e6b4c30cf549d1e2ca0b2b66f56326fffaa95594 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 21 Sep 2021 12:53:18 -0700 Subject: [PATCH 633/761] chore: automate updating etc/telegraf.conf and etc/telegraf_windows.conf (#9684) (cherry picked from commit 027647e3edde77ce6a6c70956ea3a5bd20abf2d2) --- .circleci/config.yml | 57 ++++++++++++++++++++++++++++++++++++++ scripts/generate_config.sh | 27 ++++++++++++++++++ scripts/update_config.sh | 22 +++++++++++++++ 3 files changed, 106 insertions(+) create mode 100755 scripts/generate_config.sh create mode 100755 scripts/update_config.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 01a4bce06952e..dc59d4aa13cdb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -25,6 +25,23 @@ executors: GOFLAGS: -p=8 commands: + generate-config: + parameters: + os: + type: string + default: "linux" + steps: + - checkout + - attach_workspace: + at: '/build' + - run: ./scripts/generate_config.sh << parameters.os >> + - store_artifacts: + path: './new-config' + destination: 'new-config' + - persist_to_workspace: + root: './new-config' + paths: + - '*' check-changed-files-or-halt: steps: - run: ./scripts/check-file-changes.sh @@ -335,6 +352,24 @@ jobs: PR=${CIRCLE_PULL_REQUEST##*/} printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" + generate-config: + executor: go-1_17 + steps: + - generate-config + generate-config-win: + executor: + name: win/default + shell: bash.exe + steps: + - generate-config: + os: windows + update-config: + executor: go-1_17 + steps: + - checkout + - attach_workspace: + at: '/new-config' + - run: ./scripts/update_config.sh ${UPDATE_CONFIG_TOKEN} commonjobs: - &test-awaiter @@ -446,6 +481,28 @@ workflows: - *static-package - *mipsel-package - *mips-package + - 'generate-config': + requires: + - 'amd64-package' + filters: + branches: + only: + - master + - 'generate-config-win': + requires: + - 'windows-package' + filters: + branches: + only: + - master + - 'update-config': + requires: + - 'generate-config-win' + - 'generate-config' + filters: + branches: + only: + - master - 'share-artifacts': requires: - 'i386-package' diff --git a/scripts/generate_config.sh b/scripts/generate_config.sh new file mode 100755 index 0000000000000..c85dd05172631 --- /dev/null +++ b/scripts/generate_config.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# This script is responsible for generating the Telegraf config found under the `etc` directory. +# This script is meant to be only ran in within the Circle CI pipeline so that the Tiger Bot can update them automatically. +# It supports Windows and Linux because the configs are different depending on the OS. + + +os=$1 # windows or linux +exe_path="/build/extracted" # Path will contain telegraf binary +config_name="telegraf.conf" + +if [ "$os" = "windows" ]; then + zip=$(/bin/find ./build/dist -maxdepth 1 -name "*windows_amd64.zip" -print) + exe_path="$PWD/build/extracted" + unzip "$zip" -d "$exe_path" + config_name="telegraf_windows.conf" + exe_path=$(/bin/find "$exe_path" -name telegraf.exe -type f -print) +else + tar_path=$(find /build/dist -maxdepth 1 -name "*linux_amd64.tar.gz" -print | grep -v ".*static.*") + mkdir "$exe_path" + tar --extract --file="$tar_path" --directory "$exe_path" + exe_path=$(find "$exe_path" -name telegraf -type f -print | grep ".*usr/bin/.*") +fi + +$exe_path config > $config_name + +mkdir ./new-config +mv $config_name ./new-config diff --git a/scripts/update_config.sh b/scripts/update_config.sh new file mode 100755 index 0000000000000..87cfe2620ab61 --- /dev/null +++ b/scripts/update_config.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# This script is responsible for triggering the Tiger Bot endpoint that will create the pull request with the newly generated configs. +# This script is meant to be only ran in within the Circle CI pipeline. + +token=$1 + +config_path="/new-config" + +if [ ! -f "$config_path/telegraf.conf" ]; then + echo "$config_path/telegraf.conf does not exist" + exit +fi +if [ ! -f "$config_path/telegraf_windows.conf" ]; then + echo "$config_path/telegraf_windows.conf does not exist" + exit +fi + +if cmp -s "$config_path/telegraf.conf" "etc/telegraf.conf" && cmp -s "$config_path/telegraf_windows.conf" "etc/telegraf_windows.conf"; then + echo "Both telegraf.conf and telegraf_windows.conf haven't changed" +fi + +curl -H "Authorization: Bearer $token" -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/updateConfig" From fe3c8a0dfc058e9be1c0edf8125b5d0bc4332f15 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Sep 2021 08:00:13 -0600 Subject: [PATCH 634/761] fix: bump github.com/Azure/go-autorest/autorest/adal (#9791) (cherry picked from commit 86a6c06955d6e3197ff39355293ec7cfb4c3d54d) --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d56ac810d6026..8e7e48099934e 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.18 - github.com/Azure/go-autorest/autorest/adal v0.9.15 + github.com/Azure/go-autorest/autorest/adal v0.9.16 github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect diff --git a/go.sum b/go.sum index c67071b110a16..e8ba0d9ef6cc6 100644 --- a/go.sum +++ b/go.sum @@ -111,6 +111,8 @@ github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJ github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= +github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc= +github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg= github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= From 63d299ed1f3bdb4b53b1eec85bfe0dfe59288051 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Sep 2021 08:20:37 -0600 Subject: [PATCH 635/761] fix: bump github.com/testcontainers/testcontainers-go from 0.11.0 to 0.11.1 (#9789) (cherry picked from commit 4cee2ca15d80ffe16bc5c861adfb7710f158b03a) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 8e7e48099934e..a569c672a4eb4 100644 --- a/go.mod +++ b/go.mod @@ -82,7 +82,7 @@ require ( github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/dimchansky/utfbom v1.1.1 github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/docker v20.10.6+incompatible + github.com/docker/docker v20.10.7+incompatible github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 @@ -245,7 +245,7 @@ require ( github.com/stretchr/objx v0.2.0 // indirect github.com/stretchr/testify v1.7.0 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/testcontainers/testcontainers-go v0.11.0 + github.com/testcontainers/testcontainers-go v0.11.1 github.com/tidwall/gjson v1.8.0 github.com/tidwall/match v1.0.3 // indirect github.com/tidwall/pretty v1.1.0 // indirect diff --git a/go.sum b/go.sum index e8ba0d9ef6cc6..6cc78b3768f13 100644 --- a/go.sum +++ b/go.sum @@ -499,8 +499,8 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= -github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= +github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -1533,8 +1533,8 @@ github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOs github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/testcontainers/testcontainers-go v0.11.0 h1:HO5YOx2DYBHqcg4MzVWPj3FuHAv7USWVu94vCSsgiaM= -github.com/testcontainers/testcontainers-go v0.11.0/go.mod h1:HztBCODzuA+YpMXGK8amjO8j50jz2gcT0BOzSKUiYIs= +github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= +github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= github.com/tidwall/gjson v1.8.0 h1:Qt+orfosKn0rbNTZqHYDqBrmm3UDA4KRkv70fDzG+PQ= github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= From 5a3297543523cc6ddfc74f2b5aab9b4d80674c98 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 22 Sep 2021 08:36:04 -0600 Subject: [PATCH 636/761] fix: run go mod tidy (cherry picked from commit 20ed68c36088941ebd608ef7405567ec764f54da) --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 6cc78b3768f13..300b12d8d6a6c 100644 --- a/go.sum +++ b/go.sum @@ -109,8 +109,6 @@ github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35pe github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= -github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc= github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= From 1995483224358803b80b2442dc265941720d53a2 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 22 Sep 2021 08:54:59 -0700 Subject: [PATCH 637/761] fix: nightly upload requires package steps (#9795) (cherry picked from commit 045adcb700ebc55761e5876a07de82f9317e4056) --- .circleci/config.yml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index dc59d4aa13cdb..027a529cb0385 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -577,7 +577,20 @@ workflows: - *static-package - *mipsel-package - *mips-package - - nightly + - nightly: + requires: + - 'i386-package' + - 'ppc64le-package' + - 's390x-package' + - 'armel-package' + - 'amd64-package' + - 'mipsel-package' + - 'mips-package' + - 'darwin-package' + - 'windows-package' + - 'static-package' + - 'arm64-package' + - 'armhf-package' triggers: - schedule: cron: "0 7 * * *" From ae33d4dcff832b2112b67e3288490de722d51381 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 23 Sep 2021 16:10:44 +0200 Subject: [PATCH 638/761] fix: Rename KNXListener to knx_listener (#9741) (cherry picked from commit ceae37d66ecaf949a5813847ad4b695e2a936c3e) --- etc/telegraf.conf | 15 +++++++-------- plugins/inputs/knx_listener/README.md | 6 +++--- plugins/inputs/knx_listener/knx_listener.go | 6 ++++-- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index beb22821464d9..4b7b8b0a546de 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -5546,7 +5546,7 @@ # # timeout = "5ms" -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false @@ -6802,7 +6802,7 @@ # # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. -# [[inputs.KNXListener]] +# [[inputs.knx_listener]] # ## Type of KNX-IP interface. # ## Can be either "tunnel" or "router". # # service_type = "tunnel" @@ -6811,7 +6811,7 @@ # service_address = "localhost:3671" # # ## Measurement definition(s) -# # [[inputs.KNXListener.measurement]] +# # [[inputs.knx_listener.measurement]] # # ## Name of the measurement # # name = "temperature" # # ## Datapoint-Type (DPT) of the KNX messages @@ -6819,7 +6819,7 @@ # # ## List of Group-Addresses (GAs) assigned to the measurement # # addresses = ["5/5/1"] # -# # [[inputs.KNXListener.measurement]] +# # [[inputs.knx_listener.measurement]] # # name = "illumination" # # dpt = "9.004" # # addresses = ["5/5/3"] @@ -7667,7 +7667,7 @@ # ## This value is propagated to pqos tool. Interval format is defined by pqos itself. # ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. # # sampling_interval = "10" -# +# # ## Optionally specify the path to pqos executable. # ## If not provided, auto discovery will be performed. # # pqos_path = "/usr/local/bin/pqos" @@ -7675,12 +7675,12 @@ # ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. # ## If not provided, default value is false. # # shortened_metrics = false -# +# # ## Specify the list of groups of CPU core(s) to be provided as pqos input. # ## Mandatory if processes aren't set and forbidden if processes are specified. # ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] # # cores = ["0-3"] -# +# # ## Specify the list of processes for which Metrics will be collected. # ## Mandatory if cores aren't set and forbidden if cores are specified. # ## e.g. ["qemu", "pmd"] @@ -9099,4 +9099,3 @@ # [[inputs.zipkin]] # # path = "/api/v1/spans" # URL path for span data # # port = 9411 # Port on which Telegraf listens - diff --git a/plugins/inputs/knx_listener/README.md b/plugins/inputs/knx_listener/README.md index 7a06462ffbb3e..518dd5d7f3720 100644 --- a/plugins/inputs/knx_listener/README.md +++ b/plugins/inputs/knx_listener/README.md @@ -11,7 +11,7 @@ This is a sample config for the plugin. ```toml # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. -[[inputs.KNXListener]] +[[inputs.knx_listener]] ## Type of KNX-IP interface. ## Can be either "tunnel" or "router". # service_type = "tunnel" @@ -20,7 +20,7 @@ This is a sample config for the plugin. service_address = "localhost:3671" ## Measurement definition(s) - # [[inputs.KNXListener.measurement]] + # [[inputs.knx_listener.measurement]] # ## Name of the measurement # name = "temperature" # ## Datapoint-Type (DPT) of the KNX messages @@ -28,7 +28,7 @@ This is a sample config for the plugin. # ## List of Group-Addresses (GAs) assigned to the measurement # addresses = ["5/5/1"] - # [[inputs.KNXListener.measurement]] + # [[inputs.knx_listener.measurement]] # name = "illumination" # dpt = "9.004" # addresses = ["5/5/3"] diff --git a/plugins/inputs/knx_listener/knx_listener.go b/plugins/inputs/knx_listener/knx_listener.go index 98f19e922f7ad..3896d649b4055 100644 --- a/plugins/inputs/knx_listener/knx_listener.go +++ b/plugins/inputs/knx_listener/knx_listener.go @@ -56,7 +56,7 @@ func (kl *KNXListener) SampleConfig() string { service_address = "localhost:3671" ## Measurement definition(s) - # [[inputs.KNXListener.measurement]] + # [[inputs.knx_listener.measurement]] # ## Name of the measurement # name = "temperature" # ## Datapoint-Type (DPT) of the KNX messages @@ -64,7 +64,7 @@ func (kl *KNXListener) SampleConfig() string { # ## List of Group-Addresses (GAs) assigned to the measurement # addresses = ["5/5/1"] - # [[inputs.KNXListener.measurement]] + # [[inputs.knx_listener.measurement]] # name = "illumination" # dpt = "9.004" # addresses = ["5/5/3"] @@ -195,5 +195,7 @@ func (kl *KNXListener) listen() { } func init() { + inputs.Add("knx_listener", func() telegraf.Input { return &KNXListener{ServiceType: "tunnel"} }) + // Register for backward compatibility inputs.Add("KNXListener", func() telegraf.Input { return &KNXListener{ServiceType: "tunnel"} }) } From 08429e22dd74b26170c289e0285b7682d8f2ec88 Mon Sep 17 00:00:00 2001 From: Jacob Marble Date: Thu, 23 Sep 2021 09:05:29 -0700 Subject: [PATCH 639/761] fix: error returned to OpenTelemetry client (#9797) (cherry picked from commit fb088bd69c86c6628dfdc5a44f9e0d878587f6e3) --- go.mod | 16 +++- go.sum | 27 +++++- plugins/inputs/opentelemetry/grpc_services.go | 2 +- plugins/inputs/opentelemetry/opentelemetry.go | 11 ++- .../opentelemetry/opentelemetry_test.go | 83 +++++++++++++++++++ 5 files changed, 131 insertions(+), 8 deletions(-) create mode 100644 plugins/inputs/opentelemetry/opentelemetry_test.go diff --git a/go.mod b/go.mod index a569c672a4eb4..dc8b762d1e6d1 100644 --- a/go.mod +++ b/go.mod @@ -61,7 +61,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.1.5 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.2.2 // indirect github.com/aws/smithy-go v1.8.0 - github.com/benbjohnson/clock v1.0.3 + github.com/benbjohnson/clock v1.1.0 github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmatcuk/doublestar/v3 v3.0.0 @@ -271,6 +271,9 @@ require ( go.mongodb.org/mongo-driver v1.5.3 go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/collector/model v0.35.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 + go.opentelemetry.io/otel/metric v0.23.0 + go.opentelemetry.io/otel/sdk/metric v0.23.0 go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect @@ -329,6 +332,17 @@ require ( sigs.k8s.io/yaml v1.2.0 // indirect ) +require ( + github.com/cenkalti/backoff/v4 v4.1.1 // indirect + go.opentelemetry.io/otel v1.0.0-RC3 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 // indirect + go.opentelemetry.io/otel/internal/metric v0.23.0 // indirect + go.opentelemetry.io/otel/sdk v1.0.0-RC3 // indirect + go.opentelemetry.io/otel/sdk/export/metric v0.23.0 // indirect + go.opentelemetry.io/otel/trace v1.0.0-RC3 // indirect + go.opentelemetry.io/proto/otlp v0.9.0 // indirect +) + // replaced due to https://github.com/satori/go.uuid/issues/73 replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible diff --git a/go.sum b/go.sum index 300b12d8d6a6c..4189b415723f0 100644 --- a/go.sum +++ b/go.sum @@ -291,8 +291,8 @@ github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -333,6 +333,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -1639,7 +1641,27 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/collector/model v0.35.0 h1:NpKjghiqlei4ecwjOYOMhD6tj4gY8yiWHPJmbFs/ArI= go.opentelemetry.io/collector/model v0.35.0/go.mod h1:+7YCSjJG+MqiIFjauzt7oM2qkqBsaJWh5hcsO4fwsAc= +go.opentelemetry.io/otel v1.0.0-RC3 h1:kvwiyEkiUT/JaadXzVLI/R1wDO934A7r3Bs2wEe6wqA= +go.opentelemetry.io/otel v1.0.0-RC3/go.mod h1:Ka5j3ua8tZs4Rkq4Ex3hwgBgOchyPVq5S6P2lz//nKQ= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 h1:vKIEsT6IJU0NYd+iZccjgCmk80zsa7dTiC2Bu7U1jz0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0/go.mod h1:pe9oOWRaZyapdajWCn64fnl76v3cmTEmNBgh7MkKvwE= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 h1:JSsJID+KU3G8wxynfHIlWaefOvYngDjnrmtHOGb1sb0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0/go.mod h1:aSP5oMNaAfOYq+sRydHANZ0vBYLyZR/3lR9pru9aPLk= +go.opentelemetry.io/otel/internal/metric v0.23.0 h1:mPfzm9Iqhw7G2nDBmUAjFTfPqLZPbOW2k7QI57ITbaI= +go.opentelemetry.io/otel/internal/metric v0.23.0/go.mod h1:z+RPiDJe30YnCrOhFGivwBS+DU1JU/PiLKkk4re2DNY= +go.opentelemetry.io/otel/metric v0.23.0 h1:mYCcDxi60P4T27/0jchIDFa1WHEfQeU3zH9UEMpnj2c= +go.opentelemetry.io/otel/metric v0.23.0/go.mod h1:G/Nn9InyNnIv7J6YVkQfpc0JCfKBNJaERBGw08nqmVQ= +go.opentelemetry.io/otel/sdk v1.0.0-RC3 h1:iRMkET+EmJUn5mW0hJzygBraXRmrUwzbOtNvTCh/oKs= +go.opentelemetry.io/otel/sdk v1.0.0-RC3/go.mod h1:78H6hyg2fka0NYT9fqGuFLvly2yCxiBXDJAgLKo/2Us= +go.opentelemetry.io/otel/sdk/export/metric v0.23.0 h1:7NeoKPPx6NdZBVHLEp/LY5Lq85Ff1WNZnuJkuRy+azw= +go.opentelemetry.io/otel/sdk/export/metric v0.23.0/go.mod h1:SuMiREmKVRIwFKq73zvGTvwFpxb/ZAYkMfyqMoOtDqs= +go.opentelemetry.io/otel/sdk/metric v0.23.0 h1:xlZhPbiue1+jjSFEth94q9QCmX8Q24mOtue9IAmlVyI= +go.opentelemetry.io/otel/sdk/metric v0.23.0/go.mod h1:wa0sKK13eeIFW+0OFjcC3S1i7FTRRiLAXe1kjBVbhwg= +go.opentelemetry.io/otel/trace v1.0.0-RC3 h1:9F0ayEvlxv8BmNmPbU005WK7hC+7KbOazCPZjNa1yME= +go.opentelemetry.io/otel/trace v1.0.0-RC3/go.mod h1:VUt2TUYd8S2/ZRX09ZDFZQwn2RqfMB5MzO17jBojGxo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= +go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= go.starlark.net v0.0.0-20210406145628-7a1108eaa012 h1:4RGobP/iq7S22H0Bb92OEt+M8/cfBQnW+T+a2MC0sQo= go.starlark.net v0.0.0-20210406145628-7a1108eaa012/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1944,6 +1966,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/plugins/inputs/opentelemetry/grpc_services.go b/plugins/inputs/opentelemetry/grpc_services.go index f5fa450fa8f65..1c805e2a23ff2 100644 --- a/plugins/inputs/opentelemetry/grpc_services.go +++ b/plugins/inputs/opentelemetry/grpc_services.go @@ -56,7 +56,7 @@ func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema func (s *metricsService) Export(ctx context.Context, req pdata.Metrics) (otlpgrpc.MetricsResponse, error) { err := s.converter.WriteMetrics(ctx, req, s.writer) - return otlpgrpc.MetricsResponse{}, err + return otlpgrpc.NewMetricsResponse(), err } type logsService struct { diff --git a/plugins/inputs/opentelemetry/opentelemetry.go b/plugins/inputs/opentelemetry/opentelemetry.go index 2e6cbf9b8349a..85f32a7695efa 100644 --- a/plugins/inputs/opentelemetry/opentelemetry.go +++ b/plugins/inputs/opentelemetry/opentelemetry.go @@ -24,6 +24,7 @@ type OpenTelemetry struct { Log telegraf.Logger `toml:"-"` + listener net.Listener // overridden in tests grpcServer *grpc.Server wg sync.WaitGroup @@ -89,14 +90,16 @@ func (o *OpenTelemetry) Start(accumulator telegraf.Accumulator) error { otlpgrpc.RegisterMetricsServer(o.grpcServer, ms) otlpgrpc.RegisterLogsServer(o.grpcServer, newLogsService(logger, influxWriter)) - listener, err := net.Listen("tcp", o.ServiceAddress) - if err != nil { - return err + if o.listener == nil { + o.listener, err = net.Listen("tcp", o.ServiceAddress) + if err != nil { + return err + } } o.wg.Add(1) go func() { - if err := o.grpcServer.Serve(listener); err != nil { + if err := o.grpcServer.Serve(o.listener); err != nil { accumulator.AddError(fmt.Errorf("failed to stop OpenTelemetry gRPC service: %w", err)) } o.wg.Done() diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go new file mode 100644 index 0000000000000..2de35bb06af50 --- /dev/null +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -0,0 +1,83 @@ +package opentelemetry + +import ( + "context" + "net" + "testing" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/global" + controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" + processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" + "go.opentelemetry.io/otel/sdk/metric/selector/simple" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +func TestOpenTelemetry(t *testing.T) { + mockListener := bufconn.Listen(1024 * 1024) + plugin := inputs.Inputs["opentelemetry"]().(*OpenTelemetry) + plugin.listener = mockListener + accumulator := new(testutil.Accumulator) + + err := plugin.Start(accumulator) + require.NoError(t, err) + t.Cleanup(plugin.Stop) + + metricExporter, err := otlpmetricgrpc.New(context.Background(), + otlpmetricgrpc.WithInsecure(), + otlpmetricgrpc.WithDialOption( + grpc.WithBlock(), + grpc.WithContextDialer(func(_ context.Context, _ string) (net.Conn, error) { + return mockListener.Dial() + })), + ) + require.NoError(t, err) + t.Cleanup(func() { _ = metricExporter.Shutdown(context.Background()) }) + + pusher := controller.New( + processor.New( + simple.NewWithExactDistribution(), + metricExporter, + ), + controller.WithExporter(metricExporter), + ) + + err = pusher.Start(context.Background()) + require.NoError(t, err) + t.Cleanup(func() { _ = pusher.Stop(context.Background()) }) + + global.SetMeterProvider(pusher.MeterProvider()) + + // write metrics + meter := global.Meter("library-name") + counter := metric.Must(meter).NewInt64Counter("measurement-counter") + meter.RecordBatch(context.Background(), nil, counter.Measurement(7)) + + err = pusher.Stop(context.Background()) + require.NoError(t, err) + + // Shutdown + + plugin.Stop() + + err = metricExporter.Shutdown(context.Background()) + require.NoError(t, err) + + // Check + + assert.Empty(t, accumulator.Errors) + + if assert.Len(t, accumulator.Metrics, 1) { + got := accumulator.Metrics[0] + assert.Equal(t, "measurement-counter", got.Measurement) + assert.Equal(t, telegraf.Counter, got.Type) + assert.Equal(t, "library-name", got.Tags["otel.library.name"]) + } +} From 3d4f3c1361fb961d2356c56c5849b00849bd23a1 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Thu, 23 Sep 2021 10:15:43 -0700 Subject: [PATCH 640/761] docs: add trig and twemproxy plugin readmes (#9801) (cherry picked from commit 3ec4c128caf32f89c18501299f894abac69420e9) --- plugins/inputs/trig/README.md | 28 ++++++++++++++++++++++++++++ plugins/inputs/twemproxy/README.md | 16 ++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100644 plugins/inputs/trig/README.md create mode 100644 plugins/inputs/twemproxy/README.md diff --git a/plugins/inputs/trig/README.md b/plugins/inputs/trig/README.md new file mode 100644 index 0000000000000..41ff8743e8cf3 --- /dev/null +++ b/plugins/inputs/trig/README.md @@ -0,0 +1,28 @@ +# Trig Input Plugin + +The `trig` plugin is for demonstration purposes and inserts sine and cosine + +### Configuration + +```toml +# Inserts sine and cosine waves for demonstration purposes +[[inputs.trig]] + ## Set the amplitude + amplitude = 10.0 +``` + +### Metrics + +- trig + - fields: + - cosine (float) + - sine (float) + + +### Example Output + +``` +trig,host=MBP15-SWANG.local cosine=10,sine=0 1632338680000000000 +trig,host=MBP15-SWANG.local sine=5.877852522924732,cosine=8.090169943749473 1632338690000000000 +trig,host=MBP15-SWANG.local sine=9.510565162951535,cosine=3.0901699437494745 1632338700000000000 +``` diff --git a/plugins/inputs/twemproxy/README.md b/plugins/inputs/twemproxy/README.md new file mode 100644 index 0000000000000..0c07e0aec4463 --- /dev/null +++ b/plugins/inputs/twemproxy/README.md @@ -0,0 +1,16 @@ +# Twemproxy Input Plugin + +The `twemproxy` plugin gathers statistics from [Twemproxy](https://github.com/twitter/twemproxy) servers. + + +### Configuration + +```toml +# Read Twemproxy stats data +[[inputs.twemproxy]] + ## Twemproxy stats address and port (no scheme) + addr = "localhost:22222" + ## Monitor pool name + pools = ["redis_pool", "mc_pool"] +``` + From 01f336d62f89c459eef92eb2778ceb857aa94c01 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 24 Sep 2021 09:09:52 -0600 Subject: [PATCH 641/761] fix: update golang-ci package (#9817) (cherry picked from commit 3b94269f30de70079fbf9942cf9266882b359947) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index cbe0e2a2e5dbb..12267c04403bb 100644 --- a/Makefile +++ b/Makefile @@ -141,7 +141,7 @@ vet: .PHONY: lint-install lint-install: - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.38.0 + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.42.1 .PHONY: lint lint: From 4956994418b898ca5fc98521063e143f195d6a8d Mon Sep 17 00:00:00 2001 From: Eng Zer Jun Date: Wed, 29 Sep 2021 05:16:32 +0800 Subject: [PATCH 642/761] refactor: move from io/ioutil to io and os package (#9811) (cherry picked from commit 6a3b27126a26dd43d2a287b4c9d8831dca6bf687) --- config/config.go | 6 ++-- internal/content_coding_test.go | 4 +-- internal/internal_test.go | 7 ++-- internal/process/process.go | 3 +- internal/rotate/file_writer_test.go | 35 +++++++++---------- logger/logger_test.go | 27 +++++++------- plugins/common/cookie/cookie.go | 5 ++- plugins/common/cookie/cookie_test.go | 4 +-- plugins/common/encoding/decoder_test.go | 4 +-- plugins/common/logrus/hook.go | 4 +-- plugins/common/shim/config.go | 3 +- plugins/common/shim/input_test.go | 5 +-- plugins/common/shim/processor_test.go | 5 +-- plugins/common/tls/config.go | 4 +-- plugins/inputs/activemq/activemq.go | 4 +-- plugins/inputs/aliyuncms/aliyuncms_test.go | 8 ++--- .../inputs/amd_rocm_smi/amd_rocm_smi_test.go | 4 +-- plugins/inputs/bcache/bcache.go | 5 ++- plugins/inputs/bcache/bcache_test.go | 19 +++++----- plugins/inputs/beat/beat_test.go | 6 ++-- plugins/inputs/bond/bond.go | 3 +- plugins/inputs/burrow/burrow_test.go | 3 +- plugins/inputs/cassandra/cassandra.go | 4 +-- plugins/inputs/cassandra/cassandra_test.go | 4 +-- plugins/inputs/ceph/ceph.go | 4 +-- plugins/inputs/ceph/ceph_test.go | 5 ++- plugins/inputs/cgroup/cgroup_linux.go | 3 +- plugins/inputs/clickhouse/clickhouse.go | 5 ++- .../inputs/cloud_pubsub_push/pubsub_push.go | 4 +-- plugins/inputs/conntrack/conntrack.go | 6 ++-- plugins/inputs/conntrack/conntrack_test.go | 17 +++++---- plugins/inputs/dcos/creds.go | 4 +-- plugins/inputs/dcos/dcos.go | 4 +-- .../directory_monitor/directory_monitor.go | 5 ++- .../directory_monitor_test.go | 11 +++--- plugins/inputs/diskio/diskio_linux_test.go | 3 +- plugins/inputs/docker/docker_test.go | 6 ++-- plugins/inputs/docker/docker_testdata.go | 6 ++-- plugins/inputs/ecs/client.go | 5 ++- plugins/inputs/ecs/client_test.go | 14 ++++---- plugins/inputs/elasticsearch/elasticsearch.go | 4 +-- .../elasticsearch/elasticsearch_test.go | 4 +-- plugins/inputs/execd/shim/goshim.go | 3 +- plugins/inputs/file/file.go | 4 +-- plugins/inputs/fluentd/fluentd.go | 4 +-- plugins/inputs/graylog/graylog.go | 4 +-- plugins/inputs/graylog/graylog_test.go | 4 +-- plugins/inputs/http/http.go | 8 ++--- plugins/inputs/http/http_test.go | 10 +++--- .../http_listener_v2/http_listener_v2.go | 8 ++--- .../http_listener_v2/http_listener_v2_test.go | 4 +-- plugins/inputs/http_response/http_response.go | 6 ++-- .../http_response/http_response_test.go | 4 +-- plugins/inputs/httpjson/httpjson.go | 4 +-- plugins/inputs/httpjson/httpjson_test.go | 6 ++-- .../influxdb_listener_test.go | 4 +-- .../influxdb_v2_listener.go | 4 +-- .../influxdb_v2_listener_test.go | 7 ++-- plugins/inputs/intel_powerstat/file.go | 5 ++- plugins/inputs/jolokia/jolokia.go | 4 +-- plugins/inputs/jolokia/jolokia_test.go | 4 +-- plugins/inputs/jolokia2/client.go | 4 +-- plugins/inputs/jolokia2/client_test.go | 6 ++-- plugins/inputs/kernel/kernel.go | 5 ++- plugins/inputs/kernel/kernel_test.go | 3 +- plugins/inputs/kernel_vmstat/kernel_vmstat.go | 3 +- .../kernel_vmstat/kernel_vmstat_test.go | 3 +- plugins/inputs/kibana/kibana.go | 3 +- plugins/inputs/kibana/kibana_test.go | 4 +-- .../kinesis_consumer/kinesis_consumer.go | 6 ++-- plugins/inputs/kube_inventory/kube_state.go | 4 +-- plugins/inputs/kubernetes/kubernetes.go | 4 +-- plugins/inputs/leofs/leofs_test.go | 3 +- .../inputs/linux_sysctl_fs/linux_sysctl_fs.go | 5 ++- .../linux_sysctl_fs/linux_sysctl_fs_test.go | 15 ++++---- plugins/inputs/logparser/logparser_test.go | 7 ++-- plugins/inputs/logstash/logstash.go | 3 +- plugins/inputs/lustre2/lustre2.go | 4 +-- plugins/inputs/lustre2/lustre2_test.go | 11 +++--- plugins/inputs/mailchimp/chimp_api.go | 5 ++- plugins/inputs/mdstat/mdstat.go | 3 +- plugins/inputs/mdstat/mdstat_test.go | 3 +- plugins/inputs/mesos/mesos.go | 4 +-- plugins/inputs/multifile/multifile.go | 4 +-- plugins/inputs/nats/nats.go | 4 +-- plugins/inputs/neptune_apex/neptune_apex.go | 4 +-- .../nginx_plus_api/nginx_plus_api_metrics.go | 4 +-- .../nginx_upstream_check.go | 3 +- plugins/inputs/nsq/nsq.go | 4 +-- plugins/inputs/nstat/nstat.go | 7 ++-- plugins/inputs/nvidia_smi/nvidia_smi_test.go | 4 +-- plugins/inputs/opcua/opcua_util.go | 3 +- plugins/inputs/passenger/passenger_test.go | 3 +- plugins/inputs/phpfpm/child.go | 5 ++- plugins/inputs/phpfpm/fcgi_test.go | 5 ++- plugins/inputs/postfix/postfix_test.go | 15 ++++---- .../postgresql_extensible.go | 4 +-- .../inputs/processes/processes_notwindows.go | 3 +- plugins/inputs/procstat/native_finder.go | 4 +-- plugins/inputs/procstat/pgrep.go | 4 +-- plugins/inputs/procstat/procstat.go | 3 +- plugins/inputs/procstat/procstat_test.go | 5 ++- plugins/inputs/prometheus/kubernetes.go | 4 +-- plugins/inputs/prometheus/prometheus.go | 6 ++-- plugins/inputs/proxmox/proxmox.go | 4 +-- plugins/inputs/puppetagent/puppetagent.go | 6 ++-- plugins/inputs/rabbitmq/rabbitmq.go | 4 +-- plugins/inputs/rabbitmq/rabbitmq_test.go | 6 ++-- plugins/inputs/ravendb/ravendb_test.go | 6 ++-- plugins/inputs/redfish/redfish.go | 4 +-- plugins/inputs/salesforce/salesforce.go | 5 ++- plugins/inputs/snmp_legacy/snmp_legacy.go | 4 +-- .../socket_listener/socket_listener_test.go | 7 ++-- plugins/inputs/sql/sql.go | 4 +-- plugins/inputs/suricata/suricata_test.go | 27 +++++++------- plugins/inputs/synproxy/synproxy_test.go | 3 +- plugins/inputs/syslog/nontransparent_test.go | 9 +++-- plugins/inputs/syslog/octetcounting_test.go | 9 +++-- plugins/inputs/syslog/rfc5426_test.go | 5 ++- plugins/inputs/syslog/syslog_test.go | 3 +- plugins/inputs/tail/tail_test.go | 13 ++++--- plugins/inputs/twemproxy/twemproxy.go | 4 +-- .../inputs/udp_listener/udp_listener_test.go | 8 ++--- .../webhooks/filestack/filestack_webhooks.go | 4 +-- .../inputs/webhooks/github/github_webhooks.go | 4 +-- .../webhooks/mandrill/mandrill_webhooks.go | 4 +-- .../webhooks/rollbar/rollbar_webhooks.go | 4 +-- plugins/inputs/wireless/wireless_linux.go | 3 +- plugins/inputs/x509_cert/x509_cert.go | 7 ++-- plugins/inputs/x509_cert/x509_cert_test.go | 12 +++---- plugins/inputs/zfs/zfs_linux_test.go | 21 ++++++----- .../cmd/thrift_serialize/thrift_serialize.go | 8 ++--- .../inputs/zipkin/codec/thrift/thrift_test.go | 4 +-- plugins/inputs/zipkin/handler.go | 4 +-- plugins/inputs/zipkin/handler_test.go | 7 ++-- plugins/inputs/zipkin/zipkin_test.go | 4 +-- .../outputs/azure_monitor/azure_monitor.go | 6 ++-- plugins/outputs/dynatrace/dynatrace.go | 4 +-- plugins/outputs/dynatrace/dynatrace_test.go | 18 +++++----- plugins/outputs/file/file_test.go | 7 ++-- plugins/outputs/health/health_test.go | 4 +-- plugins/outputs/http/http.go | 3 +- plugins/outputs/http/http_test.go | 4 +-- plugins/outputs/influxdb/http.go | 7 ++-- plugins/outputs/influxdb/http_test.go | 16 ++++----- plugins/outputs/influxdb_v2/http.go | 3 +- plugins/outputs/influxdb_v2/http_test.go | 4 +-- plugins/outputs/librato/librato.go | 4 +-- plugins/outputs/loki/loki_test.go | 9 ++--- plugins/outputs/opentsdb/opentsdb_http.go | 3 +- .../prometheus_client_v1_test.go | 8 ++--- .../prometheus_client_v2_test.go | 6 ++-- plugins/outputs/sensu/sensu.go | 3 +- plugins/outputs/sensu/sensu_test.go | 4 +-- .../socket_writer/socket_writer_test.go | 5 ++- plugins/outputs/sql/sql_test.go | 13 ++++--- plugins/outputs/sql/sqlite_test.go | 3 +- plugins/outputs/sumologic/sumologic_test.go | 3 +- plugins/outputs/warp10/warp10.go | 4 +-- .../yandex_cloud_monitoring.go | 6 ++-- plugins/parsers/json_v2/parser_test.go | 3 +- plugins/parsers/prometheus/parser_test.go | 4 +-- plugins/parsers/xpath/parser_test.go | 6 ++-- plugins/processors/starlark/starlark_test.go | 3 +- testutil/tls.go | 4 +-- 165 files changed, 456 insertions(+), 517 deletions(-) diff --git a/config/config.go b/config/config.go index 56beed8ee4910..57cb9de479875 100644 --- a/config/config.go +++ b/config/config.go @@ -3,7 +3,7 @@ package config import ( "bytes" "fmt" - "io/ioutil" + "io" "log" "net/http" "net/url" @@ -933,7 +933,7 @@ func loadConfig(config string) ([]byte, error) { } // If it isn't a https scheme, try it as a file - return ioutil.ReadFile(config) + return os.ReadFile(config) } func fetchConfig(u *url.URL) ([]byte, error) { @@ -964,7 +964,7 @@ func fetchConfig(u *url.URL) ([]byte, error) { return nil, fmt.Errorf("Retry %d of %d failed to retrieve remote config: %s", i, retries, resp.Status) } defer resp.Body.Close() - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } return nil, nil diff --git a/internal/content_coding_test.go b/internal/content_coding_test.go index 85496df59c5b6..06235a63879a9 100644 --- a/internal/content_coding_test.go +++ b/internal/content_coding_test.go @@ -2,7 +2,7 @@ package internal import ( "bytes" - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/require" @@ -68,7 +68,7 @@ func TestStreamIdentityDecode(t *testing.T) { dec, err := NewStreamContentDecoder("identity", &r) require.NoError(t, err) - data, err := ioutil.ReadAll(dec) + data, err := io.ReadAll(dec) require.NoError(t, err) require.Equal(t, []byte("howdy"), data) diff --git a/internal/internal_test.go b/internal/internal_test.go index 7cb56d5324f06..8dae73f562702 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -5,7 +5,6 @@ import ( "compress/gzip" "crypto/rand" "io" - "io/ioutil" "log" "os/exec" "regexp" @@ -182,7 +181,7 @@ func TestCompressWithGzip(t *testing.T) { assert.NoError(t, err) defer gzipReader.Close() - output, err := ioutil.ReadAll(gzipReader) + output, err := io.ReadAll(gzipReader) assert.NoError(t, err) assert.Equal(t, testData, string(output)) @@ -203,7 +202,7 @@ func TestCompressWithGzipEarlyClose(t *testing.T) { rc, err := CompressWithGzip(mr) assert.NoError(t, err) - n, err := io.CopyN(ioutil.Discard, rc, 10000) + n, err := io.CopyN(io.Discard, rc, 10000) assert.NoError(t, err) assert.Equal(t, int64(10000), n) @@ -211,7 +210,7 @@ func TestCompressWithGzipEarlyClose(t *testing.T) { err = rc.Close() assert.NoError(t, err) - n, err = io.CopyN(ioutil.Discard, rc, 10000) + n, err = io.CopyN(io.Discard, rc, 10000) assert.Error(t, io.EOF, err) assert.Equal(t, int64(0), n) diff --git a/internal/process/process.go b/internal/process/process.go index 6da98d211a43b..3bfc3bb7e44e6 100644 --- a/internal/process/process.go +++ b/internal/process/process.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os/exec" "sync" "sync/atomic" @@ -187,5 +186,5 @@ func isQuitting(ctx context.Context) bool { } func defaultReadPipe(r io.Reader) { - io.Copy(ioutil.Discard, r) + _, _ = io.Copy(io.Discard, r) } diff --git a/internal/rotate/file_writer_test.go b/internal/rotate/file_writer_test.go index ca29b9a2f45d6..2d249d74548e1 100644 --- a/internal/rotate/file_writer_test.go +++ b/internal/rotate/file_writer_test.go @@ -1,7 +1,6 @@ package rotate import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -12,7 +11,7 @@ import ( ) func TestFileWriter_NoRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationNo") + tempDir, err := os.MkdirTemp("", "RotationNo") require.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test"), 0, 0, 0) require.NoError(t, err) @@ -22,12 +21,12 @@ func TestFileWriter_NoRotation(t *testing.T) { require.NoError(t, err) _, err = writer.Write([]byte("Hello World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 1, len(files)) } func TestFileWriter_TimeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationTime") + tempDir, err := os.MkdirTemp("", "RotationTime") require.NoError(t, err) interval, _ := time.ParseDuration("1s") writer, err := NewFileWriter(filepath.Join(tempDir, "test"), interval, 0, -1) @@ -39,28 +38,28 @@ func TestFileWriter_TimeRotation(t *testing.T) { time.Sleep(1 * time.Second) _, err = writer.Write([]byte("Hello World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_ReopenTimeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationTime") + tempDir, err := os.MkdirTemp("", "RotationTime") require.NoError(t, err) interval, _ := time.ParseDuration("1s") filePath := filepath.Join(tempDir, "test.log") - err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + err = os.WriteFile(filePath, []byte("Hello World"), 0644) time.Sleep(1 * time.Second) assert.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), interval, 0, -1) require.NoError(t, err) defer func() { writer.Close(); os.RemoveAll(tempDir) }() - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_SizeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationSize") + tempDir, err := os.MkdirTemp("", "RotationSize") require.NoError(t, err) maxSize := int64(9) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) @@ -71,16 +70,16 @@ func TestFileWriter_SizeRotation(t *testing.T) { require.NoError(t, err) _, err = writer.Write([]byte("World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_ReopenSizeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationSize") + tempDir, err := os.MkdirTemp("", "RotationSize") require.NoError(t, err) maxSize := int64(12) filePath := filepath.Join(tempDir, "test.log") - err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + err = os.WriteFile(filePath, []byte("Hello World"), 0644) assert.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) require.NoError(t, err) @@ -88,12 +87,12 @@ func TestFileWriter_ReopenSizeRotation(t *testing.T) { _, err = writer.Write([]byte("Hello World Again")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_DeleteArchives(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationDeleteArchives") + tempDir, err := os.MkdirTemp("", "RotationDeleteArchives") require.NoError(t, err) maxSize := int64(5) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, 2) @@ -112,14 +111,14 @@ func TestFileWriter_DeleteArchives(t *testing.T) { _, err = writer.Write([]byte("Third file")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 3, len(files)) for _, tempFile := range files { var bytes []byte var err error path := filepath.Join(tempDir, tempFile.Name()) - if bytes, err = ioutil.ReadFile(path); err != nil { + if bytes, err = os.ReadFile(path); err != nil { t.Error(err.Error()) return } @@ -133,7 +132,7 @@ func TestFileWriter_DeleteArchives(t *testing.T) { } func TestFileWriter_CloseRotates(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationClose") + tempDir, err := os.MkdirTemp("", "RotationClose") require.NoError(t, err) defer os.RemoveAll(tempDir) maxSize := int64(9) @@ -142,7 +141,7 @@ func TestFileWriter_CloseRotates(t *testing.T) { writer.Close() - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 1, len(files)) assert.Regexp(t, "^test\\.[^\\.]+\\.log$", files[0].Name()) } diff --git a/logger/logger_test.go b/logger/logger_test.go index d2c699da52644..47af1d4591bff 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -3,7 +3,6 @@ package logger import ( "bytes" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -15,7 +14,7 @@ import ( ) func TestWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() @@ -24,13 +23,13 @@ func TestWriteLogToFile(t *testing.T) { log.Printf("I! TEST") log.Printf("D! TEST") // <- should be ignored - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) } func TestDebugWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -38,13 +37,13 @@ func TestDebugWriteLogToFile(t *testing.T) { SetupLogging(config) log.Printf("D! TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z D! TEST\n")) } func TestErrorWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -53,13 +52,13 @@ func TestErrorWriteLogToFile(t *testing.T) { log.Printf("E! TEST") log.Printf("I! TEST") // <- should be ignored - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z E! TEST\n")) } func TestAddDefaultLogLevel(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -67,13 +66,13 @@ func TestAddDefaultLogLevel(t *testing.T) { SetupLogging(config) log.Printf("TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) } func TestWriteToTruncatedFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() config := createBasicLogConfig(tmpfile.Name()) @@ -81,7 +80,7 @@ func TestWriteToTruncatedFile(t *testing.T) { SetupLogging(config) log.Printf("TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) @@ -91,13 +90,13 @@ func TestWriteToTruncatedFile(t *testing.T) { log.Printf("SHOULD BE FIRST") - f, err = ioutil.ReadFile(tmpfile.Name()) + f, err = os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! SHOULD BE FIRST\n")) } func TestWriteToFileInRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "LogRotation") + tempDir, err := os.MkdirTemp("", "LogRotation") require.NoError(t, err) cfg := createBasicLogConfig(filepath.Join(tempDir, "test.log")) cfg.LogTarget = LogTargetFile @@ -110,7 +109,7 @@ func TestWriteToFileInRotation(t *testing.T) { log.Printf("I! TEST 1") // Writes 31 bytes, will rotate log.Printf("I! TEST") // Writes 29 byes, no rotation expected - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } diff --git a/plugins/common/cookie/cookie.go b/plugins/common/cookie/cookie.go index e452a50a4b0a9..03fd97f95077f 100644 --- a/plugins/common/cookie/cookie.go +++ b/plugins/common/cookie/cookie.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "net/http/cookiejar" "strings" @@ -78,7 +77,7 @@ func (c *CookieAuthConfig) authRenewal(ctx context.Context, ticker *clockutil.Ti func (c *CookieAuthConfig) auth() error { var body io.ReadCloser if c.Body != "" { - body = ioutil.NopCloser(strings.NewReader(c.Body)) + body = io.NopCloser(strings.NewReader(c.Body)) defer body.Close() } @@ -97,7 +96,7 @@ func (c *CookieAuthConfig) auth() error { } defer resp.Body.Close() - if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil { + if _, err = io.Copy(io.Discard, resp.Body); err != nil { return err } diff --git a/plugins/common/cookie/cookie_test.go b/plugins/common/cookie/cookie_test.go index 99269c27cd339..b32ceb0059e8b 100644 --- a/plugins/common/cookie/cookie_test.go +++ b/plugins/common/cookie/cookie_test.go @@ -3,7 +3,7 @@ package cookie import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "sync/atomic" @@ -50,7 +50,7 @@ func newFakeServer(t *testing.T) fakeServer { case authEndpointNoCreds: authed() case authEndpointWithBody: - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) if !cmp.Equal([]byte(reqBody), body) { w.WriteHeader(http.StatusUnauthorized) diff --git a/plugins/common/encoding/decoder_test.go b/plugins/common/encoding/decoder_test.go index 87115318ad0ed..b8e19af9cea43 100644 --- a/plugins/common/encoding/decoder_test.go +++ b/plugins/common/encoding/decoder_test.go @@ -2,7 +2,7 @@ package encoding import ( "bytes" - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/require" @@ -66,7 +66,7 @@ func TestDecoder(t *testing.T) { require.NoError(t, err) buf := bytes.NewBuffer(tt.input) r := decoder.Reader(buf) - actual, err := ioutil.ReadAll(r) + actual, err := io.ReadAll(r) if tt.expectedErr { require.Error(t, err) return diff --git a/plugins/common/logrus/hook.go b/plugins/common/logrus/hook.go index a7f99023be1ba..7451639a75423 100644 --- a/plugins/common/logrus/hook.go +++ b/plugins/common/logrus/hook.go @@ -1,7 +1,7 @@ package logrus import ( - "io/ioutil" + "io" "log" "strings" "sync" @@ -19,7 +19,7 @@ type LogHook struct { // that directly log to the logrus system without providing an override method. func InstallHook() { once.Do(func() { - logrus.SetOutput(ioutil.Discard) + logrus.SetOutput(io.Discard) logrus.AddHook(&LogHook{}) }) } diff --git a/plugins/common/shim/config.go b/plugins/common/shim/config.go index a0bb3ce0de696..089c2b7ee7525 100644 --- a/plugins/common/shim/config.go +++ b/plugins/common/shim/config.go @@ -3,7 +3,6 @@ package shim import ( "errors" "fmt" - "io/ioutil" "log" "os" @@ -53,7 +52,7 @@ func LoadConfig(filePath *string) (loaded loadedConfig, err error) { var data string conf := config{} if filePath != nil && *filePath != "" { - b, err := ioutil.ReadFile(*filePath) + b, err := os.ReadFile(*filePath) if err != nil { return loadedConfig{}, err } diff --git a/plugins/common/shim/input_test.go b/plugins/common/shim/input_test.go index 7cbfe6413975f..9a0423261ac14 100644 --- a/plugins/common/shim/input_test.go +++ b/plugins/common/shim/input_test.go @@ -3,7 +3,6 @@ package shim import ( "bufio" "io" - "io/ioutil" "strings" "testing" "time" @@ -45,7 +44,9 @@ func TestInputShimStdinSignalingWorks(t *testing.T) { require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) stdinWriter.Close() - go ioutil.ReadAll(r) + go func() { + _, _ = io.ReadAll(r) + }() // check that it exits cleanly <-exited } diff --git a/plugins/common/shim/processor_test.go b/plugins/common/shim/processor_test.go index ea2e61a459469..bc00fb70d1bba 100644 --- a/plugins/common/shim/processor_test.go +++ b/plugins/common/shim/processor_test.go @@ -3,7 +3,6 @@ package shim import ( "bufio" "io" - "io/ioutil" "math/rand" "sync" "testing" @@ -84,7 +83,9 @@ func testSendAndRecieve(t *testing.T, fieldKey string, fieldValue string) { val2, ok := mOut.Fields()[fieldKey] require.True(t, ok) require.Equal(t, fieldValue, val2) - go ioutil.ReadAll(r) + go func() { + _, _ = io.ReadAll(r) + }() wg.Wait() } diff --git a/plugins/common/tls/config.go b/plugins/common/tls/config.go index 9a752fbce5714..586ec8fd4a417 100644 --- a/plugins/common/tls/config.go +++ b/plugins/common/tls/config.go @@ -4,7 +4,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "os" "strings" ) @@ -147,7 +147,7 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) { func makeCertPool(certFiles []string) (*x509.CertPool, error) { pool := x509.NewCertPool() for _, certFile := range certFiles { - pem, err := ioutil.ReadFile(certFile) + pem, err := os.ReadFile(certFile) if err != nil { return nil, fmt.Errorf( "could not read certificate %q: %v", certFile, err) diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go index 0674b7ae0fe52..f5cf7927342e5 100644 --- a/plugins/inputs/activemq/activemq.go +++ b/plugins/inputs/activemq/activemq.go @@ -3,7 +3,7 @@ package activemq import ( "encoding/xml" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -184,7 +184,7 @@ func (a *ActiveMQ) GetMetrics(u string) ([]byte, error) { return nil, fmt.Errorf("GET %s returned status %q", u, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues Queues) { diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go index 22e0acbc52ebe..7e346a6ae9b8e 100644 --- a/plugins/inputs/aliyuncms/aliyuncms_test.go +++ b/plugins/inputs/aliyuncms/aliyuncms_test.go @@ -2,7 +2,7 @@ package aliyuncms import ( "bytes" - "io/ioutil" + "io" "net/http" "testing" "time" @@ -132,7 +132,7 @@ func TestPluginInitialize(t *testing.T) { httpResp := &http.Response{ StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewBufferString( + Body: io.NopCloser(bytes.NewBufferString( `{ "LoadBalancers": { @@ -359,7 +359,7 @@ func TestGetDiscoveryDataAcrossRegions(t *testing.T) { region: "cn-hongkong", httpResp: &http.Response{ StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewBufferString(`{}`)), + Body: io.NopCloser(bytes.NewBufferString(`{}`)), }, totalCount: 0, pageSize: 0, @@ -372,7 +372,7 @@ func TestGetDiscoveryDataAcrossRegions(t *testing.T) { region: "cn-hongkong", httpResp: &http.Response{ StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewBufferString( + Body: io.NopCloser(bytes.NewBufferString( `{ "LoadBalancers": { diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go index 7893760bdf952..e38e0ff89eae0 100644 --- a/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go @@ -1,7 +1,7 @@ package amd_rocm_smi import ( - "io/ioutil" + "os" "path/filepath" "testing" "time" @@ -78,7 +78,7 @@ func TestGatherValidJSON(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) require.NoError(t, err) err = gatherROCmSMI(octets, &acc) diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 3195cf4dabcbb..84eb3262fdf28 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -8,7 +8,6 @@ package bcache import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -85,7 +84,7 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { if len(metrics) == 0 { return errors.New("can't read any stats file") } - file, err := ioutil.ReadFile(bdev + "/dirty_data") + file, err := os.ReadFile(bdev + "/dirty_data") if err != nil { return err } @@ -97,7 +96,7 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { for _, path := range metrics { key := filepath.Base(path) - file, err := ioutil.ReadFile(path) + file, err := os.ReadFile(path) rawValue := strings.TrimSpace(string(file)) if err != nil { return err diff --git a/plugins/inputs/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go index 857538a8d6f72..4c62e0f014f14 100644 --- a/plugins/inputs/bcache/bcache_test.go +++ b/plugins/inputs/bcache/bcache_test.go @@ -4,7 +4,6 @@ package bcache import ( - "io/ioutil" "os" "testing" @@ -50,39 +49,39 @@ func TestBcacheGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testBcacheUUIDPath+"/bdev0/stats_total", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/dirty_data", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/dirty_data", []byte(dirtyData), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/bypassed", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/bypassed", []byte(bypassed), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_hits", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_hits", []byte(cacheBypassHits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_misses", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_misses", []byte(cacheBypassMisses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hit_ratio", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hit_ratio", []byte(cacheHitRatio), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hits", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hits", []byte(cacheHits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_miss_collisions", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_miss_collisions", []byte(cacheMissCollisions), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_misses", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_misses", []byte(cacheMisses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_readaheads", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_readaheads", []byte(cacheReadaheads), 0644) require.NoError(t, err) diff --git a/plugins/inputs/beat/beat_test.go b/plugins/inputs/beat/beat_test.go index 8f2c5c9c2fbee..433e8fcd61337 100644 --- a/plugins/inputs/beat/beat_test.go +++ b/plugins/inputs/beat/beat_test.go @@ -2,11 +2,11 @@ package beat import ( "fmt" - "io/ioutil" "net" "net/http" "net/http/httptest" "net/url" + "os" "testing" "github.com/influxdata/telegraf/testutil" @@ -31,7 +31,7 @@ func Test_BeatStats(t *testing.T) { require.FailNow(t, "cannot handle request") } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) require.NoError(t, err, "could not write data") @@ -175,7 +175,7 @@ func Test_BeatRequest(t *testing.T) { require.FailNow(t, "cannot handle request") } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) require.Equal(t, request.Host, "beat.test.local") require.Equal(t, request.Method, "POST") diff --git a/plugins/inputs/bond/bond.go b/plugins/inputs/bond/bond.go index dc9b083ec5af9..4f30a20e3f677 100644 --- a/plugins/inputs/bond/bond.go +++ b/plugins/inputs/bond/bond.go @@ -3,7 +3,6 @@ package bond import ( "bufio" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -53,7 +52,7 @@ func (bond *Bond) Gather(acc telegraf.Accumulator) error { } for _, bondName := range bondNames { bondAbsPath := bond.HostProc + "/net/bonding/" + bondName - file, err := ioutil.ReadFile(bondAbsPath) + file, err := os.ReadFile(bondAbsPath) if err != nil { acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondAbsPath, err)) continue diff --git a/plugins/inputs/burrow/burrow_test.go b/plugins/inputs/burrow/burrow_test.go index d9df7be31d27e..db58df6fc94e8 100644 --- a/plugins/inputs/burrow/burrow_test.go +++ b/plugins/inputs/burrow/burrow_test.go @@ -2,7 +2,6 @@ package burrow import ( "fmt" - "io/ioutil" "net/http" "net/http/httptest" "os" @@ -28,7 +27,7 @@ func getResponseJSON(requestURI string) ([]byte, int) { } // respond with file - b, _ := ioutil.ReadFile(jsonFile) + b, _ := os.ReadFile(jsonFile) return b, code } diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index 4a52ef2979b7d..d1c23caadc68a 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -4,7 +4,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -217,7 +217,7 @@ func (c *Cassandra) getAttr(requestURL *url.URL) (map[string]interface{}, error) } // read body - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/cassandra/cassandra_test.go b/plugins/inputs/cassandra/cassandra_test.go index 325c267d9274b..f167f50e7187f 100644 --- a/plugins/inputs/cassandra/cassandra_test.go +++ b/plugins/inputs/cassandra/cassandra_test.go @@ -2,7 +2,7 @@ package cassandra import ( _ "fmt" - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -109,7 +109,7 @@ type jolokiaClientStub struct { func (c jolokiaClientStub) MakeRequest(_ *http.Request) (*http.Response, error) { resp := http.Response{} resp.StatusCode = c.statusCode - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index 7baa28213ac7f..efd61d56322a7 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "os" "os/exec" "path/filepath" "strings" @@ -206,7 +206,7 @@ var perfDump = func(binary string, socket *socket) (string, error) { } var findSockets = func(c *Ceph) ([]*socket, error) { - listing, err := ioutil.ReadDir(c.SocketDir) + listing, err := os.ReadDir(c.SocketDir) if err != nil { return []*socket{}, fmt.Errorf("Failed to read socket directory '%s': %v", c.SocketDir, err) } diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index a61838bc6a4e0..7915d6dd695f4 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -2,7 +2,6 @@ package ceph import ( "fmt" - "io/ioutil" "os" "path/filepath" "strconv" @@ -113,7 +112,7 @@ func TestGather(t *testing.T) { } func TestFindSockets(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "socktest") + tmpdir, err := os.MkdirTemp("", "socktest") require.NoError(t, err) defer func() { err := os.Remove(tmpdir) @@ -189,7 +188,7 @@ func createTestFiles(dir string, st *SockTest) error { writeFile := func(prefix string, i int) error { f := sockFile(prefix, i) fpath := filepath.Join(dir, f) - return ioutil.WriteFile(fpath, []byte(""), 0777) + return os.WriteFile(fpath, []byte(""), 0777) } return tstFileApply(st, writeFile) } diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index d1eda6e7a3b07..b892f528c234f 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -5,7 +5,6 @@ package cgroup import ( "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -46,7 +45,7 @@ func (g *CGroup) gatherDir(acc telegraf.Accumulator, dir string) error { return file.err } - raw, err := ioutil.ReadFile(file.path) + raw, err := os.ReadFile(file.path) if err != nil { return err } diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index e7c5991676211..bdd4cf4730fbc 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -590,7 +589,7 @@ func (ch *ClickHouse) execQuery(address *url.URL, query string, i interface{}) e } defer func() { _ = resp.Body.Close() }() if resp.StatusCode >= 300 { - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return &clickhouseError{ StatusCode: resp.StatusCode, body: body, @@ -606,7 +605,7 @@ func (ch *ClickHouse) execQuery(address *url.URL, query string, i interface{}) e return err } - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + if _, err := io.Copy(io.Discard, resp.Body); err != nil { return err } return nil diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push.go b/plugins/inputs/cloud_pubsub_push/pubsub_push.go index ef43a3d5eb161..48329e1cd362e 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push.go @@ -5,7 +5,7 @@ import ( "crypto/subtle" "encoding/base64" "encoding/json" - "io/ioutil" + "io" "net/http" "sync" "time" @@ -222,7 +222,7 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) { } body := http.MaxBytesReader(res, req.Body, int64(p.MaxBodySize)) - bytes, err := ioutil.ReadAll(body) + bytes, err := io.ReadAll(body) if err != nil { res.WriteHeader(http.StatusRequestEntityTooLarge) return diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go index f1b04fb0d965a..d644f7c188fc5 100644 --- a/plugins/inputs/conntrack/conntrack.go +++ b/plugins/inputs/conntrack/conntrack.go @@ -5,14 +5,14 @@ package conntrack import ( "fmt" - "io/ioutil" "os" "strconv" "strings" + "path/filepath" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "path/filepath" ) type Conntrack struct { @@ -91,7 +91,7 @@ func (c *Conntrack) Gather(acc telegraf.Accumulator) error { continue } - contents, err := ioutil.ReadFile(fName) + contents, err := os.ReadFile(fName) if err != nil { acc.AddError(fmt.Errorf("E! failed to read file '%s': %v", fName, err)) continue diff --git a/plugins/inputs/conntrack/conntrack_test.go b/plugins/inputs/conntrack/conntrack_test.go index 50f56d831791e..cb33caec2e330 100644 --- a/plugins/inputs/conntrack/conntrack_test.go +++ b/plugins/inputs/conntrack/conntrack_test.go @@ -4,7 +4,6 @@ package conntrack import ( - "io/ioutil" "os" "path" "strconv" @@ -35,11 +34,11 @@ func TestNoFilesFound(t *testing.T) { func TestDefaultsUsed(t *testing.T) { defer restoreDflts(dfltFiles, dfltDirs) - tmpdir, err := ioutil.TempDir("", "tmp1") + tmpdir, err := os.MkdirTemp("", "tmp1") require.NoError(t, err) defer os.Remove(tmpdir) - tmpFile, err := ioutil.TempFile(tmpdir, "ip_conntrack_count") + tmpFile, err := os.CreateTemp(tmpdir, "ip_conntrack_count") require.NoError(t, err) defer os.Remove(tmpFile.Name()) @@ -48,7 +47,7 @@ func TestDefaultsUsed(t *testing.T) { dfltFiles = []string{fname} count := 1234321 - require.NoError(t, ioutil.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0660)) + require.NoError(t, os.WriteFile(tmpFile.Name(), []byte(strconv.Itoa(count)), 0660)) c := &Conntrack{} acc := &testutil.Accumulator{} @@ -59,13 +58,13 @@ func TestDefaultsUsed(t *testing.T) { func TestConfigsUsed(t *testing.T) { defer restoreDflts(dfltFiles, dfltDirs) - tmpdir, err := ioutil.TempDir("", "tmp1") + tmpdir, err := os.MkdirTemp("", "tmp1") require.NoError(t, err) defer os.Remove(tmpdir) - cntFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_count") + cntFile, err := os.CreateTemp(tmpdir, "nf_conntrack_count") require.NoError(t, err) - maxFile, err := ioutil.TempFile(tmpdir, "nf_conntrack_max") + maxFile, err := os.CreateTemp(tmpdir, "nf_conntrack_max") require.NoError(t, err) defer os.Remove(cntFile.Name()) defer os.Remove(maxFile.Name()) @@ -77,8 +76,8 @@ func TestConfigsUsed(t *testing.T) { count := 1234321 max := 9999999 - require.NoError(t, ioutil.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0660)) - require.NoError(t, ioutil.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0660)) + require.NoError(t, os.WriteFile(cntFile.Name(), []byte(strconv.Itoa(count)), 0660)) + require.NoError(t, os.WriteFile(maxFile.Name(), []byte(strconv.Itoa(max)), 0660)) c := &Conntrack{} acc := &testutil.Accumulator{} diff --git a/plugins/inputs/dcos/creds.go b/plugins/inputs/dcos/creds.go index 2fd5f078e46e5..328ce394a4cf6 100644 --- a/plugins/inputs/dcos/creds.go +++ b/plugins/inputs/dcos/creds.go @@ -4,7 +4,7 @@ import ( "context" "crypto/rsa" "fmt" - "io/ioutil" + "os" "strings" "time" "unicode/utf8" @@ -48,7 +48,7 @@ func (c *ServiceAccount) IsExpired() bool { } func (c *TokenCreds) Token(_ context.Context, _ Client) (string, error) { - octets, err := ioutil.ReadFile(c.Path) + octets, err := os.ReadFile(c.Path) if err != nil { return "", fmt.Errorf("error reading token file %q: %s", c.Path, err) } diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index 35822f30b074f..dd8f22f7292f5 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -2,8 +2,8 @@ package dcos import ( "context" - "io/ioutil" "net/url" + "os" "sort" "strings" "sync" @@ -370,7 +370,7 @@ func (d *DCOS) createClient() (Client, error) { func (d *DCOS) createCredentials() (Credentials, error) { if d.ServiceAccountID != "" && d.ServiceAccountPrivateKey != "" { - bs, err := ioutil.ReadFile(d.ServiceAccountPrivateKey) + bs, err := os.ReadFile(d.ServiceAccountPrivateKey) if err != nil { return nil, err } diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go index 45acd1c062ba9..a58c039422757 100644 --- a/plugins/inputs/directory_monitor/directory_monitor.go +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "regexp" @@ -108,7 +107,7 @@ func (monitor *DirectoryMonitor) Description() string { func (monitor *DirectoryMonitor) Gather(_ telegraf.Accumulator) error { // Get all files sitting in the directory. - files, err := ioutil.ReadDir(monitor.Directory) + files, err := os.ReadDir(monitor.Directory) if err != nil { return fmt.Errorf("unable to monitor the targeted directory: %w", err) } @@ -183,7 +182,7 @@ func (monitor *DirectoryMonitor) Monitor() { } } -func (monitor *DirectoryMonitor) processFile(file os.FileInfo) { +func (monitor *DirectoryMonitor) processFile(file os.DirEntry) { if file.IsDir() { return } diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go index 2ad504637c6c2..7cda5f2d7b639 100644 --- a/plugins/inputs/directory_monitor/directory_monitor_test.go +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -3,7 +3,6 @@ package directory_monitor import ( "bytes" "compress/gzip" - "io/ioutil" "os" "path/filepath" "testing" @@ -20,9 +19,9 @@ func TestCSVGZImport(t *testing.T) { testCsvGzFile := "test.csv.gz" // Establish process directory and finished directory. - finishedDirectory, err := ioutil.TempDir("", "finished") + finishedDirectory, err := os.MkdirTemp("", "finished") require.NoError(t, err) - processDirectory, err := ioutil.TempDir("", "test") + processDirectory, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(processDirectory) defer os.RemoveAll(finishedDirectory) @@ -62,7 +61,7 @@ func TestCSVGZImport(t *testing.T) { require.NoError(t, err) err = w.Close() require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(processDirectory, testCsvGzFile), b.Bytes(), 0666) + err = os.WriteFile(filepath.Join(processDirectory, testCsvGzFile), b.Bytes(), 0666) require.NoError(t, err) // Start plugin before adding file. @@ -89,9 +88,9 @@ func TestMultipleJSONFileImports(t *testing.T) { testJSONFile := "test.json" // Establish process directory and finished directory. - finishedDirectory, err := ioutil.TempDir("", "finished") + finishedDirectory, err := os.MkdirTemp("", "finished") require.NoError(t, err) - processDirectory, err := ioutil.TempDir("", "test") + processDirectory, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(processDirectory) defer os.RemoveAll(finishedDirectory) diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index 1a97aabf40db5..8a76e230cbb98 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -4,7 +4,6 @@ package diskio import ( - "io/ioutil" "os" "testing" @@ -20,7 +19,7 @@ S:foo/bar/devlink1 // setupNullDisk sets up fake udev info as if /dev/null were a disk. func setupNullDisk(t *testing.T, s *DiskIO, devName string) func() { - td, err := ioutil.TempFile("", ".telegraf.DiskInfoTest") + td, err := os.CreateTemp("", ".telegraf.DiskInfoTest") require.NoError(t, err) if s.infoCache == nil { diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 599adae409e99..a84a6047b30aa 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -3,7 +3,7 @@ package docker import ( "context" "crypto/tls" - "io/ioutil" + "io" "reflect" "sort" "strings" @@ -1060,7 +1060,7 @@ func TestContainerName(t *testing.T) { } client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { return types.ContainerStats{ - Body: ioutil.NopCloser(strings.NewReader(`{"name": "logspout"}`)), + Body: io.NopCloser(strings.NewReader(`{"name": "logspout"}`)), }, nil } return &client, nil @@ -1080,7 +1080,7 @@ func TestContainerName(t *testing.T) { } client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { return types.ContainerStats{ - Body: ioutil.NopCloser(strings.NewReader(`{}`)), + Body: io.NopCloser(strings.NewReader(`{}`)), }, nil } return &client, nil diff --git a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index bde0bd312c788..826f34f6703d4 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -2,7 +2,7 @@ package docker import ( "fmt" - "io/ioutil" + "io" "strings" "time" @@ -344,7 +344,7 @@ func containerStats(s string) types.ContainerStats { }, "read": "2016-02-24T11:42:27.472459608-05:00" }`, name) - stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat)) + stat.Body = io.NopCloser(strings.NewReader(jsonStat)) return stat } @@ -488,7 +488,7 @@ func containerStatsWindows() types.ContainerStats { }, "name":"/gt_test_iis", }` - stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat)) + stat.Body = io.NopCloser(strings.NewReader(jsonStat)) return stat } diff --git a/plugins/inputs/ecs/client.go b/plugins/inputs/ecs/client.go index ac7ed2e1b09ef..b5521c5ea3f3a 100644 --- a/plugins/inputs/ecs/client.go +++ b/plugins/inputs/ecs/client.go @@ -3,7 +3,6 @@ package ecs import ( "fmt" "io" - "io/ioutil" "net/http" "net/url" "time" @@ -113,7 +112,7 @@ func (c *EcsClient) Task() (*Task, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.taskURL, resp.Status, body) } @@ -137,7 +136,7 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.statsURL, resp.Status, body) } diff --git a/plugins/inputs/ecs/client_test.go b/plugins/inputs/ecs/client_test.go index 2f37ca0cfa456..7e9d7e393346f 100644 --- a/plugins/inputs/ecs/client_test.go +++ b/plugins/inputs/ecs/client_test.go @@ -3,7 +3,7 @@ package ecs import ( "bytes" "errors" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -108,7 +108,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(rc), + Body: io.NopCloser(rc), }, nil }, }, @@ -129,7 +129,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusInternalServerError, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -141,7 +141,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -179,7 +179,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(rc), + Body: io.NopCloser(rc), }, nil }, }, @@ -201,7 +201,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -214,7 +214,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusInternalServerError, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 0bd4ce677cd9e..24142ba38c32e 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -3,7 +3,7 @@ package elasticsearch import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "regexp" "sort" @@ -702,7 +702,7 @@ func (e *Elasticsearch) getCatMaster(url string) (string, error) { // future calls. return "", fmt.Errorf("elasticsearch: Unable to retrieve master node information. API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK) } - response, err := ioutil.ReadAll(r.Body) + response, err := io.ReadAll(r.Body) if err != nil { return "", err diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index 8248d063b6883..1ed61e731ce1f 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -1,7 +1,7 @@ package elasticsearch import ( - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -44,7 +44,7 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { StatusCode: t.statusCode, } res.Header.Set("Content-Type", "application/json") - res.Body = ioutil.NopCloser(strings.NewReader(t.body)) + res.Body = io.NopCloser(strings.NewReader(t.body)) return res, nil } diff --git a/plugins/inputs/execd/shim/goshim.go b/plugins/inputs/execd/shim/goshim.go index 075d2cf55ab62..cfb54e3ae0708 100644 --- a/plugins/inputs/execd/shim/goshim.go +++ b/plugins/inputs/execd/shim/goshim.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "os/signal" "strings" @@ -274,7 +273,7 @@ func LoadConfig(filePath *string) ([]telegraf.Input, error) { return DefaultImportedPlugins() } - b, err := ioutil.ReadFile(*filePath) + b, err := os.ReadFile(*filePath) if err != nil { return nil, err } diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index e431bc6df9f15..22af282dbde0a 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -2,7 +2,7 @@ package file import ( "fmt" - "io/ioutil" + "io" "os" "path/filepath" @@ -115,7 +115,7 @@ func (f *File) readMetric(filename string) ([]telegraf.Metric, error) { defer file.Close() r, _ := utfbom.Skip(f.decoder.Reader(file)) - fileContents, err := ioutil.ReadAll(r) + fileContents, err := io.ReadAll(r) if err != nil { return nil, fmt.Errorf("E! Error file: %v could not be read, %s", filename, err) } diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index 03f46c67ce515..9ebd1682a56b7 100644 --- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -3,7 +3,7 @@ package fluentd import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -104,7 +104,7 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("unable to read the HTTP body \"%s\": %v", string(body), err) diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index 9b73991eb8227..d522f5a49dfea 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -5,7 +5,7 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -264,7 +264,7 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { defer resp.Body.Close() responseTime := time.Since(start).Seconds() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return string(body), responseTime, err } diff --git a/plugins/inputs/graylog/graylog_test.go b/plugins/inputs/graylog/graylog_test.go index f8008f1d94c66..5739969e3df01 100644 --- a/plugins/inputs/graylog/graylog_test.go +++ b/plugins/inputs/graylog/graylog_test.go @@ -1,7 +1,7 @@ package graylog import ( - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -115,7 +115,7 @@ func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) resp.StatusCode = 405 // Method not allowed } - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index c61465a54c36f..d7a6ac1213b6f 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -4,8 +4,8 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" + "os" "strings" "sync" @@ -180,7 +180,7 @@ func (h *HTTP) gatherURL( } if h.BearerToken != "" { - token, err := ioutil.ReadFile(h.BearerToken) + token, err := os.ReadFile(h.BearerToken) if err != nil { return err } @@ -225,7 +225,7 @@ func (h *HTTP) gatherURL( h.SuccessStatusCodes) } - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return err } @@ -254,7 +254,7 @@ func makeRequestBodyReader(contentEncoding, body string) (io.ReadCloser, error) } return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func init() { diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 02351effc71b9..da9fed2251514 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -3,7 +3,7 @@ package http_test import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -183,7 +183,7 @@ func TestBodyAndContentEncoding(t *testing.T) { URLs: []string{url}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte(""), body) w.WriteHeader(http.StatusOK) @@ -197,7 +197,7 @@ func TestBodyAndContentEncoding(t *testing.T) { Body: "test", }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) @@ -211,7 +211,7 @@ func TestBodyAndContentEncoding(t *testing.T) { Body: "test", }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) @@ -230,7 +230,7 @@ func TestBodyAndContentEncoding(t *testing.T) { gr, err := gzip.NewReader(r.Body) require.NoError(t, err) - body, err := ioutil.ReadAll(gr) + body, err := io.ReadAll(gr) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 5b511de57fb54..d2a2e5f35214e 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -4,7 +4,7 @@ import ( "compress/gzip" "crypto/subtle" "crypto/tls" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -292,7 +292,7 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) } defer r.Close() maxReader := http.MaxBytesReader(res, r, int64(h.MaxBodySize)) - bytes, err := ioutil.ReadAll(maxReader) + bytes, err := io.ReadAll(maxReader) if err != nil { if err := tooLarge(res); err != nil { h.Log.Debugf("error in too-large: %v", err) @@ -302,7 +302,7 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) return bytes, true case "snappy": defer req.Body.Close() - bytes, err := ioutil.ReadAll(req.Body) + bytes, err := io.ReadAll(req.Body) if err != nil { h.Log.Debug(err.Error()) if err := badRequest(res); err != nil { @@ -322,7 +322,7 @@ func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) return bytes, true default: defer req.Body.Close() - bytes, err := ioutil.ReadAll(req.Body) + bytes, err := io.ReadAll(req.Body) if err != nil { h.Log.Debug(err.Error()) if err := badRequest(res); err != nil { diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index 5daaf2785ffe3..da70f443998e1 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -4,9 +4,9 @@ import ( "bytes" "crypto/tls" "crypto/x509" - "io/ioutil" "net/http" "net/url" + "os" "runtime" "strconv" "sync" @@ -361,7 +361,7 @@ func TestWriteHTTPGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index d8a4e0e1438cd..799f664d1e7b0 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -4,10 +4,10 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" + "os" "regexp" "strconv" "strings" @@ -277,7 +277,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] } if h.BearerToken != "" { - token, err := ioutil.ReadFile(h.BearerToken) + token, err := os.ReadFile(h.BearerToken) if err != nil { return nil, nil, err } @@ -339,7 +339,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] if h.ResponseBodyMaxSize == 0 { h.ResponseBodyMaxSize = config.Size(defaultResponseBodyMaxSize) } - bodyBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, int64(h.ResponseBodyMaxSize)+1)) + bodyBytes, err := io.ReadAll(io.LimitReader(resp.Body, int64(h.ResponseBodyMaxSize)+1)) // Check first if the response body size exceeds the limit. if err == nil && int64(len(bodyBytes)) > int64(h.ResponseBodyMaxSize) { h.setBodyReadError("The body of the HTTP Response is too large", bodyBytes, fields, tags) diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 40917bba1bc39..5d109d0a35439 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -8,7 +8,7 @@ package http_response import ( "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httptest" @@ -123,7 +123,7 @@ func setUpTestMux() http.Handler { fmt.Fprintf(w, "used post correctly!") }) mux.HandleFunc("/musthaveabody", func(w http.ResponseWriter, req *http.Request) { - body, err := ioutil.ReadAll(req.Body) + body, err := io.ReadAll(req.Body) //nolint:errcheck,revive req.Body.Close() if err != nil { diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index f5d97b90989c0..10a4cb0c17643 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -3,7 +3,7 @@ package httpjson import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -263,7 +263,7 @@ func (h *HTTPJSON) sendRequest(serverURL string) (string, float64, error) { defer resp.Body.Close() responseTime := time.Since(start).Seconds() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return string(body), responseTime, err } diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 9f6292cba722d..b203238a94037 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -2,7 +2,7 @@ package httpjson import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -143,7 +143,7 @@ func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) resp.StatusCode = 405 // Method not allowed } - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } @@ -377,7 +377,7 @@ func TestHttpJsonPOST(t *testing.T) { "api_key": "mykey", } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) assert.NoError(t, err) assert.Equal(t, "api_key=mykey", string(body)) w.WriteHeader(http.StatusOK) diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go index 8a082a855a7f8..6b88907f95801 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -4,9 +4,9 @@ import ( "bytes" "crypto/tls" "crypto/x509" - "io/ioutil" "net/http" "net/url" + "os" "runtime" "strconv" "sync" @@ -406,7 +406,7 @@ func TestWriteGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go index 64907d12a52dc..4df2f7dc86a5e 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go @@ -6,7 +6,7 @@ import ( "crypto/tls" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "time" @@ -256,7 +256,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { var readErr error var bytes []byte //body = http.MaxBytesReader(res, req.Body, 1000000) //p.MaxBodySize.Size) - bytes, readErr = ioutil.ReadAll(body) + bytes, readErr = io.ReadAll(body) if readErr != nil { h.Log.Debugf("Error parsing the request body: %v", readErr.Error()) if err := badRequest(res, InternalError, readErr.Error()); err != nil { diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go index be99c93f51a8a..055dfc395ba7b 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go @@ -5,9 +5,10 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "io" "net/http" "net/url" + "os" "runtime" "strconv" "sync" @@ -363,7 +364,7 @@ func TestWriteGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), bytes.NewBuffer(data)) @@ -485,7 +486,7 @@ func TestReady(t *testing.T) { resp, err := http.Get(createURL(listener, "http", "/api/v2/ready", "")) require.NoError(t, err) require.Equal(t, "application/json", resp.Header["Content-Type"][0]) - bodyBytes, err := ioutil.ReadAll(resp.Body) + bodyBytes, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Contains(t, string(bodyBytes), "\"status\":\"ready\"") require.NoError(t, resp.Body.Close()) diff --git a/plugins/inputs/intel_powerstat/file.go b/plugins/inputs/intel_powerstat/file.go index a07dd57e16a57..c69dea89f4e26 100644 --- a/plugins/inputs/intel_powerstat/file.go +++ b/plugins/inputs/intel_powerstat/file.go @@ -8,7 +8,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "os" "path/filepath" "regexp" @@ -107,7 +106,7 @@ func (fs *fileServiceImpl) getStringsMatchingPatternOnPath(path string) ([]strin // readFile reads file on path and return string content. func (fs *fileServiceImpl) readFile(path string) ([]byte, error) { - out, err := ioutil.ReadFile(path) + out, err := os.ReadFile(path) if err != nil { return make([]byte, 0), err } @@ -116,7 +115,7 @@ func (fs *fileServiceImpl) readFile(path string) ([]byte, error) { // readFileToFloat64 reads file on path and tries to parse content to float64. func (fs *fileServiceImpl) readFileToFloat64(reader io.Reader) (float64, int64, error) { - read, err := ioutil.ReadAll(reader) + read, err := io.ReadAll(reader) if err != nil { return 0, 0, err } diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 9e4cac511683b..af5e3de283800 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -153,7 +153,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) } // read body - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index 5c1bc50aa2ae7..e91e9a1087fda 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -2,7 +2,7 @@ package jolokia import ( _ "fmt" - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -116,7 +116,7 @@ type jolokiaClientStub struct { func (c jolokiaClientStub) MakeRequest(_ *http.Request) (*http.Response, error) { resp := http.Response{} resp.StatusCode = c.statusCode - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } diff --git a/plugins/inputs/jolokia2/client.go b/plugins/inputs/jolokia2/client.go index 41ebd4f8af872..789450e3a1016 100644 --- a/plugins/inputs/jolokia2/client.go +++ b/plugins/inputs/jolokia2/client.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -149,7 +149,7 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { c.URL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } - responseBody, err := ioutil.ReadAll(resp.Body) + responseBody, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/jolokia2/client_test.go b/plugins/inputs/jolokia2/client_test.go index 7ec65d27a0ebf..a1bd5f4a2e141 100644 --- a/plugins/inputs/jolokia2/client_test.go +++ b/plugins/inputs/jolokia2/client_test.go @@ -3,7 +3,7 @@ package jolokia2 import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "testing" @@ -20,7 +20,7 @@ func TestJolokia2_ClientAuthRequest(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ = r.BasicAuth() - body, _ := ioutil.ReadAll(r.Body) + body, _ := io.ReadAll(r.Body) require.NoError(t, json.Unmarshal(body, &requests)) w.WriteHeader(http.StatusOK) @@ -56,7 +56,7 @@ func TestJolokia2_ClientProxyAuthRequest(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ = r.BasicAuth() - body, _ := ioutil.ReadAll(r.Body) + body, _ := io.ReadAll(r.Body) require.NoError(t, json.Unmarshal(body, &requests)) w.WriteHeader(http.StatusOK) _, err := fmt.Fprintf(w, "[]") diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index 22311e9a0f12d..c16c68bf44bd1 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -6,7 +6,6 @@ package kernel import ( "bytes" "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -41,7 +40,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { return err } - entropyData, err := ioutil.ReadFile(k.entropyStatFile) + entropyData, err := os.ReadFile(k.entropyStatFile) if err != nil { return err } @@ -109,7 +108,7 @@ func (k *Kernel) getProcStat() ([]byte, error) { return nil, err } - data, err := ioutil.ReadFile(k.statFile) + data, err := os.ReadFile(k.statFile) if err != nil { return nil, err } diff --git a/plugins/inputs/kernel/kernel_test.go b/plugins/inputs/kernel/kernel_test.go index 462624c2eb40d..f174017fad7b9 100644 --- a/plugins/inputs/kernel/kernel_test.go +++ b/plugins/inputs/kernel/kernel_test.go @@ -4,7 +4,6 @@ package kernel import ( - "io/ioutil" "os" "testing" @@ -169,7 +168,7 @@ const entropyStatFilePartial = `1024` const entropyStatFileInvalid = `` func makeFakeStatFile(t *testing.T, content []byte) string { - tmpfile, err := ioutil.TempFile("", "kernel_test") + tmpfile, err := os.CreateTemp("", "kernel_test") require.NoError(t, err) _, err = tmpfile.Write(content) diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat.go b/plugins/inputs/kernel_vmstat/kernel_vmstat.go index 2019e0cbfddb3..95a7a5e32f1e0 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat.go @@ -6,7 +6,6 @@ package kernel_vmstat import ( "bytes" "fmt" - "io/ioutil" "os" "strconv" @@ -61,7 +60,7 @@ func (k *KernelVmstat) getProcVmstat() ([]byte, error) { return nil, err } - data, err := ioutil.ReadFile(k.statFile) + data, err := os.ReadFile(k.statFile) if err != nil { return nil, err } diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go index 6bbb9d7b5b12f..6590e3febd19c 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go @@ -4,7 +4,6 @@ package kernel_vmstat import ( - "io/ioutil" "os" "testing" @@ -300,7 +299,7 @@ thp_collapse_alloc_failed 102214 thp_split abcd` func makeFakeVMStatFile(t *testing.T, content []byte) string { - tmpfile, err := ioutil.TempFile("", "kernel_vmstat_test") + tmpfile, err := os.CreateTemp("", "kernel_vmstat_test") require.NoError(t, err) _, err = tmpfile.Write(content) diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index c94438eb38d4d..55ffa1df845f9 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "strconv" "strings" @@ -253,7 +252,7 @@ func (k *Kibana) gatherJSONData(url string, v interface{}) (host string, err err if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) return request.Host, fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) } diff --git a/plugins/inputs/kibana/kibana_test.go b/plugins/inputs/kibana/kibana_test.go index 3dfed9edfa9a2..565d9b1c79416 100644 --- a/plugins/inputs/kibana/kibana_test.go +++ b/plugins/inputs/kibana/kibana_test.go @@ -1,7 +1,7 @@ package kibana import ( - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -46,7 +46,7 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { StatusCode: t.statusCode, } res.Header.Set("Content-Type", "application/json") - res.Body = ioutil.NopCloser(strings.NewReader(t.body)) + res.Body = io.NopCloser(strings.NewReader(t.body)) return res, nil } diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 7acd3202c012b..005ccdc43aab2 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -6,7 +6,7 @@ import ( "compress/zlib" "context" "fmt" - "io/ioutil" + "io" "math/big" "strings" "sync" @@ -349,7 +349,7 @@ func processGzip(data []byte) ([]byte, error) { return nil, err } defer zipData.Close() - return ioutil.ReadAll(zipData) + return io.ReadAll(zipData) } func processZlib(data []byte) ([]byte, error) { @@ -358,7 +358,7 @@ func processZlib(data []byte) ([]byte, error) { return nil, err } defer zlibData.Close() - return ioutil.ReadAll(zlibData) + return io.ReadAll(zlibData) } func processNoOp(data []byte) ([]byte, error) { diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index bcfae4ce8f52f..24db993dd39bb 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -3,8 +3,8 @@ package kube_inventory import ( "context" "fmt" - "io/ioutil" "log" + "os" "strconv" "strings" "sync" @@ -101,7 +101,7 @@ func (ki *KubernetesInventory) Init() error { } if ki.BearerToken != "" { - token, err := ioutil.ReadFile(ki.BearerToken) + token, err := os.ReadFile(ki.BearerToken) if err != nil { return err } diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index ab1cf4bfe4afc..8ca636d480cc2 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -3,8 +3,8 @@ package kubernetes import ( "encoding/json" "fmt" - "io/ioutil" "net/http" + "os" "strings" "time" @@ -93,7 +93,7 @@ func (k *Kubernetes) Init() error { } if k.BearerToken != "" { - token, err := ioutil.ReadFile(k.BearerToken) + token, err := os.ReadFile(k.BearerToken) if err != nil { return err } diff --git a/plugins/inputs/leofs/leofs_test.go b/plugins/inputs/leofs/leofs_test.go index 513d2f5ed7de7..1e33ddc4c3d38 100644 --- a/plugins/inputs/leofs/leofs_test.go +++ b/plugins/inputs/leofs/leofs_test.go @@ -1,7 +1,6 @@ package leofs import ( - "io/ioutil" "os" "os/exec" "runtime" @@ -132,7 +131,7 @@ func testMain(t *testing.T, code string, endpoint string, serverType ServerType) // Build the fake snmpwalk for test src := os.TempDir() + "/test.go" - require.NoError(t, ioutil.WriteFile(src, []byte(code), 0600)) + require.NoError(t, os.WriteFile(src, []byte(code), 0600)) defer os.Remove(src) require.NoError(t, exec.Command("go", "build", "-o", executable, src).Run()) diff --git a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go index 55cb22292105a..19848b6db0e37 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go @@ -3,7 +3,6 @@ package linux_sysctl_fs import ( "bytes" "errors" - "io/ioutil" "os" "strconv" @@ -29,7 +28,7 @@ func (sfs SysctlFS) SampleConfig() string { } func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fieldNames ...string) error { - bs, err := ioutil.ReadFile(sfs.path + "/" + file) + bs, err := os.ReadFile(sfs.path + "/" + file) if err != nil { // Ignore non-existing entries if errors.Is(err, os.ErrNotExist) { @@ -58,7 +57,7 @@ func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fiel } func (sfs *SysctlFS) gatherOne(name string, fields map[string]interface{}) error { - bs, err := ioutil.ReadFile(sfs.path + "/" + name) + bs, err := os.ReadFile(sfs.path + "/" + name) if err != nil { // Ignore non-existing entries if errors.Is(err, os.ErrNotExist) { diff --git a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go index 78011e288b962..8b76b266b1c9e 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go @@ -1,7 +1,6 @@ package linux_sysctl_fs import ( - "io/ioutil" "os" "testing" @@ -10,16 +9,16 @@ import ( ) func TestSysctlFSGather(t *testing.T) { - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) - require.NoError(t, ioutil.WriteFile(td+"/aio-nr", []byte("100\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/aio-max-nr", []byte("101\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/super-nr", []byte("102\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/super-max", []byte("103\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/file-nr", []byte("104\t0\t106\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/inode-state", []byte("107\t108\t109\t0\t0\t0\t0\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/aio-nr", []byte("100\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/aio-max-nr", []byte("101\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/super-nr", []byte("102\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/super-max", []byte("103\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/file-nr", []byte("104\t0\t106\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/inode-state", []byte("107\t108\t109\t0\t0\t0\t0\n"), 0644)) sfs := &SysctlFS{ path: td, diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 9bf4f125ae4f6..3100c615cd4e4 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -1,7 +1,6 @@ package logparser import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -111,7 +110,7 @@ func TestGrokParseLogFiles(t *testing.T) { } func TestGrokParseLogFilesAppearLater(t *testing.T) { - emptydir, err := ioutil.TempDir("", "TestGrokParseLogFilesAppearLater") + emptydir, err := os.MkdirTemp("", "TestGrokParseLogFilesAppearLater") defer os.RemoveAll(emptydir) assert.NoError(t, err) @@ -131,10 +130,10 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { assert.Equal(t, acc.NFields(), 0) - input, err := ioutil.ReadFile(filepath.Join(testdataDir, "test_a.log")) + input, err := os.ReadFile(filepath.Join(testdataDir, "test_a.log")) assert.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(emptydir, "test_a.log"), input, 0644) + err = os.WriteFile(filepath.Join(emptydir, "test_a.log"), input, 0644) assert.NoError(t, err) assert.NoError(t, acc.GatherError(logparser.Gather)) diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index 10a3e7b6b8dd0..6fcaadabcd244 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -206,7 +205,7 @@ func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { defer response.Body.Close() if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) } diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 00aa288b316a8..abd5ce87c6bbb 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -8,7 +8,7 @@ package lustre2 import ( - "io/ioutil" + "os" "path/filepath" "regexp" "strconv" @@ -374,7 +374,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping) e name := path[len(path)-2] //lines, err := internal.ReadLines(file) - wholeFile, err := ioutil.ReadFile(file) + wholeFile, err := os.ReadFile(file) if err != nil { return err } diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 52c7e87f08fc6..7fd3fd91f469e 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -4,7 +4,6 @@ package lustre2 import ( - "io/ioutil" "os" "testing" @@ -149,13 +148,13 @@ func TestLustre2GeneratesMetrics(t *testing.T) { err = os.MkdirAll(obddir+"/"+ostName, 0755) require.NoError(t, err) - err = ioutil.WriteFile(mdtdir+"/"+ostName+"/md_stats", []byte(mdtProcContents), 0644) + err = os.WriteFile(mdtdir+"/"+ostName+"/md_stats", []byte(mdtProcContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(osddir+"/"+ostName+"/stats", []byte(osdldiskfsProcContents), 0644) + err = os.WriteFile(osddir+"/"+ostName+"/stats", []byte(osdldiskfsProcContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(obddir+"/"+ostName+"/stats", []byte(obdfilterProcContents), 0644) + err = os.WriteFile(obddir+"/"+ostName+"/stats", []byte(obdfilterProcContents), 0644) require.NoError(t, err) // Begin by testing standard Lustre stats @@ -218,10 +217,10 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { err = os.MkdirAll(obddir+"/"+ostName, 0755) require.NoError(t, err) - err = ioutil.WriteFile(mdtdir+"/"+ostName+"/job_stats", []byte(mdtJobStatsContents), 0644) + err = os.WriteFile(mdtdir+"/"+ostName+"/job_stats", []byte(mdtJobStatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(obddir+"/"+ostName+"/job_stats", []byte(obdfilterJobStatsContents), 0644) + err = os.WriteFile(obddir+"/"+ostName+"/job_stats", []byte(obdfilterJobStatsContents), 0644) require.NoError(t, err) // Test Lustre Jobstats diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index 259e64a0e3104..2f6cecdb9e0da 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "net/http" "net/url" @@ -148,11 +147,11 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", api.url.String(), resp.Status, body) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/mdstat/mdstat.go b/plugins/inputs/mdstat/mdstat.go index 81e3f36e7c767..3f6fee7d086ca 100644 --- a/plugins/inputs/mdstat/mdstat.go +++ b/plugins/inputs/mdstat/mdstat.go @@ -20,7 +20,6 @@ package mdstat import ( "fmt" - "io/ioutil" "os" "regexp" "sort" @@ -291,7 +290,7 @@ func (k *MdstatConf) getProcMdstat() ([]byte, error) { return nil, err } - data, err := ioutil.ReadFile(mdStatFile) + data, err := os.ReadFile(mdStatFile) if err != nil { return nil, err } diff --git a/plugins/inputs/mdstat/mdstat_test.go b/plugins/inputs/mdstat/mdstat_test.go index fe6041abec353..070b7ddd234f5 100644 --- a/plugins/inputs/mdstat/mdstat_test.go +++ b/plugins/inputs/mdstat/mdstat_test.go @@ -4,7 +4,6 @@ package mdstat import ( - "io/ioutil" "os" "testing" @@ -134,7 +133,7 @@ unused devices: ` func makeFakeMDStatFile(content []byte) (filename string) { - fileobj, err := ioutil.TempFile("", "mdstat") + fileobj, err := os.CreateTemp("", "mdstat") if err != nil { panic(err) } diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index acb79ce5724e5..68203c9d480cb 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -3,7 +3,7 @@ package mesos import ( "encoding/json" "errors" - "io/ioutil" + "io" "log" "net" "net/http" @@ -558,7 +558,7 @@ func (m *Mesos) gatherMainMetrics(u *url.URL, role Role, acc telegraf.Accumulato return err } - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) // Ignore the returned error to not shadow the initial one //nolint:errcheck,revive resp.Body.Close() diff --git a/plugins/inputs/multifile/multifile.go b/plugins/inputs/multifile/multifile.go index 838b1dd764d2f..65c2ac4e4b783 100644 --- a/plugins/inputs/multifile/multifile.go +++ b/plugins/inputs/multifile/multifile.go @@ -3,8 +3,8 @@ package multifile import ( "bytes" "fmt" - "io/ioutil" "math" + "os" "path" "strconv" "time" @@ -84,7 +84,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error { tags := make(map[string]string) for _, file := range m.Files { - fileContents, err := ioutil.ReadFile(file.Name) + fileContents, err := os.ReadFile(file.Name) if err != nil { if m.FailEarly { diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index c2adab29b324d..7144355096b4e 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -5,7 +5,7 @@ package nats import ( "encoding/json" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -56,7 +56,7 @@ func (n *Nats) Gather(acc telegraf.Accumulator) error { } defer resp.Body.Close() - bytes, err := ioutil.ReadAll(resp.Body) + bytes, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index dad4c8e5857f6..c2bb05384d7c8 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -5,7 +5,7 @@ package neptuneapex import ( "encoding/xml" "fmt" - "io/ioutil" + "io" "math" "net/http" "strconv" @@ -276,7 +276,7 @@ func (n *NeptuneApex) sendRequest(server string) ([]byte, error) { url, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("unable to read output from %q: %v", url, err) } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 7e1e753c5ff76..5cd7e76aec439 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -4,7 +4,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -70,7 +70,7 @@ func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] switch contentType { case "application/json": - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index fb40643409056..42e0cab62d53e 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -153,7 +152,7 @@ func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) e defer response.Body.Close() if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) } diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index 681c2f6e7f460..58f60192b96d0 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -25,7 +25,7 @@ package nsq import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strconv" @@ -131,7 +131,7 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status) } - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { return fmt.Errorf(`error reading body: %s`, err) } diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index 5bc2bc85a3136..4408b8f728579 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -2,7 +2,6 @@ package nstat import ( "bytes" - "io/ioutil" "os" "strconv" @@ -62,7 +61,7 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { // load paths, get from env if config values are empty ns.loadPaths() - netstat, err := ioutil.ReadFile(ns.ProcNetNetstat) + netstat, err := os.ReadFile(ns.ProcNetNetstat) if err != nil { return err } @@ -71,14 +70,14 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { ns.gatherNetstat(netstat, acc) // collect SNMP data - snmp, err := ioutil.ReadFile(ns.ProcNetSNMP) + snmp, err := os.ReadFile(ns.ProcNetSNMP) if err != nil { return err } ns.gatherSNMP(snmp, acc) // collect SNMP6 data, if SNMP6 directory exists (IPv6 enabled) - snmp6, err := ioutil.ReadFile(ns.ProcNetSNMP6) + snmp6, err := os.ReadFile(ns.ProcNetSNMP6) if err == nil { ns.gatherSNMP6(snmp6, acc) } else if !os.IsNotExist(err) { diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index ea5887ae10a5d..3c0b14d6e4559 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -1,7 +1,7 @@ package nvidia_smi import ( - "io/ioutil" + "os" "path/filepath" "testing" "time" @@ -139,7 +139,7 @@ func TestGatherValidXML(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) require.NoError(t, err) err = gatherNvidiaSMI(octets, &acc) diff --git a/plugins/inputs/opcua/opcua_util.go b/plugins/inputs/opcua/opcua_util.go index bb7ca56200954..e1304fa304fc6 100644 --- a/plugins/inputs/opcua/opcua_util.go +++ b/plugins/inputs/opcua/opcua_util.go @@ -9,7 +9,6 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "io/ioutil" "log" "math/big" "net" @@ -27,7 +26,7 @@ import ( // SELF SIGNED CERT FUNCTIONS func newTempDir() (string, error) { - dir, err := ioutil.TempDir("", "ssc") + dir, err := os.MkdirTemp("", "ssc") return dir, err } diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index dbee336ba1040..ecbeeb532fd1e 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -2,7 +2,6 @@ package passenger import ( "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -28,7 +27,7 @@ func fakePassengerStatus(stat string) (string, error) { } tempFilePath := filepath.Join(os.TempDir(), "passenger-status"+fileExtension) - if err := ioutil.WriteFile(tempFilePath, []byte(content), 0700); err != nil { + if err := os.WriteFile(tempFilePath, []byte(content), 0700); err != nil { return "", err } diff --git a/plugins/inputs/phpfpm/child.go b/plugins/inputs/phpfpm/child.go index 9ac7e60715856..b6a6f956d3bf0 100644 --- a/plugins/inputs/phpfpm/child.go +++ b/plugins/inputs/phpfpm/child.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/http/cgi" @@ -161,7 +160,7 @@ func (c *child) serve() { var errCloseConn = errors.New("fcgi: connection should be closed") -var emptyBody = ioutil.NopCloser(strings.NewReader("")) +var emptyBody = io.NopCloser(strings.NewReader("")) // ErrRequestAborted is returned by Read when a handler attempts to read the // body of a request that has been aborted by the web server. @@ -295,7 +294,7 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) { // can properly cut off the client sending all the data. // For now just bound it a little and //nolint:errcheck,revive - io.CopyN(ioutil.Discard, body, 100<<20) + io.CopyN(io.Discard, body, 100<<20) //nolint:errcheck,revive body.Close() diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go index a7234225806cc..7211c0c3971e1 100644 --- a/plugins/inputs/phpfpm/fcgi_test.go +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -8,7 +8,6 @@ import ( "bytes" "errors" "io" - "io/ioutil" "net/http" "testing" ) @@ -242,7 +241,7 @@ func TestChildServeCleansUp(t *testing.T) { r *http.Request, ) { // block on reading body of request - _, err := io.Copy(ioutil.Discard, r.Body) + _, err := io.Copy(io.Discard, r.Body) if err != tt.err { t.Errorf("Expected %#v, got %#v", tt.err, err) } @@ -274,7 +273,7 @@ func TestMalformedParams(_ *testing.T) { // end of params 1, 4, 0, 1, 0, 0, 0, 0, } - rw := rwNopCloser{bytes.NewReader(input), ioutil.Discard} + rw := rwNopCloser{bytes.NewReader(input), io.Discard} c := newChild(rw, http.DefaultServeMux) c.serve() } diff --git a/plugins/inputs/postfix/postfix_test.go b/plugins/inputs/postfix/postfix_test.go index 782a0c78c95b9..6ab6556a0cf07 100644 --- a/plugins/inputs/postfix/postfix_test.go +++ b/plugins/inputs/postfix/postfix_test.go @@ -4,7 +4,6 @@ package postfix import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -15,7 +14,7 @@ import ( ) func TestGather(t *testing.T) { - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) @@ -23,12 +22,12 @@ func TestGather(t *testing.T) { require.NoError(t, os.MkdirAll(filepath.FromSlash(td+"/"+q), 0755)) } - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644)) p := Postfix{ QueueDirectory: td, diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 8311064b1f060..176827a4b1dc7 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -3,7 +3,7 @@ package postgresql_extensible import ( "bytes" "fmt" - "io/ioutil" + "io" "os" "strings" "time" @@ -147,7 +147,7 @@ func ReadQueryFromFile(filePath string) (string, error) { } defer file.Close() - query, err := ioutil.ReadAll(file) + query, err := io.ReadAll(file) if err != nil { return "", err } diff --git a/plugins/inputs/processes/processes_notwindows.go b/plugins/inputs/processes/processes_notwindows.go index 3c685cf1ebf7f..070dce65fe2a0 100644 --- a/plugins/inputs/processes/processes_notwindows.go +++ b/plugins/inputs/processes/processes_notwindows.go @@ -6,7 +6,6 @@ package processes import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -192,7 +191,7 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { } func readProcFile(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) + data, err := os.ReadFile(filename) if err != nil { if os.IsNotExist(err) { return nil, nil diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index d5d8b8b36fe70..05cf4a72735f0 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -2,7 +2,7 @@ package procstat import ( "fmt" - "io/ioutil" + "os" "regexp" "strconv" "strings" @@ -43,7 +43,7 @@ func (pg *NativeFinder) UID(user string) ([]PID, error) { //PidFile returns the pid from the pid file given. func (pg *NativeFinder) PidFile(path string) ([]PID, error) { var pids []PID - pidString, err := ioutil.ReadFile(path) + pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'", path, err) diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 85e8d80f83cfe..34c44e0b2fefb 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -2,7 +2,7 @@ package procstat import ( "fmt" - "io/ioutil" + "os" "os/exec" "strconv" "strings" @@ -25,7 +25,7 @@ func NewPgrep() (PIDFinder, error) { func (pg *Pgrep) PidFile(path string) ([]PID, error) { var pids []PID - pidString, err := ioutil.ReadFile(path) + pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'", path, err) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index b838df651f636..ce29a08460cca 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -3,7 +3,6 @@ package procstat import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -516,7 +515,7 @@ func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { return nil, fmt.Errorf("not a directory %s", path) } procsPath := filepath.Join(path, "cgroup.procs") - out, err := ioutil.ReadFile(procsPath) + out, err := os.ReadFile(procsPath) if err != nil { return nil, err } diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 2d8687e75013b..bc586fca4fa42 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -2,7 +2,6 @@ package procstat import ( "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -385,10 +384,10 @@ func TestGather_cgroupPIDs(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("no cgroups in windows") } - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) - err = ioutil.WriteFile(filepath.Join(td, "cgroup.procs"), []byte("1234\n5678\n"), 0644) + err = os.WriteFile(filepath.Join(td, "cgroup.procs"), []byte("1234\n5678\n"), 0644) require.NoError(t, err) p := Procstat{ diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 0e658003a7122..a57e771bfc483 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -5,11 +5,11 @@ import ( "crypto/tls" "encoding/json" "fmt" - "io/ioutil" "log" "net" "net/http" "net/url" + "os" "os/user" "path/filepath" "time" @@ -41,7 +41,7 @@ const cAdvisorPodListDefaultInterval = 60 // loadClient parses a kubeconfig from a file and returns a Kubernetes // client. It does not support extensions or client auth providers. func loadClient(kubeconfigPath string) (*kubernetes.Clientset, error) { - data, err := ioutil.ReadFile(kubeconfigPath) + data, err := os.ReadFile(kubeconfigPath) if err != nil { return nil, fmt.Errorf("failed reading '%s': %v", kubeconfigPath, err) } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index adeb452253a37..136e8ae0f6d9d 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -4,7 +4,7 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -382,7 +382,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error p.addHeaders(req) if p.BearerToken != "" { - token, err := ioutil.ReadFile(p.BearerToken) + token, err := os.ReadFile(p.BearerToken) if err != nil { return err } @@ -408,7 +408,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("error reading body: %s", err) } diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index ec34a7b2f5a36..efd7fae7d5d5f 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -3,7 +3,7 @@ package proxmox import ( "encoding/json" "errors" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -115,7 +115,7 @@ func performRequest(px *Proxmox, apiURL string, method string, data url.Values) } defer resp.Body.Close() - responseBody, err := ioutil.ReadAll(resp.Body) + responseBody, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index 741de4a0dc013..9976012fe368c 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -2,12 +2,12 @@ package puppetagent import ( "fmt" - "gopkg.in/yaml.v2" - "io/ioutil" "os" "reflect" "strings" + "gopkg.in/yaml.v2" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -92,7 +92,7 @@ func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { return fmt.Errorf("%s", err) } - fh, err := ioutil.ReadFile(pa.Location) + fh, err := os.ReadFile(pa.Location) if err != nil { return fmt.Errorf("%s", err) } diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 13be5f63b1619..158b8d5ed6b21 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -3,7 +3,7 @@ package rabbitmq import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "strconv" "sync" @@ -431,7 +431,7 @@ func (r *RabbitMQ) requestEndpoint(u string) ([]byte, error) { return nil, fmt.Errorf("getting %q failed: %v %v", u, resp.StatusCode, http.StatusText(resp.StatusCode)) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } func (r *RabbitMQ) requestJSON(u string, target interface{}) error { diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 830819b0528e4..e867b1e2dcb61 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -2,9 +2,9 @@ package rabbitmq import ( "fmt" - "io/ioutil" "net/http" "net/http/httptest" + "os" "time" "testing" @@ -37,7 +37,7 @@ func TestRabbitMQGeneratesMetricsSet1(t *testing.T) { return } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) @@ -247,7 +247,7 @@ func TestRabbitMQGeneratesMetricsSet2(t *testing.T) { return } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) diff --git a/plugins/inputs/ravendb/ravendb_test.go b/plugins/inputs/ravendb/ravendb_test.go index 42eaea3fb3e3b..3da1d0190a055 100644 --- a/plugins/inputs/ravendb/ravendb_test.go +++ b/plugins/inputs/ravendb/ravendb_test.go @@ -1,9 +1,9 @@ package ravendb import ( - "io/ioutil" "net/http" "net/http/httptest" + "os" "testing" "time" @@ -30,7 +30,7 @@ func TestRavenDBGeneratesMetricsFull(t *testing.T) { require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) @@ -225,7 +225,7 @@ func TestRavenDBGeneratesMetricsMin(t *testing.T) { require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go index 4d9e70a57a9bd..dcf26b192c651 100644 --- a/plugins/inputs/redfish/redfish.go +++ b/plugins/inputs/redfish/redfish.go @@ -3,7 +3,7 @@ package redfish import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -199,7 +199,7 @@ func (r *Redfish) getData(url string, payload interface{}) error { r.Address) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/salesforce/salesforce.go b/plugins/inputs/salesforce/salesforce.go index f1ecff8d61a83..f7c321d7ae978 100644 --- a/plugins/inputs/salesforce/salesforce.go +++ b/plugins/inputs/salesforce/salesforce.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -203,11 +202,11 @@ func (s *Salesforce) login() error { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", loginEndpoint, resp.Status, body) } - respBody, err := ioutil.ReadAll(resp.Body) + respBody, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index d85afca8e4e7f..604a2205c0d2c 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -1,9 +1,9 @@ package snmp_legacy import ( - "io/ioutil" "log" "net" + "os" "strconv" "strings" "time" @@ -296,7 +296,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { subnodes: make(map[string]Node), } - data, err := ioutil.ReadFile(s.SnmptranslateFile) + data, err := os.ReadFile(s.SnmptranslateFile) if err != nil { s.Log.Errorf("Reading SNMPtranslate file error: %s", err.Error()) return err diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index c33e59f7129b6..a3ccacae1ceb2 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -4,7 +4,6 @@ import ( "bytes" "crypto/tls" "io" - "io/ioutil" "log" "net" "os" @@ -69,7 +68,7 @@ func TestSocketListener_tcp_tls(t *testing.T) { } func TestSocketListener_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix_tls.sock") @@ -133,7 +132,7 @@ func TestSocketListener_udp(t *testing.T) { } func TestSocketListener_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix.sock") @@ -163,7 +162,7 @@ func TestSocketListener_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unixgram.sock") diff --git a/plugins/inputs/sql/sql.go b/plugins/inputs/sql/sql.go index c6c4658d83959..87227663bb4d0 100644 --- a/plugins/inputs/sql/sql.go +++ b/plugins/inputs/sql/sql.go @@ -5,7 +5,7 @@ import ( dbsql "database/sql" "errors" "fmt" - "io/ioutil" + "os" "sort" "strings" "sync" @@ -326,7 +326,7 @@ func (s *SQL) Init() error { // In case we got a script, we should read the query now. if q.Script != "" { - query, err := ioutil.ReadFile(q.Script) + query, err := os.ReadFile(q.Script) if err != nil { return fmt.Errorf("reading script %q failed: %v", q.Script, err) } diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index 9b620efc3e216..f3fc5f14eb394 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -2,7 +2,6 @@ package suricata import ( "fmt" - "io/ioutil" "log" "math/rand" "net" @@ -21,7 +20,7 @@ var ex2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats"," var ex3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W#05-wlp4s0": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}` func TestSuricataLarge(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -38,7 +37,7 @@ func TestSuricataLarge(t *testing.T) { require.NoError(t, s.Start(&acc)) defer s.Stop() - data, err := ioutil.ReadFile("testdata/test1.json") + data, err := os.ReadFile("testdata/test1.json") require.NoError(t, err) c, err := net.Dial("unix", tmpfn) @@ -49,7 +48,7 @@ func TestSuricataLarge(t *testing.T) { require.NoError(t, err) //test suricata alerts - data2, err := ioutil.ReadFile("testdata/test2.json") + data2, err := os.ReadFile("testdata/test2.json") require.NoError(t, err) _, err = c.Write(data2) require.NoError(t, err) @@ -61,7 +60,7 @@ func TestSuricataLarge(t *testing.T) { } func TestSuricataAlerts(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -78,7 +77,7 @@ func TestSuricataAlerts(t *testing.T) { require.NoError(t, s.Start(&acc)) defer s.Stop() - data, err := ioutil.ReadFile("testdata/test3.json") + data, err := os.ReadFile("testdata/test3.json") require.NoError(t, err) c, err := net.Dial("unix", tmpfn) @@ -116,7 +115,7 @@ func TestSuricataAlerts(t *testing.T) { } func TestSuricata(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -162,7 +161,7 @@ func TestSuricata(t *testing.T) { } func TestThreadStats(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -212,7 +211,7 @@ func TestThreadStats(t *testing.T) { } func TestSuricataInvalid(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -254,7 +253,7 @@ func TestSuricataInvalidPath(t *testing.T) { } func TestSuricataTooLongLine(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -282,7 +281,7 @@ func TestSuricataTooLongLine(t *testing.T) { } func TestSuricataEmptyJSON(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -309,7 +308,7 @@ func TestSuricataEmptyJSON(t *testing.T) { } func TestSuricataDisconnectSocket(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -345,7 +344,7 @@ func TestSuricataDisconnectSocket(t *testing.T) { } func TestSuricataStartStop(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -387,7 +386,7 @@ func TestSuricataParse(t *testing.T) { } for _, tc := range tests { - data, err := ioutil.ReadFile("testdata/" + tc.filename) + data, err := os.ReadFile("testdata/" + tc.filename) require.NoError(t, err) s := Suricata{ Delimiter: "_", diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go index dd733253635b8..e8fbe62989055 100644 --- a/plugins/inputs/synproxy/synproxy_test.go +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -4,7 +4,6 @@ package synproxy import ( - "io/ioutil" "os" "testing" @@ -156,7 +155,7 @@ func testSynproxyFileData(t *testing.T, fileData string, telegrafData map[string } func makeFakeSynproxyFile(content []byte) string { - tmpfile, err := ioutil.TempFile("", "synproxy_test") + tmpfile, err := os.CreateTemp("", "synproxy_test") if err != nil { panic(err) } diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go index 4d29daaf53915..7782ad968a3b1 100644 --- a/plugins/inputs/syslog/nontransparent_test.go +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -2,7 +2,6 @@ package syslog import ( "crypto/tls" - "io/ioutil" "net" "os" "path/filepath" @@ -270,7 +269,7 @@ func TestNonTransparentStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { } func TestNonTransparentStrict_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") @@ -278,7 +277,7 @@ func TestNonTransparentStrict_unix(t *testing.T) { } func TestNonTransparentBestEffort_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") @@ -286,7 +285,7 @@ func TestNonTransparentBestEffort_unix(t *testing.T) { } func TestNonTransparentStrict_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") @@ -294,7 +293,7 @@ func TestNonTransparentStrict_unix_tls(t *testing.T) { } func TestNonTransparentBestEffort_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index 53fee69d112a5..1c0cc024507e2 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -3,7 +3,6 @@ package syslog import ( "crypto/tls" "fmt" - "io/ioutil" "net" "os" "path/filepath" @@ -470,7 +469,7 @@ func TestOctetCountingStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { } func TestOctetCountingStrict_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") @@ -478,7 +477,7 @@ func TestOctetCountingStrict_unix(t *testing.T) { } func TestOctetCountingBestEffort_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") @@ -486,7 +485,7 @@ func TestOctetCountingBestEffort_unix(t *testing.T) { } func TestOctetCountingStrict_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") @@ -494,7 +493,7 @@ func TestOctetCountingStrict_unix_tls(t *testing.T) { } func TestOctetCountingBestEffort_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index ab3fe2ceaf60f..5bcb847b36ec4 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -2,7 +2,6 @@ package syslog import ( "fmt" - "io/ioutil" "net" "os" "path/filepath" @@ -290,7 +289,7 @@ func TestBestEffort_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unixgram.sock") @@ -304,7 +303,7 @@ func TestStrict_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unixgram.sock") diff --git a/plugins/inputs/syslog/syslog_test.go b/plugins/inputs/syslog/syslog_test.go index ac0539d30e1af..00146fde9cd26 100644 --- a/plugins/inputs/syslog/syslog_test.go +++ b/plugins/inputs/syslog/syslog_test.go @@ -1,7 +1,6 @@ package syslog import ( - "io/ioutil" "os" "path/filepath" "runtime" @@ -46,7 +45,7 @@ func TestAddress(t *testing.T) { require.EqualError(t, err, "unknown protocol 'unsupported' in 'example.com:6514'") require.Error(t, err) - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") defer os.RemoveAll(tmpdir) require.NoError(t, err) sock := filepath.Join(tmpdir, "syslog.TestAddress.sock") diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 16c38519a83b6..1098a10edbff5 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -2,7 +2,6 @@ package tail import ( "bytes" - "io/ioutil" "log" "os" "path/filepath" @@ -49,7 +48,7 @@ func NewTestTail() *Tail { } func TestTailBadLine(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -86,7 +85,7 @@ func TestTailBadLine(t *testing.T) { } func TestTailDosLineEndings(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n") @@ -173,7 +172,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { } func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -286,7 +285,7 @@ func createGrokParser() (parsers.Parser, error) { // The csv parser should only parse the header line once per file. func TestCSVHeadersParsedOnce(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -345,7 +344,7 @@ cpu,42 // Ensure that the first line can produce multiple metrics (#6138) func TestMultipleMetricsOnFirstLine(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -542,7 +541,7 @@ func TestCharacterEncoding(t *testing.T) { } func TestTailEOF(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu usage_idle=100\r\n") diff --git a/plugins/inputs/twemproxy/twemproxy.go b/plugins/inputs/twemproxy/twemproxy.go index cda56943f1002..b4c4b52f85b6c 100644 --- a/plugins/inputs/twemproxy/twemproxy.go +++ b/plugins/inputs/twemproxy/twemproxy.go @@ -3,7 +3,7 @@ package twemproxy import ( "encoding/json" "errors" - "io/ioutil" + "io" "net" "time" @@ -37,7 +37,7 @@ func (t *Twemproxy) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - body, err := ioutil.ReadAll(conn) + body, err := io.ReadAll(conn) if err != nil { return err } diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index 8bd8262c035b0..3e36838c6192a 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -104,7 +104,7 @@ package udp_listener // } // func TestRunParser(t *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257\n") // listener, in := newTestUDPListener() @@ -127,7 +127,7 @@ package udp_listener // } // func TestRunParserInvalidMsg(_ *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("cpu_load_short") // listener, in := newTestUDPListener() @@ -153,7 +153,7 @@ package udp_listener // } // func TestRunParserGraphiteMsg(t *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("cpu.load.graphite 12 1454780029") // listener, in := newTestUDPListener() @@ -174,7 +174,7 @@ package udp_listener // } // func TestRunParserJSONMsg(t *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n") // listener, in := newTestUDPListener() diff --git a/plugins/inputs/webhooks/filestack/filestack_webhooks.go b/plugins/inputs/webhooks/filestack/filestack_webhooks.go index 19f8c0251bbb7..44def8c6f5141 100644 --- a/plugins/inputs/webhooks/filestack/filestack_webhooks.go +++ b/plugins/inputs/webhooks/filestack/filestack_webhooks.go @@ -2,7 +2,7 @@ package filestack import ( "encoding/json" - "io/ioutil" + "io" "log" "net/http" "time" @@ -25,7 +25,7 @@ func (fs *FilestackWebhook) Register(router *mux.Router, acc telegraf.Accumulato func (fs *FilestackWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/github/github_webhooks.go b/plugins/inputs/webhooks/github/github_webhooks.go index 5febb80afb6bb..2d48cbef2e5f2 100644 --- a/plugins/inputs/webhooks/github/github_webhooks.go +++ b/plugins/inputs/webhooks/github/github_webhooks.go @@ -5,7 +5,7 @@ import ( "crypto/sha1" "encoding/hex" "encoding/json" - "io/ioutil" + "io" "log" "net/http" @@ -28,7 +28,7 @@ func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator) func (gh *GithubWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() eventType := r.Header.Get("X-Github-Event") - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go index a7e219c53c905..67ba86908d1a1 100644 --- a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go +++ b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go @@ -2,7 +2,7 @@ package mandrill import ( "encoding/json" - "io/ioutil" + "io" "log" "net/http" "net/url" @@ -31,7 +31,7 @@ func (md *MandrillWebhook) returnOK(w http.ResponseWriter, _ *http.Request) { func (md *MandrillWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go index 55ff7eb2f3594..d9c1323cdd608 100644 --- a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go +++ b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go @@ -3,7 +3,7 @@ package rollbar import ( "encoding/json" "errors" - "io/ioutil" + "io" "log" "net/http" "time" @@ -25,7 +25,7 @@ func (rb *RollbarWebhook) Register(router *mux.Router, acc telegraf.Accumulator) func (rb *RollbarWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/wireless/wireless_linux.go b/plugins/inputs/wireless/wireless_linux.go index 706f9700d12c9..29a0250d92b7f 100644 --- a/plugins/inputs/wireless/wireless_linux.go +++ b/plugins/inputs/wireless/wireless_linux.go @@ -5,7 +5,6 @@ package wireless import ( "bytes" - "io/ioutil" "log" "os" "path" @@ -47,7 +46,7 @@ func (w *Wireless) Gather(acc telegraf.Accumulator) error { w.loadPath() wirelessPath := path.Join(w.HostProc, "net", "wireless") - table, err := ioutil.ReadFile(wirelessPath) + table, err := os.ReadFile(wirelessPath) if err != nil { return err } diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index b106f91b772f6..3486f2779eb2b 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -7,14 +7,15 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "github.com/pion/dtls/v2" - "io/ioutil" "net" "net/url" + "os" "path/filepath" "strings" "time" + "github.com/pion/dtls/v2" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/globpath" @@ -176,7 +177,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica return certs, nil case "file": - content, err := ioutil.ReadFile(u.Path) + content, err := os.ReadFile(u.Path) if err != nil { return nil, err } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 9c42c09bdabda..f0b0379109749 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -4,8 +4,6 @@ import ( "crypto/tls" "encoding/base64" "fmt" - "github.com/pion/dtls/v2" - "io/ioutil" "math/big" "net" "net/url" @@ -15,6 +13,8 @@ import ( "testing" "time" + "github.com/pion/dtls/v2" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -32,7 +32,7 @@ var _ telegraf.Input = &X509Cert{} func TestGatherRemoteIntegration(t *testing.T) { t.Skip("Skipping network-dependent test due to race condition when test-all") - tmpfile, err := ioutil.TempFile("", "example") + tmpfile, err := os.CreateTemp("", "example") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -149,7 +149,7 @@ func TestGatherLocal(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f, err := ioutil.TempFile("", "x509_cert") + f, err := os.CreateTemp("", "x509_cert") require.NoError(t, err) _, err = f.Write([]byte(test.content)) @@ -181,7 +181,7 @@ func TestGatherLocal(t *testing.T) { func TestTags(t *testing.T) { cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert()) - f, err := ioutil.TempFile("", "x509_cert") + f, err := os.CreateTemp("", "x509_cert") require.NoError(t, err) _, err = f.Write([]byte(cert)) @@ -238,7 +238,7 @@ func TestGatherChain(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f, err := ioutil.TempFile("", "x509_cert") + f, err := os.CreateTemp("", "x509_cert") require.NoError(t, err) _, err = f.Write([]byte(test.content)) diff --git a/plugins/inputs/zfs/zfs_linux_test.go b/plugins/inputs/zfs/zfs_linux_test.go index 52622582029a5..b844759eaffd1 100644 --- a/plugins/inputs/zfs/zfs_linux_test.go +++ b/plugins/inputs/zfs/zfs_linux_test.go @@ -4,7 +4,6 @@ package zfs import ( - "io/ioutil" "os" "testing" @@ -192,10 +191,10 @@ func TestZfsPoolMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/HOME", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(poolIoContents), 0644) + err = os.WriteFile(testKstatPath+"/HOME/io", []byte(poolIoContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) require.NoError(t, err) poolMetrics := getPoolMetrics() @@ -231,25 +230,25 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/HOME", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644) + err = os.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/zil", []byte(zilContents), 0644) + err = os.WriteFile(testKstatPath+"/zil", []byte(zilContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/fm", []byte(fmContents), 0644) + err = os.WriteFile(testKstatPath+"/fm", []byte(fmContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/dmu_tx", []byte(dmuTxContents), 0644) + err = os.WriteFile(testKstatPath+"/dmu_tx", []byte(dmuTxContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/abdstats", []byte(abdstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/abdstats", []byte(abdstatsContents), 0644) require.NoError(t, err) intMetrics := getKstatMetricsAll() @@ -272,7 +271,7 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/STORAGE", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644) + err = os.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644) require.NoError(t, err) tags = map[string]string{ diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go index 9bf1f3261d9f6..09518103b22cc 100644 --- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go +++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go @@ -29,8 +29,8 @@ import ( "errors" "flag" "fmt" - "io/ioutil" "log" + "os" "github.com/apache/thrift/lib/go/thrift" "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" @@ -52,7 +52,7 @@ func init() { func main() { flag.Parse() - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { log.Fatalf("Error reading file: %v\n", err) } @@ -63,7 +63,7 @@ func main() { if err != nil { log.Fatalf("%v\n", err) } - if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil { + if err := os.WriteFile(outFileName, raw, 0644); err != nil { log.Fatalf("%v", err) } case "thrift": @@ -71,7 +71,7 @@ func main() { if err != nil { log.Fatalf("%v\n", err) } - if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil { + if err := os.WriteFile(outFileName, raw, 0644); err != nil { log.Fatalf("%v", err) } default: diff --git a/plugins/inputs/zipkin/codec/thrift/thrift_test.go b/plugins/inputs/zipkin/codec/thrift/thrift_test.go index d4bbc1d54df20..ea566e4bfd0c8 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift_test.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift_test.go @@ -1,7 +1,7 @@ package thrift import ( - "io/ioutil" + "os" "testing" "github.com/google/go-cmp/cmp" @@ -193,7 +193,7 @@ func TestUnmarshalThrift(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - dat, err := ioutil.ReadFile(tt.filename) + dat, err := os.ReadFile(tt.filename) if err != nil { t.Fatalf("Could not find file %s\n", tt.filename) } diff --git a/plugins/inputs/zipkin/handler.go b/plugins/inputs/zipkin/handler.go index 24e7ac12f01be..83288bd6e4b2e 100644 --- a/plugins/inputs/zipkin/handler.go +++ b/plugins/inputs/zipkin/handler.go @@ -3,7 +3,7 @@ package zipkin import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "mime" "net/http" "strings" @@ -88,7 +88,7 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnsupportedMediaType) } - octets, err := ioutil.ReadAll(body) + octets, err := io.ReadAll(body) if err != nil { s.recorder.Error(err) w.WriteHeader(http.StatusInternalServerError) diff --git a/plugins/inputs/zipkin/handler_test.go b/plugins/inputs/zipkin/handler_test.go index b0176a22ead3c..f6e8bece80240 100644 --- a/plugins/inputs/zipkin/handler_test.go +++ b/plugins/inputs/zipkin/handler_test.go @@ -2,9 +2,10 @@ package zipkin import ( "bytes" - "io/ioutil" + "io" "net/http" "net/http/httptest" + "os" "strconv" "testing" "time" @@ -28,7 +29,7 @@ func (m *MockRecorder) Error(err error) { } func TestSpanHandler(t *testing.T) { - dat, err := ioutil.ReadFile("testdata/threespans.dat") + dat, err := os.ReadFile("testdata/threespans.dat") if err != nil { t.Fatalf("Could not find file %s\n", "testdata/threespans.dat") } @@ -37,7 +38,7 @@ func TestSpanHandler(t *testing.T) { r := httptest.NewRequest( "POST", "http://server.local/api/v1/spans", - ioutil.NopCloser( + io.NopCloser( bytes.NewReader(dat))) r.Header.Set("Content-Type", "application/x-thrift") diff --git a/plugins/inputs/zipkin/zipkin_test.go b/plugins/inputs/zipkin/zipkin_test.go index 77bef853b7e52..0c0bab279cc7f 100644 --- a/plugins/inputs/zipkin/zipkin_test.go +++ b/plugins/inputs/zipkin/zipkin_test.go @@ -3,8 +3,8 @@ package zipkin import ( "bytes" "fmt" - "io/ioutil" "net/http" + "os" "testing" "time" @@ -637,7 +637,7 @@ func TestZipkinPlugin(t *testing.T) { } func postThriftData(datafile, address, contentType string) error { - dat, err := ioutil.ReadFile(datafile) + dat, err := os.ReadFile(datafile) if err != nil { return fmt.Errorf("could not read from data file %s", datafile) } diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index e513dbdca23e9..ca511a5211860 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -7,7 +7,7 @@ import ( "encoding/json" "fmt" "hash/fnv" - "io/ioutil" + "io" "net/http" "regexp" "strings" @@ -221,7 +221,7 @@ func vmInstanceMetadata(c *http.Client) (string, string, error) { } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return "", "", err } @@ -356,7 +356,7 @@ func (a *AzureMonitor) send(body []byte) error { } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 { return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status) } diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index 11796e8e12994..adf74ea48a232 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -3,7 +3,7 @@ package dynatrace import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "strings" "time" @@ -209,7 +209,7 @@ func (d *Dynatrace) send(msg string) error { } // print metric line results as info log - bodyBytes, err := ioutil.ReadAll(resp.Body) + bodyBytes, err := io.ReadAll(resp.Body) if err != nil { d.Log.Errorf("Dynatrace error reading response") } diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index c3cb091cbf549..0ed7cf4cf1195 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -3,7 +3,7 @@ package dynatrace import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "regexp" @@ -130,7 +130,7 @@ func TestSendMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) @@ -209,7 +209,7 @@ func TestSendMetrics(t *testing.T) { func TestSendSingleMetricWithUnorderedTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because dimension order isn't guaranteed @@ -255,7 +255,7 @@ func TestSendMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) expected := "mymeasurement.myfield,dt.metrics.source=telegraf gauge,3.14 1289430000000" @@ -296,7 +296,7 @@ func TestSendMetricWithUpperCaseTagKeys(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) @@ -343,7 +343,7 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed @@ -384,7 +384,7 @@ func TestSendMetricWithDefaultDimensions(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed @@ -427,7 +427,7 @@ func TestMetricDimensionsOverrideDefault(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed @@ -470,7 +470,7 @@ func TestStaticDimensionsOverrideMetric(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed diff --git a/plugins/outputs/file/file_test.go b/plugins/outputs/file/file_test.go index f1e87853d6153..5fcdc511972ac 100644 --- a/plugins/outputs/file/file_test.go +++ b/plugins/outputs/file/file_test.go @@ -3,7 +3,6 @@ package file import ( "bytes" "io" - "io/ioutil" "os" "testing" @@ -181,7 +180,7 @@ func TestFileStdout(t *testing.T) { } func createFile() *os.File { - f, err := ioutil.TempFile("", "") + f, err := os.CreateTemp("", "") if err != nil { panic(err) } @@ -190,7 +189,7 @@ func createFile() *os.File { } func tmpFile() string { - d, err := ioutil.TempDir("", "") + d, err := os.MkdirTemp("", "") if err != nil { panic(err) } @@ -198,7 +197,7 @@ func tmpFile() string { } func validateFile(fname, expS string, t *testing.T) { - buf, err := ioutil.ReadFile(fname) + buf, err := os.ReadFile(fname) if err != nil { panic(err) } diff --git a/plugins/outputs/health/health_test.go b/plugins/outputs/health/health_test.go index f03cfcacba7a6..03a08fca21e7b 100644 --- a/plugins/outputs/health/health_test.go +++ b/plugins/outputs/health/health_test.go @@ -1,7 +1,7 @@ package health_test import ( - "io/ioutil" + "io" "net/http" "testing" "time" @@ -121,7 +121,7 @@ func TestHealth(t *testing.T) { require.NoError(t, err) require.Equal(t, tt.expectedCode, resp.StatusCode) - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) require.NoError(t, err) err = output.Close() diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index edaae3f6ec07d..c94052ea92c1c 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -6,7 +6,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "strings" "time" @@ -195,7 +194,7 @@ func (h *HTTP) write(reqBody []byte) error { return fmt.Errorf("when writing to [%s] received status code: %d. body: %s", h.URL, resp.StatusCode, errorLine) } - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("when writing to [%s] received error: %v", h.URL, err) } diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 8089f45f59f2e..d6803eed3211d 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -3,7 +3,7 @@ package http import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -272,7 +272,7 @@ func TestContentEncodingGzip(t *testing.T) { require.NoError(t, err) } - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) require.Contains(t, string(payload), "cpu value=42") diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 5c11d2821d2f1..ac85814db1f34 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -489,7 +488,7 @@ func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func (c *httpClient) addHeaders(req *http.Request) { @@ -503,13 +502,13 @@ func (c *httpClient) addHeaders(req *http.Request) { } func (c *httpClient) validateResponse(response io.ReadCloser) (io.ReadCloser, error) { - bodyBytes, err := ioutil.ReadAll(response) + bodyBytes, err := io.ReadAll(response) if err != nil { return nil, err } defer response.Close() - originalResponse := ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) + originalResponse := io.NopCloser(bytes.NewBuffer(bodyBytes)) // Empty response is valid. if response == http.NoBody || len(bodyBytes) == 0 || bodyBytes == nil { diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index e19d8d2e580c9..ba4dd2d81b12a 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -6,7 +6,7 @@ import ( "compress/gzip" "context" "fmt" - "io/ioutil" + "io" "log" "net" "net/http" @@ -284,7 +284,7 @@ func TestHTTP_Write(t *testing.T) { }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") w.WriteHeader(http.StatusNoContent) @@ -573,7 +573,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { gr, err := gzip.NewReader(r.Body) require.NoError(t, err) - body, err := ioutil.ReadAll(gr) + body, err := io.ReadAll(gr) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") @@ -618,7 +618,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { } func TestHTTP_UnixSocket(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf-test") + tmpdir, err := os.MkdirTemp("", "telegraf-test") if err != nil { require.NoError(t, err) } @@ -700,7 +700,7 @@ func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) { r.ParseForm() require.Equal(t, r.Form["db"], []string{"foo"}) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") @@ -835,7 +835,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu,rp=foo value=42") w.WriteHeader(http.StatusNoContent) @@ -917,7 +917,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") w.WriteHeader(http.StatusNoContent) @@ -948,7 +948,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu,rp=foo value=42") w.WriteHeader(http.StatusNoContent) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index e8df4da7d2041..c076580255740 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "math" "net" @@ -361,7 +360,7 @@ func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func (c *httpClient) addHeaders(req *http.Request) { diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go index 23c3ff05e17b6..0637cd8060bd0 100644 --- a/plugins/outputs/influxdb_v2/http_test.go +++ b/plugins/outputs/influxdb_v2/http_test.go @@ -2,7 +2,7 @@ package influxdb_v2_test import ( "context" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -63,7 +63,7 @@ func TestWriteBucketTagWorksOnRetry(t *testing.T) { r.ParseForm() require.Equal(t, r.Form["bucket"], []string{"foo"}) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index d4aa3e6e92bb7..dc1e9b6fa7856 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "regexp" "time" @@ -151,7 +151,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { defer resp.Body.Close() if resp.StatusCode != 200 || l.Debug { - htmlData, err := ioutil.ReadAll(resp.Body) + htmlData, err := io.ReadAll(resp.Body) if err != nil { l.Log.Debugf("Couldn't get response! (%v)", err) } diff --git a/plugins/outputs/loki/loki_test.go b/plugins/outputs/loki/loki_test.go index efe31728218d7..ba6d0808fabaa 100644 --- a/plugins/outputs/loki/loki_test.go +++ b/plugins/outputs/loki/loki_test.go @@ -4,14 +4,15 @@ import ( "compress/gzip" "encoding/json" "fmt" - "github.com/influxdata/telegraf/testutil" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" "testing" "time" + "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/stretchr/testify/require" @@ -215,7 +216,7 @@ func TestContentEncodingGzip(t *testing.T) { require.NoError(t, err) } - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) var s Request @@ -394,7 +395,7 @@ func TestMetricSorting(t *testing.T) { body := r.Body var err error - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) var s Request diff --git a/plugins/outputs/opentsdb/opentsdb_http.go b/plugins/outputs/opentsdb/opentsdb_http.go index b164765850578..582a9bb85fc9a 100644 --- a/plugins/outputs/opentsdb/opentsdb_http.go +++ b/plugins/outputs/opentsdb/opentsdb_http.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "net/http" "net/http/httputil" @@ -163,7 +162,7 @@ func (o *openTSDBHttp) flush() error { fmt.Printf("Received response\n%s\n\n", dump) } else { // Important so http client reuse connection for next request if need be. - io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) } if resp.StatusCode/100 != 2 { diff --git a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go index 39b8fec262095..95fa97fb688b7 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -261,7 +261,7 @@ rpc_duration_seconds_count 2693 require.NoError(t, err) require.Equal(t, http.StatusOK, resp.StatusCode) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -392,7 +392,7 @@ rpc_duration_seconds_count 2693 resp, err := http.Get(output.URL()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -422,7 +422,7 @@ func TestLandingPage(t *testing.T) { resp, err := http.Get(u.String()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, expected, strings.TrimSpace(string(actual))) diff --git a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go index 27be9103b28bd..c5ff76d4017a7 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -321,7 +321,7 @@ cpu_usage_idle_count{cpu="cpu1"} 20 require.NoError(t, err) require.Equal(t, http.StatusOK, resp.StatusCode) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -452,7 +452,7 @@ rpc_duration_seconds_count 2693 resp, err := http.Get(output.URL()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, diff --git a/plugins/outputs/sensu/sensu.go b/plugins/outputs/sensu/sensu.go index 568f8f7a144e4..3cd8b2274e52a 100644 --- a/plugins/outputs/sensu/sensu.go +++ b/plugins/outputs/sensu/sensu.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math" "net/http" "net/url" @@ -336,7 +335,7 @@ func (s *Sensu) write(reqBody []byte) error { defer resp.Body.Close() if resp.StatusCode != http.StatusCreated { - bodyData, err := ioutil.ReadAll(resp.Body) + bodyData, err := io.ReadAll(resp.Body) if err != nil { s.Log.Debugf("Couldn't read response body: %v", err) } diff --git a/plugins/outputs/sensu/sensu_test.go b/plugins/outputs/sensu/sensu_test.go index 249775727a481..e7a272ed5e149 100644 --- a/plugins/outputs/sensu/sensu_test.go +++ b/plugins/outputs/sensu/sensu_test.go @@ -3,7 +3,7 @@ package sensu import ( "encoding/json" "fmt" - "io/ioutil" + "io" "math" "net/http" "net/http/httptest" @@ -118,7 +118,7 @@ func TestConnectAndWrite(t *testing.T) { require.Equal(t, expectedURL, r.URL.String()) require.Equal(t, expectedAuthHeader, r.Header.Get("Authorization")) // let's make sure what we received is a valid Sensu event that contains all of the expected data - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) receivedEvent := &corev2.Event{} err = json.Unmarshal(body, receivedEvent) diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go index 3c20583e15e20..0decb644cccab 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -2,7 +2,6 @@ package socket_writer import ( "bufio" - "io/ioutil" "net" "os" "path/filepath" @@ -46,7 +45,7 @@ func TestSocketWriter_udp(t *testing.T) { } func TestSocketWriter_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sw.TestSocketWriter_unix.sock") @@ -71,7 +70,7 @@ func TestSocketWriter_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sw.TSW_unixgram.sock") diff --git a/plugins/outputs/sql/sql_test.go b/plugins/outputs/sql/sql_test.go index 5dad6752d4cfe..ef02c89b11fad 100644 --- a/plugins/outputs/sql/sql_test.go +++ b/plugins/outputs/sql/sql_test.go @@ -3,7 +3,6 @@ package sql import ( "context" "fmt" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -162,7 +161,7 @@ func TestMysqlIntegration(t *testing.T) { const username = "root" password := pwgen(32) - outDir, err := ioutil.TempDir("", "tg-mysql-*") + outDir, err := os.MkdirTemp("", "tg-mysql-*") require.NoError(t, err) defer os.RemoveAll(outDir) @@ -230,9 +229,9 @@ func TestMysqlIntegration(t *testing.T) { require.FileExists(t, dumpfile) //compare the dump to what we expected - expected, err := ioutil.ReadFile("testdata/mariadb/expected.sql") + expected, err := os.ReadFile("testdata/mariadb/expected.sql") require.NoError(t, err) - actual, err := ioutil.ReadFile(dumpfile) + actual, err := os.ReadFile(dumpfile) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) } @@ -252,7 +251,7 @@ func TestPostgresIntegration(t *testing.T) { const username = "postgres" password := pwgen(32) - outDir, err := ioutil.TempDir("", "tg-postgres-*") + outDir, err := os.MkdirTemp("", "tg-postgres-*") require.NoError(t, err) defer os.RemoveAll(outDir) @@ -329,9 +328,9 @@ func TestPostgresIntegration(t *testing.T) { require.FileExists(t, dumpfile) //compare the dump to what we expected - expected, err := ioutil.ReadFile("testdata/postgres/expected.sql") + expected, err := os.ReadFile("testdata/postgres/expected.sql") require.NoError(t, err) - actual, err := ioutil.ReadFile(dumpfile) + actual, err := os.ReadFile(dumpfile) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) } diff --git a/plugins/outputs/sql/sqlite_test.go b/plugins/outputs/sql/sqlite_test.go index d54ffe877a80f..7707f9d085e7e 100644 --- a/plugins/outputs/sql/sqlite_test.go +++ b/plugins/outputs/sql/sqlite_test.go @@ -7,7 +7,6 @@ package sql import ( gosql "database/sql" - "io/ioutil" "os" "path/filepath" "testing" @@ -18,7 +17,7 @@ import ( ) func TestSqlite(t *testing.T) { - outDir, err := ioutil.TempDir("", "tg-sqlite-*") + outDir, err := os.MkdirTemp("", "tg-sqlite-*") require.NoError(t, err) defer os.RemoveAll(outDir) diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index 5ce502bab2c0e..5629defa4506e 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -6,7 +6,6 @@ import ( "compress/gzip" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -300,7 +299,7 @@ func TestContentEncodingGzip(t *testing.T) { body, err := gzip.NewReader(r.Body) require.NoError(t, err) - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) assert.Equal(t, string(payload), "metric=cpu field=value 42 0\n") diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index 7826047d7873d..4d3027b1b5331 100644 --- a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -3,7 +3,7 @@ package warp10 import ( "bytes" "fmt" - "io/ioutil" + "io" "log" "math" "net/http" @@ -154,7 +154,7 @@ func (w *Warp10) Write(metrics []telegraf.Metric) error { if resp.StatusCode != http.StatusOK { if w.PrintErrorBody { - body, _ := ioutil.ReadAll(resp.Body) + body, _ := io.ReadAll(resp.Body) return fmt.Errorf(w.WarpURL + ": " + w.HandleError(string(body), w.MaxStringErrorSize)) } diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go index c6eb9db2ae5b5..dc097da45ac2a 100644 --- a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "time" @@ -172,7 +172,7 @@ func getResponseFromMetadata(c *http.Client, metadataURL string) ([]byte, error) } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } @@ -242,7 +242,7 @@ func (a *YandexCloudMonitoring) send(body []byte) error { } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 { return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status) } diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index f0f018034dc5b..7b34b83c0af8a 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -3,7 +3,6 @@ package json_v2_test import ( "bufio" "fmt" - "io/ioutil" "os" "testing" @@ -90,7 +89,7 @@ func TestData(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { // Process the telegraf config file for the test - buf, err := ioutil.ReadFile(fmt.Sprintf("testdata/%s/telegraf.conf", tc.test)) + buf, err := os.ReadFile(fmt.Sprintf("testdata/%s/telegraf.conf", tc.test)) require.NoError(t, err) inputs.Add("file", func() telegraf.Input { return &file.File{} diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go index f53b926bda4a5..a403887e093b9 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "testing" @@ -435,7 +435,7 @@ func TestParserProtobufHeader(t *testing.T) { t.Fatalf("error making HTTP request to %s: %s", ts.URL, err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("error reading body: %s", err) } diff --git a/plugins/parsers/xpath/parser_test.go b/plugins/parsers/xpath/parser_test.go index 8e7a3087c0888..ead02e0392769 100644 --- a/plugins/parsers/xpath/parser_test.go +++ b/plugins/parsers/xpath/parser_test.go @@ -1,7 +1,7 @@ package xpath import ( - "io/ioutil" + "os" "path/filepath" "strings" "testing" @@ -1233,7 +1233,7 @@ func TestTestCases(t *testing.T) { pbmsgtype = protofields[1] } - content, err := ioutil.ReadFile(datafile) + content, err := os.ReadFile(datafile) require.NoError(t, err) // Get the expectations @@ -1266,7 +1266,7 @@ func TestTestCases(t *testing.T) { } func loadTestConfiguration(filename string) (*Config, []string, error) { - buf, err := ioutil.ReadFile(filename) + buf, err := os.ReadFile(filename) if err != nil { return nil, nil, err } diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 15152a2f349c3..9eed069948bb0 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -3,7 +3,6 @@ package starlark import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -3194,7 +3193,7 @@ func TestAllScriptTestData(t *testing.T) { } fn := path t.Run(fn, func(t *testing.T) { - b, err := ioutil.ReadFile(fn) + b, err := os.ReadFile(fn) require.NoError(t, err) lines := strings.Split(string(b), "\n") inputMetrics := parseMetricsFrom(t, lines, "Example Input:") diff --git a/testutil/tls.go b/testutil/tls.go index 68a244a8b1e74..686f327d06f49 100644 --- a/testutil/tls.go +++ b/testutil/tls.go @@ -2,7 +2,7 @@ package testutil import ( "fmt" - "io/ioutil" + "io" "os" "path" @@ -93,7 +93,7 @@ func readCertificate(filename string) string { if err != nil { panic(fmt.Sprintf("opening %q: %v", filename, err)) } - octets, err := ioutil.ReadAll(file) + octets, err := io.ReadAll(file) if err != nil { panic(fmt.Sprintf("reading %q: %v", filename, err)) } From f4c407d797c99d7a5f80ae15080c602faf14252c Mon Sep 17 00:00:00 2001 From: Alexander Krantz Date: Wed, 29 Sep 2021 14:39:46 -0700 Subject: [PATCH 643/761] fix: Couchbase insecure certificate validation (#9458) (cherry picked from commit 872b29bf958cf6c485f0d649b0540b0bae137a50) --- plugins/inputs/couchbase/README.md | 8 ++++++ plugins/inputs/couchbase/couchbase.go | 33 ++++++++++++++++++++-- plugins/inputs/couchbase/couchbase_test.go | 12 ++++++-- 3 files changed, 49 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 2c777e17a9ed0..1acdaea4ac76e 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -20,6 +20,14 @@ This plugin gets metrics for each Couchbase node, as well as detailed metrics fo ## Filter bucket fields to include only here. # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification (defaults to false) + ## If set to false, tls_cert and tls_key are required + # insecure_skip_verify = false ``` ## Measurements: diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index 7b99c76e6982c..f67e75096cde3 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -11,6 +11,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -20,6 +21,9 @@ type Couchbase struct { BucketStatsIncluded []string `toml:"bucket_stats_included"` bucketInclude filter.Filter + client *http.Client + + tls.ClientConfig } var sampleConfig = ` @@ -36,10 +40,17 @@ var sampleConfig = ` ## Filter bucket fields to include only here. # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification (defaults to false) + ## If set to false, tls_cert and tls_key are required + # insecure_skip_verify = false ` var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`) -var client = &http.Client{Timeout: 10 * time.Second} func (cb *Couchbase) SampleConfig() string { return sampleConfig @@ -369,7 +380,7 @@ func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, bucketStats return err } - r, err := client.Do(req) + r, err := cb.client.Do(req) if err != nil { return err } @@ -387,6 +398,24 @@ func (cb *Couchbase) Init() error { cb.bucketInclude = f + tlsConfig, err := cb.TLSConfig() + if err != nil { + return err + } + + cb.client = &http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + MaxIdleConnsPerHost: couchbaseClient.MaxIdleConnsPerHost, + TLSClientConfig: tlsConfig, + }, + } + + couchbaseClient.SetSkipVerify(cb.ClientConfig.InsecureSkipVerify) + couchbaseClient.SetCertFile(cb.ClientConfig.TLSCert) + couchbaseClient.SetKeyFile(cb.ClientConfig.TLSKey) + couchbaseClient.SetRootFile(cb.ClientConfig.TLSCA) + return nil } diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index a739732458a51..e6abc3ea74c01 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -2,6 +2,7 @@ package couchbase import ( "encoding/json" + "github.com/influxdata/telegraf/plugins/common/tls" "net/http" "net/http/httptest" "testing" @@ -26,8 +27,12 @@ func TestGatherServer(t *testing.T) { } })) - var cb Couchbase - cb.BucketStatsIncluded = []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"} + cb := Couchbase{ + BucketStatsIncluded: []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"}, + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, + } err := cb.Init() require.NoError(t, err) @@ -105,6 +110,9 @@ func TestGatherDetailedBucketMetrics(t *testing.T) { var err error var cb Couchbase cb.BucketStatsIncluded = []string{"couch_total_disk_size"} + cb.ClientConfig = tls.ClientConfig{ + InsecureSkipVerify: true, + } err = cb.Init() require.NoError(t, err) var acc testutil.Accumulator From 5adbe4c6765f7457ed2615de92aa8cba9d7ae605 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Wed, 29 Sep 2021 14:40:23 -0700 Subject: [PATCH 644/761] docs: update readme title for amd_rocm_smi (#9826) (cherry picked from commit 11193a3b4cbffd4ccdb7eb5b2aa12e83be729c11) --- plugins/inputs/amd_rocm_smi/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/amd_rocm_smi/README.md b/plugins/inputs/amd_rocm_smi/README.md index 89a5b063065d7..ac080974dd274 100644 --- a/plugins/inputs/amd_rocm_smi/README.md +++ b/plugins/inputs/amd_rocm_smi/README.md @@ -1,11 +1,11 @@ -# ROCm System Management Interface (SMI) Input Plugin +# AMD ROCm System Management Interface (SMI) Input Plugin This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenCompute/rocm_smi_lib/tree/master/python_smi_tools) binary to pull GPU stats including memory and GPU usage, temperatures and other. ### Configuration ```toml -# Pulls statistics from nvidia GPUs attached to the host +# Pulls statistics from AMD GPUs attached to the host [[inputs.amd_rocm_smi]] ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath # bin_path = "/opt/rocm/bin/rocm-smi" From 82a7934752a607c7f8da0f9346b158497a7479a4 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Thu, 30 Sep 2021 10:28:48 -0600 Subject: [PATCH 645/761] fix: Revert "Reset the flush interval timer when flush is requested or batch is ready. (#8953)" (#9800) This reverts commit a6d2c4f254dbe9f7353961d892f8b91d907423ea. (cherry picked from commit 70afc94d121c4bb75ded3f8177859436355c4dfa) --- agent/agent.go | 12 ++++++++---- agent/tick.go | 19 +++++++------------ 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 78097bcd47731..7bd6b108df048 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -775,7 +775,7 @@ func (a *Agent) runOutputs( func (a *Agent) flushLoop( ctx context.Context, output *models.RunningOutput, - ticker *RollingTicker, + ticker Ticker, ) { logError := func(err error) { if err != nil { @@ -804,11 +804,15 @@ func (a *Agent) flushLoop( case <-ticker.Elapsed(): logError(a.flushOnce(output, ticker, output.Write)) case <-flushRequested: - ticker.Reset() logError(a.flushOnce(output, ticker, output.Write)) case <-output.BatchReady: - ticker.Reset() - logError(a.flushOnce(output, ticker, output.WriteBatch)) + // Favor the ticker over batch ready + select { + case <-ticker.Elapsed(): + logError(a.flushOnce(output, ticker, output.Write)) + default: + logError(a.flushOnce(output, ticker, output.WriteBatch)) + } } } } diff --git a/agent/tick.go b/agent/tick.go index 9696cd2c18c16..16233ba6d4adb 100644 --- a/agent/tick.go +++ b/agent/tick.go @@ -214,7 +214,6 @@ type RollingTicker struct { ch chan time.Time cancel context.CancelFunc wg sync.WaitGroup - timer *clock.Timer } func NewRollingTicker(interval, jitter time.Duration) *RollingTicker { @@ -231,12 +230,12 @@ func newRollingTicker(interval, jitter time.Duration, clock clock.Clock) *Rollin } d := t.next() - t.timer = clock.Timer(d) + timer := clock.Timer(d) t.wg.Add(1) go func() { defer t.wg.Done() - t.run(ctx) + t.run(ctx, timer) }() return t @@ -246,28 +245,24 @@ func (t *RollingTicker) next() time.Duration { return t.interval + internal.RandomDuration(t.jitter) } -func (t *RollingTicker) run(ctx context.Context) { +func (t *RollingTicker) run(ctx context.Context, timer *clock.Timer) { for { select { case <-ctx.Done(): - t.timer.Stop() + timer.Stop() return - case now := <-t.timer.C: + case now := <-timer.C: select { case t.ch <- now: default: } - t.Reset() + d := t.next() + timer.Reset(d) } } } -// Reset the ticker to the next interval + jitter. -func (t *RollingTicker) Reset() { - t.timer.Reset(t.next()) -} - func (t *RollingTicker) Elapsed() <-chan time.Time { return t.ch } From 7f0718d7a4e66c483b849869766c09e78c27d18d Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Fri, 1 Oct 2021 11:10:30 -0400 Subject: [PATCH 646/761] fix: add keep alive config option, add documentation around issue with eclipse/mosquitto version combined with this plugin, update test (#9803) (cherry picked from commit 3990ab5eb9047c99b03a40afd3f02a90e7aabdb2) --- plugins/outputs/mqtt/README.md | 7 +++++++ plugins/outputs/mqtt/mqtt.go | 21 +++++++++++++++++---- plugins/outputs/mqtt/mqtt_test.go | 1 + 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index abb770f068d4f..f82d7597c5bea 100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -40,6 +40,12 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt ## When true, messages will have RETAIN flag set. # retain = false + ## Defines the maximum length of time that the broker and client may not communicate. + ## Defaults to 0 which turns the feature off. For version v2.0.12 mosquitto there is a + ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. + ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. + # keep_alive = 0 + ## Data format to output. # data_format = "influx" ``` @@ -62,3 +68,4 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt * `batch`: When true, metrics will be sent in one MQTT message per flush. Otherwise, metrics are written one metric per MQTT message. * `retain`: Set `retain` flag when publishing * `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md) +* `keep_alive`: Defines the maximum length of time that the broker and client may not communicate with each other. Defaults to 0 which deactivates this feature. diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 584a79ffd2ef1..54203ee0dba66 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -16,6 +16,10 @@ import ( "github.com/influxdata/telegraf/plugins/serializers" ) +const ( + defaultKeepAlive = 0 +) + var sampleConfig = ` servers = ["localhost:1883"] # required. @@ -55,6 +59,12 @@ var sampleConfig = ` ## actually reads it # retain = false + ## Defines the maximum length of time that the broker and client may not communicate. + ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a + ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. + ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. + # keep_alive = 0 + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -72,8 +82,9 @@ type MQTT struct { QoS int `toml:"qos"` ClientID string `toml:"client_id"` tls.ClientConfig - BatchMessage bool `toml:"batch"` - Retain bool `toml:"retain"` + BatchMessage bool `toml:"batch"` + Retain bool `toml:"retain"` + KeepAlive int64 `toml:"keep_alive"` client paho.Client opts *paho.ClientOptions @@ -190,7 +201,7 @@ func (m *MQTT) publish(topic string, body []byte) error { func (m *MQTT) createOpts() (*paho.ClientOptions, error) { opts := paho.NewClientOptions() - opts.KeepAlive = 0 + opts.KeepAlive = m.KeepAlive if m.Timeout < config.Duration(time.Second) { m.Timeout = config.Duration(5 * time.Second) @@ -237,6 +248,8 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) { func init() { outputs.Add("mqtt", func() telegraf.Output { - return &MQTT{} + return &MQTT{ + KeepAlive: defaultKeepAlive, + } }) } diff --git a/plugins/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go index 8affce1c93ddf..fd36d6d0577ac 100644 --- a/plugins/outputs/mqtt/mqtt_test.go +++ b/plugins/outputs/mqtt/mqtt_test.go @@ -19,6 +19,7 @@ func TestConnectAndWriteIntegration(t *testing.T) { m := &MQTT{ Servers: []string{url}, serializer: s, + KeepAlive: 30, } // Verify that we can connect to the MQTT broker From fe53f242653f437540c5ac651309a48430439580 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 1 Oct 2021 15:09:50 -0600 Subject: [PATCH 647/761] fix: gitignore should ignore .toml/.conf files (#9818) As the application requires a config.toml or config.conf file it makes sense to ignore these types of files rather than having them show up in git status output. While the files are technically in the toml format, we use the .conf extension in our documentation so ignore both. (cherry picked from commit 49e50863901354fbc8c66e8f07920beb88bbd2ac) --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 7c3fbd21c3535..614809d0681e1 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,5 @@ .DS_Store process.yml /.vscode +/*.toml +/*.conf From d0adff6737c6dcc47f5be6e05759343d9b1a92de Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 1 Oct 2021 15:10:25 -0600 Subject: [PATCH 648/761] fix: procstat missing tags in procstat_lookup metric (#9808) In #9488 the way that tags were built for procstat_lookup was changed and it was only including the pid_finder and result tags. This is not consistent with the documentation and is a regression from how they were previously constructed. Becuase of the large change to how procstat metrics are gathered, this will use one of the process metric's tags as a basis for the tags for procstat_lookup. Resolves: #9793 (cherry picked from commit ac40bdc52e8ced5afc9605cea33b4fe32d998797) --- plugins/inputs/procstat/procstat.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index ce29a08460cca..7b2ffba26b430 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -154,9 +154,10 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { } } + tags := make(map[string]string) p.procs = newProcs - for _, proc := range p.procs { + tags = proc.Tags() p.addMetric(proc, acc, now) } @@ -165,7 +166,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { "running": len(p.procs), "result_code": 0, } - tags := make(map[string]string) + tags["pid_finder"] = p.PidFinder tags["result"] = "success" acc.AddFields("procstat_lookup", fields, tags, now) From d2bc647e84c9c428ba9fa8c8f26fd0e3492a030e Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Mon, 4 Oct 2021 10:05:56 -0400 Subject: [PATCH 649/761] fix: update toml tag to match sample config / readme (#9848) (cherry picked from commit 021dedb792cf2791a21d3bb80024dd67db7b875c) --- plugins/outputs/loki/loki.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/loki/loki.go b/plugins/outputs/loki/loki.go index 2f920ec829e3b..07d4d473bf396 100644 --- a/plugins/outputs/loki/loki.go +++ b/plugins/outputs/loki/loki.go @@ -57,7 +57,7 @@ type Loki struct { Timeout config.Duration `toml:"timeout"` Username string `toml:"username"` Password string `toml:"password"` - Headers map[string]string `toml:"headers"` + Headers map[string]string `toml:"http_headers"` ClientID string `toml:"client_id"` ClientSecret string `toml:"client_secret"` TokenURL string `toml:"token_url"` From c95a1bc29caaa5d5bad0ea95889a8ce3727b2f20 Mon Sep 17 00:00:00 2001 From: "Guo Qiao (Joe)" Date: Tue, 5 Oct 2021 05:04:30 +1300 Subject: [PATCH 650/761] fix: logging in intel_rdt.go caused service stop timeout even as root (#9844) (#9850) (cherry picked from commit 6c1bdfad76d8833f538f346a95ca5a5af88e9db9) --- plugins/inputs/intel_rdt/intel_rdt.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index 89370062d730e..e0c7de526b067 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -278,12 +278,12 @@ func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAss }() err = cmd.Start() if err != nil { - r.errorChan <- fmt.Errorf("pqos: %v", err) + r.Log.Errorf("pqos: %v", err) return } err = cmd.Wait() if err != nil { - r.errorChan <- fmt.Errorf("pqos: %v", err) + r.Log.Errorf("pqos: %v", err) } } From d103beea90fc2f5ab99a7e7ec3c39a195b8120d9 Mon Sep 17 00:00:00 2001 From: Howard Yoo <32691630+howardyoo@users.noreply.github.com> Date: Mon, 4 Oct 2021 11:04:58 -0500 Subject: [PATCH 651/761] fix: mongodb input plugin issue #9845 (#9846) (cherry picked from commit c1f51b0645235e851f8c68e01b2e649dd7af5d22) --- plugins/inputs/mongodb/mongostat.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 41f735d389c7a..3871f6d252909 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -1086,8 +1086,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.Metrics.Repl.Network != nil { returnVal.ReplNetworkBytes = newStat.Metrics.Repl.Network.Bytes - returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num - returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + if newStat.Metrics.Repl.Network.GetMores != nil { + returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num + returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + } returnVal.ReplNetworkOps = newStat.Metrics.Repl.Network.Ops } } From d4e510e5298b0f48664509955f950f7277273aa1 Mon Sep 17 00:00:00 2001 From: Helen Weller <38860767+helenosheaa@users.noreply.github.com> Date: Mon, 4 Oct 2021 17:30:59 -0400 Subject: [PATCH 652/761] fix: remove eg fix: which breaks label bot functionality (#9859) (cherry picked from commit 68333d70f02d5ad89eac0dce290c1ad8b3917ffd) --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 1c717ddbb1a15..67b65a26247fb 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -5,7 +5,7 @@ show completion. --> - [ ] Updated associated README.md. - [ ] Wrote appropriate unit tests. -- [ ] Pull request title or commits are in [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) (e.g. feat: or fix:) +- [ ] Pull request title or commits are in [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) - -### Relevant telegraf.conf: - -```toml - -``` - -### System info: - - - -### Docker - - - -### Steps to reproduce: - - - -1. ... -2. ... - -### Expected behavior: - - - -### Actual behavior: - - - -### Additional info: - - From c2118a08e39395f27c752c0252855a18b77e5678 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Tue, 26 Oct 2021 11:03:41 -0600 Subject: [PATCH 716/761] fix: redacts IPMI password in logs (#9997) (cherry picked from commit 38aefd99b55450a6338c3e843487712110c2f3d2) --- plugins/inputs/ipmi_sensor/ipmi.go | 14 ++++++-- plugins/inputs/ipmi_sensor/ipmi_test.go | 48 +++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index c7f23dbc30e36..3f201708ff5c5 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -150,7 +150,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { cmd := execCommand(name, dumpOpts...) out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout)) if err != nil { - return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) + return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out)) } } opts = append(opts, "-S") @@ -169,7 +169,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout)) timestamp := time.Now() if err != nil { - return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) + return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out)) } if m.MetricVersion == 2 { return parseV2(acc, hostname, out, timestamp) @@ -314,6 +314,16 @@ func aToFloat(val string) (float64, error) { return f, nil } +func sanitizeIPMICmd(args []string) []string { + for i, v := range args { + if v == "-P" { + args[i+1] = "REDACTED" + } + } + + return args +} + func trim(s string) string { return strings.TrimSpace(s) } diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index cf53214dbbd66..6e731f6309894 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -756,3 +756,51 @@ func Test_parseV2(t *testing.T) { }) } } + +func TestSanitizeIPMICmd(t *testing.T) { + tests := []struct { + name string + args []string + expected []string + }{ + { + name: "default args", + args: []string{ + "-H", "localhost", + "-U", "username", + "-P", "password", + "-I", "lan", + }, + expected: []string{ + "-H", "localhost", + "-U", "username", + "-P", "REDACTED", + "-I", "lan", + }, + }, + { + name: "no password", + args: []string{ + "-H", "localhost", + "-U", "username", + "-I", "lan", + }, + expected: []string{ + "-H", "localhost", + "-U", "username", + "-I", "lan", + }, + }, + { + name: "empty args", + args: []string{}, + expected: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var sanitizedArgs []string = sanitizeIPMICmd(tt.args) + require.Equal(t, tt.expected, sanitizedArgs) + }) + } +} From bac958fe2f03e9746dc69d006494237398c13d21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Oct 2021 12:15:18 -0500 Subject: [PATCH 717/761] fix: bump github.com/aws/aws-sdk-go-v2/config from 1.8.2 to 1.8.3 (#9948) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> (cherry picked from commit 7bf8343c60e5ff4b9093be489a0ebd3a4ccd9c3d) --- go.mod | 16 ++++++++-------- go.sum | 24 ++++++++++++++++-------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index 274c30fb651cc..a3041e576119d 100644 --- a/go.mod +++ b/go.mod @@ -47,25 +47,25 @@ require ( github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-metrics v0.3.3 // indirect - github.com/aws/aws-sdk-go-v2 v1.9.1 - github.com/aws/aws-sdk-go-v2/config v1.8.2 - github.com/aws/aws-sdk-go-v2/credentials v1.4.2 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 + github.com/aws/aws-sdk-go-v2 v1.9.2 + github.com/aws/aws-sdk-go-v2/config v1.8.3 + github.com/aws/aws-sdk-go-v2/credentials v1.4.3 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4 // indirect github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0 github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 // indirect github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0 github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.4.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 + github.com/aws/aws-sdk-go-v2/service/sso v1.4.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.7.2 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 github.com/aws/smithy-go v1.8.0 github.com/benbjohnson/clock v1.1.0 diff --git a/go.sum b/go.sum index 07cd16c7cceb1..316ee7038388c 100644 --- a/go.sum +++ b/go.sum @@ -301,25 +301,29 @@ github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAP github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.9.1 h1:ZbovGV/qo40nrOJ4q8G33AGICzaPI45FHQWJ9650pF4= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2 v1.9.2 h1:dUFQcMNZMLON4BOe273pl0filK9RqyQMhCK/6xssL6s= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= -github.com/aws/aws-sdk-go-v2/config v1.8.2 h1:Dqy4ySXFmulRmZhfynm/5CD4Y6aXiTVhDtXLIuUe/r0= github.com/aws/aws-sdk-go-v2/config v1.8.2/go.mod h1:r0bkX9NyuCuf28qVcsEMtpAQibT7gA1Q0gzkjvgJdLU= +github.com/aws/aws-sdk-go-v2/config v1.8.3 h1:o5583X4qUfuRrOGOgmOcDgvr5gJVSu57NK08cWAhIDk= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= -github.com/aws/aws-sdk-go-v2/credentials v1.4.2 h1:8kVE4Og6wlhVrMGiORQ3p9gRj2exjzhFRB+QzWBUa5Q= github.com/aws/aws-sdk-go-v2/credentials v1.4.2/go.mod h1:9Sp6u121/f0NnvHyhG7dgoYeUTEFC2vsvJqJ6wXpkaI= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3 h1:LTdD5QhK073MpElh9umLLP97wxphkgVC/OjQaEbBwZA= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 h1:8kvinmbIDObqsWegKP0JjeanYPiA4GUVpAtciNWE+jw= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 h1:Nm+BxqBtT0r+AnD6byGMCGT4Km0QwHBy8mAYptNPXY4= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1/go.mod h1:W1ldHfsgeGlKpJ4xZMKZUI6Wmp6EAstU7PxnhbXWWrI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0 h1:9tfxW/icbSu98C2pcNynm5jmDwU3/741F11688B6QnU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 h1:0O72494cCsazjpsGfo+LXezru6PMSp0HUB1m5UfpaRU= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3/go.mod h1:claNkz2j/N/AZceFcAbR0NyuWnrn+jCYpI+6Ozjsc0k= @@ -331,8 +335,9 @@ github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6 github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 h1:NnXJXUz7oihrSlPKEM0yZ19b+7GQ47MX/LluLlEyE/Y= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3/go.mod h1:EES9ToeC3h063zCFDdqWGnARExNdULPaBvARm1FLwxA= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4 h1:leSJ6vCqtPpTmBIgE7044B1wql1E4n//McF+mEgNrYg= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 h1:vXZPcDQg7e5z2IKz0huei6zhfAxDoZdXej2o3jUbjCI= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0/go.mod h1:BlrFkwOhSgESkbdS+zJBy4+1mQ3f3Fq9Gp8nT+gaSwk= github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2 h1:B120/boLr82yRaQFEPn9u01OwWMnc+xGvz5SOHfBrHY= @@ -356,8 +361,9 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1/go.mod h1:PIS github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1 h1:APEjhKZLFlNVLATnA/TJyA+w1r/xd5r5ACWBDZ9aIvc= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1/go.mod h1:Ve+eJOx9UWaT/lMVebnFhDhO49fSLVedHoA82+Rqme0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2 h1:r7jel2aa4d9Duys7wEmWqDd5ebpC9w6Kxu6wIjjp18E= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 h1:YEz2KMyqK2zyG3uOa0l2xBc/H6NUVJir8FhwHQHF3rc= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1/go.mod h1:yg4EN/BKoc7+DLhNOxxdvoO3+iyW2FuynvaKqLcLDUM= @@ -369,13 +375,15 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0/go.mod h1:Tk23mCmfL3wb3tNIeMk/0d github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.1 h1:RfgQyv3bFT2Js6XokcrNtTjQ6wAVBRpoCgTFsypihHA= github.com/aws/aws-sdk-go-v2/service/sso v1.4.1/go.mod h1:ycPdbJZlM0BLhuBnd80WX9PucWPG88qps/2jl9HugXs= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2 h1:pZwkxZbspdqRGzddDB92bkZBoB7lg85sMRE7OqdB3V0= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 h1:7ce9ugapSgBapwLhg7AJTqKW5U92VRX3vX65k2tsB+g= github.com/aws/aws-sdk-go-v2/service/sts v1.7.1/go.mod h1:r1i8QwKPzwByXqZb3POQfBs7jozrdnHz8PVbsvyx73w= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2 h1:ol2Y5DWqnJeKqNd8th7JWzBtqu63xpOfs1Is+n1t8/4= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 h1:1s/RRA5Owuz4/G/eWCdCKgC+9zaz2vxFsRSwe7R3cPY= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 h1:1s/RRA5Owuz4/G/eWCdCKgC+9zaz2vxFsRSwe7R3cPY= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2/go.mod h1:XoDkdZ5pBf2za2GWbFHQ8Ps0K8fRbmbwrHh7PF5xnzQ= From 1e1c55870d03b5dcb37c4f4dde4f6f837cb60c6e Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 27 Oct 2021 07:11:43 -0600 Subject: [PATCH 718/761] fix: update readme to align with other docs (#10005) (cherry picked from commit 488568cafc6e74af82859b57e67c3767fed0cfa0) --- README.md | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index b76ad45c0d1a3..03d7428c12591 100644 --- a/README.md +++ b/README.md @@ -43,24 +43,29 @@ page or from each [GitHub Releases](https://github.com/influxdata/telegraf/relea InfluxData also provides a package repo that contains both DEB and RPM downloads. -For deb-based platforms run the following to add the repo key and setup a new -sources.list entry: +For deb-based platforms (e.g. Ubuntu and Debian) run the following to add the +repo key and setup a new sources.list entry: ```shell -curl -s https://repos.influxdata.com/influxdb.key | gpg --dearmor > /etc/apt/trusted.gpg.d/influxdb.gpg -export DISTRIB_ID=$(lsb_release -si); export DISTRIB_CODENAME=$(lsb_release -sc) -echo "deb [signed-by=/etc/apt/trusted.gpg.d/influxdb.gpg] https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" > /etc/apt/sources.list.d/influxdb.list +wget -qO- https://repos.influxdata.com/influxdb.key | sudo tee /etc/apt/trusted.gpg.d/influxdb.asc >/dev/null +source /etc/os-release +echo "deb https://repos.influxdata.com/${ID} ${VERSION_CODENAME} stable" | sudo tee /etc/apt/sources.list.d/influxdb.list +sudo apt-get update && sudo apt-get install telegraf ``` -For RPM-based platforms use the following repo file in `/etc/yum.repos.d/`: +For RPM-based platforms (e.g. RHEL, CentOS) use the following to create a repo +file and install telegraf: -```text +```shell +cat < Date: Wed, 27 Oct 2021 10:49:20 -0500 Subject: [PATCH 719/761] fix: stop triggering share-artifacts on release/tags (#9996) (cherry picked from commit 0088be7da1c97b151534b873ad48867845b975a5) --- .circleci/config.yml | 3 +++ scripts/check-file-changes.sh | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 028198bbdb236..e5d535bf41115 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -535,6 +535,9 @@ workflows: branches: ignore: - master + - release.* + tags: + ignore: /.*/ - 'release': requires: - 'test-go-windows' diff --git a/scripts/check-file-changes.sh b/scripts/check-file-changes.sh index ad5848275216e..3ee954a35040f 100755 --- a/scripts/check-file-changes.sh +++ b/scripts/check-file-changes.sh @@ -1,11 +1,12 @@ #!/bin/bash +# CIRCLE-CI SCRIPT: This file is used exclusively for CI # To prevent the tests/builds to run for only a doc change, this script checks what files have changed in a pull request. exit 0 BRANCH="$(git rev-parse --abbrev-ref HEAD)" echo $BRANCH -if [[ "$BRANCH" != "master" ]] && [[ "$BRANCH" != release* ]]; then # This should never skip for master and release branches +if [[ ${CIRCLE_PULL_REQUEST##*/} != "" ]]; then # Only skip if their is an associated pull request with this job # Ask git for all the differences between this branch and master # Then use grep to look for changes in the .circleci/ directory, anything named *.go or *.mod or *.sum or *.sh or Makefile # If no match is found, then circleci step halt will stop the CI job but mark it successful From 5ce5e14f68921bdb49e83f7df5c1cf560d0d315b Mon Sep 17 00:00:00 2001 From: Josh Powers Date: Wed, 27 Oct 2021 13:07:26 -0600 Subject: [PATCH 720/761] update build version --- build_version.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_version.txt b/build_version.txt index 769e37e159d42..f5b00dc262bed 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.20.2 +1.20.3 From f7a4d20bbb7f7eaa6278bbcee5c62eeeb53d8cc7 Mon Sep 17 00:00:00 2001 From: Josh Powers Date: Wed, 27 Oct 2021 13:15:25 -0600 Subject: [PATCH 721/761] Update changelog --- etc/telegraf.conf | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index f1942e7e12712..f6ea72b183638 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -3824,6 +3824,15 @@ # # ## List of interfaces to ignore when pulling metrics. # # interface_exclude = ["eth1"] +# +# ## Some drivers declare statistics with extra whitespace, different spacing, +# ## and mix cases. This list, when enabled, can be used to clean the keys. +# ## Here are the current possible normalizations: +# ## * snakecase: converts fooBarBaz to foo_bar_baz +# ## * trim: removes leading and trailing whitespace +# ## * lower: changes all capitalized letters to lowercase +# ## * underscore: replaces spaces with underscores +# # normalize_keys = ["snakecase", "trim", "lower", "underscore"] # # Read metrics from one or more commands that can output to stdout @@ -7493,6 +7502,10 @@ # ## Mandatory if cores aren't set and forbidden if cores are specified. # ## e.g. ["qemu", "pmd"] # # processes = ["process"] +# +# ## Specify if the pqos process should be called with sudo. +# ## Mandatory if the telegraf process does not run as root. +# # use_sudo = false # # Read JTI OpenConfig Telemetry from listed sensors From 6c03be4e7382e3f8efdf1a5caccc5eb8698caa92 Mon Sep 17 00:00:00 2001 From: Josh Powers Date: Wed, 27 Oct 2021 13:50:47 -0600 Subject: [PATCH 722/761] Update changelog --- CHANGELOG.md | 48 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8760b914b7f95..d03253afbcff7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,43 @@ +## v1.20.3 [2021-10-27] + +#### Release Notes + + - [#9873](https://github.com/influxdata/telegraf/pull/9873) Update go to 1.17.2 + +#### Bugfixes + + - [#9948](https://github.com/influxdata/telegraf/pull/9948) Update github.com/aws/aws-sdk-go-v2/config module from 1.8.2 to 1.8.3 + - [#9997](https://github.com/influxdata/telegraf/pull/9997) `inputs.ipmi_sensor` Redact IPMI password in logs + - [#9978](https://github.com/influxdata/telegraf/pull/9978) `inputs.kube_inventory` Do not skip resources with zero s/ns timestamps + - [#9998](https://github.com/influxdata/telegraf/pull/9998) Update gjson module to v1.10.2 + - [#9973](https://github.com/influxdata/telegraf/pull/9973) `inputs.procstat` Revert and fix tag creation + - [#9943](https://github.com/influxdata/telegraf/pull/9943) `inputs.sqlserver` Add sqlserver plugin integration tests + - [#9647](https://github.com/influxdata/telegraf/pull/9647) `inputs.cloudwatch` Use the AWS SDK v2 library + - [#9954](https://github.com/influxdata/telegraf/pull/9954) `processors.starlark` Starlark pop operation for non-existing keys + - [#9956](https://github.com/influxdata/telegraf/pull/9956) `inputs.zfs` Check return code of zfs command for FreeBSD + - [#9585](https://github.com/influxdata/telegraf/pull/9585) `inputs.kube_inventory` Fix segfault in ingress, persistentvolumeclaim, statefulset in kube_inventory + - [#9901](https://github.com/influxdata/telegraf/pull/9901) `inputs.ethtool` Add normalization of tags for ethtool input plugin + - [#9957](https://github.com/influxdata/telegraf/pull/9957) `inputs.internet_speed` Resolve missing latency field + - [#9662](https://github.com/influxdata/telegraf/pull/9662) `inputs.prometheus` Decode Prometheus scrape path from Kuberentes labels + - [#9933](https://github.com/influxdata/telegraf/pull/9933) `inputs.procstat` Correct conversion of int with specific bit size + - [#9940](https://github.com/influxdata/telegraf/pull/9940) `inputs.webhooks` Provide more fields for papertrail event webhook + - [#9892](https://github.com/influxdata/telegraf/pull/9892) `inputs.mongodb` Solve compatibility issue for mongodb inputs when using 5.x relicaset + - [#9768](https://github.com/influxdata/telegraf/pull/9768) Update github.com/Azure/azure-kusto-go module from 0.3.2 to 0.4.0 + - [#9904](https://github.com/influxdata/telegraf/pull/9904) Update github.com/golang-jwt/jwt/v4 module from 4.0.0 to 4.1.0 + - [#9921](https://github.com/influxdata/telegraf/pull/9921) Update github.com/apache/thrift module from 0.14.2 to 0.15.0 + - [#9403](https://github.com/influxdata/telegraf/pull/9403) `inputs.mysql`Fix inconsistent metric types in mysql + - [#9905](https://github.com/influxdata/telegraf/pull/9905) Update github.com/docker/docker module from 20.10.7+incompatible to 20.10.9+incompatible + - [#9920](https://github.com/influxdata/telegraf/pull/9920) `inputs.prometheus` Move err check to correct place + - [#9869](https://github.com/influxdata/telegraf/pull/9869) Update github.com/prometheus/common module from 0.26.0 to 0.31.1 + - [#9866](https://github.com/influxdata/telegraf/pull/9866) Update snowflake database driver module to 1.6.2 + - [#9527](https://github.com/influxdata/telegraf/pull/9527) `inputs.intel_rdt` Allow sudo usage + - [#9893](https://github.com/influxdata/telegraf/pull/9893) Update github.com/jaegertracing/jaeger module from 1.15.1 to 1.26.0 + +#### New External Plugins + + - [IBM DB2](https://github.com/bonitoo-io/telegraf-input-db2) - contributed by @sranka + - [Oracle Database](https://github.com/bonitoo-io/telegraf-input-oracle) - contributed by @sranka + ## v1.20.2 [2021-10-07] #### Bugfixes @@ -134,7 +174,7 @@ #### Bugfixes - [#9363](https://github.com/influxdata/telegraf/pull/9363) `outputs.dynatrace` Update dynatrace output to allow optional default dimensions - - [#9526](https://github.com/influxdata/telegraf/pull/9526) `outputs.influxdb` Fix metrics reported as written but not actually written + - [#9526](https://github.com/influxdata/telegraf/pull/9526) `outputs.influxdb` Fix metrics reported as written but not actually written - [#9549](https://github.com/influxdata/telegraf/pull/9549) `inputs.kube_inventory` Prevent segfault in persistent volume claims - [#9503](https://github.com/influxdata/telegraf/pull/9503) `inputs.nsq_consumer` Fix connection error when not using server setting - [#9540](https://github.com/influxdata/telegraf/pull/9540) `inputs.sql` Fix handling bool column @@ -243,7 +283,7 @@ #### New Input Plugins -- [Alibaba CloudMonitor Service (Aliyun)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aliyuncms) - contributed by @i-prudnikov +- [Alibaba CloudMonitor Service (Aliyun)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aliyuncms) - contributed by @i-prudnikov - [OpenTelemetry](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry) - contributed by @jacobmarble - [Intel Data Plane Development Kit (DPDK)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dpdk) - contributed by @p-zak - [KNX](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/knx_listener) - contributed by @DocLambda @@ -397,7 +437,7 @@ #### New Parsers - [XML Parser Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/xml) - Contributed by @srebhan - + #### New Serializers - [MessagePack Serializer Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/serializers/msgpack) - Contributed by @dialogbox @@ -405,7 +445,7 @@ - [GeoIP Processor Plugin ](https://github.com/a-bali/telegraf-geoip) - Contributed by @a-bali - [Plex Webhook Input Plugin](https://github.com/russorat/telegraf-webhooks-plex) - Contributed by @russorat - [SMCIPMITool Input Plugin](https://github.com/jhpope/smc_ipmi) - Contributed by @jhpope - + ## v1.17.3 [2021-02-17] #### Bugfixes From f45647269c350f8d13c1e336dffc9fc300575c95 Mon Sep 17 00:00:00 2001 From: Josh Powers Date: Wed, 27 Oct 2021 14:01:28 -0600 Subject: [PATCH 723/761] go mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 316ee7038388c..bfc341b558dad 100644 --- a/go.sum +++ b/go.sum @@ -1994,8 +1994,6 @@ github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0 github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/testcontainers/testcontainers-go v0.11.1 h1:FiYsB83LSGbiawoV8TpAZGfcCUbtaeeg1SXqEKUxh08= github.com/testcontainers/testcontainers-go v0.11.1/go.mod h1:/V0UVq+1e7NWYoqTPog179clf0Qp9TOyp4EcXaEFQz8= -github.com/tidwall/gjson v1.8.0 h1:Qt+orfosKn0rbNTZqHYDqBrmm3UDA4KRkv70fDzG+PQ= -github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= github.com/tetafro/godot v1.4.4/go.mod h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4= github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= From 7a4e60aa464f32e81014aaff991ca7f8ba2592f3 Mon Sep 17 00:00:00 2001 From: Josh Powers Date: Wed, 27 Oct 2021 14:04:45 -0600 Subject: [PATCH 724/761] Telegraf v1.20.3 From 0f6c059147c1c05599963558b7f409712b8142c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 26 Oct 2021 15:45:03 +0200 Subject: [PATCH 725/761] fix: Linter fixes for plugins/inputs/[h-j]* (#9986) (cherry picked from commit 77248978c748a43f55a25f7f722ca7338fb192e6) --- plugins/inputs/haproxy/haproxy_test.go | 5 +- plugins/inputs/hddtemp/hddtemp_test.go | 9 +-- plugins/inputs/http/http_test.go | 63 +++++++++--------- .../http_listener_v2/http_listener_v2_test.go | 4 +- plugins/inputs/icinga2/icinga2.go | 12 ++-- .../influxdb_listener_test.go | 5 +- .../influxdb_v2_listener_test.go | 4 +- plugins/inputs/interrupts/interrupts.go | 28 +++++--- plugins/inputs/ipmi_sensor/ipmi.go | 23 +++---- plugins/inputs/ipmi_sensor/ipmi_test.go | 65 +++++++++++++------ plugins/inputs/jenkins/jenkins_test.go | 8 +-- plugins/inputs/jolokia2/client.go | 4 +- plugins/inputs/jolokia2/gatherer.go | 2 +- plugins/inputs/jolokia2/jolokia_agent.go | 6 +- plugins/inputs/jolokia2/jolokia_proxy.go | 4 +- plugins/inputs/jolokia2/jolokia_test.go | 11 ++-- .../openconfig_telemetry.go | 13 ++-- 17 files changed, 155 insertions(+), 111 deletions(-) diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index c5c06e930c15c..21a1b09c10d02 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -12,8 +12,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) type statServer struct{} @@ -134,7 +135,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { } sockets[i] = sock - defer sock.Close() + defer sock.Close() //nolint:revive // done on purpose, closing will be executed properly s := statServer{} go s.serverSocket(sock) diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index 79fceb72e8129..769022049d17a 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -3,10 +3,11 @@ package hddtemp import ( "testing" - hddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" + "github.com/influxdata/telegraf/testutil" ) type mockFetcher struct { @@ -33,14 +34,14 @@ func newMockFetcher() *mockFetcher { } func TestFetch(t *testing.T) { - hddtemp := &HDDTemp{ + hddTemp := &HDDTemp{ fetcher: newMockFetcher(), Address: "localhost", Devices: []string{"*"}, } acc := &testutil.Accumulator{} - err := hddtemp.Gather(acc) + err := hddTemp.Gather(acc) require.NoError(t, err) assert.Equal(t, acc.NFields(), 2) diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index da9fed2251514..c485167205708 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -9,15 +9,16 @@ import ( "net/url" "testing" + "github.com/stretchr/testify/require" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" - oauth "github.com/influxdata/telegraf/plugins/common/oauth" - plugin "github.com/influxdata/telegraf/plugins/inputs/http" + "github.com/influxdata/telegraf/plugins/common/oauth" + httpplugin "github.com/influxdata/telegraf/plugins/inputs/http" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) -func TestHTTPwithJSONFormat(t *testing.T) { +func TestHTTPWithJSONFormat(t *testing.T) { fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { _, _ = w.Write([]byte(simpleJSON)) @@ -27,9 +28,9 @@ func TestHTTPwithJSONFormat(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, } metricName := "metricName" @@ -50,7 +51,7 @@ func TestHTTPwithJSONFormat(t *testing.T) { require.Equal(t, metric.Measurement, metricName) require.Len(t, acc.Metrics[0].Fields, 1) require.Equal(t, acc.Metrics[0].Fields["a"], 1.2) - require.Equal(t, acc.Metrics[0].Tags["url"], url) + require.Equal(t, acc.Metrics[0].Tags["url"], address) } func TestHTTPHeaders(t *testing.T) { @@ -69,9 +70,9 @@ func TestHTTPHeaders(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, Headers: map[string]string{header: headerValue}, } @@ -92,9 +93,9 @@ func TestInvalidStatusCode(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, } metricName := "metricName" @@ -115,9 +116,9 @@ func TestSuccessStatusCodes(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, SuccessStatusCodes: []int{200, 202}, } @@ -143,7 +144,7 @@ func TestMethod(t *testing.T) { })) defer fakeServer.Close() - plugin := &plugin.HTTP{ + plugin := &httpplugin.HTTP{ URLs: []string{fakeServer.URL}, Method: "POST", } @@ -169,18 +170,18 @@ func TestBodyAndContentEncoding(t *testing.T) { ts := httptest.NewServer(http.NotFoundHandler()) defer ts.Close() - url := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) + address := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) tests := []struct { name string - plugin *plugin.HTTP + plugin *httpplugin.HTTP queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) }{ { name: "no body", - plugin: &plugin.HTTP{ + plugin: &httpplugin.HTTP{ Method: "POST", - URLs: []string{url}, + URLs: []string{address}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) @@ -191,8 +192,8 @@ func TestBodyAndContentEncoding(t *testing.T) { }, { name: "post body", - plugin: &plugin.HTTP{ - URLs: []string{url}, + plugin: &httpplugin.HTTP{ + URLs: []string{address}, Method: "POST", Body: "test", }, @@ -205,8 +206,8 @@ func TestBodyAndContentEncoding(t *testing.T) { }, { name: "get method body is sent", - plugin: &plugin.HTTP{ - URLs: []string{url}, + plugin: &httpplugin.HTTP{ + URLs: []string{address}, Method: "GET", Body: "test", }, @@ -219,8 +220,8 @@ func TestBodyAndContentEncoding(t *testing.T) { }, { name: "gzip encoding", - plugin: &plugin.HTTP{ - URLs: []string{url}, + plugin: &httpplugin.HTTP{ + URLs: []string{address}, Method: "GET", Body: "test", ContentEncoding: "gzip", @@ -269,13 +270,13 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { tests := []struct { name string - plugin *plugin.HTTP + plugin *httpplugin.HTTP tokenHandler TestHandlerFunc handler TestHandlerFunc }{ { name: "no credentials", - plugin: &plugin.HTTP{ + plugin: &httpplugin.HTTP{ URLs: []string{u.String()}, }, handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { @@ -285,7 +286,7 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { }, { name: "success", - plugin: &plugin.HTTP{ + plugin: &httpplugin.HTTP{ URLs: []string{u.String() + "/write"}, HTTPClientConfig: httpconfig.HTTPClientConfig{ OAuth2Config: oauth.OAuth2Config{ diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index da70f443998e1..bf320d6f05174 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -14,10 +14,11 @@ import ( "time" "github.com/golang/snappy" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) const ( @@ -371,6 +372,7 @@ func TestWriteHTTPGzippedData(t *testing.T) { client := &http.Client{} resp, err := client.Do(req) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) hostTags := []string{"server02", "server03", diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go index 17069e169c81b..f56192a7a9282 100644 --- a/plugins/inputs/icinga2/icinga2.go +++ b/plugins/inputs/icinga2/icinga2.go @@ -82,7 +82,7 @@ func (i *Icinga2) SampleConfig() string { func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) { for _, check := range checks { - url, err := url.Parse(i.Server) + serverURL, err := url.Parse(i.Server) if err != nil { i.Log.Error(err.Error()) continue @@ -106,9 +106,9 @@ func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) { "check_command": check.Attrs.CheckCommand, "source": source, "state": levels[state], - "server": url.Hostname(), - "scheme": url.Scheme, - "port": url.Port(), + "server": serverURL.Hostname(), + "scheme": serverURL.Scheme, + "port": serverURL.Port(), } acc.AddFields(fmt.Sprintf("icinga2_%s", i.ObjectType), fields, tags) @@ -152,9 +152,9 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error { requestURL += "&attrs=host_name" } - url := fmt.Sprintf(requestURL, i.Server, i.ObjectType) + address := fmt.Sprintf(requestURL, i.Server, i.ObjectType) - req, err := http.NewRequest("GET", url, nil) + req, err := http.NewRequest("GET", address, nil) if err != nil { return err } diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go index 6b88907f95801..36952f6851064 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -13,10 +13,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) const ( @@ -416,6 +417,7 @@ func TestWriteGzippedData(t *testing.T) { client := &http.Client{} resp, err := client.Do(req) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) hostTags := []string{"server02", "server03", @@ -526,6 +528,7 @@ func TestQuery(t *testing.T) { resp, err := http.Post( createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 200, resp.StatusCode) } diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go index 055dfc395ba7b..4338f34f89567 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go @@ -15,9 +15,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) const ( @@ -374,6 +375,7 @@ func TestWriteGzippedData(t *testing.T) { client := &http.Client{} resp, err := client.Do(req) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) hostTags := []string{"server02", "server03", diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go index 6d68818a8f055..d9e9dd287361c 100644 --- a/plugins/inputs/interrupts/interrupts.go +++ b/plugins/inputs/interrupts/interrupts.go @@ -57,7 +57,7 @@ func parseInterrupts(r io.Reader) ([]IRQ, error) { if scanner.Scan() { cpus := strings.Fields(scanner.Text()) if cpus[0] != "CPU0" { - return nil, fmt.Errorf("Expected first line to start with CPU0, but was %s", scanner.Text()) + return nil, fmt.Errorf("expected first line to start with CPU0, but was %s", scanner.Text()) } cpucount = len(cpus) } @@ -93,7 +93,7 @@ scan: irqs = append(irqs, *irq) } if scanner.Err() != nil { - return nil, fmt.Errorf("Error scanning file: %s", scanner.Err()) + return nil, fmt.Errorf("error scanning file: %s", scanner.Err()) } return irqs, nil } @@ -110,15 +110,9 @@ func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) { func (s *Interrupts) Gather(acc telegraf.Accumulator) error { for measurement, file := range map[string]string{"interrupts": "/proc/interrupts", "soft_interrupts": "/proc/softirqs"} { - f, err := os.Open(file) + irqs, err := parseFile(file) if err != nil { - acc.AddError(fmt.Errorf("Could not open file: %s", file)) - continue - } - defer f.Close() - irqs, err := parseInterrupts(f) - if err != nil { - acc.AddError(fmt.Errorf("Parsing %s: %s", file, err)) + acc.AddError(err) continue } reportMetrics(measurement, irqs, acc, s.CPUAsTag) @@ -126,6 +120,20 @@ func (s *Interrupts) Gather(acc telegraf.Accumulator) error { return nil } +func parseFile(file string) ([]IRQ, error) { + f, err := os.Open(file) + if err != nil { + return nil, fmt.Errorf("could not open file: %s", file) + } + defer f.Close() + + irqs, err := parseInterrupts(f) + if err != nil { + return nil, fmt.Errorf("parsing %s: %s", file, err) + } + return irqs, nil +} + func reportMetrics(measurement string, irqs []IRQ, acc telegraf.Accumulator, cpusAsTags bool) { for _, irq := range irqs { tags, fields := gatherTagsFields(irq) diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index 3f201708ff5c5..801188130c960 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "fmt" - "log" "os" "os/exec" "path/filepath" @@ -39,6 +38,8 @@ type Ipmi struct { UseSudo bool UseCache bool CachePath string + + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -172,17 +173,17 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out)) } if m.MetricVersion == 2 { - return parseV2(acc, hostname, out, timestamp) + return m.parseV2(acc, hostname, out, timestamp) } - return parseV1(acc, hostname, out, timestamp) + return m.parseV1(acc, hostname, out, timestamp) } -func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { +func (m *Ipmi) parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { // each line will look something like // Planar VBAT | 3.05 Volts | ok scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) for scanner.Scan() { - ipmiFields := extractFieldsFromRegex(reV1ParseLine, scanner.Text()) + ipmiFields := m.extractFieldsFromRegex(reV1ParseLine, scanner.Text()) if len(ipmiFields) != 3 { continue } @@ -234,14 +235,14 @@ func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredA return scanner.Err() } -func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { +func (m *Ipmi) parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { // each line will look something like // CMOS Battery | 65h | ok | 7.1 | // Temp | 0Eh | ok | 3.1 | 55 degrees C // Drive 0 | A0h | ok | 7.1 | Drive Present scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) for scanner.Scan() { - ipmiFields := extractFieldsFromRegex(reV2ParseLine, scanner.Text()) + ipmiFields := m.extractFieldsFromRegex(reV2ParseLine, scanner.Text()) if len(ipmiFields) < 3 || len(ipmiFields) > 4 { continue } @@ -257,7 +258,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredA tags["entity_id"] = transform(ipmiFields["entity_id"]) tags["status_code"] = trim(ipmiFields["status_code"]) fields := make(map[string]interface{}) - descriptionResults := extractFieldsFromRegex(reV2ParseDescription, trim(ipmiFields["description"])) + descriptionResults := m.extractFieldsFromRegex(reV2ParseDescription, trim(ipmiFields["description"])) // This is an analog value with a unit if descriptionResults["analogValue"] != "" && len(descriptionResults["analogUnit"]) >= 1 { var err error @@ -266,7 +267,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredA continue } // Some implementations add an extra status to their analog units - unitResults := extractFieldsFromRegex(reV2ParseUnit, descriptionResults["analogUnit"]) + unitResults := m.extractFieldsFromRegex(reV2ParseUnit, descriptionResults["analogUnit"]) tags["unit"] = transform(unitResults["realAnalogUnit"]) if unitResults["statusDesc"] != "" { tags["status_desc"] = transform(unitResults["statusDesc"]) @@ -289,12 +290,12 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredA } // extractFieldsFromRegex consumes a regex with named capture groups and returns a kvp map of strings with the results -func extractFieldsFromRegex(re *regexp.Regexp, input string) map[string]string { +func (m *Ipmi) extractFieldsFromRegex(re *regexp.Regexp, input string) map[string]string { submatches := re.FindStringSubmatch(input) results := make(map[string]string) subexpNames := re.SubexpNames() if len(subexpNames) > len(submatches) { - log.Printf("D! No matches found in '%s'", input) + m.Log.Debugf("No matches found in '%s'", input) return results } for i, name := range subexpNames { diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index 6e731f6309894..504a7467f5130 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -7,10 +7,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestGather(t *testing.T) { @@ -20,6 +21,7 @@ func TestGather(t *testing.T) { Privilege: "USER", Timeout: config.Duration(time.Second * 5), HexKey: "1234567F", + Log: testutil.Logger{}, } // overwriting exec commands with mock commands @@ -44,7 +46,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(20), - "status": int(1), + "status": 1, }, map[string]string{ "name": "ambient_temp", @@ -55,7 +57,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(80), - "status": int(1), + "status": 1, }, map[string]string{ "name": "altitude", @@ -66,7 +68,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(210), - "status": int(1), + "status": 1, }, map[string]string{ "name": "avg_power", @@ -77,7 +79,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(4.9), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_5v", @@ -88,7 +90,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(3.05), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_vbat", @@ -99,7 +101,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(2610), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1a_tach", @@ -110,7 +112,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(1775), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1b_tach", @@ -127,6 +129,7 @@ func TestGather(t *testing.T) { i = &Ipmi{ Path: "ipmitool", Timeout: config.Duration(time.Second * 5), + Log: testutil.Logger{}, } err = acc.GatherError(i.Gather) @@ -139,7 +142,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(20), - "status": int(1), + "status": 1, }, map[string]string{ "name": "ambient_temp", @@ -149,7 +152,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(80), - "status": int(1), + "status": 1, }, map[string]string{ "name": "altitude", @@ -159,7 +162,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(210), - "status": int(1), + "status": 1, }, map[string]string{ "name": "avg_power", @@ -169,7 +172,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(4.9), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_5v", @@ -179,7 +182,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(3.05), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_vbat", @@ -189,7 +192,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(2610), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1a_tach", @@ -199,7 +202,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(1775), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1b_tach", @@ -371,7 +374,7 @@ OS RealTime Mod | 0x00 | ok // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, args := args[3], args[4:] + cmd := args[3] // Ignore the returned errors for the mocked interface as tests will fail anyway if cmd == "ipmitool" { @@ -380,8 +383,10 @@ OS RealTime Mod | 0x00 | ok } else { //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" os.Exit(1) } + //nolint:revive // error code is important for this "test" os.Exit(0) } @@ -393,6 +398,7 @@ func TestGatherV2(t *testing.T) { Timeout: config.Duration(time.Second * 5), MetricVersion: 2, HexKey: "0000000F", + Log: testutil.Logger{}, } // overwriting exec commands with mock commands execCommand = fakeExecCommandV2 @@ -434,6 +440,7 @@ func TestGatherV2(t *testing.T) { Path: "ipmitool", Timeout: config.Duration(time.Second * 5), MetricVersion: 2, + Log: testutil.Logger{}, } err = acc.GatherError(i.Gather) @@ -568,7 +575,7 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, args := args[3], args[4:] + cmd := args[3] // Ignore the returned errors for the mocked interface as tests will fail anyway if cmd == "ipmitool" { @@ -577,8 +584,10 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected } else { //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" os.Exit(1) } + //nolint:revive // error code is important for this "test" os.Exit(0) } @@ -613,10 +622,14 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected v2Data, } + ipmi := &Ipmi{ + Log: testutil.Logger{}, + } + for i := range tests { t.Logf("Checking v%d data...", i+1) - extractFieldsFromRegex(reV1ParseLine, tests[i]) - extractFieldsFromRegex(reV2ParseLine, tests[i]) + ipmi.extractFieldsFromRegex(reV1ParseLine, tests[i]) + ipmi.extractFieldsFromRegex(reV2ParseLine, tests[i]) } } @@ -653,11 +666,16 @@ func Test_parseV1(t *testing.T) { wantErr: false, }, } + + ipmi := &Ipmi{ + Log: testutil.Logger{}, + } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - if err := parseV1(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { + if err := ipmi.parseV1(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { t.Errorf("parseV1() error = %v, wantErr %v", err, tt.wantErr) } @@ -746,10 +764,15 @@ func Test_parseV2(t *testing.T) { wantErr: false, }, } + + ipmi := &Ipmi{ + Log: testutil.Logger{}, + } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - if err := parseV2(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { + if err := ipmi.parseV2(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { t.Errorf("parseV2() error = %v, wantErr %v", err, tt.wantErr) } testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index 2b74d654a6d2d..e5f09ad66d1ca 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -44,13 +44,13 @@ func TestJobRequest(t *testing.T) { } for _, test := range tests { hierarchyName := test.input.hierarchyName() - URL := test.input.URL() + address := test.input.URL() if hierarchyName != test.hierarchyName { t.Errorf("Expected %s, got %s\n", test.hierarchyName, hierarchyName) } - if test.URL != "" && URL != test.URL { - t.Errorf("Expected %s, got %s\n", test.URL, URL) + if test.URL != "" && address != test.URL { + t.Errorf("Expected %s, got %s\n", test.URL, address) } } } @@ -429,7 +429,7 @@ func TestInitialize(t *testing.T) { } if test.output != nil { if test.input.client == nil { - t.Fatalf("%s: failed %s, jenkins instance shouldn't be nil", test.name, te.Error()) + t.Fatalf("%s: failed %v, jenkins instance shouldn't be nil", test.name, te) } if test.input.MaxConnections != test.output.MaxConnections { t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections) diff --git a/plugins/inputs/jolokia2/client.go b/plugins/inputs/jolokia2/client.go index 789450e3a1016..e3b42f660dff6 100644 --- a/plugins/inputs/jolokia2/client.go +++ b/plugins/inputs/jolokia2/client.go @@ -95,7 +95,7 @@ type jolokiaResponse struct { Status int `json:"status"` } -func NewClient(url string, config *ClientConfig) (*Client, error) { +func NewClient(address string, config *ClientConfig) (*Client, error) { tlsConfig, err := config.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -112,7 +112,7 @@ func NewClient(url string, config *ClientConfig) (*Client, error) { } return &Client{ - URL: url, + URL: address, config: config, client: client, }, nil diff --git a/plugins/inputs/jolokia2/gatherer.go b/plugins/inputs/jolokia2/gatherer.go index 99cd2f4b91a13..1dfdc057e832b 100644 --- a/plugins/inputs/jolokia2/gatherer.go +++ b/plugins/inputs/jolokia2/gatherer.go @@ -80,7 +80,7 @@ func (g *Gatherer) generatePoints(metric Metric, responses []ReadResponse) ([]po for _, response := range responses { switch response.Status { case 200: - break + // Correct response status - do nothing. case 404: continue default: diff --git a/plugins/inputs/jolokia2/jolokia_agent.go b/plugins/inputs/jolokia2/jolokia_agent.go index 5b2e3da37c16e..23336dd6f4351 100644 --- a/plugins/inputs/jolokia2/jolokia_agent.go +++ b/plugins/inputs/jolokia2/jolokia_agent.go @@ -68,7 +68,7 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error { for _, url := range ja.URLs { client, err := ja.createClient(url) if err != nil { - acc.AddError(fmt.Errorf("Unable to create client for %s: %v", url, err)) + acc.AddError(fmt.Errorf("unable to create client for %s: %v", url, err)) continue } ja.clients = append(ja.clients, client) @@ -97,8 +97,8 @@ func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error { func (ja *JolokiaAgent) createMetrics() []Metric { var metrics []Metric - for _, config := range ja.Metrics { - metrics = append(metrics, NewMetric(config, + for _, metricConfig := range ja.Metrics { + metrics = append(metrics, NewMetric(metricConfig, ja.DefaultFieldPrefix, ja.DefaultFieldSeparator, ja.DefaultTagPrefix)) } diff --git a/plugins/inputs/jolokia2/jolokia_proxy.go b/plugins/inputs/jolokia2/jolokia_proxy.go index 1f91e1cb911fe..8654c9308762c 100644 --- a/plugins/inputs/jolokia2/jolokia_proxy.go +++ b/plugins/inputs/jolokia2/jolokia_proxy.go @@ -93,8 +93,8 @@ func (jp *JolokiaProxy) Gather(acc telegraf.Accumulator) error { func (jp *JolokiaProxy) createMetrics() []Metric { var metrics []Metric - for _, config := range jp.Metrics { - metrics = append(metrics, NewMetric(config, + for _, metricConfig := range jp.Metrics { + metrics = append(metrics, NewMetric(metricConfig, jp.DefaultFieldPrefix, jp.DefaultFieldSeparator, jp.DefaultTagPrefix)) } diff --git a/plugins/inputs/jolokia2/jolokia_test.go b/plugins/inputs/jolokia2/jolokia_test.go index eddcebfce0892..01750bf002ff5 100644 --- a/plugins/inputs/jolokia2/jolokia_test.go +++ b/plugins/inputs/jolokia2/jolokia_test.go @@ -6,11 +6,12 @@ import ( "net/http/httptest" "testing" + "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/influxdata/toml/ast" - "github.com/stretchr/testify/assert" ) func TestJolokia2_ScalarValues(t *testing.T) { @@ -749,15 +750,15 @@ func TestJolokia2_ProxyTargets(t *testing.T) { } func TestFillFields(t *testing.T) { - complex := map[string]interface{}{"Value": []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} - scalar := []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + complexPoint := map[string]interface{}{"Value": []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + scalarPoint := []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} results := map[string]interface{}{} - newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complex, results) + newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complexPoint, results) assert.Equal(t, map[string]interface{}{}, results) results = map[string]interface{}{} - newPointBuilder(Metric{Name: "test", Mbean: "scalar"}, []string{"this", "that"}, "/").fillFields("", scalar, results) + newPointBuilder(Metric{Name: "test", Mbean: "scalar"}, []string{"this", "that"}, "/").fillFields("", scalarPoint, results) assert.Equal(t, map[string]interface{}{}, results) } diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index 96dce5a88c7e7..b95930cd42f87 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -8,17 +8,18 @@ import ( "sync" "time" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/status" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" internaltls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth" "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/status" ) type OpenConfigTelemetry struct { @@ -42,7 +43,7 @@ type OpenConfigTelemetry struct { var ( // Regex to match and extract data points from path value in received key - keyPathRegex = regexp.MustCompile("\\/([^\\/]*)\\[([A-Za-z0-9\\-\\/]*\\=[^\\[]*)\\]") + keyPathRegex = regexp.MustCompile(`/([^/]*)\[([A-Za-z0-9\-/]*=[^\[]*)]`) sampleConfig = ` ## List of device addresses to collect telemetry from servers = ["localhost:1883"] From a58977e848d3bc289528709f9da702968a04a2dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 27 Oct 2021 17:48:57 +0200 Subject: [PATCH 726/761] fix: Linter fixes for plugins/inputs/[k-l]* (#9999) (cherry picked from commit eec6fd5702b59423c4eeb85ea10517cacea0f745) --- .../inputs/kafka_consumer/kafka_consumer.go | 11 +++--- .../kafka_consumer/kafka_consumer_test.go | 12 ++++--- .../kafka_consumer_legacy_integration_test.go | 14 ++++---- .../kinesis_consumer/kinesis_consumer_test.go | 14 ++++---- .../inputs/knx_listener/knx_listener_test.go | 36 +++++++++---------- plugins/inputs/kube_inventory/kube_state.go | 13 +++---- plugins/inputs/kube_inventory/node.go | 13 ++++--- plugins/inputs/kube_inventory/pod.go | 12 +++---- plugins/inputs/lanz/lanz.go | 2 ++ plugins/inputs/lanz/lanz_test.go | 3 +- plugins/inputs/logparser/logparser_test.go | 21 ++++++----- plugins/inputs/logstash/logstash.go | 22 ++++++------ plugins/inputs/lustre2/lustre2_test.go | 6 ++-- 13 files changed, 93 insertions(+), 86 deletions(-) diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index c6894fd74ae21..4462cd016766c 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -3,12 +3,12 @@ package kafka_consumer import ( "context" "fmt" - "log" "strings" "sync" "time" "github.com/Shopify/sarama" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/kafka" @@ -229,7 +229,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { go func() { defer k.wg.Done() for ctx.Err() == nil { - handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser) + handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser, k.Log) handler.MaxMessageLen = k.MaxMessageLen handler.TopicTag = k.TopicTag err := k.consumer.Consume(ctx, k.Topics, handler) @@ -273,12 +273,13 @@ type Message struct { session sarama.ConsumerGroupSession } -func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser) *ConsumerGroupHandler { +func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser, log telegraf.Logger) *ConsumerGroupHandler { handler := &ConsumerGroupHandler{ acc: acc.WithTracking(maxUndelivered), sem: make(chan empty, maxUndelivered), undelivered: make(map[telegraf.TrackingID]Message, maxUndelivered), parser: parser, + log: log, } return handler } @@ -296,6 +297,8 @@ type ConsumerGroupHandler struct { mu sync.Mutex undelivered map[telegraf.TrackingID]Message + + log telegraf.Logger } // Setup is called once when a new session is opened. It setups up the handler @@ -332,7 +335,7 @@ func (h *ConsumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) { msg, ok := h.undelivered[track.ID()] if !ok { - log.Printf("E! [inputs.kafka_consumer] Could not mark message delivered: %d", track.ID()) + h.log.Errorf("Could not mark message delivered: %d", track.ID()) return } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index c73104278338e..68fd9e0627bed 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -6,12 +6,13 @@ import ( "time" "github.com/Shopify/sarama" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) type FakeConsumerGroup struct { @@ -259,7 +260,7 @@ func (c *FakeConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage { func TestConsumerGroupHandler_Lifecycle(t *testing.T) { acc := &testutil.Accumulator{} parser := value.NewValueParser("cpu", "int", "", nil) - cg := NewConsumerGroupHandler(acc, 1, parser) + cg := NewConsumerGroupHandler(acc, 1, parser, testutil.Logger{}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -274,11 +275,12 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) { require.NoError(t, err) cancel() - // This produces a flappy testcase probably due to a race between context cancelation and consumption. + // This produces a flappy testcase probably due to a race between context cancellation and consumption. // Furthermore, it is not clear what the outcome of this test should be... // err = cg.ConsumeClaim(session, &claim) //require.NoError(t, err) // So stick with the line below for now. + //nolint:errcheck cg.ConsumeClaim(session, &claim) err = cg.Cleanup(session) @@ -288,7 +290,7 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) { func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { acc := &testutil.Accumulator{} parser := value.NewValueParser("cpu", "int", "", nil) - cg := NewConsumerGroupHandler(acc, 1, parser) + cg := NewConsumerGroupHandler(acc, 1, parser, testutil.Logger{}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -402,7 +404,7 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { t.Run(tt.name, func(t *testing.T) { acc := &testutil.Accumulator{} parser := value.NewValueParser("cpu", "int", "", nil) - cg := NewConsumerGroupHandler(acc, 1, parser) + cg := NewConsumerGroupHandler(acc, 1, parser, testutil.Logger{}) cg.MaxMessageLen = tt.maxMessageLen cg.TopicTag = tt.topicTag diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go index 976412a7196b5..473c5b9740847 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go @@ -6,11 +6,10 @@ import ( "time" "github.com/Shopify/sarama" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" ) func TestReadsMetricsFromKafka(t *testing.T) { @@ -51,7 +50,7 @@ func TestReadsMetricsFromKafka(t *testing.T) { var acc testutil.Accumulator // Sanity check - assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") + require.Equal(t, 0, len(acc.Metrics), "There should not be any points") if err := k.Start(&acc); err != nil { t.Fatal(err.Error()) } else { @@ -65,14 +64,14 @@ func TestReadsMetricsFromKafka(t *testing.T) { require.NoError(t, err) if len(acc.Metrics) == 1 { point := acc.Metrics[0] - assert.Equal(t, "cpu_load_short", point.Measurement) - assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) - assert.Equal(t, map[string]string{ + require.Equal(t, "cpu_load_short", point.Measurement) + require.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) + require.Equal(t, map[string]string{ "host": "server01", "direction": "in", "region": "us-west", }, point.Tags) - assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) + require.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) } else { t.Errorf("No points found in accumulator, expected 1") } @@ -84,6 +83,7 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { // Give the kafka container up to 2 seconds to get the point to the consumer ticker := time.NewTicker(5 * time.Millisecond) counter := 0 + //nolint:gosimple // for-select used on purpose for { select { case <-ticker.C: diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go index 6d52f07835e6b..1e0d935e03cc6 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go @@ -2,15 +2,17 @@ package kinesis_consumer import ( "encoding/base64" + "testing" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/kinesis/types" consumer "github.com/harlow/kinesis-consumer" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "testing" ) func TestKinesisConsumer_onMessage(t *testing.T) { @@ -177,7 +179,7 @@ func TestKinesisConsumer_onMessage(t *testing.T) { ContentEncoding: "notsupported", } err := k.Init() - assert.NotNil(t, err) + require.NotNil(t, err) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -187,18 +189,18 @@ func TestKinesisConsumer_onMessage(t *testing.T) { records: tt.fields.records, } err := k.Init() - assert.Nil(t, err) + require.Nil(t, err) acc := testutil.Accumulator{} if err := k.onMessage(acc.WithTracking(tt.expected.numberOfMetrics), tt.args.r); (err != nil) != tt.wantErr { t.Errorf("onMessage() error = %v, wantErr %v", err, tt.wantErr) } - assert.Equal(t, tt.expected.numberOfMetrics, len(acc.Metrics)) + require.Equal(t, tt.expected.numberOfMetrics, len(acc.Metrics)) for _, metric := range acc.Metrics { if logEventMessage, ok := metric.Fields["message"]; ok { - assert.Contains(t, logEventMessage.(string), tt.expected.messageContains) + require.Contains(t, logEventMessage.(string), tt.expected.messageContains) } else { t.Errorf("Expect logEvents to be present") } diff --git a/plugins/inputs/knx_listener/knx_listener_test.go b/plugins/inputs/knx_listener/knx_listener_test.go index b0502fbbc8e95..adb07eb6d0113 100644 --- a/plugins/inputs/knx_listener/knx_listener_test.go +++ b/plugins/inputs/knx_listener/knx_listener_test.go @@ -6,14 +6,12 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/vapourismo/knx-go/knx" "github.com/vapourismo/knx-go/knx/cemi" "github.com/vapourismo/knx-go/knx/dpt" + + "github.com/influxdata/telegraf/testutil" ) const epsilon = 1e-3 @@ -127,17 +125,17 @@ func TestRegularReceives_DPT(t *testing.T) { // Check if we got what we expected require.Len(t, acc.Metrics, len(testcases)) for i, m := range acc.Metrics { - assert.Equal(t, "test", m.Measurement) - assert.Equal(t, testcases[i].address, m.Tags["groupaddress"]) - assert.Len(t, m.Fields, 1) + require.Equal(t, "test", m.Measurement) + require.Equal(t, testcases[i].address, m.Tags["groupaddress"]) + require.Len(t, m.Fields, 1) switch v := testcases[i].value.(type) { case bool, int64, uint64: - assert.Equal(t, v, m.Fields["value"]) + require.Equal(t, v, m.Fields["value"]) case float64: - assert.InDelta(t, v, m.Fields["value"], epsilon) + require.InDelta(t, v, m.Fields["value"], epsilon) } - assert.True(t, !tstop.Before(m.Time)) - assert.True(t, !tstart.After(m.Time)) + require.True(t, !tstop.Before(m.Time)) + require.True(t, !tstart.After(m.Time)) } } @@ -178,13 +176,13 @@ func TestRegularReceives_MultipleMessages(t *testing.T) { // Check if we got what we expected require.Len(t, acc.Metrics, 2) - assert.Equal(t, "temperature", acc.Metrics[0].Measurement) - assert.Equal(t, "1/1/1", acc.Metrics[0].Tags["groupaddress"]) - assert.Len(t, acc.Metrics[0].Fields, 1) - assert.Equal(t, true, acc.Metrics[0].Fields["value"]) + require.Equal(t, "temperature", acc.Metrics[0].Measurement) + require.Equal(t, "1/1/1", acc.Metrics[0].Tags["groupaddress"]) + require.Len(t, acc.Metrics[0].Fields, 1) + require.Equal(t, true, acc.Metrics[0].Fields["value"]) - assert.Equal(t, "temperature", acc.Metrics[1].Measurement) - assert.Equal(t, "1/1/1", acc.Metrics[1].Tags["groupaddress"]) - assert.Len(t, acc.Metrics[1].Fields, 1) - assert.Equal(t, false, acc.Metrics[1].Fields["value"]) + require.Equal(t, "temperature", acc.Metrics[1].Measurement) + require.Equal(t, "1/1/1", acc.Metrics[1].Tags["groupaddress"]) + require.Len(t, acc.Metrics[1].Fields, 1) + require.Equal(t, false, acc.Metrics[1].Fields["value"]) } diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index 24db993dd39bb..94cb5faf9048b 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -3,7 +3,6 @@ package kube_inventory import ( "context" "fmt" - "log" "os" "strconv" "strings" @@ -37,6 +36,8 @@ type KubernetesInventory struct { SelectorInclude []string `toml:"selector_include"` SelectorExclude []string `toml:"selector_exclude"` + Log telegraf.Logger `toml:"-"` + tls.ClientConfig client *client @@ -169,15 +170,15 @@ func atoi(s string) int64 { return i } -func convertQuantity(s string, m float64) int64 { +func (ki *KubernetesInventory) convertQuantity(s string, m float64) int64 { q, err := resource.ParseQuantity(s) if err != nil { - log.Printf("D! [inputs.kube_inventory] failed to parse quantity: %s", err.Error()) + ki.Log.Debugf("failed to parse quantity: %s", err.Error()) return 0 } f, err := strconv.ParseFloat(fmt.Sprint(q.AsDec()), 64) if err != nil { - log.Printf("D! [inputs.kube_inventory] failed to parse float: %s", err.Error()) + ki.Log.Debugf("failed to parse float: %s", err.Error()) return 0 } if m < 1 { @@ -187,11 +188,11 @@ func convertQuantity(s string, m float64) int64 { } func (ki *KubernetesInventory) createSelectorFilters() error { - filter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude) + selectorFilter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude) if err != nil { return err } - ki.selectorFilter = filter + ki.selectorFilter = selectorFilter return nil } diff --git a/plugins/inputs/kube_inventory/node.go b/plugins/inputs/kube_inventory/node.go index 3c7c9cb38e160..b46b4e6209ffc 100644 --- a/plugins/inputs/kube_inventory/node.go +++ b/plugins/inputs/kube_inventory/node.go @@ -26,13 +26,12 @@ func (ki *KubernetesInventory) gatherNode(n corev1.Node, acc telegraf.Accumulato } for resourceName, val := range n.Status.Capacity { - switch resourceName { case "cpu": - fields["capacity_cpu_cores"] = convertQuantity(val.String(), 1) - fields["capacity_millicpu_cores"] = convertQuantity(val.String(), 1000) + fields["capacity_cpu_cores"] = ki.convertQuantity(val.String(), 1) + fields["capacity_millicpu_cores"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["capacity_memory_bytes"] = convertQuantity(val.String(), 1) + fields["capacity_memory_bytes"] = ki.convertQuantity(val.String(), 1) case "pods": fields["capacity_pods"] = atoi(val.String()) } @@ -41,10 +40,10 @@ func (ki *KubernetesInventory) gatherNode(n corev1.Node, acc telegraf.Accumulato for resourceName, val := range n.Status.Allocatable { switch resourceName { case "cpu": - fields["allocatable_cpu_cores"] = convertQuantity(val.String(), 1) - fields["allocatable_millicpu_cores"] = convertQuantity(val.String(), 1000) + fields["allocatable_cpu_cores"] = ki.convertQuantity(val.String(), 1) + fields["allocatable_millicpu_cores"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["allocatable_memory_bytes"] = convertQuantity(val.String(), 1) + fields["allocatable_memory_bytes"] = ki.convertQuantity(val.String(), 1) case "pods": fields["allocatable_pods"] = atoi(val.String()) } diff --git a/plugins/inputs/kube_inventory/pod.go b/plugins/inputs/kube_inventory/pod.go index ab4e5dd287cbe..ed95dd63d970d 100644 --- a/plugins/inputs/kube_inventory/pod.go +++ b/plugins/inputs/kube_inventory/pod.go @@ -35,11 +35,11 @@ func (ki *KubernetesInventory) gatherPod(p corev1.Pod, acc telegraf.Accumulator) if !ok { cs = &corev1.ContainerStatus{} } - gatherPodContainer(ki, p, *cs, c, acc) + ki.gatherPodContainer(p, *cs, c, acc) } } -func gatherPodContainer(ki *KubernetesInventory, p corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) { +func (ki *KubernetesInventory) gatherPodContainer(p corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) { stateCode := 3 stateReason := "" state := "unknown" @@ -103,17 +103,17 @@ func gatherPodContainer(ki *KubernetesInventory, p corev1.Pod, cs corev1.Contain for resourceName, val := range req { switch resourceName { case "cpu": - fields["resource_requests_millicpu_units"] = convertQuantity(val.String(), 1000) + fields["resource_requests_millicpu_units"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["resource_requests_memory_bytes"] = convertQuantity(val.String(), 1) + fields["resource_requests_memory_bytes"] = ki.convertQuantity(val.String(), 1) } } for resourceName, val := range lim { switch resourceName { case "cpu": - fields["resource_limits_millicpu_units"] = convertQuantity(val.String(), 1000) + fields["resource_limits_millicpu_units"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["resource_limits_memory_bytes"] = convertQuantity(val.String(), 1) + fields["resource_limits_memory_bytes"] = ki.convertQuantity(val.String(), 1) } } diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go index 86bb93a8f754b..a77e99df61f6e 100644 --- a/plugins/inputs/lanz/lanz.go +++ b/plugins/inputs/lanz/lanz.go @@ -8,6 +8,7 @@ import ( "github.com/aristanetworks/goarista/lanz" pb "github.com/aristanetworks/goarista/lanz/proto" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -85,6 +86,7 @@ func (l *Lanz) Stop() { } func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceURL *url.URL) { + //nolint:gosimple // for-select used on purpose for { select { case msg, ok := <-in: diff --git a/plugins/inputs/lanz/lanz_test.go b/plugins/inputs/lanz/lanz_test.go index 684bfc8902bb8..f2a8b5815e36d 100644 --- a/plugins/inputs/lanz/lanz_test.go +++ b/plugins/inputs/lanz/lanz_test.go @@ -6,7 +6,8 @@ import ( "testing" pb "github.com/aristanetworks/goarista/lanz/proto" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" + "github.com/influxdata/telegraf/testutil" ) diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 3100c615cd4e4..a2f780afd21b9 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -25,7 +24,7 @@ func TestStartNoParsers(t *testing.T) { } acc := testutil.Accumulator{} - assert.Error(t, logparser.Start(&acc)) + require.Error(t, logparser.Start(&acc)) } func TestGrokParseLogFilesNonExistPattern(t *testing.T) { @@ -41,7 +40,7 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) { acc := testutil.Accumulator{} err := logparser.Start(&acc) - assert.Error(t, err) + require.Error(t, err) } func TestGrokParseLogFiles(t *testing.T) { @@ -112,7 +111,7 @@ func TestGrokParseLogFiles(t *testing.T) { func TestGrokParseLogFilesAppearLater(t *testing.T) { emptydir, err := os.MkdirTemp("", "TestGrokParseLogFilesAppearLater") defer os.RemoveAll(emptydir) - assert.NoError(t, err) + require.NoError(t, err) logparser := &LogParserPlugin{ Log: testutil.Logger{}, @@ -126,17 +125,17 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { } acc := testutil.Accumulator{} - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) input, err := os.ReadFile(filepath.Join(testdataDir, "test_a.log")) - assert.NoError(t, err) + require.NoError(t, err) err = os.WriteFile(filepath.Join(emptydir, "test_a.log"), input, 0644) - assert.NoError(t, err) + require.NoError(t, err) - assert.NoError(t, acc.GatherError(logparser.Gather)) + require.NoError(t, acc.GatherError(logparser.Gather)) acc.Wait(1) logparser.Stop() @@ -170,7 +169,7 @@ func TestGrokParseLogFilesOneBad(t *testing.T) { acc := testutil.Accumulator{} acc.SetDebug(true) - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) acc.Wait(1) logparser.Stop() @@ -202,7 +201,7 @@ func TestGrokParseLogFiles_TimestampInEpochMilli(t *testing.T) { acc := testutil.Accumulator{} acc.SetDebug(true) - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) acc.Wait(1) logparser.Stop() diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index 6fcaadabcd244..9f5a198587e4d 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -179,8 +179,8 @@ func (logstash *Logstash) createHTTPClient() (*http.Client, error) { } // gatherJSONData query the data source and parse the response JSON -func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { - request, err := http.NewRequest("GET", url, nil) +func (logstash *Logstash) gatherJSONData(address string, value interface{}) error { + request, err := http.NewRequest("GET", address, nil) if err != nil { return err } @@ -206,7 +206,7 @@ func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) - return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body) } err = json.NewDecoder(response.Body).Decode(value) @@ -218,10 +218,10 @@ func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { } // gatherJVMStats gather the JVM metrics and add results to the accumulator -func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Accumulator) error { jvmStats := &JVMStats{} - err := logstash.gatherJSONData(url, jvmStats) + err := logstash.gatherJSONData(address, jvmStats) if err != nil { return err } @@ -244,10 +244,10 @@ func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumu } // gatherJVMStats gather the Process metrics and add results to the accumulator -func (logstash *Logstash) gatherProcessStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherProcessStats(address string, accumulator telegraf.Accumulator) error { processStats := &ProcessStats{} - err := logstash.gatherJSONData(url, processStats) + err := logstash.gatherJSONData(address, processStats) if err != nil { return err } @@ -403,10 +403,10 @@ func (logstash *Logstash) gatherQueueStats( } // gatherJVMStats gather the Pipeline metrics and add results to the accumulator (for Logstash < 6) -func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegraf.Accumulator) error { pipelineStats := &PipelineStats{} - err := logstash.gatherJSONData(url, pipelineStats) + err := logstash.gatherJSONData(address, pipelineStats) if err != nil { return err } @@ -447,10 +447,10 @@ func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.A } // gatherJVMStats gather the Pipelines metrics and add results to the accumulator (for Logstash >= 6) -func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherPipelinesStats(address string, accumulator telegraf.Accumulator) error { pipelinesStats := &PipelinesStats{} - err := logstash.gatherJSONData(url, pipelinesStats) + err := logstash.gatherJSONData(address, pipelinesStats) if err != nil { return err } diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 7fd3fd91f469e..3c5659e18f14f 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -7,11 +7,11 @@ import ( "os" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/influxdata/toml/ast" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // Set config file variables to point to fake directory structure instead of /proc? @@ -358,7 +358,7 @@ func TestLustre2CanParseConfiguration(t *testing.T) { require.NoError(t, toml.UnmarshalTable(lustre2.([]*ast.Table)[0], &plugin)) - assert.Equal(t, Lustre2{ + require.Equal(t, Lustre2{ OstProcfiles: []string{ "/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats", From b6099b5fb39e3e0ab5a10f9cbe9bb28058d9cbcd Mon Sep 17 00:00:00 2001 From: reimda Date: Fri, 29 Oct 2021 13:43:38 -0600 Subject: [PATCH 727/761] chore: remove unused dockerfiles, add link in docs (#10013) (cherry picked from commit 0ebd2f388d7bf8f3fd6dad3fa76f4342bc0c8ea0) --- docs/DOCKER.md | 3 +++ scripts/alpine.docker | 18 ------------------ scripts/buster.docker | 15 --------------- scripts/docker-entrypoint.sh | 8 -------- scripts/stretch.docker | 15 --------------- 5 files changed, 3 insertions(+), 56 deletions(-) create mode 100644 docs/DOCKER.md delete mode 100644 scripts/alpine.docker delete mode 100644 scripts/buster.docker delete mode 100755 scripts/docker-entrypoint.sh delete mode 100644 scripts/stretch.docker diff --git a/docs/DOCKER.md b/docs/DOCKER.md new file mode 100644 index 0000000000000..5d0484e10be5a --- /dev/null +++ b/docs/DOCKER.md @@ -0,0 +1,3 @@ +# Telegraf Docker Images + +Docker images for Telegraf are kept in the [influxdata/influxdata-docker](https://github.com/influxdata/influxdata-docker/tree/master/telegraf) repo. diff --git a/scripts/alpine.docker b/scripts/alpine.docker deleted file mode 100644 index 84cfcac2268a0..0000000000000 --- a/scripts/alpine.docker +++ /dev/null @@ -1,18 +0,0 @@ -FROM golang:1.17.2 as builder -WORKDIR /go/src/github.com/influxdata/telegraf - -COPY . /go/src/github.com/influxdata/telegraf -RUN CGO_ENABLED=0 make go-install - -FROM alpine:3.12 -RUN echo 'hosts: files dns' >> /etc/nsswitch.conf -RUN apk add --no-cache iputils ca-certificates net-snmp-tools procps lm_sensors && \ - update-ca-certificates -COPY --from=builder /go/bin/* /usr/bin/ -COPY etc/telegraf.conf /etc/telegraf/telegraf.conf - -EXPOSE 8125/udp 8092/udp 8094 - -COPY scripts/docker-entrypoint.sh /entrypoint.sh -ENTRYPOINT ["/entrypoint.sh"] -CMD ["telegraf"] diff --git a/scripts/buster.docker b/scripts/buster.docker deleted file mode 100644 index 17b0cb581cc92..0000000000000 --- a/scripts/buster.docker +++ /dev/null @@ -1,15 +0,0 @@ -FROM golang:1.17.2-buster as builder -WORKDIR /go/src/github.com/influxdata/telegraf - -COPY . /go/src/github.com/influxdata/telegraf -RUN make go-install - -FROM buildpack-deps:buster-curl -COPY --from=builder /go/bin/* /usr/bin/ -COPY etc/telegraf.conf /etc/telegraf/telegraf.conf - -EXPOSE 8125/udp 8092/udp 8094 - -COPY scripts/docker-entrypoint.sh /entrypoint.sh -ENTRYPOINT ["/entrypoint.sh"] -CMD ["telegraf"] diff --git a/scripts/docker-entrypoint.sh b/scripts/docker-entrypoint.sh deleted file mode 100755 index 6e7580b21a92f..0000000000000 --- a/scripts/docker-entrypoint.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -e - -if [ "${1:0:1}" = '-' ]; then - set -- telegraf "$@" -fi - -exec "$@" diff --git a/scripts/stretch.docker b/scripts/stretch.docker deleted file mode 100644 index 39c6e6c1a49d3..0000000000000 --- a/scripts/stretch.docker +++ /dev/null @@ -1,15 +0,0 @@ -FROM golang:1.14.9-stretch as builder -WORKDIR /go/src/github.com/influxdata/telegraf - -COPY . /go/src/github.com/influxdata/telegraf -RUN make go-install - -FROM buildpack-deps:stretch-curl -COPY --from=builder /go/bin/* /usr/bin/ -COPY etc/telegraf.conf /etc/telegraf/telegraf.conf - -EXPOSE 8125/udp 8092/udp 8094 - -COPY scripts/docker-entrypoint.sh /entrypoint.sh -ENTRYPOINT ["/entrypoint.sh"] -CMD ["telegraf"] From c8bea6cba4e56b66b75881af1629aee46ef677e4 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 29 Oct 2021 15:46:09 -0600 Subject: [PATCH 728/761] fix: remove release.sh script (#10030) (cherry picked from commit 43017559fa77149a432f1e842ae9eab9db9b7441) --- scripts/release.sh | 214 --------------------------------------------- 1 file changed, 214 deletions(-) delete mode 100644 scripts/release.sh diff --git a/scripts/release.sh b/scripts/release.sh deleted file mode 100644 index 22cac0a09cf53..0000000000000 --- a/scripts/release.sh +++ /dev/null @@ -1,214 +0,0 @@ -#!/bin/sh -# -# usage: release.sh BUILD_NUM -# -# Requirements: -# - curl -# - jq -# - sha256sum -# - awscli -# - gpg -# -# CIRCLE_TOKEN set to a CircleCI API token that can list the artifacts. -# -# AWS cli setup to be able to write to the BUCKET. -# -# GPG setup with a signing key. - -BUILD_NUM="${1:?usage: release.sh BUILD_NUM}" -BUCKET="${2:-dl.influxdata.com/telegraf/releases}" - -: ${CIRCLE_TOKEN:?"Must set CIRCLE_TOKEN"} - -tmpdir="$(mktemp -d -t telegraf.XXXXXXXXXX)" - -on_exit() { - rm -rf "$tmpdir" -} -trap on_exit EXIT - -echo "${tmpdir}" -cd "${tmpdir}" || exit 1 - -curl -s -S -L -H Circle-Token:${CIRCLE_TOKEN} \ - "https://circleci.com/api/v2/project/gh/influxdata/telegraf/${BUILD_NUM}/artifacts" \ - -o artifacts || exit 1 - -cat artifacts | jq -r '.items[] | "\(.url) \(.path|ltrimstr("build/dist/"))"' > manifest - -while read url path; -do - echo $url - curl -s -S -L -o "$path" "$url" && - sha256sum "$path" > "$path.DIGESTS" && - gpg --armor --detach-sign "$path.DIGESTS" && - gpg --armor --detach-sign "$path" || exit 1 -done < manifest - -echo -cat *.DIGESTS -echo - -arch() { - case ${1} in - *i386.*) - echo i386;; - *armel.*) - echo armel;; - *armv6hl.*) - echo armv6hl;; - *armhf.*) - echo armhf;; - *arm64.* | *aarch64.*) - echo arm64;; - *amd64.* | *x86_64.*) - echo amd64;; - *s390x.*) - echo s390x;; - *ppc64le.*) - echo ppc64le;; - *mipsel.*) - echo mipsel;; - *mips.*) - echo mips;; - *) - echo unknown - esac -} - -platform() { - case ${1} in - *".rpm") - echo Centos;; - *".deb") - echo Debian;; - *"linux"*) - echo Linux;; - *"freebsd"*) - echo FreeBSD;; - *"darwin"*) - echo Mac OS X;; - *"windows"*) - echo Windows;; - *) - echo unknown;; - esac -} - -echo "Arch | Platform | Package | SHA256" -echo "---| --- | --- | ---" -while read url path; -do - echo "$(arch ${path}) | $(platform ${path}) | [\`${path}\`](https://dl.influxdata.com/telegraf/releases/${path}) | \`$(sha256sum ${path} | cut -f1 -d' ')\`" -done < manifest -echo "" - -package="$(grep *_darwin_amd64.dmg manifest | cut -f2 -d' ')" -cat -< Date: Mon, 1 Nov 2021 19:53:23 +0100 Subject: [PATCH 729/761] fix: correct timezone in intel rdt plugin (#10026) (cherry picked from commit 317dd38af32e410d559207a36458695156f0883b) --- plugins/inputs/intel_rdt/intel_rdt.go | 38 +++-- plugins/inputs/intel_rdt/intel_rdt_test.go | 17 +- plugins/inputs/intel_rdt/publisher.go | 100 +++++------ plugins/inputs/intel_rdt/publisher_test.go | 182 ++++++++++----------- 4 files changed, 170 insertions(+), 167 deletions(-) diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index 486a13c98c535..d354bb855aacf 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -66,6 +66,12 @@ type processMeasurement struct { measurement string } +type splitCSVLine struct { + timeValue string + metricsValues []string + coreOrPIDsValues []string +} + // All gathering is done in the Start function func (r *IntelRDT) Gather(_ telegraf.Accumulator) error { return nil @@ -230,8 +236,8 @@ func (r *IntelRDT) associateProcessesWithPIDs(providedProcesses []string) (map[s } for _, availableProcess := range availableProcesses { if choice.Contains(availableProcess.Name, providedProcesses) { - PID := availableProcess.PID - mapProcessPIDs[availableProcess.Name] = mapProcessPIDs[availableProcess.Name] + fmt.Sprintf("%d", PID) + "," + pid := availableProcess.PID + mapProcessPIDs[availableProcess.Name] = mapProcessPIDs[availableProcess.Name] + fmt.Sprintf("%d", pid) + "," } } for key := range mapProcessPIDs { @@ -258,7 +264,7 @@ func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAss r.wg.Add(1) defer r.wg.Done() - cmd := exec.Command(r.PqosPath, append(args)...) + cmd := exec.Command(r.PqosPath, args...) if r.UseSudo { // run pqos with `/bin/sh -c "sudo /path/to/pqos ..."` @@ -327,13 +333,13 @@ func (r *IntelRDT) processOutput(cmdReader io.ReadCloser, processesPIDsAssociati if len(r.Processes) != 0 { newMetric := processMeasurement{} - PIDs, err := findPIDsInMeasurement(out) + pids, err := findPIDsInMeasurement(out) if err != nil { r.errorChan <- err break } for processName, PIDsProcess := range processesPIDsAssociation { - if PIDs == PIDsProcess { + if pids == PIDsProcess { newMetric.name = processName newMetric.measurement = out } @@ -482,29 +488,29 @@ func validateAndParseCores(coreStr string) ([]int, error) { func findPIDsInMeasurement(measurements string) (string, error) { // to distinguish PIDs from Cores (PIDs should be in quotes) var insideQuoteRegex = regexp.MustCompile(`"(.*?)"`) - PIDsMatch := insideQuoteRegex.FindStringSubmatch(measurements) - if len(PIDsMatch) < 2 { + pidsMatch := insideQuoteRegex.FindStringSubmatch(measurements) + if len(pidsMatch) < 2 { return "", fmt.Errorf("cannot find PIDs in measurement line") } - PIDs := PIDsMatch[1] - return PIDs, nil + pids := pidsMatch[1] + return pids, nil } -func splitCSVLineIntoValues(line string) (timeValue string, metricsValues, coreOrPIDsValues []string, err error) { +func splitCSVLineIntoValues(line string) (splitCSVLine, error) { values, err := splitMeasurementLine(line) if err != nil { - return "", nil, nil, err + return splitCSVLine{}, err } - timeValue = values[0] + timeValue := values[0] // Because pqos csv format is broken when many cores are involved in PID or // group of PIDs, there is need to work around it. E.g.: // Time,PID,Core,IPC,LLC Misses,LLC[KB],MBL[MB/s],MBR[MB/s],MBT[MB/s] // 2020-08-12 13:34:36,"45417,29170,",37,44,0.00,0,0.0,0.0,0.0,0.0 - metricsValues = values[len(values)-numberOfMetrics:] - coreOrPIDsValues = values[1 : len(values)-numberOfMetrics] + metricsValues := values[len(values)-numberOfMetrics:] + coreOrPIDsValues := values[1 : len(values)-numberOfMetrics] - return timeValue, metricsValues, coreOrPIDsValues, nil + return splitCSVLine{timeValue, metricsValues, coreOrPIDsValues}, nil } func validateInterval(interval int32) error { @@ -523,7 +529,7 @@ func splitMeasurementLine(line string) ([]string, error) { } func parseTime(value string) (time.Time, error) { - timestamp, err := time.Parse(timestampFormat, value) + timestamp, err := time.ParseInLocation(timestampFormat, value, time.Local) if err != nil { return time.Time{}, err } diff --git a/plugins/inputs/intel_rdt/intel_rdt_test.go b/plugins/inputs/intel_rdt/intel_rdt_test.go index 1eecbc5018125..18dd2e93aa1c1 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_test.go +++ b/plugins/inputs/intel_rdt/intel_rdt_test.go @@ -52,18 +52,18 @@ func TestSplitCSVLineIntoValues(t *testing.T) { expectedMetricsValue := []string{"0.00", "0", "0.0", "0.0", "0.0", "0.0"} expectedCoreOrPidsValue := []string{"\"45417", "29170\"", "37", "44"} - timeValue, metricsValue, coreOrPidsValue, err := splitCSVLineIntoValues(line) + splitCSV, err := splitCSVLineIntoValues(line) assert.Nil(t, err) - assert.Equal(t, expectedTimeValue, timeValue) - assert.Equal(t, expectedMetricsValue, metricsValue) - assert.Equal(t, expectedCoreOrPidsValue, coreOrPidsValue) + assert.Equal(t, expectedTimeValue, splitCSV.timeValue) + assert.Equal(t, expectedMetricsValue, splitCSV.metricsValues) + assert.Equal(t, expectedCoreOrPidsValue, splitCSV.coreOrPIDsValues) wrongLine := "2020-08-12 13:34:36,37,44,0.00,0,0.0" - timeValue, metricsValue, coreOrPidsValue, err = splitCSVLineIntoValues(wrongLine) + splitCSV, err = splitCSVLineIntoValues(wrongLine) assert.NotNil(t, err) - assert.Equal(t, "", timeValue) - assert.Nil(t, nil, metricsValue) - assert.Nil(t, nil, coreOrPidsValue) + assert.Equal(t, "", splitCSV.timeValue) + assert.Nil(t, nil, splitCSV.metricsValues) + assert.Nil(t, nil, splitCSV.coreOrPIDsValues) } func TestFindPIDsInMeasurement(t *testing.T) { @@ -107,7 +107,6 @@ func TestCreateArgsCores(t *testing.T) { assert.EqualValues(t, expected, result) cores = []string{"1,2,3", "4,5,6"} - expected = "--mon-core=" expectedPrefix := "--mon-core=" expectedSubstring := "all:[1,2,3];mbt:[1,2,3];" expectedSubstring2 := "all:[4,5,6];mbt:[4,5,6];" diff --git a/plugins/inputs/intel_rdt/publisher.go b/plugins/inputs/intel_rdt/publisher.go index a567e1aacb1fa..4fdb91dc7b128 100644 --- a/plugins/inputs/intel_rdt/publisher.go +++ b/plugins/inputs/intel_rdt/publisher.go @@ -5,12 +5,26 @@ package intel_rdt import ( "context" + "errors" "strings" "time" "github.com/influxdata/telegraf" ) +type parsedCoresMeasurement struct { + cores string + values []float64 + time time.Time +} + +type parsedProcessMeasurement struct { + process string + cores string + values []float64 + time time.Time +} + // Publisher for publish new RDT metrics to telegraf accumulator type Publisher struct { acc telegraf.Accumulator @@ -50,48 +64,48 @@ func (p *Publisher) publish(ctx context.Context) { } func (p *Publisher) publishCores(measurement string) { - coresString, values, timestamp, err := parseCoresMeasurement(measurement) + parsedCoresMeasurement, err := parseCoresMeasurement(measurement) if err != nil { p.errChan <- err } - p.addToAccumulatorCores(coresString, values, timestamp) + p.addToAccumulatorCores(parsedCoresMeasurement) } func (p *Publisher) publishProcess(measurement processMeasurement) { - process, coresString, values, timestamp, err := parseProcessesMeasurement(measurement) + parsedProcessMeasurement, err := parseProcessesMeasurement(measurement) if err != nil { p.errChan <- err } - p.addToAccumulatorProcesses(process, coresString, values, timestamp) + p.addToAccumulatorProcesses(parsedProcessMeasurement) } -func parseCoresMeasurement(measurements string) (string, []float64, time.Time, error) { +func parseCoresMeasurement(measurements string) (parsedCoresMeasurement, error) { var values []float64 - timeValue, metricsValues, cores, err := splitCSVLineIntoValues(measurements) + splitCSV, err := splitCSVLineIntoValues(measurements) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } - timestamp, err := parseTime(timeValue) + timestamp, err := parseTime(splitCSV.timeValue) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } // change string slice to one string and separate it by coma - coresString := strings.Join(cores, ",") + coresString := strings.Join(splitCSV.coreOrPIDsValues, ",") // trim unwanted quotes coresString = strings.Trim(coresString, "\"") - for _, metric := range metricsValues { + for _, metric := range splitCSV.metricsValues { parsedValue, err := parseFloat(metric) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } values = append(values, parsedValue) } - return coresString, values, timestamp, nil + return parsedCoresMeasurement{coresString, values, timestamp}, nil } -func (p *Publisher) addToAccumulatorCores(cores string, metricsValues []float64, timestamp time.Time) { - for i, value := range metricsValues { +func (p *Publisher) addToAccumulatorCores(measurement parsedCoresMeasurement) { + for i, value := range measurement.values { if p.shortenedMetrics { //0: "IPC" //1: "LLC_Misses" @@ -102,41 +116,47 @@ func (p *Publisher) addToAccumulatorCores(cores string, metricsValues []float64, tags := map[string]string{} fields := make(map[string]interface{}) - tags["cores"] = cores + tags["cores"] = measurement.cores tags["name"] = pqosMetricOrder[i] fields["value"] = value - p.acc.AddFields("rdt_metric", fields, tags, timestamp) + p.acc.AddFields("rdt_metric", fields, tags, measurement.time) } } -func parseProcessesMeasurement(measurement processMeasurement) (string, string, []float64, time.Time, error) { - var values []float64 - timeValue, metricsValues, coreOrPidsValues, pids, err := parseProcessMeasurement(measurement.measurement) +func parseProcessesMeasurement(measurement processMeasurement) (parsedProcessMeasurement, error) { + splitCSV, err := splitCSVLineIntoValues(measurement.measurement) + if err != nil { + return parsedProcessMeasurement{}, err + } + pids, err := findPIDsInMeasurement(measurement.measurement) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err + } + lenOfPIDs := len(strings.Split(pids, ",")) + if lenOfPIDs > len(splitCSV.coreOrPIDsValues) { + return parsedProcessMeasurement{}, errors.New("detected more pids (quoted) than actual number of pids in csv line") } - timestamp, err := parseTime(timeValue) + timestamp, err := parseTime(splitCSV.timeValue) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err } actualProcess := measurement.name - lenOfPids := len(strings.Split(pids, ",")) - cores := coreOrPidsValues[lenOfPids:] - coresString := strings.Trim(strings.Join(cores, ","), `"`) + cores := strings.Trim(strings.Join(splitCSV.coreOrPIDsValues[lenOfPIDs:], ","), `"`) - for _, metric := range metricsValues { + var values []float64 + for _, metric := range splitCSV.metricsValues { parsedValue, err := parseFloat(metric) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err } values = append(values, parsedValue) } - return actualProcess, coresString, values, timestamp, nil + return parsedProcessMeasurement{actualProcess, cores, values, timestamp}, nil } -func (p *Publisher) addToAccumulatorProcesses(process string, cores string, metricsValues []float64, timestamp time.Time) { - for i, value := range metricsValues { +func (p *Publisher) addToAccumulatorProcesses(measurement parsedProcessMeasurement) { + for i, value := range measurement.values { if p.shortenedMetrics { //0: "IPC" //1: "LLC_Misses" @@ -147,23 +167,11 @@ func (p *Publisher) addToAccumulatorProcesses(process string, cores string, metr tags := map[string]string{} fields := make(map[string]interface{}) - tags["process"] = process - tags["cores"] = cores + tags["process"] = measurement.process + tags["cores"] = measurement.cores tags["name"] = pqosMetricOrder[i] fields["value"] = value - p.acc.AddFields("rdt_metric", fields, tags, timestamp) - } -} - -func parseProcessMeasurement(measurements string) (string, []string, []string, string, error) { - timeValue, metricsValues, coreOrPidsValues, err := splitCSVLineIntoValues(measurements) - if err != nil { - return "", nil, nil, "", err - } - pids, err := findPIDsInMeasurement(measurements) - if err != nil { - return "", nil, nil, "", err + p.acc.AddFields("rdt_metric", fields, tags, measurement.time) } - return timeValue, metricsValues, coreOrPidsValues, pids, nil } diff --git a/plugins/inputs/intel_rdt/publisher_test.go b/plugins/inputs/intel_rdt/publisher_test.go index 7db71e9ac5afa..2529a2235a1b9 100644 --- a/plugins/inputs/intel_rdt/publisher_test.go +++ b/plugins/inputs/intel_rdt/publisher_test.go @@ -37,29 +37,29 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBT"]) expectedCores := "37,44" - expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.Nil(t, err) - assert.Equal(t, expectedCores, resultCoresString) - assert.Equal(t, expectedTimestamp, resultTimestamp) - assert.Equal(t, resultValues[0], metricsValues["IPC"]) - assert.Equal(t, resultValues[1], metricsValues["LLC_Misses"]) - assert.Equal(t, resultValues[2], metricsValues["LLC"]) - assert.Equal(t, resultValues[3], metricsValues["MBL"]) - assert.Equal(t, resultValues[4], metricsValues["MBR"]) - assert.Equal(t, resultValues[5], metricsValues["MBT"]) + assert.Equal(t, expectedCores, result.cores) + assert.Equal(t, expectedTimestamp, result.time) + assert.Equal(t, result.values[0], metricsValues["IPC"]) + assert.Equal(t, result.values[1], metricsValues["LLC_Misses"]) + assert.Equal(t, result.values[2], metricsValues["LLC"]) + assert.Equal(t, result.values[3], metricsValues["MBL"]) + assert.Equal(t, result.values[4], metricsValues["MBR"]) + assert.Equal(t, result.values[5], metricsValues["MBT"]) }) t.Run("not valid measurement string", func(t *testing.T) { measurement := "not, valid, measurement" - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) t.Run("not valid values string", func(t *testing.T) { measurement := fmt.Sprintf("%s,%s,%s,%s,%f,%f,%f,%f", @@ -72,12 +72,12 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBR"], metricsValues["MBT"]) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) t.Run("not valid timestamp format", func(t *testing.T) { invalidTimestamp := "2020-08-12-21 13:34:" @@ -91,12 +91,12 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBR"], metricsValues["MBT"]) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) } @@ -119,44 +119,36 @@ func TestParseProcessesMeasurement(t *testing.T) { metricsValues["MBT"]) expectedCores := "37,44" - expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) newMeasurement := processMeasurement{ name: processName, measurement: measurement, } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) + result, err := parseProcessesMeasurement(newMeasurement) assert.Nil(t, err) - assert.Equal(t, processName, actualProcess) - assert.Equal(t, expectedCores, resultCoresString) - assert.Equal(t, expectedTimestamp, resultTimestamp) - assert.Equal(t, resultValues[0], metricsValues["IPC"]) - assert.Equal(t, resultValues[1], metricsValues["LLC_Misses"]) - assert.Equal(t, resultValues[2], metricsValues["LLC"]) - assert.Equal(t, resultValues[3], metricsValues["MBL"]) - assert.Equal(t, resultValues[4], metricsValues["MBR"]) - assert.Equal(t, resultValues[5], metricsValues["MBT"]) + assert.Equal(t, processName, result.process) + assert.Equal(t, expectedCores, result.cores) + assert.Equal(t, expectedTimestamp, result.time) + assert.Equal(t, result.values[0], metricsValues["IPC"]) + assert.Equal(t, result.values[1], metricsValues["LLC_Misses"]) + assert.Equal(t, result.values[2], metricsValues["LLC"]) + assert.Equal(t, result.values[3], metricsValues["MBL"]) + assert.Equal(t, result.values[4], metricsValues["MBR"]) + assert.Equal(t, result.values[5], metricsValues["MBT"]) }) - t.Run("not valid measurement string", func(t *testing.T) { - processName := "process_name" - measurement := "invalid,measurement,format" - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) - t.Run("not valid timestamp format", func(t *testing.T) { - invalidTimestamp := "2020-20-20-31" - measurement := fmt.Sprintf("%s,%s,%s,%f,%f,%f,%f,%f,%f", + invalidTimestamp := "2020-20-20-31" + negativeTests := []struct { + name string + measurement string + }{{ + name: "not valid measurement string", + measurement: "invalid,measurement,format", + }, { + name: "not valid timestamp format", + measurement: fmt.Sprintf("%s,%s,%s,%f,%f,%f,%f,%f,%f", invalidTimestamp, pids, cores, @@ -165,44 +157,42 @@ func TestParseProcessesMeasurement(t *testing.T) { metricsValues["LLC"], metricsValues["MBL"], metricsValues["MBR"], - metricsValues["MBT"]) - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) - - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) - t.Run("not valid values string", func(t *testing.T) { - measurement := fmt.Sprintf("%s,%s,%s,%s,%s,%f,%f,%f,%f", - timestamp, - pids, - cores, - "1##", - "da", - metricsValues["LLC"], - metricsValues["MBL"], - metricsValues["MBR"], - metricsValues["MBT"]) - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) + metricsValues["MBT"]), + }, + { + name: "not valid values string", + measurement: fmt.Sprintf("%s,%s,%s,%s,%s,%f,%f,%f,%f", + timestamp, + pids, + cores, + "1##", + "da", + metricsValues["LLC"], + metricsValues["MBL"], + metricsValues["MBR"], + metricsValues["MBT"]), + }, + { + name: "not valid csv line with quotes", + measurement: "0000-08-02 0:00:00,,\",,,,,,,,,,,,,,,,,,,,,,,,\",,", + }, + } - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) + for _, test := range negativeTests { + t.Run(test.name, func(t *testing.T) { + newMeasurement := processMeasurement{ + name: processName, + measurement: test.measurement, + } + result, err := parseProcessesMeasurement(newMeasurement) + + assert.NotNil(t, err) + assert.Equal(t, "", result.process) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) + }) + } } func TestAddToAccumulatorCores(t *testing.T) { @@ -212,9 +202,9 @@ func TestAddToAccumulatorCores(t *testing.T) { cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorCores(cores, metricsValues, timestamp) + publisher.addToAccumulatorCores(parsedCoresMeasurement{cores, metricsValues, timestamp}) for _, test := range testCoreMetrics { acc.AssertContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -226,9 +216,9 @@ func TestAddToAccumulatorCores(t *testing.T) { cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorCores(cores, metricsValues, timestamp) + publisher.addToAccumulatorCores(parsedCoresMeasurement{cores, metricsValues, timestamp}) for _, test := range testCoreMetricsShortened { acc.AssertDoesNotContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -244,9 +234,9 @@ func TestAddToAccumulatorProcesses(t *testing.T) { process := "process_name" cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorProcesses(process, cores, metricsValues, timestamp) + publisher.addToAccumulatorProcesses(parsedProcessMeasurement{process, cores, metricsValues, timestamp}) for _, test := range testCoreProcesses { acc.AssertContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -259,9 +249,9 @@ func TestAddToAccumulatorProcesses(t *testing.T) { process := "process_name" cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorProcesses(process, cores, metricsValues, timestamp) + publisher.addToAccumulatorProcesses(parsedProcessMeasurement{process, cores, metricsValues, timestamp}) for _, test := range testCoreProcessesShortened { acc.AssertDoesNotContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) From 847d579673de5b3f89eddbd229a0f408d2acf828 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Mon, 1 Nov 2021 13:45:21 -0600 Subject: [PATCH 730/761] fix: update influxdb input schema documentation (#10029) (cherry picked from commit 934db67c2b820da97cbc504c5f2d6815596a0236) --- plugins/inputs/influxdb/README.md | 264 ++++++++++++++++++++++++------ 1 file changed, 216 insertions(+), 48 deletions(-) diff --git a/plugins/inputs/influxdb/README.md b/plugins/inputs/influxdb/README.md index 9a2db484601fd..8ba686aab1bd1 100644 --- a/plugins/inputs/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -41,54 +41,222 @@ InfluxDB-formatted endpoints. See below for more information. **Note:** The measurements and fields included in this plugin are dynamically built from the InfluxDB source, and may vary between versions: -- influxdb - - n_shards: The total number of shards in the specified database. -- influxdb_ae _(Enterprise Only)_ : Statistics related to the Anti-Entropy (AE) engine in InfluxDB Enterprise clusters. -- influxdb_cluster _(Enterprise Only)_ : Statistics related to the clustering features of the data nodes in InfluxDB Enterprise clusters. -- influxdb_cq: The metrics related to continuous queries (CQs). -- influxdb_database: The database metrics are being collected from. -- influxdb_hh _(Enterprise Only)_ : Events resulting in new hinted handoff (HH) processors in InfluxDB Enterprise clusters. -- influxdb_hh_database _(Enterprise Only)_ : Aggregates all hinted handoff queues for a single database and node. -- influxdb_hh_processor _(Enterprise Only)_ : Statistics stored for a single queue (shard). -- influxdb_httpd: The URL to listen for network requests. By default, `http://localhost:8086/debug/var`. -- influxdb_measurement: The measurement that metrics are collected from. -- influxdb_memstats: Statistics about the memory allocator in the specified database. - - heap_inuse: The number of bytes in in-use spans. - - heap_released: The number of bytes of physical memory returned to the OS. - - mspan_inuse: The number of bytes in in-use mspans. - - total_alloc: The cumulative bytes allocated for heap objects. - - sys: The total number of bytes of memory obtained from the OS. Measures the virtual address space reserved by the Go runtime for the heap, stacks, and other internal data structures. - - mallocs: The total number of heap objects allocated. (The total number of live objects are frees.) - - frees: The cumulative number of freed (live) heap objects. - - heap_idle: The number of bytes of idle heap objects. - - pause_total_ns: The total time garbage collection cycles are paused in nanoseconds. - - lookups: The number of pointer lookups performed by the runtime. Primarily useful for debugging runtime internals. - - heap_sys: The number of bytes of heap memory obtained from the OS. Measures the amount of virtual address space reserved for the heap. - - mcache_sys: The bytes of memory obtained from the OS for mcache structures. - - next_gc: The target heap size of the next garbage collection cycle. - - gc_cpu_fraction: The fraction of CPU time used by the garbage collection cycle. - - other_sys: The number of bytes of memory used other than heap_sys, stacks_sys, mspan_sys, mcache_sys, buckhash_sys, and gc_sys. - - alloc: The currently allocated number of bytes of heap objects. - - stack_inuse: The number of bytes in in-use stacks. - - stack_sys: The total number of bytes of memory obtained from the stack in use. - - buck_hash_sys: The bytes of memory in profiling bucket hash tables. - - gc_sys: The bytes of memory in garbage collection metadata. - - num_gc: The number of completed garbage collection cycles. - - heap_alloc: The size, in bytes, of all heap objects. - - heap_objects: The number of allocated heap objects. - - mspan_sys: The bytes of memory obtained from the OS for mspan. - - mcache_inuse: The bytes of allocated mcache structures. - - last_gc: Time the last garbage collection finished, as nanoseconds since 1970 (the UNIX epoch). -- influxdb_queryExecutor: Query Executor metrics of the InfluxDB engine. -- influxdb_rpc _(Enterprise Only)_ : Statistics are related to the use of RPC calls within InfluxDB Enterprise clusters. -- influxdb_runtime: The shard metrics are collected from. -- influxdb_shard: The shard metrics are collected from. -- influxdb_subscriber: The InfluxDB subscription that metrics are collected from. -- influxdb_tsm1_cache: The TSM cache that metrics are collected from. -- influxdb_tsm1_engine: The TSM storage engine that metrics are collected from. -- influxdb_tsm1_filestore: The TSM file store that metrics are collected from. -- influxdb_tsm1_wal: The TSM Write Ahead Log (WAL) that metrics are collected from. -- influxdb_write: The total writes to the specified database. +- **influxdb_ae** _(Enterprise Only)_ : Statistics related to the Anti-Entropy (AE) engine in InfluxDB Enterprise clusters. + - **bytesRx**: Number of bytes received by the data node. + - **errors**: Total number of anti-entropy jobs that have resulted in errors. + - **jobs**: Total number of jobs executed by the data node. + - **jobsActive**: Number of active (currently executing) jobs. +- **influxdb_cluster** _(Enterprise Only)_ : Statistics related to the clustering features of the data nodes in InfluxDB Enterprise clusters. + - **copyShardReq**: Number of internal requests made to copy a shard from one data node to another. + - **createIteratorReq**: Number of read requests from other data nodes in the cluster. + - **expandSourcesReq**: Number of remote node requests made to find measurements on this node that match a particular regular expression. + - **fieldDimensionsReq**: Number of remote node requests for information about the fields and associated types, and tag keys of measurements on this data node. + - **iteratorCostReq**: Number of internal requests for iterator cost. + - **removeShardReq**: Number of internal requests to delete a shard from this data node. Exclusively incremented by use of the influxd-ctl remove shard command. + - **writeShardFail**: Total number of internal write requests from a remote node that failed. + - **writeShardPointsReq**: Number of points in every internal write request from any remote node, regardless of success. + - **writeShardReq**: Number of internal write requests from a remote data node, regardless of success. +- **influxdb_cq**: Metrics related to continuous queries (CQs). + - **queryFail**: Total number of continuous queries that executed but failed. + - **queryOk**: Total number of continuous queries that executed successfully. +- **influxdb_database**: Database metrics are collected from. + - **numMeasurements**: Current number of measurements in the specified database. + - **numSeries**: Current series cardinality of the specified database. +- **influxdb_hh** _(Enterprise Only)_ : Events resulting in new hinted handoff (HH) processors in InfluxDB Enterprise clusters. + - **writeShardReq**: Number of initial write requests handled by the hinted handoff engine for a remote node. + - **writeShardReqPoints**: Number of write requests for each point in the initial request to the hinted handoff engine for a remote node. +- **influxdb_hh_database** _(Enterprise Only)_ : Aggregates all hinted handoff queues for a single database and node. + - **bytesRead**: Size, in bytes, of points read from the hinted handoff queue and sent to its destination data node. + - **bytesWritten**: Total number of bytes written to the hinted handoff queue. + - **queueBytes**: Total number of bytes remaining in the hinted handoff queue. + - **queueDepth**: Total number of segments in the hinted handoff queue. The HH queue is a sequence of 10MB “segment” files. + - **writeBlocked**: Number of writes blocked because the number of concurrent HH requests exceeds the limit. + - **writeDropped**: Number of writes dropped from the HH queue because the write appeared to be corrupted. + - **writeNodeReq**: Total number of write requests that succeeded in writing a batch to the destination node. + - **writeNodeReqFail**: Total number of write requests that failed in writing a batch of data from the hinted handoff queue to the destination node. + - **writeNodeReqPoints**: Total number of points successfully written from the HH queue to the destination node fr + - **writeShardReq**: Total number of every write batch request enqueued into the hinted handoff queue. + - **writeShardReqPoints**: Total number of points enqueued into the hinted handoff queue. +- **influxdb_hh_processor** _(Enterprise Only)_: Statistics stored for a single queue (shard). + - **bytesRead**: Size, in bytes, of points read from the hinted handoff queue and sent to its destination data node. + - **bytesWritten**: Total number of bytes written to the hinted handoff queue. + - **queueBytes**: Total number of bytes remaining in the hinted handoff queue. + - **queueDepth**: Total number of segments in the hinted handoff queue. The HH queue is a sequence of 10MB “segment” files. + - **writeBlocked**: Number of writes blocked because the number of concurrent HH requests exceeds the limit. + - **writeDropped**: Number of writes dropped from the HH queue because the write appeared to be corrupted. + - **writeNodeReq**: Total number of write requests that succeeded in writing a batch to the destination node. + - **writeNodeReqFail**: Total number of write requests that failed in writing a batch of data from the hinted handoff queue to the destination node. + - **writeNodeReqPoints**: Total number of points successfully written from the HH queue to the destination node fr + - **writeShardReq**: Total number of every write batch request enqueued into the hinted handoff queue. + - **writeShardReqPoints**: Total number of points enqueued into the hinted handoff queue. +- **influxdb_httpd**: Metrics related to the InfluxDB HTTP server. + - **authFail**: Number of HTTP requests that were aborted due to authentication being required, but not supplied or incorrect. + - **clientError**: Number of HTTP responses due to client errors, with a 4XX HTTP status code. + - **fluxQueryReq**: Number of Flux query requests served. + - **fluxQueryReqDurationNs**: Duration (wall-time), in nanoseconds, spent executing Flux query requests. + - **pingReq**: Number of times InfluxDB HTTP server served the /ping HTTP endpoint. + - **pointsWrittenDropped**: Number of points dropped by the storage engine. + - **pointsWrittenFail**: Number of points accepted by the HTTP /write endpoint, but unable to be persisted. + - **pointsWrittenOK**: Number of points successfully accepted and persisted by the HTTP /write endpoint. + - **promReadReq**: Number of read requests to the Prometheus /read endpoint. + - **promWriteReq**: Number of write requests to the Prometheus /write endpoint. + - **queryReq**: Number of query requests. + - **queryReqDurationNs**: Total query request duration, in nanosecond (ns). + - **queryRespBytes**: Total number of bytes returned in query responses. + - **recoveredPanics**: Total number of panics recovered by the HTTP handler. + - **req**: Total number of HTTP requests served. + - **reqActive**: Number of currently active requests. + - **reqDurationNs**: Duration (wall time), in nanoseconds, spent inside HTTP requests. + - **serverError**: Number of HTTP responses due to server errors. + - **statusReq**: Number of status requests served using the HTTP /status endpoint. + - **valuesWrittenOK**: Number of values (fields) successfully accepted and persisted by the HTTP /write endpoint. + - **writeReq**: Number of write requests served using the HTTP /write endpoint. + - **writeReqActive**: Number of currently active write requests. + - **writeReqBytes**: Total number of bytes of line protocol data received by write requests, using the HTTP /write endpoint. + - **writeReqDurationNs**: Duration, in nanoseconds, of write requests served using the /write HTTP endpoint. +- **influxdb_memstats**: Statistics about the memory allocator in the specified database. + - **Alloc**: Number of bytes allocated to heap objects. + - **BuckHashSys**: Number of bytes of memory in profiling bucket hash tables. + - **Frees**: Cumulative count of heap objects freed. + - **GCCPUFraction**: fraction of InfluxDB's available CPU time used by the garbage collector (GC) since InfluxDB started. + - **GCSys**: Number of bytes of memory in garbage collection metadata. + - **HeapAlloc**: Number of bytes of allocated heap objects. + - **HeapIdle**: Number of bytes in idle (unused) spans. + - **HeapInuse**: Number of bytes in in-use spans. + - **HeapObjects**: Number of allocated heap objects. + - **HeapReleased**: Number of bytes of physical memory returned to the OS. + - **HeapSys**: Number of bytes of heap memory obtained from the OS. + - **LastGC**: Time the last garbage collection finished. + - **Lookups**: Number of pointer lookups performed by the runtime. + - **MCacheInuse**: Number of bytes of allocated mcache structures. + - **MCacheSys**: Number of bytes of memory obtained from the OS for mcache structures. + - **MSpanInuse**: Number of bytes of allocated mspan structures. + - **MSpanSys**: Number of bytes of memory obtained from the OS for mspan structures. + - **Mallocs**: Cumulative count of heap objects allocated. + - **NextGC**: Target heap size of the next GC cycle. + - **NumForcedGC**: Number of GC cycles that were forced by the application calling the GC function. + - **NumGC**: Number of completed GC cycles. + - **OtherSys**: Number of bytes of memory in miscellaneous off-heap runtime allocations. + - **PauseTotalNs**: Cumulative nanoseconds in GC stop-the-world pauses since the program started. + - **StackInuse**: Number of bytes in stack spans. + - **StackSys**: Number of bytes of stack memory obtained from the OS. + - **Sys**: Total bytes of memory obtained from the OS. + - **TotalAlloc**: Cumulative bytes allocated for heap objects. +- **influxdb_queryExecutor**: Metrics related to usage of the Query Executor of the InfluxDB engine. + - **queriesActive**: Number of active queries currently being handled. + - **queriesExecuted**: Number of queries executed (started). + - **queriesFinished**: Number of queries that have finished executing. + - **queryDurationNs**: Total duration, in nanoseconds, of executed queries. + - **recoveredPanics**: Number of panics recovered by the Query Executor. +- **influxdb_rpc** _(Enterprise Only)_ : Statistics related to the use of RPC calls within InfluxDB Enterprise clusters. + - **idleStreams**: Number of idle multiplexed streams across all live TCP connections. + - **liveConnections**: Current number of live TCP connections to other nodes. + - **liveStreams**: Current number of live multiplexed streams across all live TCP connections. + - **rpcCalls**: Total number of RPC calls made to remote nodes. + - **rpcFailures**: Total number of RPC failures, which are RPCs that did not recover. + - **rpcReadBytes**: Total number of RPC bytes read. + - **rpcRetries**: Total number of RPC calls that retried at least once. + - **rpcWriteBytes**: Total number of RPC bytes written. + - **singleUse**: Total number of single-use connections opened using Dial. + - **singleUseOpen**: Number of single-use connections currently open. + - **totalConnections**: Total number of TCP connections that have been established. + - **totalStreams**: Total number of streams established. +- **influxdb_runtime**: Subset of memstat record statistics for the Go memory allocator. + - **Alloc**: Currently allocated number of bytes of heap objects. + - **Frees**: Cumulative number of freed (live) heap objects. + - **HeapAlloc**: Size, in bytes, of all heap objects. + - **HeapIdle**: Number of bytes of idle heap objects. + - **HeapInUse**: Number of bytes in in-use spans. + - **HeapObjects**: Number of allocated heap objects. + - **HeapReleased**: Number of bytes of physical memory returned to the OS. + - **HeapSys**: Number of bytes of heap memory obtained from the OS. Measures the amount of virtual address space reserved for the heap. + - **Lookups**: Number of pointer lookups performed by the runtime. Primarily useful for debugging runtime internals. + - **Mallocs**: Total number of heap objects allocated. The total number of live objects is Frees. + - **NumGC**: Number of completed GC (garbage collection) cycles. + - **NumGoroutine**: Total number of Go routines. + - **PauseTotalNs**: Total duration, in nanoseconds, of total GC (garbage collection) pauses. + - **Sys**: Total number of bytes of memory obtained from the OS. Measures the virtual address space reserved by the Go runtime for the heap, stacks, and other internal data structures. + - **TotalAlloc**: Total number of bytes allocated for heap objects. This statistic does not decrease when objects are freed. +- **influxdb_shard**: Metrics related to InfluxDB shards. + - **diskBytes**: Size, in bytes, of the shard, including the size of the data directory and the WAL directory. + - **fieldsCreate**: Number of fields created. + - **indexType**: Type of index inmem or tsi1. + - **n_shards**: Total number of shards in the specified database. + - **seriesCreate**: Number of series created. + - **writeBytes**: Number of bytes written to the shard. + - **writePointsDropped**: Number of requests to write points t dropped from a write. + - **writePointsErr**: Number of requests to write points that failed to be written due to errors. + - **writePointsOk**: Number of points written successfully. + - **writeReq**: Total number of write requests. + - **writeReqErr**: Total number of write requests that failed due to errors. + - **writeReqOk**: Total number of successful write requests. +- **influxdb_subscriber**: InfluxDB subscription metrics. + - **createFailures**: Number of subscriptions that failed to be created. + - **pointsWritten**: Total number of points that were successfully written to subscribers. + - **writeFailures**: Total number of batches that failed to be written to subscribers. +- **influxdb_tsm1_cache**: TSM cache metrics. + - **cacheAgeMs**: Duration, in milliseconds, since the cache was last snapshotted at sample time. + - **cachedBytes**: Total number of bytes that have been written into snapshots. + - **diskBytes**: Size, in bytes, of on-disk snapshots. + - **memBytes**: Size, in bytes, of in-memory cache. + - **snapshotCount**: Current level (number) of active snapshots. + - **WALCompactionTimeMs**: Duration, in milliseconds, that the commit lock is held while compacting snapshots. + - **writeDropped**: Total number of writes dropped due to timeouts. + - **writeErr**: Total number of writes that failed. + - **writeOk**: Total number of successful writes. +- **influxdb_tsm1_engine**: TSM storage engine metrics. + - **cacheCompactionDuration** Duration (wall time), in nanoseconds, spent in cache compactions. + - **cacheCompactionErr** Number of cache compactions that have failed due to errors. + - **cacheCompactions** Total number of cache compactions that have ever run. + - **cacheCompactionsActive** Number of cache compactions that are currently running. + - **tsmFullCompactionDuration** Duration (wall time), in nanoseconds, spent in full compactions. + - **tsmFullCompactionErr** Total number of TSM full compactions that have failed due to errors. + - **tsmFullCompactionQueue** Current number of pending TMS Full compactions. + - **tsmFullCompactions** Total number of TSM full compactions that have ever run. + - **tsmFullCompactionsActive** Number of TSM full compactions currently running. + - **tsmLevel1CompactionDuration** Duration (wall time), in nanoseconds, spent in TSM level 1 compactions. + - **tsmLevel1CompactionErr** Total number of TSM level 1 compactions that have failed due to errors. + - **tsmLevel1CompactionQueue** Current number of pending TSM level 1 compactions. + - **tsmLevel1Compactions** Total number of TSM level 1 compactions that have ever run. + - **tsmLevel1CompactionsActive** Number of TSM level 1 compactions that are currently running. + - **tsmLevel2CompactionDuration** Duration (wall time), in nanoseconds, spent in TSM level 2 compactions. + - **tsmLevel2CompactionErr** Number of TSM level 2 compactions that have failed due to errors. + - **tsmLevel2CompactionQueue** Current number of pending TSM level 2 compactions. + - **tsmLevel2Compactions** Total number of TSM level 2 compactions that have ever run. + - **tsmLevel2CompactionsActive** Number of TSM level 2 compactions that are currently running. + - **tsmLevel3CompactionDuration** Duration (wall time), in nanoseconds, spent in TSM level 3 compactions. + - **tsmLevel3CompactionErr** Number of TSM level 3 compactions that have failed due to errors. + - **tsmLevel3CompactionQueue** Current number of pending TSM level 3 compactions. + - **tsmLevel3Compactions** Total number of TSM level 3 compactions that have ever run. + - **tsmLevel3CompactionsActive** Number of TSM level 3 compactions that are currently running. + - **tsmOptimizeCompactionDuration** Duration (wall time), in nanoseconds, spent during TSM optimize compactions. + - **tsmOptimizeCompactionErr** Total number of TSM optimize compactions that have failed due to errors. + - **tsmOptimizeCompactionQueue** Current number of pending TSM optimize compactions. + - **tsmOptimizeCompactions** Total number of TSM optimize compactions that have ever run. + - **tsmOptimizeCompactionsActive** Number of TSM optimize compactions that are currently running. +- **influxdb_tsm1_filestore**: The TSM file store metrics. + - **diskBytes**: Size, in bytes, of disk usage by the TSM file store. + - **numFiles**: Total number of files in the TSM file store. +- **influxdb_tsm1_wal**: The TSM Write Ahead Log (WAL) metrics. + - **currentSegmentDiskBytes**: Current size, in bytes, of the segment disk. + - **oldSegmentDiskBytes**: Size, in bytes, of the segment disk. + - **writeErr**: Number of writes that failed due to errors. + - **writeOK**: Number of writes that succeeded. +- **influxdb_write**: Metrics related to InfluxDB writes. + - **pointReq**: Total number of points requested to be written. + - **pointReqHH** _(Enterprise only)_: Total number of points received for write by this node and then enqueued into hinted handoff for the destination node. + - **pointReqLocal** _(Enterprise only)_: Total number of point requests that have been attempted to be written into a shard on the same (local) node. + - **pointReqRemote** _(Enterprise only)_: Total number of points received for write by this node but needed to be forwarded into a shard on a remote node. + - **pointsWrittenOK**: Number of points written to the HTTP /write endpoint and persisted successfully. + - **req**: Total number of batches requested to be written. + - **subWriteDrop**: Total number of batches that failed to be sent to the subscription dispatcher. + - **subWriteOk**: Total number of batches successfully sent to the subscription dispatcher. + - **valuesWrittenOK**: Number of values (fields) written to the HTTP /write endpoint and persisted successfully. + - **writeDrop**: Total number of write requests for points that have been dropped due to timestamps not matching any existing retention policies. + - **writeError**: Total number of batches of points that were not successfully written, due to a failure to write to a local or remote shard. + - **writeOk**: Total number of batches of points written at the requested consistency level. + - **writePartial** _(Enterprise only)_: Total number of batches written to at least one node, but did not meet the requested consistency level. + - **writeTimeout**: Total number of write requests that failed to complete within the default write timeout duration. ### Example Output: From 072e74a85be3168f4808183bc3c35a3050b5fe7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 2 Nov 2021 15:42:22 +0100 Subject: [PATCH 731/761] fix: Linter fixes for plugins/inputs/[n-o]* (#10011) (cherry picked from commit e6b107b0621fb3b3ada6670b35f208ab68b76ccd) --- plugins/inputs/nats/nats.go | 9 ++-- plugins/inputs/neptune_apex/neptune_apex.go | 4 +- plugins/inputs/net_response/net_response.go | 25 +++++---- plugins/inputs/nginx/nginx_test.go | 6 +-- .../nginx_plus_api/nginx_plus_api_metrics.go | 10 ++-- .../nginx_upstream_check.go | 12 ++--- plugins/inputs/nsd/nsd.go | 16 +++--- plugins/inputs/nsd/nsd_test.go | 15 +++--- .../inputs/nsq_consumer/nsq_consumer_test.go | 21 ++++---- plugins/inputs/nstat/nstat.go | 12 ++--- plugins/inputs/ntpq/ntpq.go | 2 +- plugins/inputs/ntpq/ntpq_test.go | 30 +++++------ plugins/inputs/opcua/opcua_client.go | 9 ++-- plugins/inputs/opcua/opcua_client_test.go | 23 ++++---- plugins/inputs/openldap/openldap.go | 11 ++-- plugins/inputs/openldap/openldap_test.go | 28 +++++----- plugins/inputs/openntpd/openntpd_test.go | 53 +++++++++---------- plugins/inputs/opensmtpd/opensmtpd_test.go | 11 ++-- .../opentelemetry/opentelemetry_test.go | 21 ++++---- .../inputs/openweathermap/openweathermap.go | 16 +++--- 20 files changed, 169 insertions(+), 165 deletions(-) diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index 7144355096b4e..c9e99824d4de5 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -11,10 +11,11 @@ import ( "path" "time" + gnatsd "github.com/nats-io/nats-server/v2/server" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" - gnatsd "github.com/nats-io/nats-server/v2/server" ) type Nats struct { @@ -41,16 +42,16 @@ func (n *Nats) Description() string { } func (n *Nats) Gather(acc telegraf.Accumulator) error { - url, err := url.Parse(n.Server) + address, err := url.Parse(n.Server) if err != nil { return err } - url.Path = path.Join(url.Path, "varz") + address.Path = path.Join(address.Path, "varz") if n.client == nil { n.client = n.createHTTPClient() } - resp, err := n.client.Get(url.String()) + resp, err := n.client.Get(address.String()) if err != nil { return err } diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index c2bb05384d7c8..a8934bd01ee94 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -245,7 +245,7 @@ func findProbe(probe string, probes []probe) int { // returns a time.Time struct. func parseTime(val string, tz float64) (time.Time, error) { // Magic time constant from https://golang.org/pkg/time/#Parse - const TimeLayout = "01/02/2006 15:04:05 -0700" + const timeLayout = "01/02/2006 15:04:05 -0700" // Timezone offset needs to be explicit sign := '+' @@ -256,7 +256,7 @@ func parseTime(val string, tz float64) (time.Time, error) { // Build a time string with the timezone in a format Go can parse. tzs := fmt.Sprintf("%c%04d", sign, int(math.Abs(tz))*100) ts := fmt.Sprintf("%s %s", val, tzs) - t, err := time.Parse(TimeLayout, ts) + t, err := time.Parse(timeLayout, ts) if err != nil { return time.Now(), fmt.Errorf("unable to parse %q (%v)", ts, err) } diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index a7fcec4353c81..043a3c44760ed 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -17,10 +17,10 @@ type ResultType uint64 const ( Success ResultType = 0 - Timeout = 1 - ConnectionFailed = 2 - ReadFailed = 3 - StringMismatch = 4 + Timeout ResultType = 1 + ConnectionFailed ResultType = 2 + ReadFailed ResultType = 3 + StringMismatch ResultType = 4 ) // NetResponse struct @@ -120,8 +120,8 @@ func (n *NetResponse) TCPGather() (map[string]string, map[string]interface{}, er setResult(ReadFailed, fields, tags, n.Expect) } else { // Looking for string in answer - RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) - find := RegEx.FindString(data) + regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) + find := regEx.FindString(data) if find != "" { setResult(Success, fields, tags, n.Expect) } else { @@ -186,8 +186,8 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er } // Looking for string in answer - RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) - find := RegEx.FindString(string(buf)) + regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) + find := regEx.FindString(string(buf)) if find != "" { setResult(Success, fields, tags, n.Expect) } else { @@ -232,22 +232,25 @@ func (n *NetResponse) Gather(acc telegraf.Accumulator) error { tags := map[string]string{"server": host, "port": port} var fields map[string]interface{} var returnTags map[string]string + // Gather data - if n.Protocol == "tcp" { + switch n.Protocol { + case "tcp": returnTags, fields, err = n.TCPGather() if err != nil { return err } tags["protocol"] = "tcp" - } else if n.Protocol == "udp" { + case "udp": returnTags, fields, err = n.UDPGather() if err != nil { return err } tags["protocol"] = "udp" - } else { + default: return errors.New("bad protocol") } + // Merge the tags for k, v := range returnTags { tags[k] = v diff --git a/plugins/inputs/nginx/nginx_test.go b/plugins/inputs/nginx/nginx_test.go index db30304dcc15a..5a947e7e202e0 100644 --- a/plugins/inputs/nginx/nginx_test.go +++ b/plugins/inputs/nginx/nginx_test.go @@ -8,9 +8,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const nginxSampleResponse = ` @@ -33,7 +33,7 @@ func TestNginxTags(t *testing.T) { for _, url1 := range urls { addr, _ = url.Parse(url1) tagMap := getTags(addr) - assert.Contains(t, tagMap["server"], "localhost") + require.Contains(t, tagMap["server"], "localhost") } } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 5cd7e76aec439..81f747d86d825 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -49,11 +49,11 @@ func addError(acc telegraf.Accumulator, err error) { } func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { - url := fmt.Sprintf("%s/%d/%s", addr.String(), n.APIVersion, path) - resp, err := n.client.Get(url) + address := fmt.Sprintf("%s/%d/%s", addr.String(), n.APIVersion, path) + resp, err := n.client.Get(address) if err != nil { - return nil, fmt.Errorf("error making HTTP request to %s: %s", url, err) + return nil, fmt.Errorf("error making HTTP request to %s: %s", address, err) } defer resp.Body.Close() @@ -64,7 +64,7 @@ func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { // features are either optional, or only available in some versions return nil, errNotFound default: - return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status) + return nil, fmt.Errorf("%s returned HTTP status %s", address, resp.Status) } contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] @@ -77,7 +77,7 @@ func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { return body, nil default: - return nil, fmt.Errorf("%s returned unexpected content type %s", url, contentType) + return nil, fmt.Errorf("%s returned unexpected content type %s", address, contentType) } } diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index 42e0cab62d53e..8ad8cc91e8a9e 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -121,7 +121,7 @@ func (check *NginxUpstreamCheck) createHTTPClient() (*http.Client, error) { } // gatherJSONData query the data source and parse the response JSON -func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) error { +func (check *NginxUpstreamCheck) gatherJSONData(address string, value interface{}) error { var method string if check.Method != "" { method = check.Method @@ -129,7 +129,7 @@ func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) e method = "GET" } - request, err := http.NewRequest(method, url, nil) + request, err := http.NewRequest(method, address, nil) if err != nil { return err } @@ -153,7 +153,7 @@ func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) e if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) - return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body) } err = json.NewDecoder(response.Body).Decode(value) @@ -187,10 +187,10 @@ func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error return nil } -func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegraf.Accumulator) error { +func (check *NginxUpstreamCheck) gatherStatusData(address string, accumulator telegraf.Accumulator) error { checkData := &NginxUpstreamCheckData{} - err := check.gatherJSONData(url, checkData) + err := check.gatherJSONData(address, checkData) if err != nil { return err } @@ -201,7 +201,7 @@ func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegr "type": server.Type, "name": server.Name, "port": strconv.Itoa(int(server.Port)), - "url": url, + "url": address, } fields := map[string]interface{}{ diff --git a/plugins/inputs/nsd/nsd.go b/plugins/inputs/nsd/nsd.go index f75f700eaa2f9..6c8998129cf90 100644 --- a/plugins/inputs/nsd/nsd.go +++ b/plugins/inputs/nsd/nsd.go @@ -61,20 +61,20 @@ func (s *NSD) SampleConfig() string { } // Shell out to nsd_stat and return the output -func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) { +func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, server string, configFile string) (*bytes.Buffer, error) { cmdArgs := []string{"stats_noreset"} - if Server != "" { - host, port, err := net.SplitHostPort(Server) + if server != "" { + host, port, err := net.SplitHostPort(server) if err == nil { - Server = host + "@" + port + server = host + "@" + port } - cmdArgs = append([]string{"-s", Server}, cmdArgs...) + cmdArgs = append([]string{"-s", server}, cmdArgs...) } - if ConfigFile != "" { - cmdArgs = append([]string{"-c", ConfigFile}, cmdArgs...) + if configFile != "" { + cmdArgs = append([]string{"-c", configFile}, cmdArgs...) } cmd := exec.Command(cmdName, cmdArgs...) @@ -119,7 +119,7 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { fieldValue, err := strconv.ParseFloat(value, 64) if err != nil { - acc.AddError(fmt.Errorf("Expected a numerical value for %s = %v", + acc.AddError(fmt.Errorf("expected a numerical value for %s = %v", stat, value)) continue } diff --git a/plugins/inputs/nsd/nsd_test.go b/plugins/inputs/nsd/nsd_test.go index d64cad7dcea63..74f4a14cf96fa 100644 --- a/plugins/inputs/nsd/nsd_test.go +++ b/plugins/inputs/nsd/nsd_test.go @@ -3,16 +3,13 @@ package nsd import ( "bytes" "testing" - "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) -var TestTimeout = config.Duration(time.Second) - func NSDControl(output string) func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil @@ -26,13 +23,13 @@ func TestParseFullOutput(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) + require.NoError(t, err) - assert.True(t, acc.HasMeasurement("nsd")) - assert.True(t, acc.HasMeasurement("nsd_servers")) + require.True(t, acc.HasMeasurement("nsd")) + require.True(t, acc.HasMeasurement("nsd_servers")) - assert.Len(t, acc.Metrics, 2) - assert.Equal(t, 99, acc.NFields()) + require.Len(t, acc.Metrics, 2) + require.Equal(t, 99, acc.NFields()) acc.AssertContainsFields(t, "nsd", parsedFullOutput) acc.AssertContainsFields(t, "nsd_servers", parsedFullOutputServerAsTag) diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go index d5086862bbf7e..4c6d944746440 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go @@ -11,10 +11,11 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testutil" "github.com/nsqio/go-nsq" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" ) // This test is modeled after the kafka consumer integration test @@ -36,7 +37,7 @@ func TestReadsMetricsFromNSQ(t *testing.T) { } addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4155") - newMockNSQD(script, addr.String()) + newMockNSQD(t, script, addr.String()) consumer := &NSQConsumer{ Log: testutil.Logger{}, @@ -76,6 +77,8 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { ticker := time.NewTicker(5 * time.Millisecond) defer ticker.Stop() counter := 0 + + //nolint:gosimple // for-select used on purpose for { select { case <-ticker.C: @@ -89,16 +92,15 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { } } -func newMockNSQD(script []instruction, addr string) *mockNSQD { +func newMockNSQD(t *testing.T, script []instruction, addr string) *mockNSQD { n := &mockNSQD{ script: script, exitChan: make(chan int), } tcpListener, err := net.Listen("tcp", addr) - if err != nil { - log.Fatalf("FATAL: listen (%s) failed - %s", n.tcpAddr.String(), err) - } + require.NoError(t, err, "listen (%s) failed", n.tcpAddr.String()) + n.tcpListener = tcpListener n.tcpAddr = tcpListener.Addr().(*net.TCPAddr) @@ -139,6 +141,7 @@ func (n *mockNSQD) handle(conn net.Conn) { buf := make([]byte, 4) _, err := io.ReadFull(conn, buf) if err != nil { + //nolint:revive // log.Fatalf called intentionally log.Fatalf("ERROR: failed to read protocol version - %s", err) } @@ -171,14 +174,14 @@ func (n *mockNSQD) handle(conn net.Conn) { l := make([]byte, 4) _, err := io.ReadFull(rdr, l) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } size := int32(binary.BigEndian.Uint32(l)) b := make([]byte, size) _, err = io.ReadFull(rdr, b) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } case bytes.Equal(params[0], []byte("RDY")): diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index 4408b8f728579..b5ada855479c9 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -138,10 +138,10 @@ func (ns *Nstat) loadGoodTable(table []byte) map[string]interface{} { if bytes.Equal(fields[i+1], zeroByte) { if !ns.DumpZeros { continue - } else { - entries[string(fields[i])] = int64(0) - continue } + + entries[string(fields[i])] = int64(0) + continue } // the counter is not zero, so parse it. value, err = strconv.ParseInt(string(fields[i+1]), 10, 64) @@ -176,10 +176,10 @@ func (ns *Nstat) loadUglyTable(table []byte) map[string]interface{} { if bytes.Equal(metrics[j], zeroByte) { if !ns.DumpZeros { continue - } else { - entries[string(append(prefix, headers[j]...))] = int64(0) - continue } + + entries[string(append(prefix, headers[j]...))] = int64(0) + continue } // the counter is not zero, so parse it. value, err = strconv.ParseInt(string(metrics[j]), 10, 64) diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index a952783a344a6..6b924fc52298a 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -50,7 +50,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { // Due to problems with a parsing, we have to use regexp expression in order // to remove string that starts from '(' and ends with space // see: https://github.com/influxdata/telegraf/issues/2386 - reg, err := regexp.Compile("\\s+\\([\\S]*") + reg, err := regexp.Compile(`\s+\([\S]*`) if err != nil { return err } diff --git a/plugins/inputs/ntpq/ntpq_test.go b/plugins/inputs/ntpq/ntpq_test.go index b0db77e45784f..54d4e10e717ac 100644 --- a/plugins/inputs/ntpq/ntpq_test.go +++ b/plugins/inputs/ntpq/ntpq_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestSingleNTPQ(t *testing.T) { @@ -20,7 +20,7 @@ func TestSingleNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -49,7 +49,7 @@ func TestBadIntNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -77,7 +77,7 @@ func TestBadFloatNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(2), @@ -105,7 +105,7 @@ func TestDaysNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(172800), @@ -134,7 +134,7 @@ func TestHoursNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(7200), @@ -163,7 +163,7 @@ func TestMinutesNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(120), @@ -192,7 +192,7 @@ func TestBadWhenNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "poll": int64(256), @@ -222,7 +222,7 @@ func TestParserNTPQ(t *testing.T) { n := newNTPQ() n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "poll": int64(64), @@ -285,7 +285,7 @@ func TestMultiNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "delay": float64(54.033), @@ -329,7 +329,7 @@ func TestBadHeaderNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -357,7 +357,7 @@ func TestMissingDelayColumnNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -378,13 +378,13 @@ func TestMissingDelayColumnNTPQ(t *testing.T) { func TestFailedNTPQ(t *testing.T) { tt := tester{ ret: []byte(singleNTPQ), - err: fmt.Errorf("Test failure"), + err: fmt.Errorf("test failure"), } n := newNTPQ() n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) } // It is possible for the output of ntqp to be missing the refid column. This diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index 8dec41eb343e3..213dbd615a939 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -10,6 +10,7 @@ import ( "github.com/gopcua/opcua" "github.com/gopcua/opcua/ua" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" @@ -229,14 +230,14 @@ func (o *OpcUA) validateEndpoint() error { //search security policy type switch o.SecurityPolicy { case "None", "Basic128Rsa15", "Basic256", "Basic256Sha256", "auto": - break + // Valid security policy type - do nothing. default: return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityPolicy, o.MetricName) } //search security mode type switch o.SecurityMode { case "None", "Sign", "SignAndEncrypt", "auto": - break + // Valid security mode type - do nothing. default: return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityMode, o.MetricName) } @@ -371,7 +372,7 @@ func (o *OpcUA) validateOPCTags() error { //search identifier type switch node.tag.IdentifierType { case "s", "i", "g", "b": - break + // Valid identifier type - do nothing. default: return fmt.Errorf("invalid identifier type '%s' in '%s'", node.tag.IdentifierType, node.tag.FieldName) } @@ -455,7 +456,7 @@ func (o *OpcUA) setupOptions() error { if o.Certificate == "" && o.PrivateKey == "" { if o.SecurityPolicy != "None" || o.SecurityMode != "None" { - o.Certificate, o.PrivateKey, err = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, (365 * 24 * time.Hour)) + o.Certificate, o.PrivateKey, err = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, 365*24*time.Hour) if err != nil { return err } diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index 4c7805578b114..27bfc1ecf4342 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -6,11 +6,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type OPCTags struct { @@ -137,30 +136,30 @@ nodes = [{name="name4", identifier="4000", tags=[["tag1", "override"]]}] func TestTagsSliceToMap(t *testing.T) { m, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"baz", "bat"}}) - assert.NoError(t, err) - assert.Len(t, m, 2) - assert.Equal(t, m["foo"], "bar") - assert.Equal(t, m["baz"], "bat") + require.NoError(t, err) + require.Len(t, m, 2) + require.Equal(t, m["foo"], "bar") + require.Equal(t, m["baz"], "bat") } func TestTagsSliceToMap_twoStrings(t *testing.T) { var err error _, err = tagsSliceToMap([][]string{{"foo", "bar", "baz"}}) - assert.Error(t, err) + require.Error(t, err) _, err = tagsSliceToMap([][]string{{"foo"}}) - assert.Error(t, err) + require.Error(t, err) } func TestTagsSliceToMap_dupeKey(t *testing.T) { _, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"foo", "bat"}}) - assert.Error(t, err) + require.Error(t, err) } func TestTagsSliceToMap_empty(t *testing.T) { _, err := tagsSliceToMap([][]string{{"foo", ""}}) - assert.Equal(t, fmt.Errorf("tag 1 has empty value"), err) + require.Equal(t, fmt.Errorf("tag 1 has empty value"), err) _, err = tagsSliceToMap([][]string{{"", "bar"}}) - assert.Equal(t, fmt.Errorf("tag 1 has empty name"), err) + require.Equal(t, fmt.Errorf("tag 1 has empty name"), err) } func TestValidateOPCTags(t *testing.T) { diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index f3f7b47cf597c..7a3f766718c52 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -5,10 +5,11 @@ import ( "strconv" "strings" + "gopkg.in/ldap.v3" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "gopkg.in/ldap.v3" ) type Openldap struct { @@ -110,13 +111,15 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error { acc.AddError(err) return nil } - if o.TLS == "ldaps" { + + switch o.TLS { + case "ldaps": l, err = ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", o.Host, o.Port), tlsConfig) if err != nil { acc.AddError(err) return nil } - } else if o.TLS == "starttls" { + case "starttls": l, err = ldap.Dial("tcp", fmt.Sprintf("%s:%d", o.Host, o.Port)) if err != nil { acc.AddError(err) @@ -127,7 +130,7 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error { acc.AddError(err) return nil } - } else { + default: acc.AddError(fmt.Errorf("invalid setting for ssl: %s", o.TLS)) return nil } diff --git a/plugins/inputs/openldap/openldap_test.go b/plugins/inputs/openldap/openldap_test.go index b3e171b22e9db..ac9e810f0b49e 100644 --- a/plugins/inputs/openldap/openldap_test.go +++ b/plugins/inputs/openldap/openldap_test.go @@ -4,10 +4,10 @@ import ( "strconv" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/ldap.v3" + + "github.com/influxdata/telegraf/testutil" ) func TestOpenldapMockResult(t *testing.T) { @@ -45,9 +45,9 @@ func TestOpenldapNoConnectionIntegration(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) - require.NoError(t, err) // test that we didn't return an error - assert.Zero(t, acc.NFields()) // test that we didn't return any fields - assert.NotEmpty(t, acc.Errors) // test that we set an error + require.NoError(t, err) // test that we didn't return an error + require.Zero(t, acc.NFields()) // test that we didn't return any fields + require.NotEmpty(t, acc.Errors) // test that we set an error } func TestOpenldapGeneratesMetricsIntegration(t *testing.T) { @@ -108,9 +108,9 @@ func TestOpenldapInvalidSSLIntegration(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) - require.NoError(t, err) // test that we didn't return an error - assert.Zero(t, acc.NFields()) // test that we didn't return any fields - assert.NotEmpty(t, acc.Errors) // test that we set an error + require.NoError(t, err) // test that we didn't return an error + require.Zero(t, acc.NFields()) // test that we didn't return any fields + require.NotEmpty(t, acc.Errors) // test that we set an error } func TestOpenldapBindIntegration(t *testing.T) { @@ -132,11 +132,11 @@ func TestOpenldapBindIntegration(t *testing.T) { } func commonTests(t *testing.T, o *Openldap, acc *testutil.Accumulator) { - assert.Empty(t, acc.Errors, "accumulator had no errors") - assert.True(t, acc.HasMeasurement("openldap"), "Has a measurement called 'openldap'") - assert.Equal(t, o.Host, acc.TagValue("openldap", "server"), "Has a tag value of server=o.Host") - assert.Equal(t, strconv.Itoa(o.Port), acc.TagValue("openldap", "port"), "Has a tag value of port=o.Port") - assert.True(t, acc.HasInt64Field("openldap", "total_connections"), "Has an integer field called total_connections") + require.Empty(t, acc.Errors, "accumulator had no errors") + require.True(t, acc.HasMeasurement("openldap"), "Has a measurement called 'openldap'") + require.Equal(t, o.Host, acc.TagValue("openldap", "server"), "Has a tag value of server=o.Host") + require.Equal(t, strconv.Itoa(o.Port), acc.TagValue("openldap", "port"), "Has a tag value of port=o.Port") + require.True(t, acc.HasInt64Field("openldap", "total_connections"), "Has an integer field called total_connections") } func TestOpenldapReverseMetricsIntegration(t *testing.T) { @@ -155,5 +155,5 @@ func TestOpenldapReverseMetricsIntegration(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasInt64Field("openldap", "connections_total"), "Has an integer field called connections_total") + require.True(t, acc.HasInt64Field("openldap", "connections_total"), "Has an integer field called connections_total") } diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go index f26419a71101e..ffca02b31a908 100644 --- a/plugins/inputs/openntpd/openntpd_test.go +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -3,16 +3,13 @@ package openntpd import ( "bytes" "testing" - "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) -var TestTimeout = config.Duration(time.Second) - func OpenntpdCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { return func(string, config.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil @@ -26,11 +23,11 @@ func TestParseSimpleOutput(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 7) + require.Equal(t, acc.NFields(), 7) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -57,11 +54,11 @@ func TestParseSimpleOutputwithStatePrefix(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 7) + require.Equal(t, acc.NFields(), 7) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -89,11 +86,11 @@ func TestParseSimpleOutputInvalidPeer(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -117,11 +114,11 @@ func TestParseSimpleOutputServersDNSError(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "next": int64(2), @@ -159,11 +156,11 @@ func TestParseSimpleOutputServerDNSError(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "next": int64(12), @@ -187,11 +184,11 @@ func TestParseFullOutput(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(20)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(20)) - assert.Equal(t, acc.NFields(), 113) + require.Equal(t, acc.NFields(), 113) firstpeerfields := map[string]interface{}{ "wt": int64(1), diff --git a/plugins/inputs/opensmtpd/opensmtpd_test.go b/plugins/inputs/opensmtpd/opensmtpd_test.go index fb3afa82e0171..3b625be51cef2 100644 --- a/plugins/inputs/opensmtpd/opensmtpd_test.go +++ b/plugins/inputs/opensmtpd/opensmtpd_test.go @@ -4,9 +4,10 @@ import ( "bytes" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func SMTPCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { @@ -22,11 +23,11 @@ func TestFilterSomeStats(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("opensmtpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("opensmtpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 36) + require.Equal(t, acc.NFields(), 36) acc.AssertContainsFields(t, "opensmtpd", parsedFullOutput) } diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go index 2de35bb06af50..8df1273bef8c4 100644 --- a/plugins/inputs/opentelemetry/opentelemetry_test.go +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -5,10 +5,6 @@ import ( "net" "testing" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/metric" @@ -18,6 +14,10 @@ import ( "go.opentelemetry.io/otel/sdk/metric/selector/simple" "google.golang.org/grpc" "google.golang.org/grpc/test/bufconn" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" ) func TestOpenTelemetry(t *testing.T) { @@ -72,12 +72,11 @@ func TestOpenTelemetry(t *testing.T) { // Check - assert.Empty(t, accumulator.Errors) + require.Empty(t, accumulator.Errors) - if assert.Len(t, accumulator.Metrics, 1) { - got := accumulator.Metrics[0] - assert.Equal(t, "measurement-counter", got.Measurement) - assert.Equal(t, telegraf.Counter, got.Type) - assert.Equal(t, "library-name", got.Tags["otel.library.name"]) - } + require.Len(t, accumulator.Metrics, 1) + got := accumulator.Metrics[0] + require.Equal(t, "measurement-counter", got.Measurement) + require.Equal(t, telegraf.Counter, got.Type) + require.Equal(t, "library-name", got.Tags["otel.library.name"]) } diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index fcc22343b435e..c4f2f4f032d7e 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -23,10 +23,10 @@ const ( // The limit of locations is 20. owmRequestSeveralCityID int = 20 - defaultBaseURL = "https://api.openweathermap.org/" - defaultResponseTimeout time.Duration = time.Second * 5 - defaultUnits string = "metric" - defaultLang string = "en" + defaultBaseURL = "https://api.openweathermap.org/" + defaultResponseTimeout = time.Second * 5 + defaultUnits = "metric" + defaultLang = "en" ) type OpenWeatherMap struct { @@ -38,8 +38,8 @@ type OpenWeatherMap struct { ResponseTimeout config.Duration `toml:"response_timeout"` Units string `toml:"units"` - client *http.Client - baseURL *url.URL + client *http.Client + baseParsedURL *url.URL } var sampleConfig = ` @@ -309,7 +309,7 @@ func init() { func (n *OpenWeatherMap) Init() error { var err error - n.baseURL, err = url.Parse(n.BaseURL) + n.baseParsedURL, err = url.Parse(n.BaseURL) if err != nil { return err } @@ -353,5 +353,5 @@ func (n *OpenWeatherMap) formatURL(path string, city string) string { RawQuery: v.Encode(), } - return n.baseURL.ResolveReference(relative).String() + return n.baseParsedURL.ResolveReference(relative).String() } From 443fc3d9d9bc1962e34ed8039c4baa75a5969534 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 2 Nov 2021 15:49:26 +0100 Subject: [PATCH 732/761] fix: Linter fixes for plugins/inputs/m* (#10006) (cherry picked from commit c1d4ce4dd548dd67afdd2dd1401920479198103b) --- plugins/inputs/mailchimp/chimp_api.go | 30 +-- plugins/inputs/mailchimp/mailchimp.go | 13 +- plugins/inputs/mailchimp/mailchimp_test.go | 45 ++-- plugins/inputs/marklogic/marklogic.go | 8 +- plugins/inputs/mcrouter/mcrouter.go | 21 +- plugins/inputs/mcrouter/mcrouter_test.go | 20 +- plugins/inputs/mdstat/mdstat_test.go | 11 +- plugins/inputs/memcached/memcached_test.go | 6 +- plugins/inputs/mesos/mesos.go | 55 +++-- plugins/inputs/mesos/mesos_test.go | 11 +- plugins/inputs/minecraft/client.go | 10 +- plugins/inputs/mongodb/mongodb_data_test.go | 33 +-- plugins/inputs/mongodb/mongodb_server_test.go | 6 +- plugins/inputs/mongodb/mongostat.go | 8 +- plugins/inputs/mongodb/mongostat_test.go | 40 ++-- plugins/inputs/monit/monit.go | 218 +++++++++--------- plugins/inputs/monit/monit_test.go | 6 +- plugins/inputs/mqtt_consumer/README.md | 2 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 29 +-- plugins/inputs/multifile/multifile_test.go | 10 +- plugins/inputs/mysql/mysql.go | 163 +++++++------ plugins/inputs/mysql/mysql_test.go | 30 +-- 22 files changed, 400 insertions(+), 375 deletions(-) diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index 2f6cecdb9e0da..71e7bcea6d535 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -5,12 +5,13 @@ import ( "encoding/json" "fmt" "io" - "log" "net/http" "net/url" "regexp" "sync" "time" + + "github.com/influxdata/telegraf" ) const ( @@ -22,11 +23,12 @@ var mailchimpDatacenter = regexp.MustCompile("[a-z]+[0-9]+$") type ChimpAPI struct { Transport http.RoundTripper - Debug bool + debug bool sync.Mutex url *url.URL + log telegraf.Logger } type ReportsParams struct { @@ -53,12 +55,12 @@ func (p *ReportsParams) String() string { return v.Encode() } -func NewChimpAPI(apiKey string) *ChimpAPI { +func NewChimpAPI(apiKey string, log telegraf.Logger) *ChimpAPI { u := &url.URL{} u.Scheme = "https" u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimpDatacenter.FindString(apiKey)) u.User = url.UserPassword("", apiKey) - return &ChimpAPI{url: u} + return &ChimpAPI{url: u, log: log} } type APIError struct { @@ -90,7 +92,7 @@ func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) { a.url.Path = reportsEndpoint var response ReportsResponse - rawjson, err := runChimp(a, params) + rawjson, err := a.runChimp(params) if err != nil { return response, err } @@ -109,7 +111,7 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { a.url.Path = fmt.Sprintf(reportsEndpointCampaign, campaignID) var response Report - rawjson, err := runChimp(a, ReportsParams{}) + rawjson, err := a.runChimp(ReportsParams{}) if err != nil { return response, err } @@ -122,21 +124,21 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { return response, nil } -func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { +func (a *ChimpAPI) runChimp(params ReportsParams) ([]byte, error) { client := &http.Client{ - Transport: api.Transport, + Transport: a.Transport, Timeout: 4 * time.Second, } var b bytes.Buffer - req, err := http.NewRequest("GET", api.url.String(), &b) + req, err := http.NewRequest("GET", a.url.String(), &b) if err != nil { return nil, err } req.URL.RawQuery = params.String() req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin") - if api.Debug { - log.Printf("D! [inputs.mailchimp] request URL: %s", req.URL.String()) + if a.debug { + a.log.Debugf("request URL: %s", req.URL.String()) } resp, err := client.Do(req) @@ -148,15 +150,15 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) - return nil, fmt.Errorf("%s returned HTTP status %s: %q", api.url.String(), resp.Status, body) + return nil, fmt.Errorf("%s returned HTTP status %s: %q", a.url.String(), resp.Status, body) } body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } - if api.Debug { - log.Printf("D! [inputs.mailchimp] response Body: %q", string(body)) + if a.debug { + a.log.Debugf("response Body: %q", string(body)) } if err = chimpErrorCheck(body); err != nil { diff --git a/plugins/inputs/mailchimp/mailchimp.go b/plugins/inputs/mailchimp/mailchimp.go index fe6892bf48743..b898cb6ba1768 100644 --- a/plugins/inputs/mailchimp/mailchimp.go +++ b/plugins/inputs/mailchimp/mailchimp.go @@ -14,6 +14,8 @@ type MailChimp struct { APIKey string `toml:"api_key"` DaysOld int `toml:"days_old"` CampaignID string `toml:"campaign_id"` + + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -35,12 +37,13 @@ func (m *MailChimp) Description() string { return "Gathers metrics from the /3.0/reports MailChimp API" } -func (m *MailChimp) Gather(acc telegraf.Accumulator) error { - if m.api == nil { - m.api = NewChimpAPI(m.APIKey) - } - m.api.Debug = false +func (m *MailChimp) Init() error { + m.api = NewChimpAPI(m.APIKey, m.Log) + return nil +} + +func (m *MailChimp) Gather(acc telegraf.Accumulator) error { if m.CampaignID == "" { since := "" if m.DaysOld > 0 { diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go index 1366d8859df5d..1df6c52cf6256 100644 --- a/plugins/inputs/mailchimp/mailchimp_test.go +++ b/plugins/inputs/mailchimp/mailchimp_test.go @@ -7,9 +7,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestMailChimpGatherReports(t *testing.T) { @@ -28,7 +28,8 @@ func TestMailChimpGatherReports(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, @@ -43,22 +44,22 @@ func TestMailChimpGatherReports(t *testing.T) { tags["campaign_title"] = "Freddie's Jokes Vol. 1" fields := map[string]interface{}{ - "emails_sent": int(200), - "abuse_reports": int(0), - "unsubscribed": int(2), - "hard_bounces": int(0), - "soft_bounces": int(2), - "syntax_errors": int(0), - "forwards_count": int(0), - "forwards_opens": int(0), - "opens_total": int(186), - "unique_opens": int(100), - "clicks_total": int(42), - "unique_clicks": int(400), - "unique_subscriber_clicks": int(42), - "facebook_recipient_likes": int(5), - "facebook_unique_likes": int(8), - "facebook_likes": int(42), + "emails_sent": 200, + "abuse_reports": 0, + "unsubscribed": 2, + "hard_bounces": 0, + "soft_bounces": 2, + "syntax_errors": 0, + "forwards_count": 0, + "forwards_opens": 0, + "opens_total": 186, + "unique_opens": 100, + "clicks_total": 42, + "unique_clicks": 400, + "unique_subscriber_clicks": 42, + "facebook_recipient_likes": 5, + "facebook_unique_likes": 8, + "facebook_likes": 42, "open_rate": float64(42), "click_rate": float64(42), "industry_open_rate": float64(0.17076777144396), @@ -92,7 +93,8 @@ func TestMailChimpGatherReport(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, @@ -157,7 +159,8 @@ func TestMailChimpGatherError(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, diff --git a/plugins/inputs/marklogic/marklogic.go b/plugins/inputs/marklogic/marklogic.go index d2ef139bfc7a3..30f9ee6403074 100644 --- a/plugins/inputs/marklogic/marklogic.go +++ b/plugins/inputs/marklogic/marklogic.go @@ -163,9 +163,9 @@ func (c *Marklogic) Gather(accumulator telegraf.Accumulator) error { return nil } -func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, url string) error { +func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, address string) error { ml := &MlHost{} - if err := c.gatherJSONData(url, ml); err != nil { + if err := c.gatherJSONData(address, ml); err != nil { return err } @@ -225,8 +225,8 @@ func (c *Marklogic) createHTTPClient() (*http.Client, error) { return client, nil } -func (c *Marklogic) gatherJSONData(url string, v interface{}) error { - req, err := http.NewRequest("GET", url, nil) +func (c *Marklogic) gatherJSONData(address string, v interface{}) error { + req, err := http.NewRequest("GET", address, nil) if err != nil { return err } diff --git a/plugins/inputs/mcrouter/mcrouter.go b/plugins/inputs/mcrouter/mcrouter.go index af197c3072089..07599ca2cc0b0 100644 --- a/plugins/inputs/mcrouter/mcrouter.go +++ b/plugins/inputs/mcrouter/mcrouter.go @@ -146,32 +146,33 @@ func (m *Mcrouter) Gather(acc telegraf.Accumulator) error { } // ParseAddress parses an address string into 'host:port' and 'protocol' parts -func (m *Mcrouter) ParseAddress(address string) (string, string, error) { - var protocol string +func (m *Mcrouter) ParseAddress(address string) (parsedAddress string, protocol string, err error) { var host string var port string - u, parseError := url.Parse(address) + parsedAddress = address + + u, parseError := url.Parse(parsedAddress) if parseError != nil { - return "", "", fmt.Errorf("Invalid server address") + return "", "", fmt.Errorf("invalid server address") } if u.Scheme != "tcp" && u.Scheme != "unix" { - return "", "", fmt.Errorf("Invalid server protocol") + return "", "", fmt.Errorf("invalid server protocol") } protocol = u.Scheme if protocol == "unix" { if u.Path == "" { - return "", "", fmt.Errorf("Invalid unix socket path") + return "", "", fmt.Errorf("invalid unix socket path") } - address = u.Path + parsedAddress = u.Path } else { if u.Host == "" { - return "", "", fmt.Errorf("Invalid host") + return "", "", fmt.Errorf("invalid host") } host = u.Hostname() @@ -185,10 +186,10 @@ func (m *Mcrouter) ParseAddress(address string) (string, string, error) { port = defaultServerURL.Port() } - address = host + ":" + port + parsedAddress = host + ":" + port } - return address, protocol, nil + return parsedAddress, protocol, nil } func (m *Mcrouter) gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error { diff --git a/plugins/inputs/mcrouter/mcrouter_test.go b/plugins/inputs/mcrouter/mcrouter_test.go index a9b525d46b79c..f02f2b53d4b85 100644 --- a/plugins/inputs/mcrouter/mcrouter_test.go +++ b/plugins/inputs/mcrouter/mcrouter_test.go @@ -5,9 +5,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestAddressParsing(t *testing.T) { @@ -30,17 +30,17 @@ func TestAddressParsing(t *testing.T) { for _, args := range acceptTests { address, protocol, err := m.ParseAddress(args[0]) - assert.Nil(t, err, args[0]) - assert.True(t, address == args[1], args[0]) - assert.True(t, protocol == args[2], args[0]) + require.Nil(t, err, args[0]) + require.Equal(t, args[1], address, args[0]) + require.Equal(t, args[2], protocol, args[0]) } for _, addr := range rejectTests { address, protocol, err := m.ParseAddress(addr) - assert.NotNil(t, err, addr) - assert.Empty(t, address, addr) - assert.Empty(t, protocol, addr) + require.NotNil(t, err, addr) + require.Empty(t, address, addr) + require.Empty(t, protocol, addr) } } @@ -129,11 +129,11 @@ func TestMcrouterGeneratesMetricsIntegration(t *testing.T) { } for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("mcrouter", metric), metric) + require.True(t, acc.HasInt64Field("mcrouter", metric), metric) } for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("mcrouter", metric), metric) + require.True(t, acc.HasFloatField("mcrouter", metric), metric) } } diff --git a/plugins/inputs/mdstat/mdstat_test.go b/plugins/inputs/mdstat/mdstat_test.go index 070b7ddd234f5..27397f715ad0d 100644 --- a/plugins/inputs/mdstat/mdstat_test.go +++ b/plugins/inputs/mdstat/mdstat_test.go @@ -7,8 +7,9 @@ import ( "os" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func TestFullMdstatProcFile(t *testing.T) { @@ -19,7 +20,7 @@ func TestFullMdstatProcFile(t *testing.T) { } acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) fields := map[string]interface{}{ "BlocksSynced": int64(10620027200), @@ -46,7 +47,7 @@ func TestFailedDiskMdStatProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) fields := map[string]interface{}{ "BlocksSynced": int64(5860144128), @@ -73,7 +74,7 @@ func TestEmptyMdStatProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) } func TestInvalidMdStatProcFile1(t *testing.T) { @@ -86,7 +87,7 @@ func TestInvalidMdStatProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) } const mdStatFileFull = ` diff --git a/plugins/inputs/memcached/memcached_test.go b/plugins/inputs/memcached/memcached_test.go index 1d0807625b31b..1ebfe65bad6fb 100644 --- a/plugins/inputs/memcached/memcached_test.go +++ b/plugins/inputs/memcached/memcached_test.go @@ -5,9 +5,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestMemcachedGeneratesMetricsIntegration(t *testing.T) { @@ -32,7 +32,7 @@ func TestMemcachedGeneratesMetricsIntegration(t *testing.T) { "bytes_read", "bytes_written", "threads", "conn_yields"} for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("memcached", metric), metric) + require.True(t, acc.HasInt64Field("memcached", metric), metric) } } diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 68203c9d480cb..991f8a9fd7003 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -4,7 +4,6 @@ import ( "encoding/json" "errors" "io" - "log" "net" "net/http" "net/url" @@ -23,7 +22,7 @@ type Role string const ( MASTER Role = "master" - SLAVE = "slave" + SLAVE Role = "slave" ) type Mesos struct { @@ -100,7 +99,7 @@ func (m *Mesos) Description() string { return "Telegraf plugin for gathering metrics from N Mesos masters" } -func parseURL(s string, role Role) (*url.URL, error) { +func (m *Mesos) parseURL(s string, role Role) (*url.URL, error) { if !strings.HasPrefix(s, "http://") && !strings.HasPrefix(s, "https://") { host, port, err := net.SplitHostPort(s) // no port specified @@ -115,7 +114,7 @@ func parseURL(s string, role Role) (*url.URL, error) { } s = "http://" + host + ":" + port - log.Printf("W! [inputs.mesos] using %q as connection URL; please update your configuration to use an URL", s) + m.Log.Warnf("using %q as connection URL; please update your configuration to use an URL", s) } return url.Parse(s) @@ -139,7 +138,7 @@ func (m *Mesos) initialize() error { m.masterURLs = make([]*url.URL, 0, len(m.Masters)) for _, master := range m.Masters { - u, err := parseURL(master, MASTER) + u, err := m.parseURL(master, MASTER) if err != nil { return err } @@ -150,7 +149,7 @@ func (m *Mesos) initialize() error { m.slaveURLs = make([]*url.URL, 0, len(m.Slaves)) for _, slave := range m.Slaves { - u, err := parseURL(slave, SLAVE) + u, err := m.parseURL(slave, SLAVE) if err != nil { return err } @@ -241,11 +240,11 @@ func metricsDiff(role Role, w []string) []string { } // masterBlocks serves as kind of metrics registry grouping them in sets -func getMetrics(role Role, group string) []string { - m := make(map[string][]string) +func (m *Mesos) getMetrics(role Role, group string) []string { + metrics := make(map[string][]string) if role == MASTER { - m["resources"] = []string{ + metrics["resources"] = []string{ "master/cpus_percent", "master/cpus_used", "master/cpus_total", @@ -272,12 +271,12 @@ func getMetrics(role Role, group string) []string { "master/mem_revocable_used", } - m["master"] = []string{ + metrics["master"] = []string{ "master/elected", "master/uptime_secs", } - m["system"] = []string{ + metrics["system"] = []string{ "system/cpus_total", "system/load_15min", "system/load_5min", @@ -286,7 +285,7 @@ func getMetrics(role Role, group string) []string { "system/mem_total_bytes", } - m["agents"] = []string{ + metrics["agents"] = []string{ "master/slave_registrations", "master/slave_removals", "master/slave_reregistrations", @@ -303,7 +302,7 @@ func getMetrics(role Role, group string) []string { "master/slaves_unreachable", } - m["frameworks"] = []string{ + metrics["frameworks"] = []string{ "master/frameworks_active", "master/frameworks_connected", "master/frameworks_disconnected", @@ -314,10 +313,10 @@ func getMetrics(role Role, group string) []string { // framework_offers and allocator metrics have unpredictable names, so they can't be listed here. // These empty groups are included to prevent the "unknown metrics group" info log below. // filterMetrics() filters these metrics by looking for names with the corresponding prefix. - m["framework_offers"] = []string{} - m["allocator"] = []string{} + metrics["framework_offers"] = []string{} + metrics["allocator"] = []string{} - m["tasks"] = []string{ + metrics["tasks"] = []string{ "master/tasks_error", "master/tasks_failed", "master/tasks_finished", @@ -333,7 +332,7 @@ func getMetrics(role Role, group string) []string { "master/tasks_unreachable", } - m["messages"] = []string{ + metrics["messages"] = []string{ "master/invalid_executor_to_framework_messages", "master/invalid_framework_to_executor_messages", "master/invalid_status_update_acknowledgements", @@ -377,14 +376,14 @@ func getMetrics(role Role, group string) []string { "master/valid_operation_status_update_acknowledgements", } - m["evqueue"] = []string{ + metrics["evqueue"] = []string{ "master/event_queue_dispatches", "master/event_queue_http_requests", "master/event_queue_messages", "master/operator_event_stream_subscribers", } - m["registrar"] = []string{ + metrics["registrar"] = []string{ "registrar/state_fetch_ms", "registrar/state_store_ms", "registrar/state_store_ms/max", @@ -402,7 +401,7 @@ func getMetrics(role Role, group string) []string { "registrar/state_store_ms/count", } } else if role == SLAVE { - m["resources"] = []string{ + metrics["resources"] = []string{ "slave/cpus_percent", "slave/cpus_used", "slave/cpus_total", @@ -429,12 +428,12 @@ func getMetrics(role Role, group string) []string { "slave/mem_revocable_used", } - m["agent"] = []string{ + metrics["agent"] = []string{ "slave/registered", "slave/uptime_secs", } - m["system"] = []string{ + metrics["system"] = []string{ "system/cpus_total", "system/load_15min", "system/load_5min", @@ -443,7 +442,7 @@ func getMetrics(role Role, group string) []string { "system/mem_total_bytes", } - m["executors"] = []string{ + metrics["executors"] = []string{ "containerizer/mesos/container_destroy_errors", "slave/container_launch_errors", "slave/executors_preempted", @@ -456,7 +455,7 @@ func getMetrics(role Role, group string) []string { "slave/recovery_errors", } - m["tasks"] = []string{ + metrics["tasks"] = []string{ "slave/tasks_failed", "slave/tasks_finished", "slave/tasks_killed", @@ -466,7 +465,7 @@ func getMetrics(role Role, group string) []string { "slave/tasks_starting", } - m["messages"] = []string{ + metrics["messages"] = []string{ "slave/invalid_framework_messages", "slave/invalid_status_updates", "slave/valid_framework_messages", @@ -474,10 +473,10 @@ func getMetrics(role Role, group string) []string { } } - ret, ok := m[group] + ret, ok := metrics[group] if !ok { - log.Printf("I! [inputs.mesos] unknown role %q metrics group: %s", role, group) + m.Log.Infof("unknown role %q metrics group: %s", role, group) return []string{} } @@ -512,7 +511,7 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) { // All other metrics have predictable names. We can use getMetrics() to retrieve them. default: - for _, v := range getMetrics(role, k) { + for _, v := range m.getMetrics(role, k) { if _, ok = (*metrics)[v]; ok { delete(*metrics, v) } diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 4b6d5ab74d371..2605ddd4678c2 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -10,8 +10,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var masterMetrics map[string]interface{} @@ -340,7 +341,7 @@ func TestMasterFilter(t *testing.T) { // Assert expected metrics are present. for _, v := range m.MasterCols { - for _, x := range getMetrics(MASTER, v) { + for _, x := range m.getMetrics(MASTER, v) { _, ok := masterMetrics[x] require.Truef(t, ok, "Didn't find key %s, it should present.", x) } @@ -357,7 +358,7 @@ func TestMasterFilter(t *testing.T) { // Assert unexpected metrics are not present. for _, v := range b { - for _, x := range getMetrics(MASTER, v) { + for _, x := range m.getMetrics(MASTER, v) { _, ok := masterMetrics[x] require.Falsef(t, ok, "Found key %s, it should be gone.", x) } @@ -402,13 +403,13 @@ func TestSlaveFilter(t *testing.T) { m.filterMetrics(SLAVE, &slaveMetrics) for _, v := range b { - for _, x := range getMetrics(SLAVE, v) { + for _, x := range m.getMetrics(SLAVE, v) { _, ok := slaveMetrics[x] require.Falsef(t, ok, "Found key %s, it should be gone.", x) } } for _, v := range m.MasterCols { - for _, x := range getMetrics(SLAVE, v) { + for _, x := range m.getMetrics(SLAVE, v) { _, ok := slaveMetrics[x] require.Truef(t, ok, "Didn't find key %s, it should present.", x) } diff --git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go index 641a8ae75db9f..4aa712d4b04f4 100644 --- a/plugins/inputs/minecraft/client.go +++ b/plugins/inputs/minecraft/client.go @@ -45,17 +45,17 @@ func (c *connector) Connect() (Connection, error) { return nil, err } - rcon, err := rcon.NewClient(c.hostname, p) + client, err := rcon.NewClient(c.hostname, p) if err != nil { return nil, err } - _, err = rcon.Authorize(c.password) + _, err = client.Authorize(c.password) if err != nil { return nil, err } - return &connection{rcon: rcon}, nil + return &connection{client: client}, nil } func newClient(connector Connector) *client { @@ -111,11 +111,11 @@ func (c *client) Scores(player string) ([]Score, error) { } type connection struct { - rcon *rcon.Client + client *rcon.Client } func (c *connection) Execute(command string) (string, error) { - packet, err := c.rcon.Execute(command) + packet, err := c.client.Execute(command) if err != nil { return "", err } diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 378268916054d..f7f891ec775bf 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -5,8 +5,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) var tags = make(map[string]string) @@ -65,7 +66,7 @@ func TestAddNonReplStats(t *testing.T) { d.flush(&acc) for key := range defaultStats { - assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) + require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } } @@ -86,7 +87,7 @@ func TestAddReplStats(t *testing.T) { d.flush(&acc) for key := range mmapStats { - assert.True(t, acc.HasInt64Field("mongodb", key), key) + require.True(t, acc.HasInt64Field("mongodb", key), key) } } @@ -120,14 +121,14 @@ func TestAddWiredTigerStats(t *testing.T) { d.flush(&acc) for key := range wiredTigerStats { - assert.True(t, acc.HasFloatField("mongodb", key), key) + require.True(t, acc.HasFloatField("mongodb", key), key) } for key := range wiredTigerExtStats { - assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) + require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } - assert.True(t, acc.HasInt64Field("mongodb", "page_faults")) + require.True(t, acc.HasInt64Field("mongodb", "page_faults")) } func TestAddShardStats(t *testing.T) { @@ -147,7 +148,7 @@ func TestAddShardStats(t *testing.T) { d.flush(&acc) for key := range defaultShardStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -170,7 +171,7 @@ func TestAddLatencyStats(t *testing.T) { d.flush(&acc) for key := range defaultLatencyStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -192,7 +193,7 @@ func TestAddAssertsStats(t *testing.T) { d.flush(&acc) for key := range defaultAssertsStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -227,7 +228,7 @@ func TestAddCommandsStats(t *testing.T) { d.flush(&acc) for key := range defaultCommandsStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -263,7 +264,7 @@ func TestAddTCMallocStats(t *testing.T) { d.flush(&acc) for key := range defaultTCMallocStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -283,7 +284,7 @@ func TestAddStorageStats(t *testing.T) { d.flush(&acc) for key := range defaultStorageStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -313,15 +314,15 @@ func TestAddShardHostStats(t *testing.T) { var hostsFound []string for host := range hostStatLines { for key := range shardHostStats { - assert.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) + require.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) } - assert.True(t, acc.HasTag("mongodb_shard_stats", "hostname")) + require.True(t, acc.HasTag("mongodb_shard_stats", "hostname")) hostsFound = append(hostsFound, host) } sort.Strings(hostsFound) sort.Strings(expectedHosts) - assert.Equal(t, hostsFound, expectedHosts) + require.Equal(t, hostsFound, expectedHosts) } func TestStateTag(t *testing.T) { @@ -527,7 +528,7 @@ func TestAddTopStats(t *testing.T) { for range topStatLines { for key := range topDataStats { - assert.True(t, acc.HasInt64Field("mongodb_top_stats", key)) + require.True(t, acc.HasInt64Field("mongodb_top_stats", key)) } } } diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index c8fd9f7c15284..d2313e4088f82 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -6,9 +6,9 @@ package mongodb import ( "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestGetDefaultTags(t *testing.T) { @@ -37,7 +37,7 @@ func TestAddDefaultStats(t *testing.T) { require.NoError(t, err) for key := range defaultStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index ea69c8d424f7c..2490ca2c1777c 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -903,7 +903,7 @@ func computeLockDiffs(prevLocks, curLocks map[string]LockUsage) []LockUsage { return lockUsages } -func diff(newVal, oldVal, sampleTime int64) (int64, int64) { +func diff(newVal, oldVal, sampleTime int64) (avg int64, newValue int64) { d := newVal - oldVal if d < 0 { d = newVal @@ -1311,10 +1311,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec // I'm the master returnVal.ReplLag = 0 break - } else { - // I'm secondary - me = member } + + // I'm secondary + me = member } else if member.State == 1 { // Master found master = member diff --git a/plugins/inputs/mongodb/mongostat_test.go b/plugins/inputs/mongodb/mongostat_test.go index 9f6ef04892ac9..908b82de1b911 100644 --- a/plugins/inputs/mongodb/mongostat_test.go +++ b/plugins/inputs/mongodb/mongostat_test.go @@ -2,10 +2,8 @@ package mongodb import ( "testing" - //"time" - //"github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestLatencyStats(t *testing.T) { @@ -55,12 +53,12 @@ func TestLatencyStats(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(0)) - assert.Equal(t, sl.ReadLatency, int64(0)) - assert.Equal(t, sl.WriteLatency, int64(0)) - assert.Equal(t, sl.CommandOpsCnt, int64(0)) - assert.Equal(t, sl.ReadOpsCnt, int64(0)) - assert.Equal(t, sl.WriteOpsCnt, int64(0)) + require.Equal(t, sl.CommandLatency, int64(0)) + require.Equal(t, sl.ReadLatency, int64(0)) + require.Equal(t, sl.WriteLatency, int64(0)) + require.Equal(t, sl.CommandOpsCnt, int64(0)) + require.Equal(t, sl.ReadOpsCnt, int64(0)) + require.Equal(t, sl.WriteOpsCnt, int64(0)) } func TestLatencyStatsDiffZero(t *testing.T) { @@ -124,12 +122,12 @@ func TestLatencyStatsDiffZero(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(0)) - assert.Equal(t, sl.ReadLatency, int64(0)) - assert.Equal(t, sl.WriteLatency, int64(0)) - assert.Equal(t, sl.CommandOpsCnt, int64(0)) - assert.Equal(t, sl.ReadOpsCnt, int64(0)) - assert.Equal(t, sl.WriteOpsCnt, int64(0)) + require.Equal(t, sl.CommandLatency, int64(0)) + require.Equal(t, sl.ReadLatency, int64(0)) + require.Equal(t, sl.WriteLatency, int64(0)) + require.Equal(t, sl.CommandOpsCnt, int64(0)) + require.Equal(t, sl.ReadOpsCnt, int64(0)) + require.Equal(t, sl.WriteOpsCnt, int64(0)) } func TestLatencyStatsDiff(t *testing.T) { @@ -193,10 +191,10 @@ func TestLatencyStatsDiff(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(59177981552)) - assert.Equal(t, sl.ReadLatency, int64(2255946760057)) - assert.Equal(t, sl.WriteLatency, int64(494479456987)) - assert.Equal(t, sl.CommandOpsCnt, int64(1019152861)) - assert.Equal(t, sl.ReadOpsCnt, int64(4189049884)) - assert.Equal(t, sl.WriteOpsCnt, int64(1691021287)) + require.Equal(t, sl.CommandLatency, int64(59177981552)) + require.Equal(t, sl.ReadLatency, int64(2255946760057)) + require.Equal(t, sl.WriteLatency, int64(494479456987)) + require.Equal(t, sl.CommandOpsCnt, int64(1019152861)) + require.Equal(t, sl.ReadOpsCnt, int64(4189049884)) + require.Equal(t, sl.WriteOpsCnt, int64(1691021287)) } diff --git a/plugins/inputs/monit/monit.go b/plugins/inputs/monit/monit.go index 1cb1a4ba57da9..051e0b36982fe 100644 --- a/plugins/inputs/monit/monit.go +++ b/plugins/inputs/monit/monit.go @@ -6,23 +6,24 @@ import ( "net/http" "time" + "golang.org/x/net/html/charset" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "golang.org/x/net/html/charset" ) const ( - fileSystem string = "0" - directory = "1" - file = "2" - process = "3" - remoteHost = "4" - system = "5" - fifo = "6" - program = "7" - network = "8" + fileSystem = "0" + directory = "1" + file = "2" + process = "3" + remoteHost = "4" + system = "5" + fifo = "6" + program = "7" + network = "8" ) var pendingActions = []string{"ignore", "alert", "restart", "stop", "exec", "unmonitor", "start", "monitor"} @@ -244,108 +245,109 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error { } defer resp.Body.Close() - if resp.StatusCode == 200 { - var status Status - decoder := xml.NewDecoder(resp.Body) - decoder.CharsetReader = charset.NewReaderLabel - if err := decoder.Decode(&status); err != nil { - return fmt.Errorf("error parsing input: %v", err) - } + if resp.StatusCode != 200 { + return fmt.Errorf("received status code %d (%s), expected 200", resp.StatusCode, http.StatusText(resp.StatusCode)) + } - tags := map[string]string{ - "version": status.Server.Version, - "source": status.Server.LocalHostname, - "platform_name": status.Platform.Name, - } + var status Status + decoder := xml.NewDecoder(resp.Body) + decoder.CharsetReader = charset.NewReaderLabel + if err := decoder.Decode(&status); err != nil { + return fmt.Errorf("error parsing input: %v", err) + } + + tags := map[string]string{ + "version": status.Server.Version, + "source": status.Server.LocalHostname, + "platform_name": status.Platform.Name, + } - for _, service := range status.Services { - fields := make(map[string]interface{}) - tags["status"] = serviceStatus(service) - fields["status_code"] = service.Status - tags["pending_action"] = pendingAction(service) - fields["pending_action_code"] = service.PendingAction - tags["monitoring_status"] = monitoringStatus(service) - fields["monitoring_status_code"] = service.MonitoringStatus - tags["monitoring_mode"] = monitoringMode(service) - fields["monitoring_mode_code"] = service.MonitorMode - tags["service"] = service.Name - if service.Type == fileSystem { - fields["mode"] = service.Mode - fields["block_percent"] = service.Block.Percent - fields["block_usage"] = service.Block.Usage - fields["block_total"] = service.Block.Total - fields["inode_percent"] = service.Inode.Percent - fields["inode_usage"] = service.Inode.Usage - fields["inode_total"] = service.Inode.Total - acc.AddFields("monit_filesystem", fields, tags) - } else if service.Type == directory { - fields["mode"] = service.Mode - acc.AddFields("monit_directory", fields, tags) - } else if service.Type == file { - fields["size"] = service.Size - fields["mode"] = service.Mode - acc.AddFields("monit_file", fields, tags) - } else if service.Type == process { - fields["cpu_percent"] = service.CPU.Percent - fields["cpu_percent_total"] = service.CPU.PercentTotal - fields["mem_kb"] = service.Memory.Kilobyte - fields["mem_kb_total"] = service.Memory.KilobyteTotal - fields["mem_percent"] = service.Memory.Percent - fields["mem_percent_total"] = service.Memory.PercentTotal - fields["pid"] = service.Pid - fields["parent_pid"] = service.ParentPid - fields["threads"] = service.Threads - fields["children"] = service.Children - acc.AddFields("monit_process", fields, tags) - } else if service.Type == remoteHost { - fields["remote_hostname"] = service.Port.Hostname - fields["port_number"] = service.Port.PortNumber - fields["request"] = service.Port.Request - fields["response_time"] = service.Port.ResponseTime - fields["protocol"] = service.Port.Protocol - fields["type"] = service.Port.Type - acc.AddFields("monit_remote_host", fields, tags) - } else if service.Type == system { - fields["cpu_system"] = service.System.CPU.System - fields["cpu_user"] = service.System.CPU.User - fields["cpu_wait"] = service.System.CPU.Wait - fields["cpu_load_avg_1m"] = service.System.Load.Avg01 - fields["cpu_load_avg_5m"] = service.System.Load.Avg05 - fields["cpu_load_avg_15m"] = service.System.Load.Avg15 - fields["mem_kb"] = service.System.Memory.Kilobyte - fields["mem_percent"] = service.System.Memory.Percent - fields["swap_kb"] = service.System.Swap.Kilobyte - fields["swap_percent"] = service.System.Swap.Percent - acc.AddFields("monit_system", fields, tags) - } else if service.Type == fifo { - fields["mode"] = service.Mode - acc.AddFields("monit_fifo", fields, tags) - } else if service.Type == program { - fields["program_started"] = service.Program.Started * 10000000 - fields["program_status"] = service.Program.Status - acc.AddFields("monit_program", fields, tags) - } else if service.Type == network { - fields["link_state"] = service.Link.State - fields["link_speed"] = service.Link.Speed - fields["link_mode"] = linkMode(service) - fields["download_packets_now"] = service.Link.Download.Packets.Now - fields["download_packets_total"] = service.Link.Download.Packets.Total - fields["download_bytes_now"] = service.Link.Download.Bytes.Now - fields["download_bytes_total"] = service.Link.Download.Bytes.Total - fields["download_errors_now"] = service.Link.Download.Errors.Now - fields["download_errors_total"] = service.Link.Download.Errors.Total - fields["upload_packets_now"] = service.Link.Upload.Packets.Now - fields["upload_packets_total"] = service.Link.Upload.Packets.Total - fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now - fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total - fields["upload_errors_now"] = service.Link.Upload.Errors.Now - fields["upload_errors_total"] = service.Link.Upload.Errors.Total - acc.AddFields("monit_network", fields, tags) - } + for _, service := range status.Services { + fields := make(map[string]interface{}) + tags["status"] = serviceStatus(service) + fields["status_code"] = service.Status + tags["pending_action"] = pendingAction(service) + fields["pending_action_code"] = service.PendingAction + tags["monitoring_status"] = monitoringStatus(service) + fields["monitoring_status_code"] = service.MonitoringStatus + tags["monitoring_mode"] = monitoringMode(service) + fields["monitoring_mode_code"] = service.MonitorMode + tags["service"] = service.Name + if service.Type == fileSystem { + fields["mode"] = service.Mode + fields["block_percent"] = service.Block.Percent + fields["block_usage"] = service.Block.Usage + fields["block_total"] = service.Block.Total + fields["inode_percent"] = service.Inode.Percent + fields["inode_usage"] = service.Inode.Usage + fields["inode_total"] = service.Inode.Total + acc.AddFields("monit_filesystem", fields, tags) + } else if service.Type == directory { + fields["mode"] = service.Mode + acc.AddFields("monit_directory", fields, tags) + } else if service.Type == file { + fields["size"] = service.Size + fields["mode"] = service.Mode + acc.AddFields("monit_file", fields, tags) + } else if service.Type == process { + fields["cpu_percent"] = service.CPU.Percent + fields["cpu_percent_total"] = service.CPU.PercentTotal + fields["mem_kb"] = service.Memory.Kilobyte + fields["mem_kb_total"] = service.Memory.KilobyteTotal + fields["mem_percent"] = service.Memory.Percent + fields["mem_percent_total"] = service.Memory.PercentTotal + fields["pid"] = service.Pid + fields["parent_pid"] = service.ParentPid + fields["threads"] = service.Threads + fields["children"] = service.Children + acc.AddFields("monit_process", fields, tags) + } else if service.Type == remoteHost { + fields["remote_hostname"] = service.Port.Hostname + fields["port_number"] = service.Port.PortNumber + fields["request"] = service.Port.Request + fields["response_time"] = service.Port.ResponseTime + fields["protocol"] = service.Port.Protocol + fields["type"] = service.Port.Type + acc.AddFields("monit_remote_host", fields, tags) + } else if service.Type == system { + fields["cpu_system"] = service.System.CPU.System + fields["cpu_user"] = service.System.CPU.User + fields["cpu_wait"] = service.System.CPU.Wait + fields["cpu_load_avg_1m"] = service.System.Load.Avg01 + fields["cpu_load_avg_5m"] = service.System.Load.Avg05 + fields["cpu_load_avg_15m"] = service.System.Load.Avg15 + fields["mem_kb"] = service.System.Memory.Kilobyte + fields["mem_percent"] = service.System.Memory.Percent + fields["swap_kb"] = service.System.Swap.Kilobyte + fields["swap_percent"] = service.System.Swap.Percent + acc.AddFields("monit_system", fields, tags) + } else if service.Type == fifo { + fields["mode"] = service.Mode + acc.AddFields("monit_fifo", fields, tags) + } else if service.Type == program { + fields["program_started"] = service.Program.Started * 10000000 + fields["program_status"] = service.Program.Status + acc.AddFields("monit_program", fields, tags) + } else if service.Type == network { + fields["link_state"] = service.Link.State + fields["link_speed"] = service.Link.Speed + fields["link_mode"] = linkMode(service) + fields["download_packets_now"] = service.Link.Download.Packets.Now + fields["download_packets_total"] = service.Link.Download.Packets.Total + fields["download_bytes_now"] = service.Link.Download.Bytes.Now + fields["download_bytes_total"] = service.Link.Download.Bytes.Total + fields["download_errors_now"] = service.Link.Download.Errors.Now + fields["download_errors_total"] = service.Link.Download.Errors.Total + fields["upload_packets_now"] = service.Link.Upload.Packets.Now + fields["upload_packets_total"] = service.Link.Upload.Packets.Total + fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now + fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total + fields["upload_errors_now"] = service.Link.Upload.Errors.Now + fields["upload_errors_total"] = service.Link.Upload.Errors.Total + acc.AddFields("monit_network", fields, tags) } - } else { - return fmt.Errorf("received status code %d (%s), expected 200", resp.StatusCode, http.StatusText(resp.StatusCode)) } + return nil } diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index b3bbed79f68e1..ef47575e80b4c 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -8,10 +8,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type transportMock struct { @@ -632,7 +632,7 @@ func TestNoUsernameOrPasswordConfiguration(t *testing.T) { require.NoError(t, r.Init()) err := r.Gather(&acc) - assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") + require.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") } func TestInvalidXMLAndInvalidTypes(t *testing.T) { diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index a9e8236ee0cf5..3fd128eb85e10 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -8,7 +8,7 @@ and creates metrics using one of the supported [input data formats][]. ```toml [[inputs.mqtt_consumer]] ## Broker URLs for the MQTT server or cluster. To connect to multiple - ## clusters or standalone servers, use a seperate plugin instance. + ## clusters or standalone servers, use a separate plugin instance. ## example: servers = ["tcp://localhost:1883"] ## servers = ["ssl://localhost:1883"] ## servers = ["ws://localhost:1883"] diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 815f27a727abf..3e88cecbbce45 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -9,6 +9,7 @@ import ( "time" mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" @@ -64,15 +65,15 @@ type MQTTConsumer struct { Log telegraf.Logger - clientFactory ClientFactory - client Client - opts *mqtt.ClientOptions - acc telegraf.TrackingAccumulator - state ConnectionState - sem semaphore - messages map[telegraf.TrackingID]bool - messagesMutex sync.Mutex - topicTag string + clientFactory ClientFactory + client Client + opts *mqtt.ClientOptions + acc telegraf.TrackingAccumulator + state ConnectionState + sem semaphore + messages map[telegraf.TrackingID]bool + messagesMutex sync.Mutex + chosenTopicTag string ctx context.Context cancel context.CancelFunc @@ -80,7 +81,7 @@ type MQTTConsumer struct { var sampleConfig = ` ## Broker URLs for the MQTT server or cluster. To connect to multiple - ## clusters or standalone servers, use a seperate plugin instance. + ## clusters or standalone servers, use a separate plugin instance. ## example: servers = ["tcp://localhost:1883"] ## servers = ["ssl://localhost:1883"] ## servers = ["ws://localhost:1883"] @@ -174,9 +175,9 @@ func (m *MQTTConsumer) Init() error { return fmt.Errorf("connection_timeout must be greater than 1s: %s", time.Duration(m.ConnectionTimeout)) } - m.topicTag = "topic" + m.chosenTopicTag = "topic" if m.TopicTag != nil { - m.topicTag = *m.TopicTag + m.chosenTopicTag = *m.TopicTag } opts, err := m.createOpts() @@ -284,10 +285,10 @@ func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Mess return err } - if m.topicTag != "" { + if m.chosenTopicTag != "" { topic := msg.Topic() for _, metric := range metrics { - metric.AddTag(m.topicTag, topic) + metric.AddTag(m.chosenTopicTag, topic) } } diff --git a/plugins/inputs/multifile/multifile_test.go b/plugins/inputs/multifile/multifile_test.go index b12f29f35c2cd..214cebd136f9c 100644 --- a/plugins/inputs/multifile/multifile_test.go +++ b/plugins/inputs/multifile/multifile_test.go @@ -5,9 +5,9 @@ import ( "path" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestFileTypes(t *testing.T) { @@ -32,8 +32,8 @@ func TestFileTypes(t *testing.T) { err := m.Gather(&acc) require.NoError(t, err) - assert.Equal(t, map[string]string{"exampletag": "test"}, acc.Metrics[0].Tags) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]string{"exampletag": "test"}, acc.Metrics[0].Tags) + require.Equal(t, map[string]interface{}{ "examplebool": true, "examplestring": "hello world", "exampleint": int64(123456), @@ -60,7 +60,7 @@ func FailEarly(failEarly bool, t *testing.T) error { err := m.Gather(&acc) if err == nil { - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "exampleint": int64(123456), }, acc.Metrics[0].Fields) } diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 6e81b3df2f757..28313b25534aa 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -10,6 +10,7 @@ import ( "time" "github.com/go-sql-driver/mysql" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -905,6 +906,7 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. return err } defer rows.Close() + var ( command string state string @@ -948,6 +950,7 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. if err != nil { return err } + defer connRows.Close() for connRows.Next() { var user string @@ -1812,90 +1815,100 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula } for _, database := range dbList { - rows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) + err := m.gatherSchemaForDB(db, database, servtag, acc) if err != nil { return err } - defer rows.Close() - var ( - tableSchema string - tableName string - tableType string - engine string - version float64 - rowFormat string - tableRows float64 - dataLength float64 - indexLength float64 - dataFree float64 - createOptions string + } + return nil +} + +func (m *Mysql) gatherSchemaForDB(db *sql.DB, database string, servtag string, acc telegraf.Accumulator) error { + rows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) + if err != nil { + return err + } + defer rows.Close() + + var ( + tableSchema string + tableName string + tableType string + engine string + version float64 + rowFormat string + tableRows float64 + dataLength float64 + indexLength float64 + dataFree float64 + createOptions string + ) + + for rows.Next() { + err = rows.Scan( + &tableSchema, + &tableName, + &tableType, + &engine, + &version, + &rowFormat, + &tableRows, + &dataLength, + &indexLength, + &dataFree, + &createOptions, ) - for rows.Next() { - err = rows.Scan( - &tableSchema, - &tableName, - &tableType, - &engine, - &version, - &rowFormat, - &tableRows, - &dataLength, - &indexLength, - &dataFree, - &createOptions, - ) - if err != nil { - return err - } - tags := map[string]string{"server": servtag} - tags["schema"] = tableSchema - tags["table"] = tableName - - if m.MetricVersion < 2 { - acc.AddFields(newNamespace("info_schema", "table_rows"), - map[string]interface{}{"value": tableRows}, tags) - - dlTags := copyTags(tags) - dlTags["component"] = "data_length" - acc.AddFields(newNamespace("info_schema", "table_size", "data_length"), - map[string]interface{}{"value": dataLength}, dlTags) - - ilTags := copyTags(tags) - ilTags["component"] = "index_length" - acc.AddFields(newNamespace("info_schema", "table_size", "index_length"), - map[string]interface{}{"value": indexLength}, ilTags) - - dfTags := copyTags(tags) - dfTags["component"] = "data_free" - acc.AddFields(newNamespace("info_schema", "table_size", "data_free"), - map[string]interface{}{"value": dataFree}, dfTags) - } else { - acc.AddFields("mysql_table_schema", - map[string]interface{}{"rows": tableRows}, tags) + if err != nil { + return err + } + tags := map[string]string{"server": servtag} + tags["schema"] = tableSchema + tags["table"] = tableName - acc.AddFields("mysql_table_schema", - map[string]interface{}{"data_length": dataLength}, tags) + if m.MetricVersion < 2 { + acc.AddFields(newNamespace("info_schema", "table_rows"), + map[string]interface{}{"value": tableRows}, tags) + + dlTags := copyTags(tags) + dlTags["component"] = "data_length" + acc.AddFields(newNamespace("info_schema", "table_size", "data_length"), + map[string]interface{}{"value": dataLength}, dlTags) + + ilTags := copyTags(tags) + ilTags["component"] = "index_length" + acc.AddFields(newNamespace("info_schema", "table_size", "index_length"), + map[string]interface{}{"value": indexLength}, ilTags) + + dfTags := copyTags(tags) + dfTags["component"] = "data_free" + acc.AddFields(newNamespace("info_schema", "table_size", "data_free"), + map[string]interface{}{"value": dataFree}, dfTags) + } else { + acc.AddFields("mysql_table_schema", + map[string]interface{}{"rows": tableRows}, tags) - acc.AddFields("mysql_table_schema", - map[string]interface{}{"index_length": indexLength}, tags) + acc.AddFields("mysql_table_schema", + map[string]interface{}{"data_length": dataLength}, tags) - acc.AddFields("mysql_table_schema", - map[string]interface{}{"data_free": dataFree}, tags) - } + acc.AddFields("mysql_table_schema", + map[string]interface{}{"index_length": indexLength}, tags) - versionTags := copyTags(tags) - versionTags["type"] = tableType - versionTags["engine"] = engine - versionTags["row_format"] = rowFormat - versionTags["create_options"] = createOptions + acc.AddFields("mysql_table_schema", + map[string]interface{}{"data_free": dataFree}, tags) + } - if m.MetricVersion < 2 { - acc.AddFields(newNamespace("info_schema", "table_version"), - map[string]interface{}{"value": version}, versionTags) - } else { - acc.AddFields("mysql_table_schema_version", - map[string]interface{}{"table_version": version}, versionTags) - } + versionTags := copyTags(tags) + versionTags["type"] = tableType + versionTags["engine"] = engine + versionTags["row_format"] = rowFormat + versionTags["create_options"] = createOptions + + if m.MetricVersion < 2 { + acc.AddFields(newNamespace("info_schema", "table_version"), + map[string]interface{}{"value": version}, versionTags) + } else { + acc.AddFields("mysql_table_schema_version", + map[string]interface{}{"table_version": version}, versionTags) } } return nil diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index 0cdcd4b1cd345..410f80213252f 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -5,9 +5,9 @@ import ( "fmt" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestMysqlDefaultsToLocalIntegration(t *testing.T) { @@ -23,7 +23,7 @@ func TestMysqlDefaultsToLocalIntegration(t *testing.T) { err := m.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("mysql")) + require.True(t, acc.HasMeasurement("mysql")) } func TestMysqlMultipleInstancesIntegration(t *testing.T) { @@ -43,9 +43,9 @@ func TestMysqlMultipleInstancesIntegration(t *testing.T) { var acc, acc2 testutil.Accumulator err := m.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("mysql")) + require.True(t, acc.HasMeasurement("mysql")) // acc should have global variables - assert.True(t, acc.HasMeasurement("mysql_variables")) + require.True(t, acc.HasMeasurement("mysql_variables")) m2 := &Mysql{ Servers: []string{testServer}, @@ -53,9 +53,9 @@ func TestMysqlMultipleInstancesIntegration(t *testing.T) { } err = m2.Gather(&acc2) require.NoError(t, err) - assert.True(t, acc2.HasMeasurement("mysql")) + require.True(t, acc2.HasMeasurement("mysql")) // acc2 should not have global variables - assert.False(t, acc2.HasMeasurement("mysql_variables")) + require.False(t, acc2.HasMeasurement("mysql_variables")) } func TestMysqlMultipleInits(t *testing.T) { @@ -65,16 +65,16 @@ func TestMysqlMultipleInits(t *testing.T) { m2 := &Mysql{} m.InitMysql() - assert.True(t, m.initDone) - assert.False(t, m2.initDone) - assert.Equal(t, m.scanIntervalSlow, uint32(30)) - assert.Equal(t, m2.scanIntervalSlow, uint32(0)) + require.True(t, m.initDone) + require.False(t, m2.initDone) + require.Equal(t, m.scanIntervalSlow, uint32(30)) + require.Equal(t, m2.scanIntervalSlow, uint32(0)) m2.InitMysql() - assert.True(t, m.initDone) - assert.True(t, m2.initDone) - assert.Equal(t, m.scanIntervalSlow, uint32(30)) - assert.Equal(t, m2.scanIntervalSlow, uint32(0)) + require.True(t, m.initDone) + require.True(t, m2.initDone) + require.Equal(t, m.scanIntervalSlow, uint32(30)) + require.Equal(t, m2.scanIntervalSlow, uint32(0)) } func TestMysqlGetDSNTag(t *testing.T) { From 0469ee5bc756a79e2878c586b5501fcef95aff10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20Dupuy?= Date: Tue, 2 Nov 2021 15:50:38 +0100 Subject: [PATCH 733/761] docs: add elastic pool in supported versions in sqlserver (#10044) (cherry picked from commit a2cf4fb98f0ecb3cf3da11540cd66e35aef7375b) --- plugins/inputs/sqlserver/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 10f6064581dfb..721906250d699 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -9,6 +9,7 @@ lightweight and use Dynamic Management Views supplied by SQL Server. need to be addressed by the community. - Azure SQL Database (Single) - Azure SQL Managed Instance +- Azure SQL Elastic Pool ### Additional Setup: From 254370226485454bf1c9cd9a8c082a780c47e539 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 2 Nov 2021 15:03:24 -0500 Subject: [PATCH 734/761] chore: add Super Linter Github Action (#10014) (cherry picked from commit 79dadd3da88e705ccc62ecee1414177360a0e06c) --- .github/workflows/golangci-lint.yml | 35 ----------------- .github/workflows/linter.yml | 59 +++++++++++++++++++++++++++++ .markdownlint.yml | 3 ++ Makefile | 12 +++++- README.md | 18 +++++---- docs/developers/REVIEWS.md | 17 ++++++++- 6 files changed, 99 insertions(+), 45 deletions(-) delete mode 100644 .github/workflows/golangci-lint.yml create mode 100644 .github/workflows/linter.yml create mode 100644 .markdownlint.yml diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml deleted file mode 100644 index d4eac0d328059..0000000000000 --- a/.github/workflows/golangci-lint.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: golangci-lint -on: - push: - branches: - - master - pull_request: - branches: - - master - schedule: - # Trigger every day at 16:00 UTC - - cron: '0 16 * * *' -jobs: - golangci-pr: - if: github.ref != 'refs/heads/master' - name: lint-pr-changes - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: golangci-lint - uses: golangci/golangci-lint-action@v2 - with: - version: v1.42.1 - only-new-issues: true - golangci-master: - if: github.ref == 'refs/heads/master' - name: lint-master-all - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: golangci-lint - uses: golangci/golangci-lint-action@v2 - with: - version: v1.42.1 - only-new-issues: true - args: --issues-exit-code=0 diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml new file mode 100644 index 0000000000000..21cdd54d7176e --- /dev/null +++ b/.github/workflows/linter.yml @@ -0,0 +1,59 @@ +--- +################################# +################################# +## Super Linter GitHub Actions ## +################################# +################################# +name: Lint Code Base + +# +# Documentation: +# https://help.github.com/en/articles/workflow-syntax-for-github-actions +# + +############################# +# Start the job on all push # +############################# +on: + push: + branches-ignore: [master, main] + # Remove the line above to run when pushing to master + pull_request: + branches: [master, main] + +############### +# Set the Job # +############### +jobs: + build: + # Name the Job + name: Lint Code Base + # Set the agent to run on + runs-on: ubuntu-latest + + ################## + # Load all steps # + ################## + steps: + ########################## + # Checkout the code base # + ########################## + - name: Checkout Code + uses: actions/checkout@v2 + with: + # Full git history is needed to get a proper list of changed files within `super-linter` + fetch-depth: 0 + + ################################ + # Run Linter against code base # + ################################ + - name: Lint Code Base + uses: github/super-linter@v4 + env: + VALIDATE_ALL_CODEBASE: false + DEFAULT_BRANCH: master + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + LINTER_RULES_PATH: '.' + MARKDOWN_CONFIG_FILE: .markdownlint.yml + VALIDATE_MARKDOWN: true + VALIDATE_GO: true diff --git a/.markdownlint.yml b/.markdownlint.yml new file mode 100644 index 0000000000000..1344b312f825e --- /dev/null +++ b/.markdownlint.yml @@ -0,0 +1,3 @@ +{ + "MD013": false +} diff --git a/Makefile b/Makefile index 09a6babaee73f..7b91fa1edcfec 100644 --- a/Makefile +++ b/Makefile @@ -140,9 +140,12 @@ vet: .PHONY: lint-install lint-install: - + @echo "Installing golangci-lint" go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.42.1 + @echo "Installing markdownlint" + npm install -g markdownlint-cli + .PHONY: lint lint: ifeq (, $(shell which golangci-lint)) @@ -152,6 +155,13 @@ endif golangci-lint run +ifeq (, $(shell which markdownlint-cli)) + $(info markdownlint-cli can't be found, please run: make lint-install) + exit 1 +endif + + markdownlint-cli + .PHONY: lint-branch lint-branch: ifeq (, $(shell which golangci-lint)) diff --git a/README.md b/README.md index 03d7428c12591..94762148d04ea 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,7 @@ [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) [![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack) +[![GitHub Super-Linter](https://github.com/influxdata/telegraf/workflows/Lint%20Code%20Base/badge.svg)](https://github.com/marketplace/actions/super-linter) Telegraf is an agent for collecting, processing, aggregating, and writing metrics. Based on a plugin system to enable developers in the community to easily add support for additional @@ -74,11 +75,14 @@ Telegraf requires Go version 1.17 or newer, the Makefile requires GNU make. 1. [Install Go](https://golang.org/doc/install) >=1.17 (1.17.2 recommended) 2. Clone the Telegraf repository: - ``` + + ```shell git clone https://github.com/influxdata/telegraf.git ``` + 3. Run `make` from the source directory - ``` + + ```shell cd telegraf make ``` @@ -106,31 +110,31 @@ See usage with: telegraf --help ``` -#### Generate a telegraf config file: +### Generate a telegraf config file ```shell telegraf config > telegraf.conf ``` -#### Generate config with only cpu input & influxdb output plugins defined: +### Generate config with only cpu input & influxdb output plugins defined ```shell telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config ``` -#### Run a single telegraf collection, outputting metrics to stdout: +### Run a single telegraf collection, outputting metrics to stdout ```shell telegraf --config telegraf.conf --test ``` -#### Run telegraf with all plugins defined in config file: +### Run telegraf with all plugins defined in config file ```shell telegraf --config telegraf.conf ``` -#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins: +### Run telegraf, enabling the cpu & memory input, and influxdb output plugins ```shell telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb diff --git a/docs/developers/REVIEWS.md b/docs/developers/REVIEWS.md index 0f036d225b7ba..49107c03f9da9 100644 --- a/docs/developers/REVIEWS.md +++ b/docs/developers/REVIEWS.md @@ -9,7 +9,9 @@ All pull requests should follow the style and best practices in the document. ## Process + The review process is roughly structured as follows: + 1. Submit a pull request. Please check that you signed the [CLA](https://www.influxdata.com/legal/cla/) (and [Corporate CLA](https://www.influxdata.com/legal/ccla/) if you are contributing code on as an employee of your company). Provide a short description of your submission and reference issues that you potentially close. Make sure the CI tests are all green and there are no linter-issues. 1. Get feedback from a first reviewer and a `ready for final review` tag. @@ -21,6 +23,7 @@ It might take some time until your PR gets merged, depending on the release cycl your pull-request (bugfix, enhancement of existing code, new plugin, etc). Remember, it might be necessary to rebase your code before merge to resolve conflicts. Please read the review comments carefully, fix the related part of the code and/or respond in case there is anything unclear. If there is no activity in a pull-request or the contributor does not respond, we apply the following scheme: + 1. We send a first reminder after at least 2 weeks of inactivity. 1. After at least another two weeks of inactivity we send a second reminder and are setting the `waiting for response` tag. 1. Another two weeks later we will ask the community for help setting the `help wanted` reminder. @@ -34,10 +37,13 @@ So in case you expect a longer period of inactivity or you want to abandon a pul - SampleConfig must match the readme, but not include the plugin name. - structs should include toml tags for fields that are expected to be editable from the config. eg `toml:"command"` (snake_case) - plugins that want to log should declare the Telegraf logger, not use the log package. eg: + ```Go Log telegraf.Logger `toml:"-"` ``` + (in tests, you can do `myPlugin.Log = testutil.Logger{}`) + - Initialization and config checking should be done on the `Init() error` function, not in the Connect, Gather, or Start functions. - `Init() error` should not contain connections to external services. If anything fails in Init, Telegraf will consider it a configuration error and refuse to start. - plugins should avoid synchronization code if they are not starting goroutines. Plugin functions are never called in parallel. @@ -67,6 +73,9 @@ So in case you expect a longer period of inactivity or you want to abandon a pul - changing the default value of a field can be okay, but will affect users who have not specified the field and should be approached cautiously. - The general rule here is "don't surprise me": users should not be caught off-guard by unexpected or breaking changes. +## Linting + +Each pull request will have the appriopriate linters checking the files for any common mistakes. The github action Super Linter is used: [super-pinter](https://github.com/github/super-linter). If it is failing you can click on the action and read the logs to figure out the issue. You can also run the github action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the super linter readme. ## Testing @@ -82,6 +91,7 @@ used for assertions within the tests when possible, with preference towards github.com/stretchr/testify/require. Primarily use the require package to avoid cascading errors: + ```go assert.Equal(t, lhs, rhs) # avoid require.Equal(t, lhs, rhs) # good @@ -96,6 +106,7 @@ Ensure the [[SampleConfig]] and match with the current standards. READMEs should: + - be spaces, not tabs - be indented consistently, matching other READMEs - have two `#` for comments @@ -121,7 +132,8 @@ Metrics use `snake_case` naming style. Generally enumeration data should be encoded as a tag. In some cases it may be desirable to also include the data as an integer field: -``` + +```shell net_response,result=success result_code=0i ``` @@ -129,7 +141,8 @@ net_response,result=success result_code=0i Use tags for each range with the `le` tag, and `+Inf` for the values out of range. This format is inspired by the Prometheus project: -``` + +```shell cpu,le=0.0 usage_idle_bucket=0i 1486998330000000000 cpu,le=50.0 usage_idle_bucket=2i 1486998330000000000 cpu,le=100.0 usage_idle_bucket=2i 1486998330000000000 From 51e68ba11f91dc47b1f4c49d7fd0e571c18d6fce Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 3 Nov 2021 12:41:07 -0600 Subject: [PATCH 735/761] fix: update readme.md to point at latest docs URL (cherry picked from commit 5b1c9f3c4c7d83ca3df19a319ed110fbb3938b6d) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 94762148d04ea..d1f5908a9021d 100644 --- a/README.md +++ b/README.md @@ -142,7 +142,7 @@ telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb ## Documentation -[Latest Release Documentation](https://docs.influxdata.com/telegraf) +[Latest Release Documentation](https://docs.influxdata.com/telegraf/latest/) For documentation on the latest development code see the [documentation index](/docs). From 55694ae1398ab6af41dd8232a6fd9b601b9a7ab1 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 3 Nov 2021 15:57:44 -0500 Subject: [PATCH 736/761] chore: only check new issues with Go linter (#10054) (cherry picked from commit 00325f20c00e828608043f505687b829139cfcca) --- .github/workflows/golangci-lint.yml | 35 +++++++++++++++++++++++++++++ .github/workflows/linter.yml | 1 - README.md | 1 - 3 files changed, 35 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/golangci-lint.yml diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000000000..d4eac0d328059 --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,35 @@ +name: golangci-lint +on: + push: + branches: + - master + pull_request: + branches: + - master + schedule: + # Trigger every day at 16:00 UTC + - cron: '0 16 * * *' +jobs: + golangci-pr: + if: github.ref != 'refs/heads/master' + name: lint-pr-changes + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: golangci-lint + uses: golangci/golangci-lint-action@v2 + with: + version: v1.42.1 + only-new-issues: true + golangci-master: + if: github.ref == 'refs/heads/master' + name: lint-master-all + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: golangci-lint + uses: golangci/golangci-lint-action@v2 + with: + version: v1.42.1 + only-new-issues: true + args: --issues-exit-code=0 diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 21cdd54d7176e..8ba9ae2944823 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -56,4 +56,3 @@ jobs: LINTER_RULES_PATH: '.' MARKDOWN_CONFIG_FILE: .markdownlint.yml VALIDATE_MARKDOWN: true - VALIDATE_GO: true diff --git a/README.md b/README.md index d1f5908a9021d..122b20839db6b 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,6 @@ [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) [![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack) -[![GitHub Super-Linter](https://github.com/influxdata/telegraf/workflows/Lint%20Code%20Base/badge.svg)](https://github.com/marketplace/actions/super-linter) Telegraf is an agent for collecting, processing, aggregating, and writing metrics. Based on a plugin system to enable developers in the community to easily add support for additional From e562a3e5185a1c38d5ab2f59b86a6eea652106f0 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 3 Nov 2021 16:11:47 -0600 Subject: [PATCH 737/761] fix: remove telegraflinter from in-tree (#10053) (cherry picked from commit b4cafff535ced6d999e5e851e5fa6a94f0b122f4) --- .golangci.yml | 4 --- go.mod | 2 +- telegraflinter/README.md | 31 ------------------ telegraflinter/telegraflinter.go | 54 -------------------------------- 4 files changed, 1 insertion(+), 90 deletions(-) delete mode 100644 telegraflinter/README.md delete mode 100644 telegraflinter/telegraflinter.go diff --git a/.golangci.yml b/.golangci.yml index 470fc116bfb37..a4d14ddd80362 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -21,10 +21,6 @@ linters: - varcheck linters-settings: - # custom: - # telegraflinter: - # path: telegraflinter.so - # description: "Find Telegraf specific review criteria, more info: https://github.com/influxdata/telegraf/wiki/Review" revive: rules: - name: argument-limit diff --git a/go.mod b/go.mod index a3041e576119d..171ee3cbbf2fb 100644 --- a/go.mod +++ b/go.mod @@ -291,7 +291,7 @@ require ( golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect golang.org/x/text v0.3.7 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect - golang.org/x/tools v0.1.5 + golang.org/x/tools v0.1.5 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect golang.zx2c4.com/wireguard v0.0.20200121 // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 diff --git a/telegraflinter/README.md b/telegraflinter/README.md deleted file mode 100644 index b049cf6446bc6..0000000000000 --- a/telegraflinter/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Private linter for Telegraf - -The purpose of this linter is to enforce the review criteria for the Telegraf project, outlined here: https://github.com/influxdata/telegraf/wiki/Review. This is currently not compatible with the linter running in the CI and can only be ran locally. - -## Running it locally - -To use the Telegraf linter, you need a binary of golangci-lint that was compiled with CGO enabled. Currently no release is provided with it enabled, therefore you will need to clone the source code and compile it yourself. You can run the following commands to achieve this: - -1. `git clone https://github.com/sspaink/golangci-lint.git` -2. `cd golangci-lint` -3. `git checkout tags/v1.39.0 -b 1390` -4. `CGO_ENABLED=true go build -o golangci-lint-cgo ./cmd/golangci-lint` - -You will now have the binary you need to run the Telegraf linter. The Telegraf linter will now need to be compiled as a plugin to get a *.so file. [Currently plugins are only supported on Linux, FreeBSD, and macOS](https://golang.org/pkg/plugin/). From the root of the Telegraf project, you can run the following commands to compile the linter and run it: - -1. `CGO_ENABLED=true go build -buildmode=plugin telegraflinter/telegraflinter.go` -2. In the .golanci-lint file: - * uncomment the `custom` section under the `linters-settings` section - * uncomment `telegraflinter` under the `enable` section -3. `golanci-lint-cgo run` - -*Note:* If you made a change to the telegraf linter and want to run it again, be sure to clear the [cache directory](https://golang.org/pkg/os/#UserCacheDir). On unix systems you can run `rm -rf ~/.cache/golangci-lint` otherwise it will seem like nothing changed. - -## Requirement - -This linter lives in the Telegraf repository and is compiled to become a Go plugin, any packages used in the linter *MUST* match the version in the golanci-lint otherwise there will be issues. For example the import `golang.org/x/tools v0.1.0` needs to match what golangci-lint is using. - -## Useful references - -* https://golangci-lint.run/contributing/new-linters/#how-to-add-a-private-linter-to-golangci-lint -* https://github.com/golangci/example-plugin-linter diff --git a/telegraflinter/telegraflinter.go b/telegraflinter/telegraflinter.go deleted file mode 100644 index b295327f8eed5..0000000000000 --- a/telegraflinter/telegraflinter.go +++ /dev/null @@ -1,54 +0,0 @@ -// This must be package main -package main - -import ( - "go/ast" - "strings" - - "golang.org/x/tools/go/analysis" -) - -type analyzerPlugin struct{} - -// This must be implemented -func (*analyzerPlugin) GetAnalyzers() []*analysis.Analyzer { - return []*analysis.Analyzer{ - TelegrafAnalyzer, - } -} - -// This must be defined and named 'AnalyzerPlugin' -var AnalyzerPlugin analyzerPlugin - -var TelegrafAnalyzer = &analysis.Analyzer{ - Name: "telegraflinter", - Doc: "Find Telegraf specific review criteria, more info: https://github.com/influxdata/telegraf/wiki/Review", - Run: run, -} - -func run(pass *analysis.Pass) (interface{}, error) { - for _, file := range pass.Files { - ast.Inspect(file, func(n ast.Node) bool { - checkLogImport(n, pass) - return true - }) - } - return nil, nil -} - -func checkLogImport(n ast.Node, pass *analysis.Pass) { - if !strings.HasPrefix(pass.Pkg.Path(), "github.com/influxdata/telegraf/plugins/") { - return - } - if importSpec, ok := n.(*ast.ImportSpec); ok { - if importSpec.Path != nil && strings.HasPrefix(importSpec.Path.Value, "\"log\"") { - pass.Report(analysis.Diagnostic{ - Pos: importSpec.Pos(), - End: 0, - Category: "log", - Message: "Don't use log package in plugin, use the Telegraf logger.", - SuggestedFixes: nil, - }) - } - } -} From d806d60a3e3b531b5a78836a8c2c69dd008a24e2 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 4 Nov 2021 11:40:11 -0500 Subject: [PATCH 738/761] chore: don't trigger share-artifacts if no go files changed (#10060) (cherry picked from commit 60400662ea9aeafe0122e92226d025e897c2cbd1) --- .circleci/config.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index e5d535bf41115..3fa611f8b26fb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -399,6 +399,8 @@ jobs: share-artifacts: executor: aws-cli/default steps: + - checkout + - check-changed-files-or-halt - run: command: | PR=${CIRCLE_PULL_REQUEST##*/} From f046b71d05e68b9253e5f5decd9a1ea9ee478cfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Fri, 5 Nov 2021 14:55:16 +0100 Subject: [PATCH 739/761] fix: Markdown linter fixes for LICENSE_OF_DEPENDENCIES.md (#10065) (cherry picked from commit 5ac9f418caa8ba83f162ecf9852eb7e23cc3d083) --- docs/LICENSE_OF_DEPENDENCIES.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index ba1ee5147d99e..c2f542cd77cbd 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -284,5 +284,7 @@ following works: - modernc.org/sqlite [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/sqlite/-/blob/master/LICENSE) - sigs.k8s.io/structured-merge-diff [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - sigs.k8s.io/yaml [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) -## telegraf used and modified code from these projects + +## Telegraf used and modified code from these projects + - github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) From 7579dd0036250abf3473a75834f34e1ad2efc011 Mon Sep 17 00:00:00 2001 From: Jacob Marble Date: Mon, 8 Nov 2021 13:41:50 -0800 Subject: [PATCH 740/761] chore: update OpenTelemetry plugins (#10010) (cherry picked from commit 9871b676a535938d2782f46b577a32cdee748a49) --- go.mod | 28 ++++----- go.sum | 57 ++++++++++--------- plugins/inputs/opentelemetry/grpc_services.go | 19 ++++--- .../opentelemetry/opentelemetry_test.go | 4 +- .../outputs/opentelemetry/opentelemetry.go | 9 +-- .../opentelemetry/opentelemetry_test.go | 4 +- 6 files changed, 65 insertions(+), 56 deletions(-) diff --git a/go.mod b/go.mod index 171ee3cbbf2fb..093975105cb09 100644 --- a/go.mod +++ b/go.mod @@ -147,9 +147,9 @@ require ( github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/serf v0.9.5 // indirect github.com/influxdata/go-syslog/v3 v3.0.0 - github.com/influxdata/influxdb-observability/common v0.2.7 - github.com/influxdata/influxdb-observability/influx2otel v0.2.7 - github.com/influxdata/influxdb-observability/otel2influx v0.2.7 + github.com/influxdata/influxdb-observability/common v0.2.8 + github.com/influxdata/influxdb-observability/influx2otel v0.2.8 + github.com/influxdata/influxdb-observability/otel2influx v0.2.8 github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 @@ -275,10 +275,10 @@ require ( go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.mongodb.org/mongo-driver v1.5.3 go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.35.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 - go.opentelemetry.io/otel/metric v0.23.0 - go.opentelemetry.io/otel/sdk/metric v0.23.0 + go.opentelemetry.io/collector/model v0.37.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0 + go.opentelemetry.io/otel/metric v0.24.0 + go.opentelemetry.io/otel/sdk/metric v0.24.0 go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.6.0 // indirect @@ -298,7 +298,7 @@ require ( google.golang.org/api v0.54.0 google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 - google.golang.org/grpc v1.40.0 + google.golang.org/grpc v1.41.0 google.golang.org/protobuf v1.27.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/djherbis/times.v1 v1.2.0 @@ -346,12 +346,12 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/pierrec/lz4/v4 v4.1.8 // indirect github.com/rogpeppe/go-internal v1.6.2 // indirect - go.opentelemetry.io/otel v1.0.0-RC3 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 // indirect - go.opentelemetry.io/otel/internal/metric v0.23.0 // indirect - go.opentelemetry.io/otel/sdk v1.0.0-RC3 // indirect - go.opentelemetry.io/otel/sdk/export/metric v0.23.0 // indirect - go.opentelemetry.io/otel/trace v1.0.0-RC3 // indirect + go.opentelemetry.io/otel v1.0.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 // indirect + go.opentelemetry.io/otel/internal/metric v0.24.0 // indirect + go.opentelemetry.io/otel/sdk v1.0.1 // indirect + go.opentelemetry.io/otel/sdk/export/metric v0.24.0 // indirect + go.opentelemetry.io/otel/trace v1.0.1 // indirect go.opentelemetry.io/proto/otlp v0.9.0 // indirect ) diff --git a/go.sum b/go.sum index bfc341b558dad..d0c02c632326a 100644 --- a/go.sum +++ b/go.sum @@ -480,6 +480,7 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -714,6 +715,7 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/esimonov/ifshort v1.0.1/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -1218,12 +1220,12 @@ github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7m github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I= github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q= github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= -github.com/influxdata/influxdb-observability/common v0.2.7 h1:C+oDh8Kbw+Ykx9yog/uJXL27rwMN3hgTLQfAFg1eQO0= -github.com/influxdata/influxdb-observability/common v0.2.7/go.mod h1:+8VMGrfWZnXjc1c/oP+N4O/sHoneWgN3ojAHwgYgV4A= -github.com/influxdata/influxdb-observability/influx2otel v0.2.7 h1:YIXH+qNQgAtTA5U3s/wxDxxh5Vz+ylhZhyuRxtfTBqs= -github.com/influxdata/influxdb-observability/influx2otel v0.2.7/go.mod h1:ASyDMoPChvIgbEOvghwc5NxngOgXThp9MFKs7efNLtQ= -github.com/influxdata/influxdb-observability/otel2influx v0.2.7 h1:FACov3tcGCKfEGXsyUbgUOQx3zXffXaCFbN3ntAzh1E= -github.com/influxdata/influxdb-observability/otel2influx v0.2.7/go.mod h1:tE3OSy4RyAHIjxYlFZBsWorEM3aqaUeqSx3mbacm8KI= +github.com/influxdata/influxdb-observability/common v0.2.8 h1:QDvX7rNQkt1mHr2v8sw/OEupa32CxZHlO5f/tsyPCLw= +github.com/influxdata/influxdb-observability/common v0.2.8/go.mod h1:N2wfkPgJvi9CPK6MbNFkD70naEUxAMGCqFyxZXCJQDs= +github.com/influxdata/influxdb-observability/influx2otel v0.2.8 h1:XlVo4WLIFByOADn+88hPmR2SGJkdLppyIbw1BG2obp8= +github.com/influxdata/influxdb-observability/influx2otel v0.2.8/go.mod h1:t9LeYL1mBiVRZBt5TfIj+4MBkJ/1POBxUlKSxEA+uj8= +github.com/influxdata/influxdb-observability/otel2influx v0.2.8 h1:vTamg9mKUXHaXPtydrR1ejpqj/OKAGc56MiedXjlsnA= +github.com/influxdata/influxdb-observability/otel2influx v0.2.8/go.mod h1:xKTR9GLOtkSekysDKhAFNrPYpeiFV31Sy6zDqF54axA= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= @@ -2140,27 +2142,27 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/collector v0.28.0 h1:XmRwoSj3HZtC7O/12fBoQ9DInvwBwFHgHLZrwNxNjQY= go.opentelemetry.io/collector v0.28.0/go.mod h1:AP/BTXwo1eedoJO7V+HQ68CSvJU1lcdqOzJCgt1VsNs= -go.opentelemetry.io/collector/model v0.35.0 h1:NpKjghiqlei4ecwjOYOMhD6tj4gY8yiWHPJmbFs/ArI= -go.opentelemetry.io/collector/model v0.35.0/go.mod h1:+7YCSjJG+MqiIFjauzt7oM2qkqBsaJWh5hcsO4fwsAc= +go.opentelemetry.io/collector/model v0.37.0 h1:K1G6bgzBZ5kKSjZ1+EY9MhCOYsac4Q1K85fBUgpTVH8= +go.opentelemetry.io/collector/model v0.37.0/go.mod h1:ESh1oWDNdS4fTg9sTFoYuiuvs8QuaX8yNGTPix3JZc8= go.opentelemetry.io/otel v0.7.0/go.mod h1:aZMyHG5TqDOXEgH2tyLiXSUKly1jT3yqE9PmrzIeCdo= -go.opentelemetry.io/otel v1.0.0-RC3 h1:kvwiyEkiUT/JaadXzVLI/R1wDO934A7r3Bs2wEe6wqA= -go.opentelemetry.io/otel v1.0.0-RC3/go.mod h1:Ka5j3ua8tZs4Rkq4Ex3hwgBgOchyPVq5S6P2lz//nKQ= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 h1:vKIEsT6IJU0NYd+iZccjgCmk80zsa7dTiC2Bu7U1jz0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0/go.mod h1:pe9oOWRaZyapdajWCn64fnl76v3cmTEmNBgh7MkKvwE= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 h1:JSsJID+KU3G8wxynfHIlWaefOvYngDjnrmtHOGb1sb0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0/go.mod h1:aSP5oMNaAfOYq+sRydHANZ0vBYLyZR/3lR9pru9aPLk= -go.opentelemetry.io/otel/internal/metric v0.23.0 h1:mPfzm9Iqhw7G2nDBmUAjFTfPqLZPbOW2k7QI57ITbaI= -go.opentelemetry.io/otel/internal/metric v0.23.0/go.mod h1:z+RPiDJe30YnCrOhFGivwBS+DU1JU/PiLKkk4re2DNY= -go.opentelemetry.io/otel/metric v0.23.0 h1:mYCcDxi60P4T27/0jchIDFa1WHEfQeU3zH9UEMpnj2c= -go.opentelemetry.io/otel/metric v0.23.0/go.mod h1:G/Nn9InyNnIv7J6YVkQfpc0JCfKBNJaERBGw08nqmVQ= -go.opentelemetry.io/otel/sdk v1.0.0-RC3 h1:iRMkET+EmJUn5mW0hJzygBraXRmrUwzbOtNvTCh/oKs= -go.opentelemetry.io/otel/sdk v1.0.0-RC3/go.mod h1:78H6hyg2fka0NYT9fqGuFLvly2yCxiBXDJAgLKo/2Us= -go.opentelemetry.io/otel/sdk/export/metric v0.23.0 h1:7NeoKPPx6NdZBVHLEp/LY5Lq85Ff1WNZnuJkuRy+azw= -go.opentelemetry.io/otel/sdk/export/metric v0.23.0/go.mod h1:SuMiREmKVRIwFKq73zvGTvwFpxb/ZAYkMfyqMoOtDqs= -go.opentelemetry.io/otel/sdk/metric v0.23.0 h1:xlZhPbiue1+jjSFEth94q9QCmX8Q24mOtue9IAmlVyI= -go.opentelemetry.io/otel/sdk/metric v0.23.0/go.mod h1:wa0sKK13eeIFW+0OFjcC3S1i7FTRRiLAXe1kjBVbhwg= -go.opentelemetry.io/otel/trace v1.0.0-RC3 h1:9F0ayEvlxv8BmNmPbU005WK7hC+7KbOazCPZjNa1yME= -go.opentelemetry.io/otel/trace v1.0.0-RC3/go.mod h1:VUt2TUYd8S2/ZRX09ZDFZQwn2RqfMB5MzO17jBojGxo= +go.opentelemetry.io/otel v1.0.1 h1:4XKyXmfqJLOQ7feyV5DB6gsBFZ0ltB8vLtp6pj4JIcc= +go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 h1:NN6n2agAkT6j2o+1RPTFANclOnZ/3Z1ruRGL06NYACk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0/go.mod h1:kgWmavsno59/h5l9A9KXhvqrYxBhiQvJHPNhJkMP46s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0 h1:QyIh7cAMItlzm8xQn9c6QxNEMUbYgXPx19irR/pmgdI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0/go.mod h1:BpCT1zDnUgcUc3VqFVkxH/nkx6cM8XlCPsQsxaOzUNM= +go.opentelemetry.io/otel/internal/metric v0.24.0 h1:O5lFy6kAl0LMWBjzy3k//M8VjEaTDWL9DPJuqZmWIAA= +go.opentelemetry.io/otel/internal/metric v0.24.0/go.mod h1:PSkQG+KuApZjBpC6ea6082ZrWUUy/w132tJ/LOU3TXk= +go.opentelemetry.io/otel/metric v0.24.0 h1:Rg4UYHS6JKR1Sw1TxnI13z7q/0p/XAbgIqUTagvLJuU= +go.opentelemetry.io/otel/metric v0.24.0/go.mod h1:tpMFnCD9t+BEGiWY2bWF5+AwjuAdM0lSowQ4SBA3/K4= +go.opentelemetry.io/otel/sdk v1.0.1 h1:wXxFEWGo7XfXupPwVJvTBOaPBC9FEg0wB8hMNrKk+cA= +go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= +go.opentelemetry.io/otel/sdk/export/metric v0.24.0 h1:innKi8LQebwPI+WEuEKEWMjhWC5mXQG1/WpSm5mffSY= +go.opentelemetry.io/otel/sdk/export/metric v0.24.0/go.mod h1:chmxXGVNcpCih5XyniVkL4VUyaEroUbOdvjVlQ8M29Y= +go.opentelemetry.io/otel/sdk/metric v0.24.0 h1:LLHrZikGdEHoHihwIPvfFRJX+T+NdrU2zgEqf7tQ7Oo= +go.opentelemetry.io/otel/sdk/metric v0.24.0/go.mod h1:KDgJgYzsIowuIDbPM9sLDZY9JJ6gqIDWCx92iWV8ejk= +go.opentelemetry.io/otel/trace v1.0.1 h1:StTeIH6Q3G4r0Fiw34LTokUFESZgIDUr0qIJ7mKmAfw= +go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= @@ -2837,8 +2839,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/plugins/inputs/opentelemetry/grpc_services.go b/plugins/inputs/opentelemetry/grpc_services.go index 1c805e2a23ff2..437c723db3e28 100644 --- a/plugins/inputs/opentelemetry/grpc_services.go +++ b/plugins/inputs/opentelemetry/grpc_services.go @@ -7,7 +7,6 @@ import ( "github.com/influxdata/influxdb-observability/common" "github.com/influxdata/influxdb-observability/otel2influx" "go.opentelemetry.io/collector/model/otlpgrpc" - "go.opentelemetry.io/collector/model/pdata" ) type traceService struct { @@ -15,6 +14,8 @@ type traceService struct { writer *writeToAccumulator } +var _ otlpgrpc.TracesServer = (*traceService)(nil) + func newTraceService(logger common.Logger, writer *writeToAccumulator) *traceService { converter := otel2influx.NewOtelTracesToLineProtocol(logger) return &traceService{ @@ -23,8 +24,8 @@ func newTraceService(logger common.Logger, writer *writeToAccumulator) *traceSer } } -func (s *traceService) Export(ctx context.Context, req pdata.Traces) (otlpgrpc.TracesResponse, error) { - err := s.converter.WriteTraces(ctx, req, s.writer) +func (s *traceService) Export(ctx context.Context, req otlpgrpc.TracesRequest) (otlpgrpc.TracesResponse, error) { + err := s.converter.WriteTraces(ctx, req.Traces(), s.writer) return otlpgrpc.NewTracesResponse(), err } @@ -33,6 +34,8 @@ type metricsService struct { writer *writeToAccumulator } +var _ otlpgrpc.MetricsServer = (*metricsService)(nil) + var metricsSchemata = map[string]common.MetricsSchema{ "prometheus-v1": common.MetricsSchemaTelegrafPrometheusV1, "prometheus-v2": common.MetricsSchemaTelegrafPrometheusV2, @@ -54,8 +57,8 @@ func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema }, nil } -func (s *metricsService) Export(ctx context.Context, req pdata.Metrics) (otlpgrpc.MetricsResponse, error) { - err := s.converter.WriteMetrics(ctx, req, s.writer) +func (s *metricsService) Export(ctx context.Context, req otlpgrpc.MetricsRequest) (otlpgrpc.MetricsResponse, error) { + err := s.converter.WriteMetrics(ctx, req.Metrics(), s.writer) return otlpgrpc.NewMetricsResponse(), err } @@ -64,6 +67,8 @@ type logsService struct { writer *writeToAccumulator } +var _ otlpgrpc.LogsServer = (*logsService)(nil) + func newLogsService(logger common.Logger, writer *writeToAccumulator) *logsService { converter := otel2influx.NewOtelLogsToLineProtocol(logger) return &logsService{ @@ -72,7 +77,7 @@ func newLogsService(logger common.Logger, writer *writeToAccumulator) *logsServi } } -func (s *logsService) Export(ctx context.Context, req pdata.Logs) (otlpgrpc.LogsResponse, error) { - err := s.converter.WriteLogs(ctx, req, s.writer) +func (s *logsService) Export(ctx context.Context, req otlpgrpc.LogsRequest) (otlpgrpc.LogsResponse, error) { + err := s.converter.WriteLogs(ctx, req.Logs(), s.writer) return otlpgrpc.NewLogsResponse(), err } diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go index 8df1273bef8c4..4704d779dfd49 100644 --- a/plugins/inputs/opentelemetry/opentelemetry_test.go +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -42,7 +42,7 @@ func TestOpenTelemetry(t *testing.T) { t.Cleanup(func() { _ = metricExporter.Shutdown(context.Background()) }) pusher := controller.New( - processor.New( + processor.NewFactory( simple.NewWithExactDistribution(), metricExporter, ), @@ -53,7 +53,7 @@ func TestOpenTelemetry(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { _ = pusher.Stop(context.Background()) }) - global.SetMeterProvider(pusher.MeterProvider()) + global.SetMeterProvider(pusher) // write metrics meter := global.Meter("library-name") diff --git a/plugins/outputs/opentelemetry/opentelemetry.go b/plugins/outputs/opentelemetry/opentelemetry.go index e1bbc9322e759..7cfe1341b3ff4 100644 --- a/plugins/outputs/opentelemetry/opentelemetry.go +++ b/plugins/outputs/opentelemetry/opentelemetry.go @@ -157,15 +157,16 @@ func (o *OpenTelemetry) Write(metrics []telegraf.Metric) error { } } - md := batch.GetMetrics() - if md.ResourceMetrics().Len() == 0 { + md := otlpgrpc.NewMetricsRequest() + md.SetMetrics(batch.GetMetrics()) + if md.Metrics().ResourceMetrics().Len() == 0 { return nil } if len(o.Attributes) > 0 { - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := 0; i < md.Metrics().ResourceMetrics().Len(); i++ { for k, v := range o.Attributes { - md.ResourceMetrics().At(i).Resource().Attributes().UpsertString(k, v) + md.Metrics().ResourceMetrics().At(i).Resource().Attributes().UpsertString(k, v) } } } diff --git a/plugins/outputs/opentelemetry/opentelemetry_test.go b/plugins/outputs/opentelemetry/opentelemetry_test.go index 6ebf1829bd540..c2f9f1980410d 100644 --- a/plugins/outputs/opentelemetry/opentelemetry_test.go +++ b/plugins/outputs/opentelemetry/opentelemetry_test.go @@ -133,8 +133,8 @@ func (m *mockOtelService) Address() string { return m.listener.Addr().String() } -func (m *mockOtelService) Export(ctx context.Context, request pdata.Metrics) (otlpgrpc.MetricsResponse, error) { - m.metrics = request.Clone() +func (m *mockOtelService) Export(ctx context.Context, request otlpgrpc.MetricsRequest) (otlpgrpc.MetricsResponse, error) { + m.metrics = request.Metrics().Clone() ctxMetadata, ok := metadata.FromIncomingContext(ctx) assert.Equal(m.t, []string{"header1"}, ctxMetadata.Get("test")) assert.True(m.t, ok) From e8678fb4d8de95b5feca7f9f0e6c2ccdb63ea37c Mon Sep 17 00:00:00 2001 From: Fan Zhang <385741668@qq.com> Date: Tue, 9 Nov 2021 05:42:55 +0800 Subject: [PATCH 741/761] fix: Set the default value correctly (#9980) (cherry picked from commit 0133f1206b4128dc9a1b9b9202793c279c263baf) --- plugins/inputs/nvidia_smi/README.md | 8 +++++++- plugins/inputs/nvidia_smi/nvidia_smi.go | 19 +++++++++++++++---- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index c889e016fc464..479634d7befb0 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -7,13 +7,19 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvid ```toml # Pulls statistics from nvidia GPUs attached to the host [[inputs.nvidia_smi]] - ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath + ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" + ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), + ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned # bin_path = "/usr/bin/nvidia-smi" ## Optional: timeout for GPU polling # timeout = "5s" ``` +#### Linux + +On Linux, `nvidia-smi` is generally located at `/usr/bin/nvidia-smi` + #### Windows On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe` diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index 3e4fb03f04221..68f25ba428611 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -31,7 +31,9 @@ func (smi *NvidiaSMI) Description() string { // SampleConfig returns the sample configuration for the NvidiaSMI plugin func (smi *NvidiaSMI) SampleConfig() string { return ` - ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath + ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" + ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), + ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned # bin_path = "/usr/bin/nvidia-smi" ## Optional: timeout for GPU polling @@ -39,12 +41,21 @@ func (smi *NvidiaSMI) SampleConfig() string { ` } -// Gather implements the telegraf interface -func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { +func (smi *NvidiaSMI) Init() error { if _, err := os.Stat(smi.BinPath); os.IsNotExist(err) { - return fmt.Errorf("nvidia-smi binary not at path %s, cannot gather GPU data", smi.BinPath) + binPath, err := exec.LookPath("nvidia-smi") + // fail-fast + if err != nil { + return fmt.Errorf("nvidia-smi not found in %q and not in PATH; please make sure nvidia-smi is installed and/or is in PATH", smi.BinPath) + } + smi.BinPath = binPath } + return nil +} + +// Gather implements the telegraf interface +func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { data, err := smi.pollSMI() if err != nil { return err From 4cbe052b90c347e4fc594af4b5d69e3cf822be91 Mon Sep 17 00:00:00 2001 From: AlphaAr Date: Mon, 8 Nov 2021 18:47:32 -0300 Subject: [PATCH 742/761] fix: Add metric name is a label with name "__name" to Loki output plugin (#10001) (cherry picked from commit e73ffe56c46dbafe1d700864ae4dae3e63b02125) --- plugins/outputs/loki/README.md | 2 +- plugins/outputs/loki/loki.go | 2 ++ plugins/outputs/loki/loki_test.go | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/loki/README.md b/plugins/outputs/loki/README.md index 681d3b207c1af..6c7eb91c8916a 100644 --- a/plugins/outputs/loki/README.md +++ b/plugins/outputs/loki/README.md @@ -1,6 +1,6 @@ # Loki Output Plugin -This plugin sends logs to Loki, using tags as labels, +This plugin sends logs to Loki, using metric name and tags as labels, log line will content all fields in `key="value"` format which is easily parsable with `logfmt` parser in Loki. Logs within each stream are sorted by timestamp before being sent to Loki. diff --git a/plugins/outputs/loki/loki.go b/plugins/outputs/loki/loki.go index 07d4d473bf396..fcf96e55f6429 100644 --- a/plugins/outputs/loki/loki.go +++ b/plugins/outputs/loki/loki.go @@ -143,6 +143,8 @@ func (l *Loki) Write(metrics []telegraf.Metric) error { }) for _, m := range metrics { + m.AddTag("__name", m.Name()) + tags := m.TagList() var line string diff --git a/plugins/outputs/loki/loki_test.go b/plugins/outputs/loki/loki_test.go index ba6d0808fabaa..6f0678e8dd4b5 100644 --- a/plugins/outputs/loki/loki_test.go +++ b/plugins/outputs/loki/loki_test.go @@ -225,7 +225,7 @@ func TestContentEncodingGzip(t *testing.T) { require.Len(t, s.Streams, 1) require.Len(t, s.Streams[0].Logs, 1) require.Len(t, s.Streams[0].Logs[0], 2) - require.Equal(t, map[string]string{"key1": "value1"}, s.Streams[0].Labels) + require.Equal(t, map[string]string{"__name": "log", "key1": "value1"}, s.Streams[0].Labels) require.Equal(t, "123000000000", s.Streams[0].Logs[0][0]) require.Contains(t, s.Streams[0].Logs[0][1], "line=\"my log\"") require.Contains(t, s.Streams[0].Logs[0][1], "field=\"3.14\"") @@ -404,7 +404,7 @@ func TestMetricSorting(t *testing.T) { require.Len(t, s.Streams, 1) require.Len(t, s.Streams[0].Logs, 2) require.Len(t, s.Streams[0].Logs[0], 2) - require.Equal(t, map[string]string{"key1": "value1"}, s.Streams[0].Labels) + require.Equal(t, map[string]string{"__name": "log", "key1": "value1"}, s.Streams[0].Labels) require.Equal(t, "456000000000", s.Streams[0].Logs[0][0]) require.Contains(t, s.Streams[0].Logs[0][1], "line=\"older log\"") require.Contains(t, s.Streams[0].Logs[0][1], "field=\"3.14\"") From 88f0560a7a65541d92d6d8cb226d5dc0f26f9f72 Mon Sep 17 00:00:00 2001 From: Dane Strandboge Date: Tue, 9 Nov 2021 08:51:14 -0600 Subject: [PATCH 743/761] build: move to new protobuf library (#10019) (cherry picked from commit ddeb6ec890528494bd041566989cd8ae9897cacf) --- go.mod | 10 +- go.sum | 9 +- .../cisco_telemetry_mdt.go | 5 +- .../cisco_telemetry_mdt_test.go | 2 +- .../cisco_telemetry_util.go | 3 +- .../auth/authentication_service.pb.go | 314 ++- .../auth/authentication_service.proto | 1 + .../auth/authentication_service_grpc.pb.go | 101 + .../inputs/jti_openconfig_telemetry/gen.go | 11 + .../jti_openconfig_telemetry/oc/oc.pb.go | 2328 +++++++++++------ .../jti_openconfig_telemetry/oc/oc.proto | 1 + .../jti_openconfig_telemetry/oc/oc_grpc.pb.go | 293 +++ .../openconfig_telemetry_test.go | 1 + .../riemann_listener/riemann_listener.go | 8 +- .../riemann_listener/riemann_listener_test.go | 5 +- plugins/inputs/stackdriver/stackdriver.go | 14 +- .../inputs/stackdriver/stackdriver_test.go | 28 +- plugins/outputs/stackdriver/stackdriver.go | 8 +- .../outputs/stackdriver/stackdriver_test.go | 18 +- .../parsers/prometheusremotewrite/parser.go | 3 +- plugins/serializers/prometheus/collection.go | 2 +- .../serializers/prometheus/collection_test.go | 2 +- .../prometheusremotewrite.go | 4 +- .../prometheusremotewrite_test.go | 3 +- 24 files changed, 2158 insertions(+), 1016 deletions(-) create mode 100644 plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go create mode 100644 plugins/inputs/jti_openconfig_telemetry/gen.go create mode 100644 plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go diff --git a/go.mod b/go.mod index 093975105cb09..dc7b6cb334c1f 100644 --- a/go.mod +++ b/go.mod @@ -113,12 +113,10 @@ require ( github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v3.3.0+incompatible - github.com/gogo/protobuf v1.3.2 github.com/golang-jwt/jwt/v4 v4.1.0 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 github.com/google/flatbuffers v2.0.0+incompatible // indirect github.com/google/go-cmp v0.5.6 @@ -339,6 +337,8 @@ require ( github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 // indirect github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f // indirect github.com/cenkalti/backoff/v4 v4.1.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect @@ -366,3 +366,9 @@ replace github.com/mdlayher/apcupsd => github.com/influxdata/apcupsd v0.0.0-2021 //https://github.com/WireGuard/wgctrl-go/blob/e35592f146e40ce8057113d14aafcc3da231fbac/go.mod#L12 ) was not working when using GOPROXY=direct. //Replacing with the pseudo-version works around this. replace golang.zx2c4.com/wireguard v0.0.20200121 => golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090 + +// replaced due to open PR updating protobuf https://github.com/cisco-ie/nx-telemetry-proto/pull/1 +replace github.com/cisco-ie/nx-telemetry-proto => github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc + +// replaced due to open PR updating protobuf https://github.com/riemann/riemann-go-client/pull/27 +replace github.com/riemann/riemann-go-client => github.com/dstrand1/riemann-go-client v0.5.1-0.20211028194734-b5eb11fb5754 diff --git a/go.sum b/go.sum index d0c02c632326a..a6ab83e4a16e7 100644 --- a/go.sum +++ b/go.sum @@ -472,8 +472,6 @@ github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= -github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -683,6 +681,8 @@ github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60/go.mod h1: github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ= +github.com/dstrand1/riemann-go-client v0.5.1-0.20211028194734-b5eb11fb5754 h1:aDtw0/++yjOoiXB9sldaFYW61mK3m6ia/wYWxPLrwYY= +github.com/dstrand1/riemann-go-client v0.5.1-0.20211028194734-b5eb11fb5754/go.mod h1:4rS0vfmzOMwfFPhi6Zve4k/59TsBepqd6WESNULE0ho= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -985,7 +985,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -1828,8 +1827,6 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqn github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= -github.com/riemann/riemann-go-client v0.5.0 h1:yPP7tz1vSYJkSZvZFCsMiDsHHXX57x8/fEX3qyEXuAA= -github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2eCbV1o7yqVwOBLbShQ= github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff h1:+6NUiITWwE5q1KO6SAfUX918c+Tab0+tGAM/mtdlUyA= @@ -1865,6 +1862,8 @@ github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4 github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= github.com/sanposhiho/wastedassign v0.1.3/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= +github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc h1:9RAsqOFf0U5CuwXR/Jff3nXTv6tAQNN7U4A/2cBRXFc= +github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc/go.mod h1:rJDd05J5hqWVU9MjJ+5jw1CuLn/jRhvU0xtFEzzqjwM= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index 10f1f764c0515..25b5ec9758962 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -15,11 +15,11 @@ import ( dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Cannot switch to "google.golang.org/protobuf/proto", "github.com/golang/protobuf/proto" is used by "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "google.golang.org/grpc" "google.golang.org/grpc/credentials" _ "google.golang.org/grpc/encoding/gzip" // Register GRPC gzip decoder to support compressed telemetry "google.golang.org/grpc/peer" + "google.golang.org/protobuf/proto" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" @@ -61,6 +61,9 @@ type CiscoTelemetryMDT struct { mutex sync.Mutex acc telegraf.Accumulator wg sync.WaitGroup + + // Though unused in the code, required by protoc-gen-go-grpc to maintain compatibility + dialout.UnimplementedGRPCMdtDialoutServer } type NxPayloadXfromStructure struct { diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index 745b26dea4b20..90fc949276948 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -10,9 +10,9 @@ import ( dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetryBis "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Cannot switch to "google.golang.org/protobuf/proto", "github.com/golang/protobuf/proto" is used by "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/protobuf/proto" "github.com/influxdata/telegraf/testutil" ) diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go index 8f6ea93eab4b3..1d7d95a95a757 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go @@ -1,9 +1,10 @@ package cisco_telemetry_mdt import ( - telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "strconv" "strings" + + telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" ) //xform Field to string diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go index 7ddeefacab635..1342758887932 100644 --- a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go @@ -1,182 +1,238 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: authentication_service.proto +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: auth/authentication_service.proto -/* -Package authentication is a generated protocol buffer package. - -It is generated from these files: - authentication_service.proto - -It has these top-level messages: - LoginRequest - LoginReply -*/ package authentication -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // The request message containing the user's name, password and client id type LoginRequest struct { - UserName string `protobuf:"bytes,1,opt,name=user_name,json=userName" json:"user_name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password" json:"password,omitempty"` - ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId" json:"client_id,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UserName string `protobuf:"bytes,1,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` +} + +func (x *LoginRequest) Reset() { + *x = LoginRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_auth_authentication_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoginRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoginRequest) ProtoMessage() {} + +func (x *LoginRequest) ProtoReflect() protoreflect.Message { + mi := &file_auth_authentication_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LoginRequest) Reset() { *m = LoginRequest{} } -func (m *LoginRequest) String() string { return proto.CompactTextString(m) } -func (*LoginRequest) ProtoMessage() {} -func (*LoginRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +// Deprecated: Use LoginRequest.ProtoReflect.Descriptor instead. +func (*LoginRequest) Descriptor() ([]byte, []int) { + return file_auth_authentication_service_proto_rawDescGZIP(), []int{0} +} -func (m *LoginRequest) GetUserName() string { - if m != nil { - return m.UserName +func (x *LoginRequest) GetUserName() string { + if x != nil { + return x.UserName } return "" } -func (m *LoginRequest) GetPassword() string { - if m != nil { - return m.Password +func (x *LoginRequest) GetPassword() string { + if x != nil { + return x.Password } return "" } -func (m *LoginRequest) GetClientId() string { - if m != nil { - return m.ClientId +func (x *LoginRequest) GetClientId() string { + if x != nil { + return x.ClientId } return "" } +// // The response message containing the result of login attempt. // result value of true indicates success and false indicates // failure type LoginReply struct { - Result bool `protobuf:"varint,1,opt,name=result" json:"result,omitempty"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LoginReply) Reset() { *m = LoginReply{} } -func (m *LoginReply) String() string { return proto.CompactTextString(m) } -func (*LoginReply) ProtoMessage() {} -func (*LoginReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` +} -func (m *LoginReply) GetResult() bool { - if m != nil { - return m.Result +func (x *LoginReply) Reset() { + *x = LoginReply{} + if protoimpl.UnsafeEnabled { + mi := &file_auth_authentication_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -func init() { - proto.RegisterType((*LoginRequest)(nil), "authentication.LoginRequest") - proto.RegisterType((*LoginReply)(nil), "authentication.LoginReply") +func (x *LoginReply) String() string { + return protoimpl.X.MessageStringOf(x) } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +func (*LoginReply) ProtoMessage() {} -// Client API for Login service - -type LoginClient interface { - LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) -} - -type loginClient struct { - cc *grpc.ClientConn +func (x *LoginReply) ProtoReflect() protoreflect.Message { + mi := &file_auth_authentication_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func NewLoginClient(cc *grpc.ClientConn) LoginClient { - return &loginClient{cc} +// Deprecated: Use LoginReply.ProtoReflect.Descriptor instead. +func (*LoginReply) Descriptor() ([]byte, []int) { + return file_auth_authentication_service_proto_rawDescGZIP(), []int{1} } -func (c *loginClient) LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) { - out := new(LoginReply) - err := grpc.Invoke(ctx, "/authentication.Login/LoginCheck", in, out, c.cc, opts...) - if err != nil { - return nil, err +func (x *LoginReply) GetResult() bool { + if x != nil { + return x.Result } - return out, nil + return false } -// Server API for Login service +var File_auth_authentication_service_proto protoreflect.FileDescriptor + +var file_auth_authentication_service_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x64, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x24, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, + 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x32, + 0x51, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x48, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, 0x69, + 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x22, 0x00, 0x42, 0x12, 0x5a, 0x10, 0x2e, 0x3b, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_auth_authentication_service_proto_rawDescOnce sync.Once + file_auth_authentication_service_proto_rawDescData = file_auth_authentication_service_proto_rawDesc +) -type LoginServer interface { - LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) +func file_auth_authentication_service_proto_rawDescGZIP() []byte { + file_auth_authentication_service_proto_rawDescOnce.Do(func() { + file_auth_authentication_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_auth_authentication_service_proto_rawDescData) + }) + return file_auth_authentication_service_proto_rawDescData } -func RegisterLoginServer(s *grpc.Server, srv LoginServer) { - s.RegisterService(&_Login_serviceDesc, srv) +var file_auth_authentication_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_auth_authentication_service_proto_goTypes = []interface{}{ + (*LoginRequest)(nil), // 0: authentication.LoginRequest + (*LoginReply)(nil), // 1: authentication.LoginReply +} +var file_auth_authentication_service_proto_depIdxs = []int32{ + 0, // 0: authentication.Login.LoginCheck:input_type -> authentication.LoginRequest + 1, // 1: authentication.Login.LoginCheck:output_type -> authentication.LoginReply + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func _Login_LoginCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LoginRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LoginServer).LoginCheck(ctx, in) +func init() { file_auth_authentication_service_proto_init() } +func file_auth_authentication_service_proto_init() { + if File_auth_authentication_service_proto != nil { + return } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/authentication.Login/LoginCheck", + if !protoimpl.UnsafeEnabled { + file_auth_authentication_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoginRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_auth_authentication_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoginReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LoginServer).LoginCheck(ctx, req.(*LoginRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Login_serviceDesc = grpc.ServiceDesc{ - ServiceName: "authentication.Login", - HandlerType: (*LoginServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LoginCheck", - Handler: _Login_LoginCheck_Handler, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_auth_authentication_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "authentication_service.proto", -} - -func init() { proto.RegisterFile("authentication_service.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 200 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0x2c, 0x2d, 0xc9, - 0x48, 0xcd, 0x2b, 0xc9, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, - 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0x95, 0x55, 0x4a, 0xe1, 0xe2, - 0xf1, 0xc9, 0x4f, 0xcf, 0xcc, 0x0b, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x92, 0xe6, 0xe2, - 0x2c, 0x2d, 0x4e, 0x2d, 0x8a, 0xcf, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, - 0xe2, 0x00, 0x09, 0xf8, 0x25, 0xe6, 0xa6, 0x0a, 0x49, 0x71, 0x71, 0x14, 0x24, 0x16, 0x17, 0x97, - 0xe7, 0x17, 0xa5, 0x48, 0x30, 0x41, 0xe4, 0x60, 0x7c, 0x90, 0xc6, 0xe4, 0x9c, 0xcc, 0xd4, 0xbc, - 0x92, 0xf8, 0xcc, 0x14, 0x09, 0x66, 0x88, 0x24, 0x44, 0xc0, 0x33, 0x45, 0x49, 0x85, 0x8b, 0x0b, - 0x6a, 0x4b, 0x41, 0x4e, 0xa5, 0x90, 0x18, 0x17, 0x5b, 0x51, 0x6a, 0x71, 0x69, 0x4e, 0x09, 0xd8, - 0x02, 0x8e, 0x20, 0x28, 0xcf, 0x28, 0x90, 0x8b, 0x15, 0xac, 0x4a, 0xc8, 0x03, 0xaa, 0xdc, 0x39, - 0x23, 0x35, 0x39, 0x5b, 0x48, 0x46, 0x0f, 0xd5, 0xcd, 0x7a, 0xc8, 0x0e, 0x96, 0x92, 0xc2, 0x21, - 0x5b, 0x90, 0x53, 0xa9, 0xc4, 0x90, 0xc4, 0x06, 0xf6, 0xb5, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, - 0x11, 0x57, 0x52, 0xd2, 0x15, 0x01, 0x00, 0x00, + GoTypes: file_auth_authentication_service_proto_goTypes, + DependencyIndexes: file_auth_authentication_service_proto_depIdxs, + MessageInfos: file_auth_authentication_service_proto_msgTypes, + }.Build() + File_auth_authentication_service_proto = out.File + file_auth_authentication_service_proto_rawDesc = nil + file_auth_authentication_service_proto_goTypes = nil + file_auth_authentication_service_proto_depIdxs = nil } diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto index a41e13a09f7d9..f67b67a6c5730 100644 --- a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto @@ -25,6 +25,7 @@ syntax = "proto3"; package authentication; +option go_package = ".;authentication"; // The Login service definition. service Login { diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go new file mode 100644 index 0000000000000..bbbf200ec68be --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package authentication + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// LoginClient is the client API for Login service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LoginClient interface { + LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) +} + +type loginClient struct { + cc grpc.ClientConnInterface +} + +func NewLoginClient(cc grpc.ClientConnInterface) LoginClient { + return &loginClient{cc} +} + +func (c *loginClient) LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) { + out := new(LoginReply) + err := c.cc.Invoke(ctx, "/authentication.Login/LoginCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LoginServer is the server API for Login service. +// All implementations must embed UnimplementedLoginServer +// for forward compatibility +type LoginServer interface { + LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) + mustEmbedUnimplementedLoginServer() +} + +// UnimplementedLoginServer must be embedded to have forward compatible implementations. +type UnimplementedLoginServer struct { +} + +func (UnimplementedLoginServer) LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method LoginCheck not implemented") +} +func (UnimplementedLoginServer) mustEmbedUnimplementedLoginServer() {} + +// UnsafeLoginServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LoginServer will +// result in compilation errors. +type UnsafeLoginServer interface { + mustEmbedUnimplementedLoginServer() +} + +func RegisterLoginServer(s grpc.ServiceRegistrar, srv LoginServer) { + s.RegisterService(&Login_ServiceDesc, srv) +} + +func _Login_LoginCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LoginRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoginServer).LoginCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/authentication.Login/LoginCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoginServer).LoginCheck(ctx, req.(*LoginRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Login_ServiceDesc is the grpc.ServiceDesc for Login service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Login_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "authentication.Login", + HandlerType: (*LoginServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LoginCheck", + Handler: _Login_LoginCheck_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "auth/authentication_service.proto", +} diff --git a/plugins/inputs/jti_openconfig_telemetry/gen.go b/plugins/inputs/jti_openconfig_telemetry/gen.go new file mode 100644 index 0000000000000..0b97e3bea9e55 --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/gen.go @@ -0,0 +1,11 @@ +package jti_openconfig_telemetry + +// To run these commands, make sure that protoc-gen-go and protoc-gen-go-grpc are installed +// > go install google.golang.org/protobuf/cmd/protoc-gen-go +// > go install google.golang.org/grpc/cmd/protoc-gen-go-grpc +// +// Generated files were last generated with: +// - protoc-gen-go: v1.27.1 +// - protoc-gen-go-grpc: v1.1.0 +//go:generate protoc --go_out=auth/ --go-grpc_out=auth/ auth/authentication_service.proto +//go:generate protoc --go_out=oc/ --go-grpc_out=oc/ oc/oc.proto diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go index bc7c780458f99..19d16dccc501a 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go @@ -1,54 +1,24 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: oc.proto - -/* -Package telemetry is a generated protocol buffer package. - -It is generated from these files: - oc.proto - -It has these top-level messages: - SubscriptionRequest - SubscriptionInput - Collector - Path - SubscriptionAdditionalConfig - SubscriptionReply - SubscriptionResponse - OpenConfigData - KeyValue - Delete - Eom - CancelSubscriptionRequest - CancelSubscriptionReply - GetSubscriptionsRequest - GetSubscriptionsReply - GetOperationalStateRequest - GetOperationalStateReply - DataEncodingRequest - DataEncodingReply -*/ -package telemetry +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: oc/oc.proto -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package telemetry import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Result of the operation type ReturnCode int32 @@ -59,21 +29,46 @@ const ( ReturnCode_UNKNOWN_ERROR ReturnCode = 2 ) -var ReturnCode_name = map[int32]string{ - 0: "SUCCESS", - 1: "NO_SUBSCRIPTION_ENTRY", - 2: "UNKNOWN_ERROR", -} -var ReturnCode_value = map[string]int32{ - "SUCCESS": 0, - "NO_SUBSCRIPTION_ENTRY": 1, - "UNKNOWN_ERROR": 2, +// Enum value maps for ReturnCode. +var ( + ReturnCode_name = map[int32]string{ + 0: "SUCCESS", + 1: "NO_SUBSCRIPTION_ENTRY", + 2: "UNKNOWN_ERROR", + } + ReturnCode_value = map[string]int32{ + "SUCCESS": 0, + "NO_SUBSCRIPTION_ENTRY": 1, + "UNKNOWN_ERROR": 2, + } +) + +func (x ReturnCode) Enum() *ReturnCode { + p := new(ReturnCode) + *p = x + return p } func (x ReturnCode) String() string { - return proto.EnumName(ReturnCode_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ReturnCode) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[0].Descriptor() +} + +func (ReturnCode) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[0] +} + +func (x ReturnCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ReturnCode.Descriptor instead. +func (ReturnCode) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{0} } -func (ReturnCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // Verbosity Level type VerbosityLevel int32 @@ -84,21 +79,46 @@ const ( VerbosityLevel_BRIEF VerbosityLevel = 2 ) -var VerbosityLevel_name = map[int32]string{ - 0: "DETAIL", - 1: "TERSE", - 2: "BRIEF", -} -var VerbosityLevel_value = map[string]int32{ - "DETAIL": 0, - "TERSE": 1, - "BRIEF": 2, +// Enum value maps for VerbosityLevel. +var ( + VerbosityLevel_name = map[int32]string{ + 0: "DETAIL", + 1: "TERSE", + 2: "BRIEF", + } + VerbosityLevel_value = map[string]int32{ + "DETAIL": 0, + "TERSE": 1, + "BRIEF": 2, + } +) + +func (x VerbosityLevel) Enum() *VerbosityLevel { + p := new(VerbosityLevel) + *p = x + return p } func (x VerbosityLevel) String() string { - return proto.EnumName(VerbosityLevel_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (VerbosityLevel) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[1].Descriptor() +} + +func (VerbosityLevel) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[1] +} + +func (x VerbosityLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use VerbosityLevel.Descriptor instead. +func (VerbosityLevel) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{1} } -func (VerbosityLevel) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } // Encoding Type Supported type EncodingType int32 @@ -110,126 +130,248 @@ const ( EncodingType_PROTO3 EncodingType = 3 ) -var EncodingType_name = map[int32]string{ - 0: "UNDEFINED", - 1: "XML", - 2: "JSON_IETF", - 3: "PROTO3", -} -var EncodingType_value = map[string]int32{ - "UNDEFINED": 0, - "XML": 1, - "JSON_IETF": 2, - "PROTO3": 3, +// Enum value maps for EncodingType. +var ( + EncodingType_name = map[int32]string{ + 0: "UNDEFINED", + 1: "XML", + 2: "JSON_IETF", + 3: "PROTO3", + } + EncodingType_value = map[string]int32{ + "UNDEFINED": 0, + "XML": 1, + "JSON_IETF": 2, + "PROTO3": 3, + } +) + +func (x EncodingType) Enum() *EncodingType { + p := new(EncodingType) + *p = x + return p } func (x EncodingType) String() string { - return proto.EnumName(EncodingType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EncodingType) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[2].Descriptor() +} + +func (EncodingType) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[2] +} + +func (x EncodingType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EncodingType.Descriptor instead. +func (EncodingType) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{2} } -func (EncodingType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } // Message sent for a telemetry subscription request type SubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Data associated with a telemetry subscription - Input *SubscriptionInput `protobuf:"bytes,1,opt,name=input" json:"input,omitempty"` + Input *SubscriptionInput `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` // List of data models paths and filters // which are used in a telemetry operation. - PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList" json:"path_list,omitempty"` + PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList,proto3" json:"path_list,omitempty"` // The below configuration is not defined in Openconfig RPC. // It is a proposed extension to configure additional // subscription request features. - AdditionalConfig *SubscriptionAdditionalConfig `protobuf:"bytes,3,opt,name=additional_config,json=additionalConfig" json:"additional_config,omitempty"` + AdditionalConfig *SubscriptionAdditionalConfig `protobuf:"bytes,3,opt,name=additional_config,json=additionalConfig,proto3" json:"additional_config,omitempty"` } -func (m *SubscriptionRequest) Reset() { *m = SubscriptionRequest{} } -func (m *SubscriptionRequest) String() string { return proto.CompactTextString(m) } -func (*SubscriptionRequest) ProtoMessage() {} -func (*SubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (x *SubscriptionRequest) Reset() { + *x = SubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionRequest) GetInput() *SubscriptionInput { - if m != nil { - return m.Input +func (x *SubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionRequest) ProtoMessage() {} + +func (x *SubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionRequest.ProtoReflect.Descriptor instead. +func (*SubscriptionRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{0} +} + +func (x *SubscriptionRequest) GetInput() *SubscriptionInput { + if x != nil { + return x.Input } return nil } -func (m *SubscriptionRequest) GetPathList() []*Path { - if m != nil { - return m.PathList +func (x *SubscriptionRequest) GetPathList() []*Path { + if x != nil { + return x.PathList } return nil } -func (m *SubscriptionRequest) GetAdditionalConfig() *SubscriptionAdditionalConfig { - if m != nil { - return m.AdditionalConfig +func (x *SubscriptionRequest) GetAdditionalConfig() *SubscriptionAdditionalConfig { + if x != nil { + return x.AdditionalConfig } return nil } // Data associated with a telemetry subscription type SubscriptionInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of optional collector endpoints to send data for // this subscription. // If no collector destinations are specified, the collector // destination is assumed to be the requester on the rpc channel. - CollectorList []*Collector `protobuf:"bytes,1,rep,name=collector_list,json=collectorList" json:"collector_list,omitempty"` + CollectorList []*Collector `protobuf:"bytes,1,rep,name=collector_list,json=collectorList,proto3" json:"collector_list,omitempty"` +} + +func (x *SubscriptionInput) Reset() { + *x = SubscriptionInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SubscriptionInput) Reset() { *m = SubscriptionInput{} } -func (m *SubscriptionInput) String() string { return proto.CompactTextString(m) } -func (*SubscriptionInput) ProtoMessage() {} -func (*SubscriptionInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (x *SubscriptionInput) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *SubscriptionInput) GetCollectorList() []*Collector { - if m != nil { - return m.CollectorList +func (*SubscriptionInput) ProtoMessage() {} + +func (x *SubscriptionInput) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionInput.ProtoReflect.Descriptor instead. +func (*SubscriptionInput) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{1} +} + +func (x *SubscriptionInput) GetCollectorList() []*Collector { + if x != nil { + return x.CollectorList } return nil } // Collector endpoints to send data specified as an ip+port combination. type Collector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // IP address of collector endpoint - Address string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"` + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // Transport protocol port number for the collector destination. - Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` } -func (m *Collector) Reset() { *m = Collector{} } -func (m *Collector) String() string { return proto.CompactTextString(m) } -func (*Collector) ProtoMessage() {} -func (*Collector) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (x *Collector) Reset() { + *x = Collector{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Collector) GetAddress() string { - if m != nil { - return m.Address +func (x *Collector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Collector) ProtoMessage() {} + +func (x *Collector) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Collector.ProtoReflect.Descriptor instead. +func (*Collector) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{2} +} + +func (x *Collector) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *Collector) GetPort() uint32 { - if m != nil { - return m.Port +func (x *Collector) GetPort() uint32 { + if x != nil { + return x.Port } return 0 } // Data model path type Path struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Data model path of interest // Path specification for elements of OpenConfig data models - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Regular expression to be used in filtering state leaves - Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // If this is set to true, the target device will only send // updates to the collector upon a change in data value - SuppressUnchanged bool `protobuf:"varint,3,opt,name=suppress_unchanged,json=suppressUnchanged" json:"suppress_unchanged,omitempty"` + SuppressUnchanged bool `protobuf:"varint,3,opt,name=suppress_unchanged,json=suppressUnchanged,proto3" json:"suppress_unchanged,omitempty"` // Maximum time in ms the target device may go without sending // a message to the collector. If this time expires with // suppress-unchanged set, the target device must send an update // message regardless if the data values have changed. - MaxSilentInterval uint32 `protobuf:"varint,4,opt,name=max_silent_interval,json=maxSilentInterval" json:"max_silent_interval,omitempty"` + MaxSilentInterval uint32 `protobuf:"varint,4,opt,name=max_silent_interval,json=maxSilentInterval,proto3" json:"max_silent_interval,omitempty"` // Time in ms between collection and transmission of the // specified data to the collector platform. The target device // will sample the corresponding data (e.g,. a counter) and @@ -237,143 +379,263 @@ type Path struct { // // If sample-frequency is set to 0, then the network device // must emit an update upon every datum change. - SampleFrequency uint32 `protobuf:"varint,5,opt,name=sample_frequency,json=sampleFrequency" json:"sample_frequency,omitempty"` + SampleFrequency uint32 `protobuf:"varint,5,opt,name=sample_frequency,json=sampleFrequency,proto3" json:"sample_frequency,omitempty"` // EOM needed for each walk cycle of this path? // For periodic sensor, applicable for each complete reap // For event sensor, applicable when initial dump is over // (same as EOS) // This feature is not implemented currently. - NeedEom bool `protobuf:"varint,6,opt,name=need_eom,json=needEom" json:"need_eom,omitempty"` + NeedEom bool `protobuf:"varint,6,opt,name=need_eom,json=needEom,proto3" json:"need_eom,omitempty"` } -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} -func (*Path) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (x *Path) Reset() { + *x = Path{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Path) GetPath() string { - if m != nil { - return m.Path +func (x *Path) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Path) ProtoMessage() {} + +func (x *Path) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Path.ProtoReflect.Descriptor instead. +func (*Path) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{3} +} + +func (x *Path) GetPath() string { + if x != nil { + return x.Path } return "" } -func (m *Path) GetFilter() string { - if m != nil { - return m.Filter +func (x *Path) GetFilter() string { + if x != nil { + return x.Filter } return "" } -func (m *Path) GetSuppressUnchanged() bool { - if m != nil { - return m.SuppressUnchanged +func (x *Path) GetSuppressUnchanged() bool { + if x != nil { + return x.SuppressUnchanged } return false } -func (m *Path) GetMaxSilentInterval() uint32 { - if m != nil { - return m.MaxSilentInterval +func (x *Path) GetMaxSilentInterval() uint32 { + if x != nil { + return x.MaxSilentInterval } return 0 } -func (m *Path) GetSampleFrequency() uint32 { - if m != nil { - return m.SampleFrequency +func (x *Path) GetSampleFrequency() uint32 { + if x != nil { + return x.SampleFrequency } return 0 } -func (m *Path) GetNeedEom() bool { - if m != nil { - return m.NeedEom +func (x *Path) GetNeedEom() bool { + if x != nil { + return x.NeedEom } return false } // Configure subscription request additional features. type SubscriptionAdditionalConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // limit the number of records sent in the stream - LimitRecords int32 `protobuf:"varint,1,opt,name=limit_records,json=limitRecords" json:"limit_records,omitempty"` + LimitRecords int32 `protobuf:"varint,1,opt,name=limit_records,json=limitRecords,proto3" json:"limit_records,omitempty"` // limit the time the stream remains open - LimitTimeSeconds int32 `protobuf:"varint,2,opt,name=limit_time_seconds,json=limitTimeSeconds" json:"limit_time_seconds,omitempty"` + LimitTimeSeconds int32 `protobuf:"varint,2,opt,name=limit_time_seconds,json=limitTimeSeconds,proto3" json:"limit_time_seconds,omitempty"` // EOS needed for this subscription? - NeedEos bool `protobuf:"varint,3,opt,name=need_eos,json=needEos" json:"need_eos,omitempty"` + NeedEos bool `protobuf:"varint,3,opt,name=need_eos,json=needEos,proto3" json:"need_eos,omitempty"` +} + +func (x *SubscriptionAdditionalConfig) Reset() { + *x = SubscriptionAdditionalConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriptionAdditionalConfig) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SubscriptionAdditionalConfig) Reset() { *m = SubscriptionAdditionalConfig{} } -func (m *SubscriptionAdditionalConfig) String() string { return proto.CompactTextString(m) } -func (*SubscriptionAdditionalConfig) ProtoMessage() {} -func (*SubscriptionAdditionalConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*SubscriptionAdditionalConfig) ProtoMessage() {} -func (m *SubscriptionAdditionalConfig) GetLimitRecords() int32 { - if m != nil { - return m.LimitRecords +func (x *SubscriptionAdditionalConfig) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionAdditionalConfig.ProtoReflect.Descriptor instead. +func (*SubscriptionAdditionalConfig) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{4} +} + +func (x *SubscriptionAdditionalConfig) GetLimitRecords() int32 { + if x != nil { + return x.LimitRecords } return 0 } -func (m *SubscriptionAdditionalConfig) GetLimitTimeSeconds() int32 { - if m != nil { - return m.LimitTimeSeconds +func (x *SubscriptionAdditionalConfig) GetLimitTimeSeconds() int32 { + if x != nil { + return x.LimitTimeSeconds } return 0 } -func (m *SubscriptionAdditionalConfig) GetNeedEos() bool { - if m != nil { - return m.NeedEos +func (x *SubscriptionAdditionalConfig) GetNeedEos() bool { + if x != nil { + return x.NeedEos } return false } // 1. Reply data message sent out using out-of-band channel. type SubscriptionReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Response message to a telemetry subscription creation or // get request. - Response *SubscriptionResponse `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` + Response *SubscriptionResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` // List of data models paths and filters // which are used in a telemetry operation. - PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList" json:"path_list,omitempty"` + PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList,proto3" json:"path_list,omitempty"` } -func (m *SubscriptionReply) Reset() { *m = SubscriptionReply{} } -func (m *SubscriptionReply) String() string { return proto.CompactTextString(m) } -func (*SubscriptionReply) ProtoMessage() {} -func (*SubscriptionReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (x *SubscriptionReply) Reset() { + *x = SubscriptionReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionReply) GetResponse() *SubscriptionResponse { - if m != nil { - return m.Response +func (x *SubscriptionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionReply) ProtoMessage() {} + +func (x *SubscriptionReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionReply.ProtoReflect.Descriptor instead. +func (*SubscriptionReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{5} +} + +func (x *SubscriptionReply) GetResponse() *SubscriptionResponse { + if x != nil { + return x.Response } return nil } -func (m *SubscriptionReply) GetPathList() []*Path { - if m != nil { - return m.PathList +func (x *SubscriptionReply) GetPathList() []*Path { + if x != nil { + return x.PathList } return nil } // Response message to a telemetry subscription creation or get request. type SubscriptionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Unique id for the subscription on the device. This is // generated by the device and returned in a subscription // request or when listing existing subscriptions - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *SubscriptionResponse) Reset() { *m = SubscriptionResponse{} } -func (m *SubscriptionResponse) String() string { return proto.CompactTextString(m) } -func (*SubscriptionResponse) ProtoMessage() {} -func (*SubscriptionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (x *SubscriptionResponse) Reset() { + *x = SubscriptionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionResponse) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *SubscriptionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionResponse) ProtoMessage() {} + +func (x *SubscriptionResponse) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionResponse.ProtoReflect.Descriptor instead. +func (*SubscriptionResponse) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{6} +} + +func (x *SubscriptionResponse) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } @@ -381,112 +643,147 @@ func (m *SubscriptionResponse) GetSubscriptionId() uint32 { // 2. Telemetry data send back on the same connection as the // subscription request. type OpenConfigData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // router name:export IP address - SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId" json:"system_id,omitempty"` + SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId,proto3" json:"system_id,omitempty"` // line card / RE (slot number) - ComponentId uint32 `protobuf:"varint,2,opt,name=component_id,json=componentId" json:"component_id,omitempty"` + ComponentId uint32 `protobuf:"varint,2,opt,name=component_id,json=componentId,proto3" json:"component_id,omitempty"` // PFE (if applicable) - SubComponentId uint32 `protobuf:"varint,3,opt,name=sub_component_id,json=subComponentId" json:"sub_component_id,omitempty"` + SubComponentId uint32 `protobuf:"varint,3,opt,name=sub_component_id,json=subComponentId,proto3" json:"sub_component_id,omitempty"` // Path specification for elements of OpenConfig data models - Path string `protobuf:"bytes,4,opt,name=path" json:"path,omitempty"` + Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` // Sequence number, monotonically increasing for each // system_id, component_id, sub_component_id + path. - SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` + SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number,omitempty"` // timestamp (milliseconds since epoch) - Timestamp uint64 `protobuf:"varint,6,opt,name=timestamp" json:"timestamp,omitempty"` + Timestamp uint64 `protobuf:"varint,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // List of key-value pairs - Kv []*KeyValue `protobuf:"bytes,7,rep,name=kv" json:"kv,omitempty"` + Kv []*KeyValue `protobuf:"bytes,7,rep,name=kv,proto3" json:"kv,omitempty"` // For delete. If filled, it indicates delete - Delete []*Delete `protobuf:"bytes,8,rep,name=delete" json:"delete,omitempty"` + Delete []*Delete `protobuf:"bytes,8,rep,name=delete,proto3" json:"delete,omitempty"` // If filled, it indicates end of marker for the // respective path in the list. - Eom []*Eom `protobuf:"bytes,9,rep,name=eom" json:"eom,omitempty"` + Eom []*Eom `protobuf:"bytes,9,rep,name=eom,proto3" json:"eom,omitempty"` // If filled, it indicates end of sync for complete subscription - SyncResponse bool `protobuf:"varint,10,opt,name=sync_response,json=syncResponse" json:"sync_response,omitempty"` + SyncResponse bool `protobuf:"varint,10,opt,name=sync_response,json=syncResponse,proto3" json:"sync_response,omitempty"` } -func (m *OpenConfigData) Reset() { *m = OpenConfigData{} } -func (m *OpenConfigData) String() string { return proto.CompactTextString(m) } -func (*OpenConfigData) ProtoMessage() {} -func (*OpenConfigData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (x *OpenConfigData) Reset() { + *x = OpenConfigData{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *OpenConfigData) GetSystemId() string { - if m != nil { - return m.SystemId +func (x *OpenConfigData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OpenConfigData) ProtoMessage() {} + +func (x *OpenConfigData) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OpenConfigData.ProtoReflect.Descriptor instead. +func (*OpenConfigData) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{7} +} + +func (x *OpenConfigData) GetSystemId() string { + if x != nil { + return x.SystemId } return "" } -func (m *OpenConfigData) GetComponentId() uint32 { - if m != nil { - return m.ComponentId +func (x *OpenConfigData) GetComponentId() uint32 { + if x != nil { + return x.ComponentId } return 0 } -func (m *OpenConfigData) GetSubComponentId() uint32 { - if m != nil { - return m.SubComponentId +func (x *OpenConfigData) GetSubComponentId() uint32 { + if x != nil { + return x.SubComponentId } return 0 } -func (m *OpenConfigData) GetPath() string { - if m != nil { - return m.Path +func (x *OpenConfigData) GetPath() string { + if x != nil { + return x.Path } return "" } -func (m *OpenConfigData) GetSequenceNumber() uint64 { - if m != nil { - return m.SequenceNumber +func (x *OpenConfigData) GetSequenceNumber() uint64 { + if x != nil { + return x.SequenceNumber } return 0 } -func (m *OpenConfigData) GetTimestamp() uint64 { - if m != nil { - return m.Timestamp +func (x *OpenConfigData) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp } return 0 } -func (m *OpenConfigData) GetKv() []*KeyValue { - if m != nil { - return m.Kv +func (x *OpenConfigData) GetKv() []*KeyValue { + if x != nil { + return x.Kv } return nil } -func (m *OpenConfigData) GetDelete() []*Delete { - if m != nil { - return m.Delete +func (x *OpenConfigData) GetDelete() []*Delete { + if x != nil { + return x.Delete } return nil } -func (m *OpenConfigData) GetEom() []*Eom { - if m != nil { - return m.Eom +func (x *OpenConfigData) GetEom() []*Eom { + if x != nil { + return x.Eom } return nil } -func (m *OpenConfigData) GetSyncResponse() bool { - if m != nil { - return m.SyncResponse +func (x *OpenConfigData) GetSyncResponse() bool { + if x != nil { + return x.SyncResponse } return false } // Simple Key-value, where value could be one of scalar types type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Key - Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // One of possible values // - // Types that are valid to be assigned to Value: + // Types that are assignable to Value: // *KeyValue_DoubleValue // *KeyValue_IntValue // *KeyValue_UintValue @@ -497,44 +794,44 @@ type KeyValue struct { Value isKeyValue_Value `protobuf_oneof:"value"` } -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -type isKeyValue_Value interface { - isKeyValue_Value() +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -type KeyValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,oneof"` +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) } -type KeyValue_IntValue struct { - IntValue int64 `protobuf:"varint,6,opt,name=int_value,json=intValue,oneof"` -} -type KeyValue_UintValue struct { - UintValue uint64 `protobuf:"varint,7,opt,name=uint_value,json=uintValue,oneof"` -} -type KeyValue_SintValue struct { - SintValue int64 `protobuf:"zigzag64,8,opt,name=sint_value,json=sintValue,oneof"` -} -type KeyValue_BoolValue struct { - BoolValue bool `protobuf:"varint,9,opt,name=bool_value,json=boolValue,oneof"` -} -type KeyValue_StrValue struct { - StrValue string `protobuf:"bytes,10,opt,name=str_value,json=strValue,oneof"` + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type KeyValue_BytesValue struct { - BytesValue []byte `protobuf:"bytes,11,opt,name=bytes_value,json=bytesValue,proto3,oneof"` + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{8} } -func (*KeyValue_DoubleValue) isKeyValue_Value() {} -func (*KeyValue_IntValue) isKeyValue_Value() {} -func (*KeyValue_UintValue) isKeyValue_Value() {} -func (*KeyValue_SintValue) isKeyValue_Value() {} -func (*KeyValue_BoolValue) isKeyValue_Value() {} -func (*KeyValue_StrValue) isKeyValue_Value() {} -func (*KeyValue_BytesValue) isKeyValue_Value() {} +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} func (m *KeyValue) GetValue() isKeyValue_Value { if m != nil { @@ -543,323 +840,412 @@ func (m *KeyValue) GetValue() isKeyValue_Value { return nil } -func (m *KeyValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *KeyValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*KeyValue_DoubleValue); ok { +func (x *KeyValue) GetDoubleValue() float64 { + if x, ok := x.GetValue().(*KeyValue_DoubleValue); ok { return x.DoubleValue } return 0 } -func (m *KeyValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*KeyValue_IntValue); ok { +func (x *KeyValue) GetIntValue() int64 { + if x, ok := x.GetValue().(*KeyValue_IntValue); ok { return x.IntValue } return 0 } -func (m *KeyValue) GetUintValue() uint64 { - if x, ok := m.GetValue().(*KeyValue_UintValue); ok { +func (x *KeyValue) GetUintValue() uint64 { + if x, ok := x.GetValue().(*KeyValue_UintValue); ok { return x.UintValue } return 0 } -func (m *KeyValue) GetSintValue() int64 { - if x, ok := m.GetValue().(*KeyValue_SintValue); ok { +func (x *KeyValue) GetSintValue() int64 { + if x, ok := x.GetValue().(*KeyValue_SintValue); ok { return x.SintValue } return 0 } -func (m *KeyValue) GetBoolValue() bool { - if x, ok := m.GetValue().(*KeyValue_BoolValue); ok { +func (x *KeyValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*KeyValue_BoolValue); ok { return x.BoolValue } return false } -func (m *KeyValue) GetStrValue() string { - if x, ok := m.GetValue().(*KeyValue_StrValue); ok { +func (x *KeyValue) GetStrValue() string { + if x, ok := x.GetValue().(*KeyValue_StrValue); ok { return x.StrValue } return "" } -func (m *KeyValue) GetBytesValue() []byte { - if x, ok := m.GetValue().(*KeyValue_BytesValue); ok { +func (x *KeyValue) GetBytesValue() []byte { + if x, ok := x.GetValue().(*KeyValue_BytesValue); ok { return x.BytesValue } return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*KeyValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _KeyValue_OneofMarshaler, _KeyValue_OneofUnmarshaler, _KeyValue_OneofSizer, []interface{}{ - (*KeyValue_DoubleValue)(nil), - (*KeyValue_IntValue)(nil), - (*KeyValue_UintValue)(nil), - (*KeyValue_SintValue)(nil), - (*KeyValue_BoolValue)(nil), - (*KeyValue_StrValue)(nil), - (*KeyValue_BytesValue)(nil), - } +type isKeyValue_Value interface { + isKeyValue_Value() } -func _KeyValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*KeyValue) - // value - switch x := m.Value.(type) { - case *KeyValue_DoubleValue: - b.EncodeVarint(5<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.DoubleValue)) - case *KeyValue_IntValue: - b.EncodeVarint(6<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.IntValue)) - case *KeyValue_UintValue: - b.EncodeVarint(7<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.UintValue)) - case *KeyValue_SintValue: - b.EncodeVarint(8<<3 | proto.WireVarint) - b.EncodeZigzag64(uint64(x.SintValue)) - case *KeyValue_BoolValue: - t := uint64(0) - if x.BoolValue { - t = 1 - } - b.EncodeVarint(9<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *KeyValue_StrValue: - b.EncodeVarint(10<<3 | proto.WireBytes) - b.EncodeStringBytes(x.StrValue) - case *KeyValue_BytesValue: - b.EncodeVarint(11<<3 | proto.WireBytes) - b.EncodeRawBytes(x.BytesValue) - case nil: - default: - return fmt.Errorf("KeyValue.Value has unexpected type %T", x) - } - return nil +type KeyValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` } -func _KeyValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*KeyValue) - switch tag { - case 5: // value.double_value - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Value = &KeyValue_DoubleValue{math.Float64frombits(x)} - return true, err - case 6: // value.int_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_IntValue{int64(x)} - return true, err - case 7: // value.uint_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_UintValue{x} - return true, err - case 8: // value.sint_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag64() - m.Value = &KeyValue_SintValue{int64(x)} - return true, err - case 9: // value.bool_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_BoolValue{x != 0} - return true, err - case 10: // value.str_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Value = &KeyValue_StrValue{x} - return true, err - case 11: // value.bytes_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Value = &KeyValue_BytesValue{x} - return true, err - default: - return false, nil - } -} - -func _KeyValue_OneofSizer(msg proto.Message) (n int) { - m := msg.(*KeyValue) - // value - switch x := m.Value.(type) { - case *KeyValue_DoubleValue: - n += proto.SizeVarint(5<<3 | proto.WireFixed64) - n += 8 - case *KeyValue_IntValue: - n += proto.SizeVarint(6<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.IntValue)) - case *KeyValue_UintValue: - n += proto.SizeVarint(7<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.UintValue)) - case *KeyValue_SintValue: - n += proto.SizeVarint(8<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(uint64(x.SintValue<<1) ^ uint64((int64(x.SintValue) >> 63)))) - case *KeyValue_BoolValue: - n += proto.SizeVarint(9<<3 | proto.WireVarint) - n += 1 - case *KeyValue_StrValue: - n += proto.SizeVarint(10<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.StrValue))) - n += len(x.StrValue) - case *KeyValue_BytesValue: - n += proto.SizeVarint(11<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.BytesValue))) - n += len(x.BytesValue) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n +type KeyValue_IntValue struct { + IntValue int64 `protobuf:"varint,6,opt,name=int_value,json=intValue,proto3,oneof"` } +type KeyValue_UintValue struct { + UintValue uint64 `protobuf:"varint,7,opt,name=uint_value,json=uintValue,proto3,oneof"` +} + +type KeyValue_SintValue struct { + SintValue int64 `protobuf:"zigzag64,8,opt,name=sint_value,json=sintValue,proto3,oneof"` +} + +type KeyValue_BoolValue struct { + BoolValue bool `protobuf:"varint,9,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type KeyValue_StrValue struct { + StrValue string `protobuf:"bytes,10,opt,name=str_value,json=strValue,proto3,oneof"` +} + +type KeyValue_BytesValue struct { + BytesValue []byte `protobuf:"bytes,11,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +func (*KeyValue_DoubleValue) isKeyValue_Value() {} + +func (*KeyValue_IntValue) isKeyValue_Value() {} + +func (*KeyValue_UintValue) isKeyValue_Value() {} + +func (*KeyValue_SintValue) isKeyValue_Value() {} + +func (*KeyValue_BoolValue) isKeyValue_Value() {} + +func (*KeyValue_StrValue) isKeyValue_Value() {} + +func (*KeyValue_BytesValue) isKeyValue_Value() {} + // Message indicating delete for a particular path type Delete struct { - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *Delete) Reset() { + *x = Delete{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Delete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Delete) Reset() { *m = Delete{} } -func (m *Delete) String() string { return proto.CompactTextString(m) } -func (*Delete) ProtoMessage() {} -func (*Delete) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*Delete) ProtoMessage() {} -func (m *Delete) GetPath() string { - if m != nil { - return m.Path +func (x *Delete) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Delete.ProtoReflect.Descriptor instead. +func (*Delete) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{9} +} + +func (x *Delete) GetPath() string { + if x != nil { + return x.Path } return "" } // Message indicating EOM for a particular path type Eom struct { - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` } -func (m *Eom) Reset() { *m = Eom{} } -func (m *Eom) String() string { return proto.CompactTextString(m) } -func (*Eom) ProtoMessage() {} -func (*Eom) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (x *Eom) Reset() { + *x = Eom{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Eom) GetPath() string { - if m != nil { - return m.Path +func (x *Eom) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Eom) ProtoMessage() {} + +func (x *Eom) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Eom.ProtoReflect.Descriptor instead. +func (*Eom) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{10} +} + +func (x *Eom) GetPath() string { + if x != nil { + return x.Path } return "" } // Message sent for a telemetry subscription cancellation request type CancelSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Subscription identifier as returned by the device when // subscription was requested - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *CancelSubscriptionRequest) Reset() { *m = CancelSubscriptionRequest{} } -func (m *CancelSubscriptionRequest) String() string { return proto.CompactTextString(m) } -func (*CancelSubscriptionRequest) ProtoMessage() {} -func (*CancelSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (x *CancelSubscriptionRequest) Reset() { + *x = CancelSubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *CancelSubscriptionRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *CancelSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelSubscriptionRequest) ProtoMessage() {} + +func (x *CancelSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelSubscriptionRequest.ProtoReflect.Descriptor instead. +func (*CancelSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{11} +} + +func (x *CancelSubscriptionRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } // Reply to telemetry subscription cancellation request type CancelSubscriptionReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Return code - Code ReturnCode `protobuf:"varint,1,opt,name=code,enum=telemetry.ReturnCode" json:"code,omitempty"` + Code ReturnCode `protobuf:"varint,1,opt,name=code,proto3,enum=telemetry.ReturnCode" json:"code,omitempty"` // Return code string - CodeStr string `protobuf:"bytes,2,opt,name=code_str,json=codeStr" json:"code_str,omitempty"` + CodeStr string `protobuf:"bytes,2,opt,name=code_str,json=codeStr,proto3" json:"code_str,omitempty"` } -func (m *CancelSubscriptionReply) Reset() { *m = CancelSubscriptionReply{} } -func (m *CancelSubscriptionReply) String() string { return proto.CompactTextString(m) } -func (*CancelSubscriptionReply) ProtoMessage() {} -func (*CancelSubscriptionReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (x *CancelSubscriptionReply) Reset() { + *x = CancelSubscriptionReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *CancelSubscriptionReply) GetCode() ReturnCode { - if m != nil { - return m.Code +func (x *CancelSubscriptionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelSubscriptionReply) ProtoMessage() {} + +func (x *CancelSubscriptionReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelSubscriptionReply.ProtoReflect.Descriptor instead. +func (*CancelSubscriptionReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{12} +} + +func (x *CancelSubscriptionReply) GetCode() ReturnCode { + if x != nil { + return x.Code } return ReturnCode_SUCCESS } -func (m *CancelSubscriptionReply) GetCodeStr() string { - if m != nil { - return m.CodeStr +func (x *CancelSubscriptionReply) GetCodeStr() string { + if x != nil { + return x.CodeStr } return "" } // Message sent for a telemetry get request type GetSubscriptionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Subscription identifier as returned by the device when // subscription was requested // --- or --- // 0xFFFFFFFF for all subscription identifiers - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *GetSubscriptionsRequest) Reset() { *m = GetSubscriptionsRequest{} } -func (m *GetSubscriptionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetSubscriptionsRequest) ProtoMessage() {} -func (*GetSubscriptionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (x *GetSubscriptionsRequest) Reset() { + *x = GetSubscriptionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetSubscriptionsRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *GetSubscriptionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubscriptionsRequest) ProtoMessage() {} + +func (x *GetSubscriptionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubscriptionsRequest.ProtoReflect.Descriptor instead. +func (*GetSubscriptionsRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{13} +} + +func (x *GetSubscriptionsRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } // Reply to telemetry subscription get request type GetSubscriptionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of current telemetry subscriptions - SubscriptionList []*SubscriptionReply `protobuf:"bytes,1,rep,name=subscription_list,json=subscriptionList" json:"subscription_list,omitempty"` + SubscriptionList []*SubscriptionReply `protobuf:"bytes,1,rep,name=subscription_list,json=subscriptionList,proto3" json:"subscription_list,omitempty"` } -func (m *GetSubscriptionsReply) Reset() { *m = GetSubscriptionsReply{} } -func (m *GetSubscriptionsReply) String() string { return proto.CompactTextString(m) } -func (*GetSubscriptionsReply) ProtoMessage() {} -func (*GetSubscriptionsReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (x *GetSubscriptionsReply) Reset() { + *x = GetSubscriptionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetSubscriptionsReply) GetSubscriptionList() []*SubscriptionReply { - if m != nil { - return m.SubscriptionList +func (x *GetSubscriptionsReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubscriptionsReply) ProtoMessage() {} + +func (x *GetSubscriptionsReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubscriptionsReply.ProtoReflect.Descriptor instead. +func (*GetSubscriptionsReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{14} +} + +func (x *GetSubscriptionsReply) GetSubscriptionList() []*SubscriptionReply { + if x != nil { + return x.SubscriptionList } return nil } // Message sent for telemetry agent operational states request type GetOperationalStateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Per-subscription_id level operational state can be requested. // // Subscription identifier as returned by the device when @@ -870,434 +1256,718 @@ type GetOperationalStateRequest struct { // --- or --- // If subscription_id is not present then sent only agent-level // operational stats - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` // Control verbosity of the output - Verbosity VerbosityLevel `protobuf:"varint,2,opt,name=verbosity,enum=telemetry.VerbosityLevel" json:"verbosity,omitempty"` + Verbosity VerbosityLevel `protobuf:"varint,2,opt,name=verbosity,proto3,enum=telemetry.VerbosityLevel" json:"verbosity,omitempty"` } -func (m *GetOperationalStateRequest) Reset() { *m = GetOperationalStateRequest{} } -func (m *GetOperationalStateRequest) String() string { return proto.CompactTextString(m) } -func (*GetOperationalStateRequest) ProtoMessage() {} -func (*GetOperationalStateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (x *GetOperationalStateRequest) Reset() { + *x = GetOperationalStateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetOperationalStateRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *GetOperationalStateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOperationalStateRequest) ProtoMessage() {} + +func (x *GetOperationalStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOperationalStateRequest.ProtoReflect.Descriptor instead. +func (*GetOperationalStateRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{15} +} + +func (x *GetOperationalStateRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } -func (m *GetOperationalStateRequest) GetVerbosity() VerbosityLevel { - if m != nil { - return m.Verbosity +func (x *GetOperationalStateRequest) GetVerbosity() VerbosityLevel { + if x != nil { + return x.Verbosity } return VerbosityLevel_DETAIL } // Reply to telemetry agent operational states request type GetOperationalStateReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of key-value pairs where // key = operational state definition // value = operational state value - Kv []*KeyValue `protobuf:"bytes,1,rep,name=kv" json:"kv,omitempty"` + Kv []*KeyValue `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` } -func (m *GetOperationalStateReply) Reset() { *m = GetOperationalStateReply{} } -func (m *GetOperationalStateReply) String() string { return proto.CompactTextString(m) } -func (*GetOperationalStateReply) ProtoMessage() {} -func (*GetOperationalStateReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -func (m *GetOperationalStateReply) GetKv() []*KeyValue { - if m != nil { - return m.Kv +func (x *GetOperationalStateReply) Reset() { + *x = GetOperationalStateReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -// Message sent for a data encoding request -type DataEncodingRequest struct { -} - -func (m *DataEncodingRequest) Reset() { *m = DataEncodingRequest{} } -func (m *DataEncodingRequest) String() string { return proto.CompactTextString(m) } -func (*DataEncodingRequest) ProtoMessage() {} -func (*DataEncodingRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -// Reply to data encodings supported request -type DataEncodingReply struct { - EncodingList []EncodingType `protobuf:"varint,1,rep,packed,name=encoding_list,json=encodingList,enum=telemetry.EncodingType" json:"encoding_list,omitempty"` +func (x *GetOperationalStateReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DataEncodingReply) Reset() { *m = DataEncodingReply{} } -func (m *DataEncodingReply) String() string { return proto.CompactTextString(m) } -func (*DataEncodingReply) ProtoMessage() {} -func (*DataEncodingReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*GetOperationalStateReply) ProtoMessage() {} -func (m *DataEncodingReply) GetEncodingList() []EncodingType { - if m != nil { - return m.EncodingList +func (x *GetOperationalStateReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil -} - -func init() { - proto.RegisterType((*SubscriptionRequest)(nil), "telemetry.SubscriptionRequest") - proto.RegisterType((*SubscriptionInput)(nil), "telemetry.SubscriptionInput") - proto.RegisterType((*Collector)(nil), "telemetry.Collector") - proto.RegisterType((*Path)(nil), "telemetry.Path") - proto.RegisterType((*SubscriptionAdditionalConfig)(nil), "telemetry.SubscriptionAdditionalConfig") - proto.RegisterType((*SubscriptionReply)(nil), "telemetry.SubscriptionReply") - proto.RegisterType((*SubscriptionResponse)(nil), "telemetry.SubscriptionResponse") - proto.RegisterType((*OpenConfigData)(nil), "telemetry.OpenConfigData") - proto.RegisterType((*KeyValue)(nil), "telemetry.KeyValue") - proto.RegisterType((*Delete)(nil), "telemetry.Delete") - proto.RegisterType((*Eom)(nil), "telemetry.Eom") - proto.RegisterType((*CancelSubscriptionRequest)(nil), "telemetry.CancelSubscriptionRequest") - proto.RegisterType((*CancelSubscriptionReply)(nil), "telemetry.CancelSubscriptionReply") - proto.RegisterType((*GetSubscriptionsRequest)(nil), "telemetry.GetSubscriptionsRequest") - proto.RegisterType((*GetSubscriptionsReply)(nil), "telemetry.GetSubscriptionsReply") - proto.RegisterType((*GetOperationalStateRequest)(nil), "telemetry.GetOperationalStateRequest") - proto.RegisterType((*GetOperationalStateReply)(nil), "telemetry.GetOperationalStateReply") - proto.RegisterType((*DataEncodingRequest)(nil), "telemetry.DataEncodingRequest") - proto.RegisterType((*DataEncodingReply)(nil), "telemetry.DataEncodingReply") - proto.RegisterEnum("telemetry.ReturnCode", ReturnCode_name, ReturnCode_value) - proto.RegisterEnum("telemetry.VerbosityLevel", VerbosityLevel_name, VerbosityLevel_value) - proto.RegisterEnum("telemetry.EncodingType", EncodingType_name, EncodingType_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for OpenConfigTelemetry service - -type OpenConfigTelemetryClient interface { - // Request an inline subscription for data at the specified path. - // The device should send telemetry data back on the same - // connection as the subscription request. - TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) - // Terminates and removes an existing telemetry subscription - CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) - // Get the list of current telemetry subscriptions from the - // target. This command returns a list of existing subscriptions - // not including those that are established via configuration. - GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) - // Get Telemetry Agent Operational States - GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) - // Return the set of data encodings supported by the device for - // telemetry data - GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) + return mi.MessageOf(x) } -type openConfigTelemetryClient struct { - cc *grpc.ClientConn +// Deprecated: Use GetOperationalStateReply.ProtoReflect.Descriptor instead. +func (*GetOperationalStateReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{16} } -func NewOpenConfigTelemetryClient(cc *grpc.ClientConn) OpenConfigTelemetryClient { - return &openConfigTelemetryClient{cc} -} - -func (c *openConfigTelemetryClient) TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) { - stream, err := grpc.NewClientStream(ctx, &_OpenConfigTelemetry_serviceDesc.Streams[0], c.cc, "/telemetry.OpenConfigTelemetry/telemetrySubscribe", opts...) - if err != nil { - return nil, err - } - x := &openConfigTelemetryTelemetrySubscribeClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err +func (x *GetOperationalStateReply) GetKv() []*KeyValue { + if x != nil { + return x.Kv } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type OpenConfigTelemetry_TelemetrySubscribeClient interface { - Recv() (*OpenConfigData, error) - grpc.ClientStream + return nil } -type openConfigTelemetryTelemetrySubscribeClient struct { - grpc.ClientStream +// Message sent for a data encoding request +type DataEncodingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (x *openConfigTelemetryTelemetrySubscribeClient) Recv() (*OpenConfigData, error) { - m := new(OpenConfigData) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *DataEncodingRequest) Reset() { + *x = DataEncodingRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return m, nil } -func (c *openConfigTelemetryClient) CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) { - out := new(CancelSubscriptionReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *DataEncodingRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *openConfigTelemetryClient) GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) { - out := new(GetSubscriptionsReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} +func (*DataEncodingRequest) ProtoMessage() {} -func (c *openConfigTelemetryClient) GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) { - out := new(GetOperationalStateReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", in, out, c.cc, opts...) - if err != nil { - return nil, err +func (x *DataEncodingRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *openConfigTelemetryClient) GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) { - out := new(DataEncodingReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getDataEncodings", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use DataEncodingRequest.ProtoReflect.Descriptor instead. +func (*DataEncodingRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{17} } -// Server API for OpenConfigTelemetry service - -type OpenConfigTelemetryServer interface { - // Request an inline subscription for data at the specified path. - // The device should send telemetry data back on the same - // connection as the subscription request. - TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error - // Terminates and removes an existing telemetry subscription - CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) - // Get the list of current telemetry subscriptions from the - // target. This command returns a list of existing subscriptions - // not including those that are established via configuration. - GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) - // Get Telemetry Agent Operational States - GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) - // Return the set of data encodings supported by the device for - // telemetry data - GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) -} +// Reply to data encodings supported request +type DataEncodingReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func RegisterOpenConfigTelemetryServer(s *grpc.Server, srv OpenConfigTelemetryServer) { - s.RegisterService(&_OpenConfigTelemetry_serviceDesc, srv) + EncodingList []EncodingType `protobuf:"varint,1,rep,packed,name=encoding_list,json=encodingList,proto3,enum=telemetry.EncodingType" json:"encoding_list,omitempty"` } -func _OpenConfigTelemetry_TelemetrySubscribe_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SubscriptionRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (x *DataEncodingReply) Reset() { + *x = DataEncodingReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return srv.(OpenConfigTelemetryServer).TelemetrySubscribe(m, &openConfigTelemetryTelemetrySubscribeServer{stream}) } -type OpenConfigTelemetry_TelemetrySubscribeServer interface { - Send(*OpenConfigData) error - grpc.ServerStream +func (x *DataEncodingReply) String() string { + return protoimpl.X.MessageStringOf(x) } -type openConfigTelemetryTelemetrySubscribeServer struct { - grpc.ServerStream -} +func (*DataEncodingReply) ProtoMessage() {} -func (x *openConfigTelemetryTelemetrySubscribeServer) Send(m *OpenConfigData) error { - return x.ServerStream.SendMsg(m) -} - -func _OpenConfigTelemetry_CancelTelemetrySubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CancelSubscriptionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/CancelTelemetrySubscription", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, req.(*CancelSubscriptionRequest)) +func (x *DataEncodingReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetSubscriptionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetTelemetrySubscriptions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, req.(*GetSubscriptionsRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use DataEncodingReply.ProtoReflect.Descriptor instead. +func (*DataEncodingReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{18} } -func _OpenConfigTelemetry_GetTelemetryOperationalState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetOperationalStateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, in) +func (x *DataEncodingReply) GetEncodingList() []EncodingType { + if x != nil { + return x.EncodingList } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetTelemetryOperationalState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, req.(*GetOperationalStateRequest)) - } - return interceptor(ctx, in, info, handler) + return nil } -func _OpenConfigTelemetry_GetDataEncodings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DataEncodingRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetDataEncodings", +var File_oc_oc_proto protoreflect.FileDescriptor + +var file_oc_oc_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x6f, 0x63, 0x2f, 0x6f, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x22, 0xcd, 0x01, 0x0a, 0x13, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x32, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x05, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x08, 0x70, 0x61, 0x74, 0x68, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x54, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x50, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x3b, 0x0a, + 0x0e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0d, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x39, 0x0a, 0x09, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0xd7, 0x01, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x75, + 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x75, 0x6e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x55, 0x6e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x61, 0x78, + 0x5f, 0x73, 0x69, 0x6c, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x69, 0x6c, 0x65, 0x6e, + 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x46, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x65, 0x6f, 0x6d, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6e, 0x65, 0x65, 0x64, 0x45, 0x6f, 0x6d, 0x22, + 0x8c, 0x01, 0x0a, 0x1c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x10, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x65, 0x6f, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6e, 0x65, 0x65, 0x64, 0x45, 0x6f, 0x73, 0x22, 0x7e, + 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x08, 0x70, 0x61, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x3f, + 0x0a, 0x14, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, + 0xec, 0x02, 0x0a, 0x0e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x49, 0x64, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x75, + 0x62, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x23, 0x0a, 0x02, 0x6b, 0x76, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x02, 0x6b, 0x76, 0x12, 0x29, 0x0a, 0x06, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x6f, 0x6d, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x45, 0x6f, 0x6d, 0x52, 0x03, 0x65, 0x6f, 0x6d, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x79, 0x6e, + 0x63, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8e, + 0x02, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, + 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x75, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x09, 0x75, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x73, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x12, 0x48, 0x00, 0x52, 0x09, 0x73, 0x69, 0x6e, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x1c, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x19, 0x0a, + 0x03, 0x45, 0x6f, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x44, 0x0a, 0x19, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x5f, + 0x0a, 0x17, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x29, 0x0a, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, + 0x63, 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x72, 0x22, + 0x42, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x22, 0x62, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x11, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x52, 0x10, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x7e, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x37, + 0x0a, 0x09, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x19, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x56, 0x65, + 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x09, 0x76, 0x65, + 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x22, 0x3f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x23, 0x0a, 0x02, 0x6b, 0x76, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x02, 0x6b, 0x76, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x51, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x69, + 0x73, 0x74, 0x2a, 0x47, 0x0a, 0x0a, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x19, 0x0a, + 0x15, 0x4e, 0x4f, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x2a, 0x32, 0x0a, 0x0e, 0x56, + 0x65, 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x0a, 0x0a, + 0x06, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x45, 0x52, + 0x53, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x52, 0x49, 0x45, 0x46, 0x10, 0x02, 0x2a, + 0x41, 0x0a, 0x0c, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x58, 0x4d, 0x4c, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x49, 0x45, 0x54, 0x46, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, + 0x10, 0x03, 0x32, 0xfc, 0x03, 0x0a, 0x13, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, 0x53, 0x0a, 0x12, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x12, 0x1e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x4f, 0x70, 0x65, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x69, 0x0a, 0x1b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, + 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x19, 0x67, 0x65, + 0x74, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, + 0x6c, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x25, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a, + 0x10, 0x67, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x1e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, + 0x00, 0x42, 0x0d, 0x5a, 0x0b, 0x2e, 0x3b, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_oc_oc_proto_rawDescOnce sync.Once + file_oc_oc_proto_rawDescData = file_oc_oc_proto_rawDesc +) + +func file_oc_oc_proto_rawDescGZIP() []byte { + file_oc_oc_proto_rawDescOnce.Do(func() { + file_oc_oc_proto_rawDescData = protoimpl.X.CompressGZIP(file_oc_oc_proto_rawDescData) + }) + return file_oc_oc_proto_rawDescData +} + +var file_oc_oc_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_oc_oc_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_oc_oc_proto_goTypes = []interface{}{ + (ReturnCode)(0), // 0: telemetry.ReturnCode + (VerbosityLevel)(0), // 1: telemetry.VerbosityLevel + (EncodingType)(0), // 2: telemetry.EncodingType + (*SubscriptionRequest)(nil), // 3: telemetry.SubscriptionRequest + (*SubscriptionInput)(nil), // 4: telemetry.SubscriptionInput + (*Collector)(nil), // 5: telemetry.Collector + (*Path)(nil), // 6: telemetry.Path + (*SubscriptionAdditionalConfig)(nil), // 7: telemetry.SubscriptionAdditionalConfig + (*SubscriptionReply)(nil), // 8: telemetry.SubscriptionReply + (*SubscriptionResponse)(nil), // 9: telemetry.SubscriptionResponse + (*OpenConfigData)(nil), // 10: telemetry.OpenConfigData + (*KeyValue)(nil), // 11: telemetry.KeyValue + (*Delete)(nil), // 12: telemetry.Delete + (*Eom)(nil), // 13: telemetry.Eom + (*CancelSubscriptionRequest)(nil), // 14: telemetry.CancelSubscriptionRequest + (*CancelSubscriptionReply)(nil), // 15: telemetry.CancelSubscriptionReply + (*GetSubscriptionsRequest)(nil), // 16: telemetry.GetSubscriptionsRequest + (*GetSubscriptionsReply)(nil), // 17: telemetry.GetSubscriptionsReply + (*GetOperationalStateRequest)(nil), // 18: telemetry.GetOperationalStateRequest + (*GetOperationalStateReply)(nil), // 19: telemetry.GetOperationalStateReply + (*DataEncodingRequest)(nil), // 20: telemetry.DataEncodingRequest + (*DataEncodingReply)(nil), // 21: telemetry.DataEncodingReply +} +var file_oc_oc_proto_depIdxs = []int32{ + 4, // 0: telemetry.SubscriptionRequest.input:type_name -> telemetry.SubscriptionInput + 6, // 1: telemetry.SubscriptionRequest.path_list:type_name -> telemetry.Path + 7, // 2: telemetry.SubscriptionRequest.additional_config:type_name -> telemetry.SubscriptionAdditionalConfig + 5, // 3: telemetry.SubscriptionInput.collector_list:type_name -> telemetry.Collector + 9, // 4: telemetry.SubscriptionReply.response:type_name -> telemetry.SubscriptionResponse + 6, // 5: telemetry.SubscriptionReply.path_list:type_name -> telemetry.Path + 11, // 6: telemetry.OpenConfigData.kv:type_name -> telemetry.KeyValue + 12, // 7: telemetry.OpenConfigData.delete:type_name -> telemetry.Delete + 13, // 8: telemetry.OpenConfigData.eom:type_name -> telemetry.Eom + 0, // 9: telemetry.CancelSubscriptionReply.code:type_name -> telemetry.ReturnCode + 8, // 10: telemetry.GetSubscriptionsReply.subscription_list:type_name -> telemetry.SubscriptionReply + 1, // 11: telemetry.GetOperationalStateRequest.verbosity:type_name -> telemetry.VerbosityLevel + 11, // 12: telemetry.GetOperationalStateReply.kv:type_name -> telemetry.KeyValue + 2, // 13: telemetry.DataEncodingReply.encoding_list:type_name -> telemetry.EncodingType + 3, // 14: telemetry.OpenConfigTelemetry.telemetrySubscribe:input_type -> telemetry.SubscriptionRequest + 14, // 15: telemetry.OpenConfigTelemetry.cancelTelemetrySubscription:input_type -> telemetry.CancelSubscriptionRequest + 16, // 16: telemetry.OpenConfigTelemetry.getTelemetrySubscriptions:input_type -> telemetry.GetSubscriptionsRequest + 18, // 17: telemetry.OpenConfigTelemetry.getTelemetryOperationalState:input_type -> telemetry.GetOperationalStateRequest + 20, // 18: telemetry.OpenConfigTelemetry.getDataEncodings:input_type -> telemetry.DataEncodingRequest + 10, // 19: telemetry.OpenConfigTelemetry.telemetrySubscribe:output_type -> telemetry.OpenConfigData + 15, // 20: telemetry.OpenConfigTelemetry.cancelTelemetrySubscription:output_type -> telemetry.CancelSubscriptionReply + 17, // 21: telemetry.OpenConfigTelemetry.getTelemetrySubscriptions:output_type -> telemetry.GetSubscriptionsReply + 19, // 22: telemetry.OpenConfigTelemetry.getTelemetryOperationalState:output_type -> telemetry.GetOperationalStateReply + 21, // 23: telemetry.OpenConfigTelemetry.getDataEncodings:output_type -> telemetry.DataEncodingReply + 19, // [19:24] is the sub-list for method output_type + 14, // [14:19] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_oc_oc_proto_init() } +func file_oc_oc_proto_init() { + if File_oc_oc_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_oc_oc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Collector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Path); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionAdditionalConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OpenConfigData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Delete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Eom); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSubscriptionReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubscriptionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubscriptionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOperationalStateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOperationalStateReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataEncodingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataEncodingReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, req.(*DataEncodingRequest)) + file_oc_oc_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*KeyValue_DoubleValue)(nil), + (*KeyValue_IntValue)(nil), + (*KeyValue_UintValue)(nil), + (*KeyValue_SintValue)(nil), + (*KeyValue_BoolValue)(nil), + (*KeyValue_StrValue)(nil), + (*KeyValue_BytesValue)(nil), } - return interceptor(ctx, in, info, handler) -} - -var _OpenConfigTelemetry_serviceDesc = grpc.ServiceDesc{ - ServiceName: "telemetry.OpenConfigTelemetry", - HandlerType: (*OpenConfigTelemetryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "cancelTelemetrySubscription", - Handler: _OpenConfigTelemetry_CancelTelemetrySubscription_Handler, - }, - { - MethodName: "getTelemetrySubscriptions", - Handler: _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler, - }, - { - MethodName: "getTelemetryOperationalState", - Handler: _OpenConfigTelemetry_GetTelemetryOperationalState_Handler, - }, - { - MethodName: "getDataEncodings", - Handler: _OpenConfigTelemetry_GetDataEncodings_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "telemetrySubscribe", - Handler: _OpenConfigTelemetry_TelemetrySubscribe_Handler, - ServerStreams: true, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_oc_oc_proto_rawDesc, + NumEnums: 3, + NumMessages: 19, + NumExtensions: 0, + NumServices: 1, }, - }, - Metadata: "oc.proto", -} - -func init() { proto.RegisterFile("oc.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1254 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcd, 0x6e, 0xdb, 0x46, - 0x17, 0x15, 0x25, 0xd9, 0x12, 0xaf, 0x7e, 0x42, 0x8d, 0xe3, 0x2f, 0xb2, 0xa3, 0xaf, 0x71, 0xe8, - 0x16, 0x71, 0x82, 0xd4, 0x28, 0x94, 0x45, 0x51, 0xa4, 0x40, 0x10, 0xcb, 0x74, 0xac, 0xc6, 0x95, - 0xdc, 0xa1, 0x9c, 0xb6, 0x2b, 0x82, 0x22, 0x27, 0x36, 0x11, 0xfe, 0x95, 0x33, 0x12, 0xc2, 0x4d, - 0x9e, 0xa0, 0xe8, 0x9b, 0x75, 0xdd, 0x97, 0xe8, 0x23, 0x74, 0x51, 0xcc, 0x90, 0x94, 0x46, 0x89, - 0x94, 0x34, 0x2b, 0x91, 0xe7, 0x9e, 0xb9, 0xf7, 0xcc, 0xbd, 0x67, 0x86, 0x82, 0x7a, 0xe4, 0x1c, - 0xc7, 0x49, 0xc4, 0x22, 0xa4, 0x32, 0xe2, 0x93, 0x80, 0xb0, 0x24, 0xd5, 0xff, 0x54, 0x60, 0xc7, - 0x9c, 0x4d, 0xa9, 0x93, 0x78, 0x31, 0xf3, 0xa2, 0x10, 0x93, 0xdf, 0x66, 0x84, 0x32, 0xd4, 0x87, - 0x2d, 0x2f, 0x8c, 0x67, 0xac, 0xab, 0x1c, 0x28, 0x47, 0x8d, 0x7e, 0xef, 0x78, 0xb1, 0xe4, 0x58, - 0xa6, 0x0f, 0x39, 0x07, 0x67, 0x54, 0xf4, 0x18, 0xd4, 0xd8, 0x66, 0x37, 0x96, 0xef, 0x51, 0xd6, - 0x2d, 0x1f, 0x54, 0x8e, 0x1a, 0xfd, 0x5b, 0xd2, 0xba, 0x4b, 0x9b, 0xdd, 0xe0, 0x3a, 0x67, 0x5c, - 0x78, 0x94, 0xa1, 0x09, 0x74, 0x6c, 0xd7, 0xf5, 0x78, 0x16, 0xdb, 0xb7, 0x9c, 0x28, 0x7c, 0xed, - 0x5d, 0x77, 0x2b, 0xa2, 0xda, 0x83, 0x0d, 0xd5, 0x9e, 0x2f, 0xf8, 0x03, 0x41, 0xc7, 0x9a, 0xfd, - 0x1e, 0xa2, 0x5f, 0x42, 0xe7, 0x03, 0x7d, 0xe8, 0x29, 0xb4, 0x9d, 0xc8, 0xf7, 0x89, 0xc3, 0xa2, - 0x24, 0x53, 0xa7, 0x08, 0x75, 0xb7, 0xa5, 0x3a, 0x83, 0x82, 0x80, 0x5b, 0x0b, 0x2e, 0xd7, 0xa9, - 0x7f, 0x07, 0xea, 0x22, 0x86, 0xba, 0x50, 0xb3, 0x5d, 0x37, 0x21, 0x94, 0x8a, 0xc6, 0xa8, 0xb8, - 0x78, 0x45, 0x08, 0xaa, 0x71, 0x94, 0xf0, 0x7d, 0x2b, 0x47, 0x2d, 0x2c, 0x9e, 0xf5, 0xbf, 0x14, - 0xa8, 0xf2, 0x5d, 0x8b, 0xa0, 0xcd, 0x6e, 0xf2, 0x35, 0xe2, 0x19, 0xfd, 0x0f, 0xb6, 0x5f, 0x7b, - 0x3e, 0x23, 0x89, 0x58, 0xa2, 0xe2, 0xfc, 0x0d, 0x7d, 0x0d, 0x88, 0xce, 0xe2, 0x98, 0x27, 0xb5, - 0x66, 0xa1, 0x73, 0x63, 0x87, 0xd7, 0xc4, 0x15, 0x8d, 0xa9, 0xe3, 0x4e, 0x11, 0xb9, 0x2a, 0x02, - 0xe8, 0x18, 0x76, 0x02, 0xfb, 0xad, 0x45, 0x3d, 0x9f, 0x84, 0xcc, 0xf2, 0x42, 0x46, 0x92, 0xb9, - 0xed, 0x77, 0xab, 0x42, 0x46, 0x27, 0xb0, 0xdf, 0x9a, 0x22, 0x32, 0xcc, 0x03, 0xe8, 0x21, 0x68, - 0xd4, 0x0e, 0x62, 0x9f, 0x58, 0xaf, 0x13, 0x3e, 0xeb, 0xd0, 0x49, 0xbb, 0x5b, 0x82, 0x7c, 0x2b, - 0xc3, 0xcf, 0x0a, 0x18, 0xed, 0x41, 0x3d, 0x24, 0xc4, 0xb5, 0x48, 0x14, 0x74, 0xb7, 0x45, 0xfd, - 0x1a, 0x7f, 0x37, 0xa2, 0x40, 0xff, 0x5d, 0x81, 0xde, 0xc7, 0x26, 0x83, 0x0e, 0xa1, 0xe5, 0x7b, - 0x81, 0xc7, 0xac, 0x84, 0x38, 0x51, 0xe2, 0x66, 0xed, 0xda, 0xc2, 0x4d, 0x01, 0xe2, 0x0c, 0x43, - 0x8f, 0x01, 0x65, 0x24, 0xe6, 0x05, 0xc4, 0xa2, 0xc4, 0x89, 0x42, 0x97, 0x8a, 0x76, 0x6c, 0x61, - 0x4d, 0x44, 0x26, 0x5e, 0x40, 0xcc, 0x0c, 0x97, 0xe4, 0xd0, 0xbc, 0x1d, 0xb9, 0x1c, 0xaa, 0xbf, - 0x5b, 0x9d, 0x3a, 0x26, 0xb1, 0x9f, 0xa2, 0xa7, 0x50, 0x4f, 0x08, 0x8d, 0xa3, 0x90, 0x92, 0xdc, - 0xc5, 0xf7, 0x36, 0xf8, 0x0a, 0xe7, 0x34, 0xbc, 0x58, 0xf0, 0x79, 0x5e, 0xd6, 0x9f, 0xc1, 0xed, - 0x75, 0xf9, 0xd0, 0x03, 0xb8, 0x45, 0x25, 0xdc, 0xf2, 0x5c, 0xa1, 0xa4, 0x85, 0xdb, 0x32, 0x3c, - 0x74, 0xf5, 0xbf, 0xcb, 0xd0, 0x1e, 0xc7, 0x24, 0xcc, 0xba, 0x77, 0x6a, 0x33, 0x1b, 0xdd, 0x05, - 0x95, 0xa6, 0x94, 0x91, 0xa0, 0x58, 0xa5, 0xe2, 0x7a, 0x06, 0x0c, 0x5d, 0x74, 0x1f, 0x9a, 0x4e, - 0x14, 0xc4, 0x51, 0x28, 0x86, 0xee, 0xe6, 0xae, 0x6b, 0x2c, 0xb0, 0xa1, 0x8b, 0x8e, 0x40, 0xa3, - 0xb3, 0xa9, 0xb5, 0x42, 0xab, 0x2c, 0x8a, 0x0f, 0x24, 0x66, 0xe1, 0xce, 0xaa, 0xe4, 0x4e, 0xae, - 0x3c, 0xf3, 0x01, 0xb1, 0xc2, 0x59, 0x30, 0x25, 0x89, 0x70, 0x49, 0x15, 0xb7, 0x0b, 0x78, 0x24, - 0x50, 0xd4, 0x03, 0x95, 0x4f, 0x8f, 0x32, 0x3b, 0x88, 0x85, 0x4b, 0xaa, 0x78, 0x09, 0xa0, 0x43, - 0x28, 0xbf, 0x99, 0x77, 0x6b, 0xa2, 0x7f, 0x3b, 0x52, 0xff, 0x5e, 0x92, 0xf4, 0x95, 0xed, 0xcf, - 0x08, 0x2e, 0xbf, 0x99, 0xa3, 0x87, 0xb0, 0xed, 0x12, 0x9f, 0x30, 0xd2, 0xad, 0x0b, 0x62, 0x47, - 0x22, 0x9e, 0x8a, 0x00, 0xce, 0x09, 0xe8, 0x00, 0x2a, 0xdc, 0x8d, 0xaa, 0xe0, 0xb5, 0x25, 0x9e, - 0x11, 0x05, 0x98, 0x87, 0xb8, 0xf1, 0x68, 0x1a, 0x3a, 0xd6, 0x62, 0xf4, 0x20, 0xac, 0xd2, 0xe4, - 0x60, 0x31, 0x17, 0xfd, 0x8f, 0x32, 0xd4, 0x0b, 0x09, 0x48, 0x83, 0xca, 0x1b, 0x92, 0xe6, 0x2d, - 0xe6, 0x8f, 0xe8, 0x10, 0x9a, 0x6e, 0x34, 0x9b, 0xfa, 0xc4, 0x9a, 0x73, 0x86, 0xd8, 0xb9, 0x72, - 0x5e, 0xc2, 0x8d, 0x0c, 0xcd, 0x96, 0xfd, 0x1f, 0x54, 0x2f, 0x64, 0x39, 0x83, 0x6f, 0xbc, 0x72, - 0x5e, 0xc2, 0x75, 0x2f, 0x64, 0x59, 0xf8, 0x1e, 0xc0, 0x6c, 0x19, 0xaf, 0xf1, 0xc6, 0x9c, 0x97, - 0xb0, 0x3a, 0x93, 0x09, 0x74, 0x49, 0xa8, 0x1f, 0x28, 0x47, 0x88, 0x13, 0xa8, 0x4c, 0x98, 0x46, - 0x91, 0x9f, 0x13, 0x54, 0xbe, 0x0d, 0x4e, 0xe0, 0xd8, 0x42, 0x01, 0x65, 0x49, 0x1e, 0xe7, 0xdb, - 0x54, 0xb9, 0x02, 0xca, 0x92, 0x2c, 0x7c, 0x1f, 0x1a, 0xd3, 0x94, 0x11, 0x9a, 0x13, 0x1a, 0x07, - 0xca, 0x51, 0xf3, 0xbc, 0x84, 0x41, 0x80, 0x82, 0x72, 0x52, 0x83, 0x2d, 0x11, 0xd4, 0x7b, 0xb0, - 0x9d, 0x75, 0x7a, 0xdd, 0x55, 0xa5, 0xef, 0x41, 0xc5, 0x88, 0x82, 0xb5, 0xa1, 0x53, 0xd8, 0x1b, - 0xd8, 0xa1, 0x43, 0xfc, 0x75, 0x1f, 0x91, 0xff, 0x6c, 0x7f, 0x0b, 0xee, 0xac, 0xcb, 0xc2, 0x4f, - 0xf1, 0x43, 0xa8, 0x3a, 0x91, 0x9b, 0x9d, 0xe0, 0x76, 0x7f, 0x57, 0x1a, 0x39, 0x26, 0x6c, 0x96, - 0x84, 0x83, 0xc8, 0x25, 0x58, 0x50, 0xf8, 0x05, 0xc1, 0x7f, 0x2d, 0xca, 0x8a, 0x3b, 0xb5, 0xc6, - 0xdf, 0x4d, 0x96, 0xe8, 0x27, 0x70, 0xe7, 0x05, 0x61, 0x72, 0x76, 0xfa, 0xd9, 0x22, 0xa7, 0xb0, - 0xfb, 0x61, 0x0e, 0x2e, 0x71, 0x08, 0x9d, 0x95, 0x0c, 0xd2, 0x17, 0xa6, 0xb7, 0xf1, 0xc6, 0x89, - 0xfd, 0x14, 0x6b, 0xf2, 0x32, 0x71, 0x91, 0xbc, 0x83, 0xfd, 0x17, 0x84, 0x8d, 0x63, 0x92, 0xd8, - 0xd9, 0x75, 0x6a, 0x32, 0x9b, 0x91, 0xcf, 0x95, 0x8a, 0xbe, 0x05, 0x75, 0x4e, 0x92, 0x69, 0x44, - 0x3d, 0x96, 0x8a, 0x56, 0xb4, 0xfb, 0x7b, 0x92, 0x92, 0x57, 0x45, 0xec, 0x82, 0xcc, 0x89, 0x8f, - 0x97, 0x5c, 0xfd, 0x19, 0x74, 0xd7, 0xd6, 0xe7, 0xdb, 0xcc, 0xce, 0xb2, 0xf2, 0xd1, 0xb3, 0xac, - 0xef, 0xc2, 0x0e, 0xbf, 0xbd, 0x8c, 0xd0, 0x89, 0x5c, 0x2f, 0xbc, 0xce, 0x95, 0xeb, 0x3f, 0x41, - 0x67, 0x15, 0xe6, 0x09, 0xbf, 0x87, 0x16, 0xc9, 0x81, 0x65, 0xcf, 0xda, 0xfd, 0x3b, 0xf2, 0xb1, - 0xce, 0xe3, 0x93, 0x34, 0x26, 0xb8, 0x59, 0xb0, 0x79, 0xab, 0x1e, 0xbd, 0x00, 0x58, 0x3a, 0x00, - 0x35, 0xa0, 0x66, 0x5e, 0x0d, 0x06, 0x86, 0x69, 0x6a, 0x25, 0xb4, 0x07, 0xbb, 0xa3, 0xb1, 0x65, - 0x5e, 0x9d, 0x98, 0x03, 0x3c, 0xbc, 0x9c, 0x0c, 0xc7, 0x23, 0xcb, 0x18, 0x4d, 0xf0, 0xaf, 0x9a, - 0x82, 0x3a, 0xd0, 0xba, 0x1a, 0xbd, 0x1c, 0x8d, 0x7f, 0x1e, 0x59, 0x06, 0xc6, 0x63, 0xac, 0x95, - 0x1f, 0xf5, 0xa1, 0xbd, 0xda, 0x10, 0x04, 0xb0, 0x7d, 0x6a, 0x4c, 0x9e, 0x0f, 0x2f, 0xb4, 0x12, - 0x52, 0x61, 0x6b, 0x62, 0x60, 0xd3, 0xd0, 0x14, 0xfe, 0x78, 0x82, 0x87, 0xc6, 0x99, 0x56, 0x7e, - 0xf4, 0x1c, 0x9a, 0xb2, 0x34, 0xd4, 0x02, 0xf5, 0x6a, 0x74, 0x6a, 0x9c, 0x0d, 0x47, 0xc6, 0xa9, - 0x56, 0x42, 0x35, 0xa8, 0xfc, 0xf2, 0xe3, 0x85, 0xa6, 0x70, 0xfc, 0x07, 0x73, 0x3c, 0xb2, 0x86, - 0xc6, 0xe4, 0x4c, 0x2b, 0xf3, 0xc4, 0x97, 0x78, 0x3c, 0x19, 0x3f, 0xd1, 0x2a, 0xfd, 0x7f, 0x2a, - 0xb0, 0xb3, 0xbc, 0xf2, 0x27, 0xc5, 0x96, 0x91, 0x09, 0x68, 0xb1, 0xff, 0xdc, 0x32, 0x53, 0x82, - 0xbe, 0xd8, 0x68, 0x24, 0xd1, 0xe0, 0x7d, 0x79, 0xbc, 0xab, 0x1f, 0x12, 0xbd, 0xf4, 0x8d, 0x82, - 0x3c, 0xb8, 0xeb, 0x88, 0x03, 0x36, 0x79, 0x2f, 0xb5, 0x48, 0x82, 0xbe, 0x94, 0xff, 0x08, 0x6d, - 0x3a, 0xce, 0xfb, 0xfa, 0x27, 0x58, 0xb1, 0x9f, 0xea, 0x25, 0xe4, 0xc0, 0xde, 0x35, 0x61, 0x6b, - 0xeb, 0x50, 0x24, 0xa7, 0xd8, 0x70, 0x20, 0xf7, 0x0f, 0x3e, 0xca, 0xc9, 0x8a, 0xf8, 0xd0, 0x93, - 0x8b, 0xbc, 0x6f, 0x58, 0xf4, 0xd5, 0x6a, 0x8e, 0x0d, 0x07, 0x6a, 0xff, 0xf0, 0x53, 0xb4, 0xac, - 0x1a, 0x06, 0xed, 0x9a, 0x30, 0xd9, 0xc0, 0x74, 0x65, 0x20, 0x6b, 0x1c, 0xbf, 0xdf, 0xdb, 0x18, - 0x17, 0x39, 0xa7, 0xdb, 0xe2, 0xaf, 0xf8, 0x93, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xe3, - 0x4f, 0x0d, 0x96, 0x0b, 0x00, 0x00, + GoTypes: file_oc_oc_proto_goTypes, + DependencyIndexes: file_oc_oc_proto_depIdxs, + EnumInfos: file_oc_oc_proto_enumTypes, + MessageInfos: file_oc_oc_proto_msgTypes, + }.Build() + File_oc_oc_proto = out.File + file_oc_oc_proto_rawDesc = nil + file_oc_oc_proto_goTypes = nil + file_oc_oc_proto_depIdxs = nil } diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto index cf4aa145e6911..8c3ad32b9913f 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto @@ -36,6 +36,7 @@ syntax = "proto3"; package telemetry; +option go_package = ".;telemetry"; // Interface exported by Agent service OpenConfigTelemetry { diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go new file mode 100644 index 0000000000000..593e5a1e1002a --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go @@ -0,0 +1,293 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package telemetry + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// OpenConfigTelemetryClient is the client API for OpenConfigTelemetry service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type OpenConfigTelemetryClient interface { + // Request an inline subscription for data at the specified path. + // The device should send telemetry data back on the same + // connection as the subscription request. + TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) + // Terminates and removes an existing telemetry subscription + CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) + // Get the list of current telemetry subscriptions from the + // target. This command returns a list of existing subscriptions + // not including those that are established via configuration. + GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) + // Get Telemetry Agent Operational States + GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) + // Return the set of data encodings supported by the device for + // telemetry data + GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) +} + +type openConfigTelemetryClient struct { + cc grpc.ClientConnInterface +} + +func NewOpenConfigTelemetryClient(cc grpc.ClientConnInterface) OpenConfigTelemetryClient { + return &openConfigTelemetryClient{cc} +} + +func (c *openConfigTelemetryClient) TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &OpenConfigTelemetry_ServiceDesc.Streams[0], "/telemetry.OpenConfigTelemetry/telemetrySubscribe", opts...) + if err != nil { + return nil, err + } + x := &openConfigTelemetryTelemetrySubscribeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type OpenConfigTelemetry_TelemetrySubscribeClient interface { + Recv() (*OpenConfigData, error) + grpc.ClientStream +} + +type openConfigTelemetryTelemetrySubscribeClient struct { + grpc.ClientStream +} + +func (x *openConfigTelemetryTelemetrySubscribeClient) Recv() (*OpenConfigData, error) { + m := new(OpenConfigData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *openConfigTelemetryClient) CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) { + out := new(CancelSubscriptionReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) { + out := new(GetSubscriptionsReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) { + out := new(GetOperationalStateReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) { + out := new(DataEncodingReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getDataEncodings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// OpenConfigTelemetryServer is the server API for OpenConfigTelemetry service. +// All implementations must embed UnimplementedOpenConfigTelemetryServer +// for forward compatibility +type OpenConfigTelemetryServer interface { + // Request an inline subscription for data at the specified path. + // The device should send telemetry data back on the same + // connection as the subscription request. + TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error + // Terminates and removes an existing telemetry subscription + CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) + // Get the list of current telemetry subscriptions from the + // target. This command returns a list of existing subscriptions + // not including those that are established via configuration. + GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) + // Get Telemetry Agent Operational States + GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) + // Return the set of data encodings supported by the device for + // telemetry data + GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) + mustEmbedUnimplementedOpenConfigTelemetryServer() +} + +// UnimplementedOpenConfigTelemetryServer must be embedded to have forward compatible implementations. +type UnimplementedOpenConfigTelemetryServer struct { +} + +func (UnimplementedOpenConfigTelemetryServer) TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method TelemetrySubscribe not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelTelemetrySubscription not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTelemetrySubscriptions not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTelemetryOperationalState not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetDataEncodings not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) mustEmbedUnimplementedOpenConfigTelemetryServer() {} + +// UnsafeOpenConfigTelemetryServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to OpenConfigTelemetryServer will +// result in compilation errors. +type UnsafeOpenConfigTelemetryServer interface { + mustEmbedUnimplementedOpenConfigTelemetryServer() +} + +func RegisterOpenConfigTelemetryServer(s grpc.ServiceRegistrar, srv OpenConfigTelemetryServer) { + s.RegisterService(&OpenConfigTelemetry_ServiceDesc, srv) +} + +func _OpenConfigTelemetry_TelemetrySubscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscriptionRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OpenConfigTelemetryServer).TelemetrySubscribe(m, &openConfigTelemetryTelemetrySubscribeServer{stream}) +} + +type OpenConfigTelemetry_TelemetrySubscribeServer interface { + Send(*OpenConfigData) error + grpc.ServerStream +} + +type openConfigTelemetryTelemetrySubscribeServer struct { + grpc.ServerStream +} + +func (x *openConfigTelemetryTelemetrySubscribeServer) Send(m *OpenConfigData) error { + return x.ServerStream.SendMsg(m) +} + +func _OpenConfigTelemetry_CancelTelemetrySubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, req.(*CancelSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSubscriptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, req.(*GetSubscriptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetTelemetryOperationalState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOperationalStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, req.(*GetOperationalStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetDataEncodings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DataEncodingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getDataEncodings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, req.(*DataEncodingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// OpenConfigTelemetry_ServiceDesc is the grpc.ServiceDesc for OpenConfigTelemetry service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var OpenConfigTelemetry_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "telemetry.OpenConfigTelemetry", + HandlerType: (*OpenConfigTelemetryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "cancelTelemetrySubscription", + Handler: _OpenConfigTelemetry_CancelTelemetrySubscription_Handler, + }, + { + MethodName: "getTelemetrySubscriptions", + Handler: _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler, + }, + { + MethodName: "getTelemetryOperationalState", + Handler: _OpenConfigTelemetry_GetTelemetryOperationalState_Handler, + }, + { + MethodName: "getDataEncodings", + Handler: _OpenConfigTelemetry_GetDataEncodings_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "telemetrySubscribe", + Handler: _OpenConfigTelemetry_TelemetrySubscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "oc/oc.proto", +} diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go index 8db4ce0d543bc..9fed6a324bf34 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go @@ -47,6 +47,7 @@ var dataWithStringValues = &telemetry.OpenConfigData{ } type openConfigTelemetryServer struct { + telemetry.UnimplementedOpenConfigTelemetryServer } func (s *openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.SubscriptionRequest, stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer) error { diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index a38d5989cb5d0..03b28ad2cb07f 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -15,15 +15,15 @@ import ( "sync" "time" - "github.com/influxdata/telegraf/metric" + riemanngo "github.com/riemann/riemann-go-client" + riemangoProto "github.com/riemann/riemann-go-client/proto" + "google.golang.org/protobuf/proto" - "github.com/gogo/protobuf/proto" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/metric" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - riemanngo "github.com/riemann/riemann-go-client" - riemangoProto "github.com/riemann/riemann-go-client/proto" ) type RiemannSocketListener struct { diff --git a/plugins/inputs/riemann_listener/riemann_listener_test.go b/plugins/inputs/riemann_listener/riemann_listener_test.go index 92dc829ac1312..7a995fc475cb7 100644 --- a/plugins/inputs/riemann_listener/riemann_listener_test.go +++ b/plugins/inputs/riemann_listener/riemann_listener_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/testutil" riemanngo "github.com/riemann/riemann-go-client" "github.com/stretchr/testify/require" "gotest.tools/assert" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" ) func TestSocketListener_tcp(t *testing.T) { diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index cc8b1a40a10a5..b1d6ea59d2f3b 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -10,8 +10,6 @@ import ( "time" monitoring "cloud.google.com/go/monitoring/apiv3/v2" - googlepbduration "github.com/golang/protobuf/ptypes/duration" - googlepbts "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/limiter" @@ -22,6 +20,8 @@ import ( distributionpb "google.golang.org/genproto/googleapis/api/distribution" metricpb "google.golang.org/genproto/googleapis/api/metric" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -393,8 +393,8 @@ func (s *Stackdriver) newTimeSeriesConf( ) *timeSeriesConf { filter := s.newListTimeSeriesFilter(metricType) interval := &monitoringpb.TimeInterval{ - EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, - StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + EndTime: ×tamppb.Timestamp{Seconds: endTime.Unix()}, + StartTime: ×tamppb.Timestamp{Seconds: startTime.Unix()}, } tsReq := &monitoringpb.ListTimeSeriesRequest{ Name: fmt.Sprintf("projects/%s", s.Project), @@ -432,7 +432,7 @@ func (t *timeSeriesConf) initForAggregate(alignerStr string) { } aligner := monitoringpb.Aggregation_Aligner(alignerInt) agg := &monitoringpb.Aggregation{ - AlignmentPeriod: &googlepbduration.Duration{Seconds: 60}, + AlignmentPeriod: &durationpb.Duration{Seconds: 60}, PerSeriesAligner: aligner, } t.fieldKey = t.fieldKey + "_" + strings.ToLower(alignerStr) @@ -522,8 +522,8 @@ func (s *Stackdriver) generatetimeSeriesConfs( if s.timeSeriesConfCache != nil && s.timeSeriesConfCache.IsValid() { // Update interval for timeseries requests in timeseries cache interval := &monitoringpb.TimeInterval{ - EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, - StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + EndTime: ×tamppb.Timestamp{Seconds: endTime.Unix()}, + StartTime: ×tamppb.Timestamp{Seconds: startTime.Unix()}, } for _, timeSeriesConf := range s.timeSeriesConfCache.TimeSeriesConfs { timeSeriesConf.listTimeSeriesRequest.Interval = interval diff --git a/plugins/inputs/stackdriver/stackdriver_test.go b/plugins/inputs/stackdriver/stackdriver_test.go index 0502c7bed9765..ad6b15145031a 100644 --- a/plugins/inputs/stackdriver/stackdriver_test.go +++ b/plugins/inputs/stackdriver/stackdriver_test.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" @@ -15,6 +14,7 @@ import ( metricpb "google.golang.org/genproto/googleapis/api/metric" "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/timestamppb" ) type Call struct { @@ -105,7 +105,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -138,7 +138,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -171,7 +171,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -204,7 +204,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -249,7 +249,7 @@ func TestGather(t *testing.T) { Points: []*monitoringpb.Point{ { Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -283,7 +283,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -378,7 +378,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -473,7 +473,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -556,7 +556,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -702,7 +702,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -717,7 +717,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -732,7 +732,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -1081,7 +1081,7 @@ func TestListMetricDescriptorFilter(t *testing.T) { ch <- createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index d4f660ff7c569..d6b24ff78839b 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -10,7 +10,6 @@ import ( "strings" monitoring "cloud.google.com/go/monitoring/apiv3/v2" // Imports the Stackdriver Monitoring client package. - googlepb "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" @@ -18,6 +17,7 @@ import ( metricpb "google.golang.org/genproto/googleapis/api/metric" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/timestamppb" ) // Stackdriver is the Google Stackdriver config info. @@ -247,16 +247,16 @@ func getStackdriverTimeInterval( switch m { case metricpb.MetricDescriptor_GAUGE: return &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: end, }, }, nil case metricpb.MetricDescriptor_CUMULATIVE: return &monitoringpb.TimeInterval{ - StartTime: &googlepb.Timestamp{ + StartTime: ×tamppb.Timestamp{ Seconds: start, }, - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: end, }, }, nil diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index 8af553b374c53..bb2a620e93668 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -12,9 +12,6 @@ import ( "time" monitoring "cloud.google.com/go/monitoring/apiv3/v2" - "github.com/golang/protobuf/proto" - emptypb "github.com/golang/protobuf/ptypes/empty" - googlepb "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -22,6 +19,9 @@ import ( monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "google.golang.org/grpc" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" ) // clientOpt is the option tests should use to connect to the test server. @@ -181,7 +181,7 @@ func TestWriteAscendingTime(t *testing.T) { ts := request.TimeSeries[0] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 1, }, }) @@ -196,7 +196,7 @@ func TestWriteAscendingTime(t *testing.T) { ts = request.TimeSeries[0] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 2, }, }) @@ -311,7 +311,7 @@ func TestWriteBatchable(t *testing.T) { ts := request.TimeSeries[0] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 3, }, }) @@ -324,7 +324,7 @@ func TestWriteBatchable(t *testing.T) { ts = request.TimeSeries[1] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 1, }, }) @@ -337,7 +337,7 @@ func TestWriteBatchable(t *testing.T) { ts = request.TimeSeries[2] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 3, }, }) @@ -350,7 +350,7 @@ func TestWriteBatchable(t *testing.T) { ts = request.TimeSeries[4] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 5, }, }) diff --git a/plugins/parsers/prometheusremotewrite/parser.go b/plugins/parsers/prometheusremotewrite/parser.go index 9f0a08a682a19..3b9f25de28680 100644 --- a/plugins/parsers/prometheusremotewrite/parser.go +++ b/plugins/parsers/prometheusremotewrite/parser.go @@ -8,7 +8,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/gogo/protobuf/proto" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/prompb" ) @@ -22,7 +21,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { var metrics []telegraf.Metric var req prompb.WriteRequest - if err := proto.Unmarshal(buf, &req); err != nil { + if err := req.Unmarshal(buf); err != nil { return nil, fmt.Errorf("unable to unmarshal request body: %s", err) } diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go index caa8a7334d91d..e160107101ab7 100644 --- a/plugins/serializers/prometheus/collection.go +++ b/plugins/serializers/prometheus/collection.go @@ -7,9 +7,9 @@ import ( "strings" "time" - "github.com/gogo/protobuf/proto" "github.com/influxdata/telegraf" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) const helpString = "Telegraf collected metric" diff --git a/plugins/serializers/prometheus/collection_test.go b/plugins/serializers/prometheus/collection_test.go index deb400ba2d899..67447e66417ae 100644 --- a/plugins/serializers/prometheus/collection_test.go +++ b/plugins/serializers/prometheus/collection_test.go @@ -5,11 +5,11 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" dto "github.com/prometheus/client_model/go" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) type Input struct { diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go index fb3cea4edd352..b6dd180dba30b 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go @@ -9,7 +9,6 @@ import ( "strings" "time" - "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/influxdata/telegraf/plugins/serializers/prometheus" @@ -236,7 +235,8 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { return false }) } - data, err := proto.Marshal(&prompb.WriteRequest{Timeseries: promTS}) + pb := &prompb.WriteRequest{Timeseries: promTS} + data, err := pb.Marshal() if err != nil { return nil, fmt.Errorf("unable to marshal protobuf: %v", err) } diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go index f9e47eac54db5..f07c2c3fecfc6 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/prompb" @@ -664,7 +663,7 @@ func prompbToText(data []byte) ([]byte, error) { return nil, err } var req prompb.WriteRequest - err = proto.Unmarshal(protobuff, &req) + err = req.Unmarshal(protobuff) if err != nil { return nil, err } From d1726119615c9d58b4b1a469a3ec381a51f6ccbd Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 9 Nov 2021 08:28:38 -0700 Subject: [PATCH 744/761] chore: update go version from 1.17.2 to 1.17.3 (#10073) (cherry picked from commit d5afd654c69ff30355f33eb03ff3b7cd6596fb12) --- .circleci/config.yml | 2 +- Makefile | 4 ++-- scripts/ci-1.17.docker | 2 +- scripts/installgo_mac.sh | 6 +++--- scripts/installgo_windows.sh | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 3fa611f8b26fb..f282aa8b7a819 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,7 +7,7 @@ executors: go-1_17: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.17.2' + - image: 'quay.io/influxdb/telegraf-ci:1.17.3' environment: GOFLAGS: -p=8 mac: diff --git a/Makefile b/Makefile index 7b91fa1edcfec..52362a307790c 100644 --- a/Makefile +++ b/Makefile @@ -211,8 +211,8 @@ plugin-%: .PHONY: ci-1.17 ci-1.17: - docker build -t quay.io/influxdb/telegraf-ci:1.17.2 - < scripts/ci-1.17.docker - docker push quay.io/influxdb/telegraf-ci:1.17.2 + docker build -t quay.io/influxdb/telegraf-ci:1.17.3 - < scripts/ci-1.17.docker + docker push quay.io/influxdb/telegraf-ci:1.17.3 .PHONY: install install: $(buildbin) diff --git a/scripts/ci-1.17.docker b/scripts/ci-1.17.docker index a69a0d7eddbe3..6b220c0898e94 100644 --- a/scripts/ci-1.17.docker +++ b/scripts/ci-1.17.docker @@ -1,4 +1,4 @@ -FROM golang:1.17.2 +FROM golang:1.17.3 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/installgo_mac.sh b/scripts/installgo_mac.sh index f15aefa6a1641..2676495d3664a 100644 --- a/scripts/installgo_mac.sh +++ b/scripts/installgo_mac.sh @@ -3,13 +3,13 @@ set -eux ARCH=$(uname -m) -GO_VERSION="1.17.2" +GO_VERSION="1.17.3" if [ "$ARCH" = 'arm64' ]; then GO_ARCH="darwin-arm64" - GO_VERSION_SHA="ce8771bd3edfb5b28104084b56bbb532eeb47fbb7769c3e664c6223712c30904" # from https://golang.org/dl + GO_VERSION_SHA="ffe45ef267271b9681ca96ca9b0eb9b8598dd82f7bb95b27af3eef2461dc3d2c" # from https://golang.org/dl elif [ "$ARCH" = 'x86_64' ]; then GO_ARCH="darwin-amd64" - GO_VERSION_SHA="7914497a302a132a465d33f5ee044ce05568bacdb390ab805cb75a3435a23f94" # from https://golang.org/dl + GO_VERSION_SHA="765c021e372a87ce0bc58d3670ab143008dae9305a79e9fa83440425529bb636" # from https://golang.org/dl fi # This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.) diff --git a/scripts/installgo_windows.sh b/scripts/installgo_windows.sh index bd5dcca3dbc14..1571daa28eecb 100644 --- a/scripts/installgo_windows.sh +++ b/scripts/installgo_windows.sh @@ -2,7 +2,7 @@ set -eux -GO_VERSION="1.17.2" +GO_VERSION="1.17.3" setup_go () { choco upgrade golang --version=${GO_VERSION} From 043456fed00b38fdedfb3719edb93d33ceda4e07 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Tue, 9 Nov 2021 15:05:42 -0700 Subject: [PATCH 745/761] fix: do not build modbus on openbsd (#10047) (cherry picked from commit ce46506e19c1763a402358494ff95276b772a6ce) --- plugins/inputs/modbus/configuration.go | 2 ++ plugins/inputs/modbus/configuration_original.go | 2 ++ plugins/inputs/modbus/modbus.go | 2 ++ plugins/inputs/modbus/modbus_openbsd.go | 3 +++ plugins/inputs/modbus/modbus_test.go | 2 ++ plugins/inputs/modbus/request.go | 2 ++ plugins/inputs/modbus/type_conversions.go | 2 ++ plugins/inputs/modbus/type_conversions16.go | 2 ++ plugins/inputs/modbus/type_conversions32.go | 2 ++ plugins/inputs/modbus/type_conversions64.go | 2 ++ 10 files changed, 21 insertions(+) create mode 100644 plugins/inputs/modbus/modbus_openbsd.go diff --git a/plugins/inputs/modbus/configuration.go b/plugins/inputs/modbus/configuration.go index cbf36cab15524..143f12867dea6 100644 --- a/plugins/inputs/modbus/configuration.go +++ b/plugins/inputs/modbus/configuration.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import "fmt" diff --git a/plugins/inputs/modbus/configuration_original.go b/plugins/inputs/modbus/configuration_original.go index cf4b2e1241b8e..78861df74e0f7 100644 --- a/plugins/inputs/modbus/configuration_original.go +++ b/plugins/inputs/modbus/configuration_original.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import ( diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index 18a00e990dc66..4769d6bd0342b 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import ( diff --git a/plugins/inputs/modbus/modbus_openbsd.go b/plugins/inputs/modbus/modbus_openbsd.go new file mode 100644 index 0000000000000..6cc2bfeb3b8fd --- /dev/null +++ b/plugins/inputs/modbus/modbus_openbsd.go @@ -0,0 +1,3 @@ +//go:build openbsd + +package modbus diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index b0b49b5711075..4f9f4eca39434 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import ( diff --git a/plugins/inputs/modbus/request.go b/plugins/inputs/modbus/request.go index 125aebe2eb8c4..b2a31d9dcf4d3 100644 --- a/plugins/inputs/modbus/request.go +++ b/plugins/inputs/modbus/request.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import "sort" diff --git a/plugins/inputs/modbus/type_conversions.go b/plugins/inputs/modbus/type_conversions.go index 556f7b423c13d..88c4b7465a824 100644 --- a/plugins/inputs/modbus/type_conversions.go +++ b/plugins/inputs/modbus/type_conversions.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import "fmt" diff --git a/plugins/inputs/modbus/type_conversions16.go b/plugins/inputs/modbus/type_conversions16.go index 7766e1d0edafe..088a5d10c445a 100644 --- a/plugins/inputs/modbus/type_conversions16.go +++ b/plugins/inputs/modbus/type_conversions16.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import ( diff --git a/plugins/inputs/modbus/type_conversions32.go b/plugins/inputs/modbus/type_conversions32.go index 1a0255ef3e8e0..260a3dc065f70 100644 --- a/plugins/inputs/modbus/type_conversions32.go +++ b/plugins/inputs/modbus/type_conversions32.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import ( diff --git a/plugins/inputs/modbus/type_conversions64.go b/plugins/inputs/modbus/type_conversions64.go index f72dfdf3af66d..55b0a0775c701 100644 --- a/plugins/inputs/modbus/type_conversions64.go +++ b/plugins/inputs/modbus/type_conversions64.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import ( From 3c8b01894e1b0624aded833910d25be9de4232ae Mon Sep 17 00:00:00 2001 From: atetevoortwis Date: Tue, 9 Nov 2021 23:29:36 +0100 Subject: [PATCH 746/761] fix: Changed VM ID from string to int (#10068) (cherry picked from commit 8a3ba854199696bef7d087c73e9a9a51f9e2fcc1) --- plugins/inputs/proxmox/proxmox.go | 8 ++++---- plugins/inputs/proxmox/proxmox_test.go | 2 +- plugins/inputs/proxmox/structs.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index efd7fae7d5d5f..101b458630eeb 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -163,8 +163,8 @@ func gatherVMData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { } } -func getCurrentVMStatus(px *Proxmox, rt ResourceType, id string) (VMStat, error) { - apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + id + "/status/current" +func getCurrentVMStatus(px *Proxmox, rt ResourceType, id json.Number) (VMStat, error) { + apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + string(id) + "/status/current" jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { @@ -196,8 +196,8 @@ func getVMStats(px *Proxmox, rt ResourceType) (VMStats, error) { return vmStats, nil } -func getVMConfig(px *Proxmox, vmID string, rt ResourceType) (VMConfig, error) { - apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + vmID + "/config" +func getVMConfig(px *Proxmox, vmID json.Number, rt ResourceType) (VMConfig, error) { + apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + string(vmID) + "/config" jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { return VMConfig{}, err diff --git a/plugins/inputs/proxmox/proxmox_test.go b/plugins/inputs/proxmox/proxmox_test.go index f05b6450bd7be..741a272829474 100644 --- a/plugins/inputs/proxmox/proxmox_test.go +++ b/plugins/inputs/proxmox/proxmox_test.go @@ -13,7 +13,7 @@ import ( var nodeSearchDomainTestData = `{"data":{"search":"test.example.com","dns1":"1.0.0.1"}}` var qemuTestData = `{"data":[{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}]}` var qemuConfigTestData = `{"data":{"hostname":"qemu1","searchdomain":"test.example.com"}}` -var lxcTestData = `{"data":[{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}]}` +var lxcTestData = `{"data":[{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"},{"vmid":112,"type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container2"}]}` var lxcConfigTestData = `{"data":{"hostname":"container1","searchdomain":"test.example.com"}}` var lxcCurrentStatusTestData = `{"data":{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}}` var qemuCurrentStatusTestData = `{"data":{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}}` diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go index c064150c061f6..2f16841b2ff8b 100644 --- a/plugins/inputs/proxmox/structs.go +++ b/plugins/inputs/proxmox/structs.go @@ -41,7 +41,7 @@ type VMCurrentStats struct { } type VMStat struct { - ID string `json:"vmid"` + ID json.Number `json:"vmid"` Name string `json:"name"` Status string `json:"status"` UsedMem json.Number `json:"mem"` From 629bdb7bf63c2d78cbcec79930de39ec0538dd0e Mon Sep 17 00:00:00 2001 From: Felix Edelmann Date: Tue, 9 Nov 2021 23:30:42 +0100 Subject: [PATCH 747/761] fix: mysql: type conversion follow-up (#9966) (cherry picked from commit f7827a0408075cd945a1eaf3b311882973016277) --- plugins/inputs/mysql/mysql.go | 103 ++++++++++++++---------- plugins/inputs/mysql/mysql_test.go | 27 +------ plugins/inputs/mysql/v1/mysql.go | 8 +- plugins/inputs/mysql/v2/convert.go | 12 ++- plugins/inputs/mysql/v2/convert_test.go | 41 ++++++++++ 5 files changed, 116 insertions(+), 75 deletions(-) diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 28313b25534aa..3fbd4654ef2b4 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -1,7 +1,6 @@ package mysql import ( - "bytes" "database/sql" "fmt" "strconv" @@ -638,7 +637,12 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu value, err := m.parseGlobalVariables(key, val) if err != nil { - m.Log.Debugf("Error parsing global variable %q: %v", key, err) + errString := fmt.Errorf("error parsing mysql global variable %q=%q: %v", key, string(val), err) + if m.MetricVersion < 2 { + m.Log.Debug(errString) + } else { + acc.AddError(errString) + } } else { fields[key] = value } @@ -658,11 +662,7 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu func (m *Mysql) parseGlobalVariables(key string, value sql.RawBytes) (interface{}, error) { if m.MetricVersion < 2 { - v, ok := v1.ParseValue(value) - if ok { - return v, nil - } - return v, fmt.Errorf("could not parse value: %q", string(value)) + return v1.ParseValue(value) } return v2.ConvertGlobalVariables(key, value) } @@ -693,35 +693,58 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu // scanning keys and values separately // get columns names, and create an array with its length - cols, err := rows.Columns() + cols, err := rows.ColumnTypes() if err != nil { return err } - vals := make([]interface{}, len(cols)) + vals := make([]sql.RawBytes, len(cols)) + valPtrs := make([]interface{}, len(cols)) // fill the array with sql.Rawbytes for i := range vals { - vals[i] = &sql.RawBytes{} + vals[i] = sql.RawBytes{} + valPtrs[i] = &vals[i] } - if err = rows.Scan(vals...); err != nil { + if err = rows.Scan(valPtrs...); err != nil { return err } + // range over columns, and try to parse values for i, col := range cols { + colName := col.Name() + if m.MetricVersion >= 2 { - col = strings.ToLower(col) + colName = strings.ToLower(colName) } + colValue := vals[i] + if m.GatherAllSlaveChannels && - (strings.ToLower(col) == "channel_name" || strings.ToLower(col) == "connection_name") { + (strings.ToLower(colName) == "channel_name" || strings.ToLower(colName) == "connection_name") { // Since the default channel name is empty, we need this block channelName := "default" - if len(*vals[i].(*sql.RawBytes)) > 0 { - channelName = string(*vals[i].(*sql.RawBytes)) + if len(colValue) > 0 { + channelName = string(colValue) } tags["channel"] = channelName - } else if value, ok := m.parseValue(*vals[i].(*sql.RawBytes)); ok { - fields["slave_"+col] = value + continue } + + if colValue == nil || len(colValue) == 0 { + continue + } + + value, err := m.parseValueByDatabaseTypeName(colValue, col.DatabaseTypeName()) + if err != nil { + errString := fmt.Errorf("error parsing mysql slave status %q=%q: %v", colName, string(colValue), err) + if m.MetricVersion < 2 { + m.Log.Debug(errString) + } else { + acc.AddError(errString) + } + continue + } + + fields["slave_"+colName] = value } acc.AddFields("mysql", fields, tags) @@ -877,7 +900,7 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum key = strings.ToLower(key) value, err := v2.ConvertGlobalStatus(key, val) if err != nil { - m.Log.Debugf("Error parsing global status: %v", err) + acc.AddError(fmt.Errorf("error parsing mysql global status %q=%q: %v", key, string(val), err)) } else { fields[key] = value } @@ -1346,10 +1369,16 @@ func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, serv string, acc telegraf.Accumu if err := rows.Scan(&key, &val); err != nil { return err } + key = strings.ToLower(key) - if value, ok := m.parseValue(val); ok { - fields[key] = value + value, err := m.parseValueByDatabaseTypeName(val, "BIGINT") + if err != nil { + acc.AddError(fmt.Errorf("error parsing mysql InnoDB metric %q=%q: %v", key, string(val), err)) + continue } + + fields[key] = value + // Send 20 fields at a time if len(fields) >= 20 { acc.AddFields("mysql_innodb", fields, tags) @@ -1914,34 +1943,22 @@ func (m *Mysql) gatherSchemaForDB(db *sql.DB, database string, servtag string, a return nil } -func (m *Mysql) parseValue(value sql.RawBytes) (interface{}, bool) { +func (m *Mysql) parseValueByDatabaseTypeName(value sql.RawBytes, databaseTypeName string) (interface{}, error) { if m.MetricVersion < 2 { return v1.ParseValue(value) } - return parseValue(value) -} - -// parseValue can be used to convert values such as "ON","OFF","Yes","No" to 0,1 -func parseValue(value sql.RawBytes) (interface{}, bool) { - if bytes.EqualFold(value, []byte("YES")) || bytes.Equal(value, []byte("ON")) { - return 1, true - } - - if bytes.EqualFold(value, []byte("NO")) || bytes.Equal(value, []byte("OFF")) { - return 0, true - } - - if val, err := strconv.ParseInt(string(value), 10, 64); err == nil { - return val, true - } - if val, err := strconv.ParseFloat(string(value), 64); err == nil { - return val, true - } - if len(string(value)) > 0 { - return string(value), true + switch databaseTypeName { + case "INT": + return v2.ParseInt(value) + case "BIGINT": + return v2.ParseUint(value) + case "VARCHAR": + return v2.ParseString(value) + default: + m.Log.Debugf("unknown database type name %q in parseValueByDatabaseTypeName", databaseTypeName) + return v2.ParseValue(value) } - return nil, false } // findThreadState can be used to find thread state by command and plain state diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index 410f80213252f..868c86f18b9cb 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -1,7 +1,6 @@ package mysql import ( - "database/sql" "fmt" "testing" @@ -178,31 +177,7 @@ func TestMysqlDNSAddTimeout(t *testing.T) { } } } -func TestParseValue(t *testing.T) { - testCases := []struct { - rawByte sql.RawBytes - output interface{} - boolValue bool - }{ - {sql.RawBytes("123"), int64(123), true}, - {sql.RawBytes("abc"), "abc", true}, - {sql.RawBytes("10.1"), 10.1, true}, - {sql.RawBytes("ON"), 1, true}, - {sql.RawBytes("OFF"), 0, true}, - {sql.RawBytes("NO"), 0, true}, - {sql.RawBytes("YES"), 1, true}, - {sql.RawBytes("No"), 0, true}, - {sql.RawBytes("Yes"), 1, true}, - {sql.RawBytes("-794"), int64(-794), true}, - {sql.RawBytes("18446744073709552333"), float64(18446744073709552000), true}, - {sql.RawBytes(""), nil, false}, - } - for _, cases := range testCases { - if got, ok := parseValue(cases.rawByte); got != cases.output && ok != cases.boolValue { - t.Errorf("for %s wanted %t, got %t", string(cases.rawByte), cases.output, got) - } - } -} + func TestNewNamespace(t *testing.T) { testCases := []struct { words []string diff --git a/plugins/inputs/mysql/v1/mysql.go b/plugins/inputs/mysql/v1/mysql.go index 374782f9cb29a..7f4e1a7dcacae 100644 --- a/plugins/inputs/mysql/v1/mysql.go +++ b/plugins/inputs/mysql/v1/mysql.go @@ -182,14 +182,14 @@ var Mappings = []*Mapping{ }, } -func ParseValue(value sql.RawBytes) (float64, bool) { +func ParseValue(value sql.RawBytes) (float64, error) { if bytes.Equal(value, []byte("Yes")) || bytes.Equal(value, []byte("ON")) { - return 1, true + return 1, nil } if bytes.Equal(value, []byte("No")) || bytes.Equal(value, []byte("OFF")) { - return 0, true + return 0, nil } n, err := strconv.ParseFloat(string(value), 64) - return n, err == nil + return n, err } diff --git a/plugins/inputs/mysql/v2/convert.go b/plugins/inputs/mysql/v2/convert.go index d5b73ec7f4c1e..b446890c9baec 100644 --- a/plugins/inputs/mysql/v2/convert.go +++ b/plugins/inputs/mysql/v2/convert.go @@ -25,6 +25,10 @@ func ParseUint(value sql.RawBytes) (interface{}, error) { return strconv.ParseUint(string(value), 10, 64) } +func ParseFloat(value sql.RawBytes) (interface{}, error) { + return strconv.ParseFloat(string(value), 64) +} + func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { if bytes.EqualFold(value, []byte("YES")) || bytes.EqualFold(value, []byte("ON")) { return int64(1), nil @@ -86,11 +90,15 @@ var GlobalStatusConversions = map[string]ConversionFunc{ "innodb_data_pending_fsyncs": ParseUint, "ssl_ctx_verify_depth": ParseUint, "ssl_verify_depth": ParseUint, + + // see https://galeracluster.com/library/documentation/galera-status-variables.html + "wsrep_local_index": ParseUint, + "wsrep_local_send_queue_avg": ParseFloat, } -// see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html -// see https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html var GlobalVariableConversions = map[string]ConversionFunc{ + // see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html + // see https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html "delay_key_write": ParseString, // ON, OFF, ALL "enforce_gtid_consistency": ParseString, // ON, OFF, WARN "event_scheduler": ParseString, // YES, NO, DISABLED diff --git a/plugins/inputs/mysql/v2/convert_test.go b/plugins/inputs/mysql/v2/convert_test.go index 43133eeb39c1b..95083a1e5016f 100644 --- a/plugins/inputs/mysql/v2/convert_test.go +++ b/plugins/inputs/mysql/v2/convert_test.go @@ -2,6 +2,7 @@ package v2 import ( "database/sql" + "strings" "testing" "github.com/stretchr/testify/require" @@ -84,3 +85,43 @@ func TestCovertGlobalVariables(t *testing.T) { }) } } + +func TestParseValue(t *testing.T) { + testCases := []struct { + rawByte sql.RawBytes + output interface{} + err string + }{ + {sql.RawBytes("123"), int64(123), ""}, + {sql.RawBytes("abc"), "abc", ""}, + {sql.RawBytes("10.1"), 10.1, ""}, + {sql.RawBytes("ON"), 1, ""}, + {sql.RawBytes("OFF"), 0, ""}, + {sql.RawBytes("NO"), 0, ""}, + {sql.RawBytes("YES"), 1, ""}, + {sql.RawBytes("No"), 0, ""}, + {sql.RawBytes("Yes"), 1, ""}, + {sql.RawBytes("-794"), int64(-794), ""}, + {sql.RawBytes("2147483647"), int64(2147483647), ""}, // max int32 + {sql.RawBytes("2147483648"), int64(2147483648), ""}, // too big for int32 + {sql.RawBytes("9223372036854775807"), int64(9223372036854775807), ""}, // max int64 + {sql.RawBytes("9223372036854775808"), uint64(9223372036854775808), ""}, // too big for int64 + {sql.RawBytes("18446744073709551615"), uint64(18446744073709551615), ""}, // max uint64 + {sql.RawBytes("18446744073709551616"), float64(18446744073709552000), ""}, // too big for uint64 + {sql.RawBytes("18446744073709552333"), float64(18446744073709552000), ""}, // too big for uint64 + {sql.RawBytes(""), nil, "unconvertible value"}, + } + for _, cases := range testCases { + got, err := ParseValue(cases.rawByte) + + if err != nil && cases.err == "" { + t.Errorf("for %q got unexpected error: %q", string(cases.rawByte), err.Error()) + } else if err != nil && !strings.HasPrefix(err.Error(), cases.err) { + t.Errorf("for %q wanted error %q, got %q", string(cases.rawByte), cases.err, err.Error()) + } else if err == nil && cases.err != "" { + t.Errorf("for %q did not get expected error: %s", string(cases.rawByte), cases.err) + } else if got != cases.output { + t.Errorf("for %q wanted %#v (%T), got %#v (%T)", string(cases.rawByte), cases.output, cases.output, got, got) + } + } +} From 9b6c8c5e600cdb2bf996859f2a7ce454f1d2ca9e Mon Sep 17 00:00:00 2001 From: reimda Date: Wed, 10 Nov 2021 08:04:17 -0700 Subject: [PATCH 748/761] Chore: Update gosnmp module from 1.32 to 1.33 (#10076) (cherry picked from commit 19d67173bb9821b1805505b80026cbb1325cd6aa) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index dc7b6cb334c1f..bb49bd9e1f9f0 100644 --- a/go.mod +++ b/go.mod @@ -129,7 +129,7 @@ require ( github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.4.2 - github.com/gosnmp/gosnmp v1.32.0 + github.com/gosnmp/gosnmp v1.33.0 github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect diff --git a/go.sum b/go.sum index a6ab83e4a16e7..50d2d2280187a 100644 --- a/go.sum +++ b/go.sum @@ -1108,8 +1108,8 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gosnmp/gosnmp v1.32.0 h1:gctewmZx5qFI0oHMzRnjETqIZ093d9NgZy9TQr3V0iA= -github.com/gosnmp/gosnmp v1.32.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= +github.com/gosnmp/gosnmp v1.33.0 h1:WNwN5Rj/9Y70VplIKXuaUiYVxdcaXhfAuLElKx4lnpU= +github.com/gosnmp/gosnmp v1.33.0/go.mod h1:QWTRprXN9haHFof3P96XTDYc46boCGAh5IXp0DniEx4= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= From aa287dc20323b50cf814290716a3c61a5368cc83 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 10 Nov 2021 12:53:11 -0600 Subject: [PATCH 749/761] fix(inputs/mongodb): resolve all markdown linter issues in README.md (#10077) (cherry picked from commit 279fc8352c63ac355a3843643720b3b89bd97808) --- plugins/inputs/mongodb/README.md | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 15a474e6bb66a..3247e3c78afc3 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -2,7 +2,7 @@ All MongoDB server versions from 2.6 and higher are supported. -### Configuration: +## Configuration ```toml [[inputs.mongodb]] @@ -40,20 +40,22 @@ All MongoDB server versions from 2.6 and higher are supported. # insecure_skip_verify = false ``` -#### Permissions: +### Permissions If your MongoDB instance has access control enabled you will need to connect as a user with sufficient rights. With MongoDB 3.4 and higher, the `clusterMonitor` role can be used. In version 3.2 you may also need these additional permissions: -``` + +```shell > db.grantRolesToUser("user", [{role: "read", actions: "find", db: "local"}]) ``` If the user is missing required privileges you may see an error in the Telegraf logs similar to: -``` + +```shell Error in input [mongodb]: not authorized on admin to execute command { serverStatus: 1, recordStats: 0 } ``` @@ -61,7 +63,7 @@ Some permission related errors are logged at debug level, you can check these messages by setting `debug = true` in the agent section of the configuration or by running Telegraf with the `--debug` argument. -### Metrics: +### Metrics - mongodb - tags: @@ -231,7 +233,7 @@ by running Telegraf with the `--debug` argument. - ttl_passes_per_sec (integer, deprecated in 1.10; use `ttl_passes`)) - updates_per_sec (integer, deprecated in 1.10; use `updates`)) -+ mongodb_db_stats +- mongodb_db_stats - tags: - db_name - hostname @@ -293,8 +295,9 @@ by running Telegraf with the `--debug` argument. - commands_time (integer) - commands_count (integer) -### Example Output: -``` +### Example Output + +```shell mongodb,hostname=127.0.0.1:27017 active_reads=3i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=87210i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=0i,assert_warning=0i,available_reads=125i,available_writes=128i,commands=218126i,commands_per_sec=1876i,connections_available=838853i,connections_current=7i,connections_total_created=8i,count_command_failed=0i,count_command_total=7i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=87190i,document_deleted=0i,document_inserted=0i,document_returned=7i,document_updated=43595i,find_and_modify_command_failed=0i,find_and_modify_command_total=43595i,find_command_failed=0i,find_command_total=348819i,flushes=1i,flushes_per_sec=0i,flushes_total_time_ns=5000000i,get_more_command_failed=0i,get_more_command_total=0i,getmores=7i,getmores_per_sec=1i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=44179i,latency_commands_count=122i,latency_reads=36662189i,latency_reads_count=523229i,latency_writes=6768713i,latency_writes_count=87190i,net_in_bytes=837378i,net_in_bytes_count=97692502i,net_out_bytes=690836i,net_out_bytes_count=75377383i,open_connections=7i,operation_scan_and_order=87193i,operation_write_conflicts=7i,page_faults=0i,percent_cache_dirty=0.9,percent_cache_used=1,queries=348816i,queries_per_sec=2988i,queued_reads=0i,queued_writes=0i,resident_megabytes=77i,storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=280136i,tcmalloc_current_allocated_bytes=77677288i,tcmalloc_current_total_thread_cache_bytes=1222608i,tcmalloc_heap_size=142659584i,tcmalloc_max_total_thread_cache_bytes=260046848i,tcmalloc_pageheap_commit_count=1898i,tcmalloc_pageheap_committed_bytes=130084864i,tcmalloc_pageheap_decommit_count=889i,tcmalloc_pageheap_free_bytes=50610176i,tcmalloc_pageheap_reserve_count=50i,tcmalloc_pageheap_scavenge_count=884i,tcmalloc_pageheap_total_commit_bytes=13021937664i,tcmalloc_pageheap_total_decommit_bytes=12891852800i,tcmalloc_pageheap_total_reserve_bytes=142659584i,tcmalloc_pageheap_unmapped_bytes=12574720i,tcmalloc_spinlock_total_delay_ns=9767500i,tcmalloc_thread_cache_free_bytes=1222608i,tcmalloc_total_free_bytes=1797400i,tcmalloc_transfer_cache_free_bytes=294656i,total_available=0i,total_created=0i,total_docs_scanned=43595i,total_in_use=0i,total_keys_scanned=130805i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=0i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=43595i,updates=43595i,updates_per_sec=372i,uptime_ns=60023000000i,version="3.6.17",vsize_megabytes=1048i,wtcache_app_threads_page_read_count=108i,wtcache_app_threads_page_read_time=25995i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=2487250i,wtcache_bytes_written_from=74i,wtcache_current_bytes=5014530i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=505413632i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=139i,wtcache_pages_requested_from=699135i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=4797426i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_evictingpages=0i 1586379818000000000 mongodb,hostname=127.0.0.1:27017,node_type=SEC,rs_name=rs0 active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=1i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=79i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=1121855i,commands_per_sec=10i,connections_available=51183i,connections_current=17i,connections_total_created=557i,count_command_failed=0i,count_command_total=46307i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=28i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=2248129i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=2i,find_command_total=8764i,flushes=7850i,flushes_per_sec=0i,flushes_total_time_ns=4535446000000i,get_more_command_failed=0i,get_more_command_total=1993i,getmores=2018i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=112011949i,latency_commands_count=1072472i,latency_reads=1877142443i,latency_reads_count=57086i,latency_writes=0i,latency_writes_count=0i,member_status="SEC",net_in_bytes=1212i,net_in_bytes_count=263928689i,net_out_bytes=41051i,net_out_bytes_count=2475389483i,open_connections=17i,operation_scan_and_order=34i,operation_write_conflicts=0i,page_faults=317i,percent_cache_dirty=1.6,percent_cache_used=73,queries=8764i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=17839419i,repl_apply_batches_total_millis=399929i,repl_apply_ops=23355263i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=11i,repl_commands_per_sec=0i,repl_deletes=440608i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=4i,repl_executor_unsignaled_events=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=1875729i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=39122199371i,repl_network_getmores_num=34908797i,repl_network_getmores_total_millis=434805356i,repl_network_ops=23199086i,repl_oplog_window_sec=619292i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=21034729i,repl_updates_per_sec=38i,repl_state=2,resident_megabytes=6721i,state="SECONDARY",storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=358512400i,tcmalloc_current_allocated_bytes=5427379424i,tcmalloc_current_total_thread_cache_bytes=70349552i,tcmalloc_heap_size=10199310336i,tcmalloc_max_total_thread_cache_bytes=1073741824i,tcmalloc_pageheap_commit_count=790819i,tcmalloc_pageheap_committed_bytes=7064821760i,tcmalloc_pageheap_decommit_count=533347i,tcmalloc_pageheap_free_bytes=1207816192i,tcmalloc_pageheap_reserve_count=7706i,tcmalloc_pageheap_scavenge_count=426235i,tcmalloc_pageheap_total_commit_bytes=116127649792i,tcmalloc_pageheap_total_decommit_bytes=109062828032i,tcmalloc_pageheap_total_reserve_bytes=10199310336i,tcmalloc_pageheap_unmapped_bytes=3134488576i,tcmalloc_spinlock_total_delay_ns=2518474348i,tcmalloc_thread_cache_free_bytes=70349552i,tcmalloc_total_free_bytes=429626144i,tcmalloc_transfer_cache_free_bytes=764192i,total_available=0i,total_created=0i,total_docs_scanned=735004782i,total_in_use=0i,total_keys_scanned=6188216i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=7892i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=473590288000000i,version="3.6.17",vsize_megabytes=11136i,wtcache_app_threads_page_read_count=11467625i,wtcache_app_threads_page_read_time=1700336840i,wtcache_app_threads_page_write_count=13268184i,wtcache_bytes_read_into=348022587843i,wtcache_bytes_written_from=322571702254i,wtcache_current_bytes=5509459274i,wtcache_internal_pages_evicted=109108i,wtcache_max_bytes_configured=7547650048i,wtcache_modified_pages_evicted=911196i,wtcache_pages_evicted_by_app_thread=17366i,wtcache_pages_queued_for_eviction=16572754i,wtcache_pages_read_into=11689764i,wtcache_pages_requested_from=499825861i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=117487510i,wtcache_unmodified_pages_evicted=11058458i,wtcache_worker_thread_evictingpages=11907226i 1586379707000000000 mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 From c3794f39623291423a9fae098a862431da7afdbc Mon Sep 17 00:00:00 2001 From: anti32 Date: Wed, 10 Nov 2021 21:49:40 +0200 Subject: [PATCH 750/761] fix (inputs/mongodb) readme: correct connection URI (#10075) (cherry picked from commit 62a05b23728ac4bb9e8222a28644f24c501e44bb) --- plugins/inputs/mongodb/README.md | 2 +- plugins/inputs/mongodb/mongodb.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 3247e3c78afc3..678d80c73184d 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -11,7 +11,7 @@ All MongoDB server versions from 2.6 and higher are supported. ## For example: ## mongodb://user:auth_key@10.10.3.30:27017, ## mongodb://10.10.3.33:18832, - servers = ["mongodb://127.0.0.1:27017"] + servers = ["mongodb://127.0.0.1:27017?connect=direct"] ## When true, collect cluster status. ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 0366636200064..3417252ddeb59 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -44,7 +44,7 @@ var sampleConfig = ` ## For example: ## mongodb://user:auth_key@10.10.3.30:27017, ## mongodb://10.10.3.33:18832, - servers = ["mongodb://127.0.0.1:27017"] + servers = ["mongodb://127.0.0.1:27017?connect=direct"] ## When true, collect cluster status ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which From 92d1340d74115c633f19fb953785e2a92bdf4f42 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 10 Nov 2021 17:45:50 -0600 Subject: [PATCH 751/761] fix(parser/csv): resolve linter issues (#10093) (cherry picked from commit 8f309dc34d96901cca44c3870b8a0370b6cf7202) --- plugins/parsers/csv/README.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index 220ac60686636..192c9216b3a82 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -3,7 +3,7 @@ The `csv` parser creates metrics from a document containing comma separated values. -### Configuration +## Configuration ```toml [[inputs.file]] @@ -78,7 +78,8 @@ values. ## The field will be skipped entirely where it matches any values inserted here. csv_skip_values = [] ``` -#### csv_timestamp_column, csv_timestamp_format + +### csv_timestamp_column, csv_timestamp_format By default the current time will be used for all created metrics, to set the time using the JSON document you can use the `csv_timestamp_column` and @@ -104,6 +105,7 @@ columns and rows. ### Examples Config: + ```toml [[inputs.file]] files = ["example"] @@ -114,13 +116,15 @@ Config: ``` Input: -``` + +```shell measurement,cpu,time_user,time_system,time_idle,time cpu,cpu0,42,42,42,2018-09-13T13:03:28Z ``` Output: -``` + +```shell cpu cpu=cpu0,time_user=42,time_system=42,time_idle=42 1536869008000000000 ``` From 15fd948f069dea4335adddb1642436fc999e1eb4 Mon Sep 17 00:00:00 2001 From: David B <36965011+DavidBuettner@users.noreply.github.com> Date: Fri, 12 Nov 2021 00:09:51 +0100 Subject: [PATCH 752/761] fix: update BurntSushi/toml for hex config support (#10089) (cherry picked from commit b9c444bae8617cc0aa2c2dda81abf435420b6272) --- go.mod | 2 +- go.sum | 3 ++- plugins/common/shim/config_test.go | 2 ++ plugins/common/shim/testdata/special.conf | 3 ++- 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index bb49bd9e1f9f0..7022a814b825f 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/BurntSushi/toml v0.3.1 + github.com/BurntSushi/toml v0.4.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/hcsshim v0.8.21 // indirect diff --git a/go.sum b/go.sum index 50d2d2280187a..7a28abe427ce4 100644 --- a/go.sum +++ b/go.sum @@ -142,8 +142,9 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= diff --git a/plugins/common/shim/config_test.go b/plugins/common/shim/config_test.go index 75ad18239fbb0..762ca5dd283b2 100644 --- a/plugins/common/shim/config_test.go +++ b/plugins/common/shim/config_test.go @@ -54,6 +54,7 @@ func TestLoadingSpecialTypes(t *testing.T) { require.EqualValues(t, 3*time.Second, inp.Duration) require.EqualValues(t, 3*1000*1000, inp.Size) + require.EqualValues(t, 52, inp.Hex) } func TestLoadingProcessorWithConfig(t *testing.T) { @@ -72,6 +73,7 @@ func TestLoadingProcessorWithConfig(t *testing.T) { type testDurationInput struct { Duration tgConfig.Duration `toml:"duration"` Size tgConfig.Size `toml:"size"` + Hex int64 `toml:"hex"` } func (i *testDurationInput) SampleConfig() string { diff --git a/plugins/common/shim/testdata/special.conf b/plugins/common/shim/testdata/special.conf index c324b638497c5..53af78620701d 100644 --- a/plugins/common/shim/testdata/special.conf +++ b/plugins/common/shim/testdata/special.conf @@ -1,4 +1,5 @@ # testing custom field types [[inputs.test]] duration = "3s" - size = "3MB" \ No newline at end of file + size = "3MB" + hex = 0x34 \ No newline at end of file From 6a0f82b53ff84bdc2781ce9aefcfdd1194f27a4d Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 15 Nov 2021 09:09:39 -0600 Subject: [PATCH 753/761] fix: super-linter use v4.8.1, issue with latest (#10108) (cherry picked from commit c1263fb03bc83dded7442dc08dfed3e16e91bc60) --- .github/workflows/linter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 8ba9ae2944823..104d71db2230a 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -48,7 +48,7 @@ jobs: # Run Linter against code base # ################################ - name: Lint Code Base - uses: github/super-linter@v4 + uses: github/super-linter@v4.8.1 env: VALIDATE_ALL_CODEBASE: false DEFAULT_BRANCH: master From 5004b49f3540ebb220b0916eb52e3d7a10d9d7de Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 16 Nov 2021 14:11:24 -0800 Subject: [PATCH 754/761] docs: update deprecated plugin readmes (#10100) (cherry picked from commit f71695bc98c2cabb1272b79e71a82ec69423dfc4) --- plugins/inputs/httpjson/README.md | 4 ++-- plugins/inputs/jolokia/README.md | 2 +- plugins/inputs/kafka_consumer_legacy/README.md | 2 ++ plugins/inputs/logparser/README.md | 4 ++-- plugins/inputs/snmp_legacy/README.md | 4 ++++ 5 files changed, 11 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/httpjson/README.md b/plugins/inputs/httpjson/README.md index 19fe014457734..3f7efb10a4098 100644 --- a/plugins/inputs/httpjson/README.md +++ b/plugins/inputs/httpjson/README.md @@ -1,8 +1,8 @@ # HTTP JSON Input Plugin -The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats. +### DEPRECATED in Telegraf v1.6: Use [HTTP input plugin](../http) as replacement. -Deprecated (1.6): use the [http](../http) input. +The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats. ### Configuration: diff --git a/plugins/inputs/jolokia/README.md b/plugins/inputs/jolokia/README.md index 96ee48701b464..9f2a658f16247 100644 --- a/plugins/inputs/jolokia/README.md +++ b/plugins/inputs/jolokia/README.md @@ -1,6 +1,6 @@ # Jolokia Input Plugin -**Deprecated in version 1.5:** Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin. +### Deprecated in version 1.5: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin. #### Configuration diff --git a/plugins/inputs/kafka_consumer_legacy/README.md b/plugins/inputs/kafka_consumer_legacy/README.md index 2f0c219ea8647..86ccaa4c1dc09 100644 --- a/plugins/inputs/kafka_consumer_legacy/README.md +++ b/plugins/inputs/kafka_consumer_legacy/README.md @@ -1,5 +1,7 @@ # Kafka Consumer Legacy Input Plugin +### Deprecated in version 1.4. Please use [Kafka Consumer input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer). + The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka topic and adds messages to InfluxDB. The plugin assumes messages follow the line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup) diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 0abdba2c972df..8cc513e98cb70 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -1,11 +1,11 @@ # Logparser Input Plugin +### Deprecated in Telegraf 1.15: Please use the [tail][] plugin along with the [`grok` data format][grok parser]. + The `logparser` plugin streams and parses the given logfiles. Currently it has the capability of parsing "grok" patterns from logfiles, which also supports regex patterns. -**Deprecated in Telegraf 1.15**: Please use the [tail][] plugin along with the [`grok` data format][grok parser]. - The `tail` plugin now provides all the functionality of the `logparser` plugin. Most options can be translated directly to the `tail` plugin: - For options in the `[inputs.logparser.grok]` section, the equivalent option diff --git a/plugins/inputs/snmp_legacy/README.md b/plugins/inputs/snmp_legacy/README.md index 06bebbcad6176..8e639900ffe0f 100644 --- a/plugins/inputs/snmp_legacy/README.md +++ b/plugins/inputs/snmp_legacy/README.md @@ -1,5 +1,7 @@ # SNMP Legacy Input Plugin +### Deprecated in version 1.0. Use [SNMP input plugin][]. + The SNMP input plugin gathers metrics from SNMP agents ### Configuration: @@ -547,3 +549,5 @@ ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264 ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312 ``` + +[SNMP input plugin]: /plugins/inputs/snmp From 03bf658201c2ec548db7650cb347e3a999fda1fd Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 17 Nov 2021 08:24:31 -0600 Subject: [PATCH 755/761] fix(inputs/zfs): resolve README.md linter issues (#10109) (cherry picked from commit 5549bf0f59713c8e34e6bd62afdb9c47be3bb09e) --- plugins/inputs/zfs/README.md | 72 +++++++++++++++++++----------------- 1 file changed, 38 insertions(+), 34 deletions(-) diff --git a/plugins/inputs/zfs/README.md b/plugins/inputs/zfs/README.md index 1f3f125d391ec..77b101915bbe6 100644 --- a/plugins/inputs/zfs/README.md +++ b/plugins/inputs/zfs/README.md @@ -4,7 +4,7 @@ This ZFS plugin provides metrics from your ZFS filesystems. It supports ZFS on Linux and FreeBSD. It gets ZFS stat from `/proc/spl/kstat/zfs` on Linux and from `sysctl`, 'zfs' and `zpool` on FreeBSD. -### Configuration: +## Configuration ```toml [[inputs.zfs]] @@ -27,7 +27,7 @@ from `sysctl`, 'zfs' and `zpool` on FreeBSD. # datasetMetrics = false ``` -### Measurements & Fields: +### Measurements & Fields By default this plugin collects metrics about ZFS internals pool and dataset. These metrics are either counters or measure sizes @@ -189,53 +189,53 @@ each dataset. On Linux (reference: kstat accumulated time and queue length statistics): - zfs_pool - - nread (integer, bytes) - - nwritten (integer, bytes) - - reads (integer, count) - - writes (integer, count) - - wtime (integer, nanoseconds) - - wlentime (integer, queuelength * nanoseconds) - - wupdate (integer, timestamp) - - rtime (integer, nanoseconds) - - rlentime (integer, queuelength * nanoseconds) - - rupdate (integer, timestamp) - - wcnt (integer, count) - - rcnt (integer, count) + - nread (integer, bytes) + - nwritten (integer, bytes) + - reads (integer, count) + - writes (integer, count) + - wtime (integer, nanoseconds) + - wlentime (integer, queuelength * nanoseconds) + - wupdate (integer, timestamp) + - rtime (integer, nanoseconds) + - rlentime (integer, queuelength * nanoseconds) + - rupdate (integer, timestamp) + - wcnt (integer, count) + - rcnt (integer, count) On FreeBSD: - zfs_pool - - allocated (integer, bytes) - - capacity (integer, bytes) - - dedupratio (float, ratio) - - free (integer, bytes) - - size (integer, bytes) - - fragmentation (integer, percent) + - allocated (integer, bytes) + - capacity (integer, bytes) + - dedupratio (float, ratio) + - free (integer, bytes) + - size (integer, bytes) + - fragmentation (integer, percent) #### Dataset Metrics (optional, only on FreeBSD) - zfs_dataset - - avail (integer, bytes) - - used (integer, bytes) - - usedsnap (integer, bytes - - usedds (integer, bytes) + - avail (integer, bytes) + - used (integer, bytes) + - usedsnap (integer, bytes + - usedds (integer, bytes) -### Tags: +### Tags - ZFS stats (`zfs`) will have the following tag: - - pools - A `::` concatenated list of all ZFS pools on the machine. - - datasets - A `::` concatenated list of all ZFS datasets on the machine. + - pools - A `::` concatenated list of all ZFS pools on the machine. + - datasets - A `::` concatenated list of all ZFS datasets on the machine. - Pool metrics (`zfs_pool`) will have the following tag: - - pool - with the name of the pool which the metrics are for. - - health - the health status of the pool. (FreeBSD only) + - pool - with the name of the pool which the metrics are for. + - health - the health status of the pool. (FreeBSD only) - Dataset metrics (`zfs_dataset`) will have the following tag: - - dataset - with the name of the dataset which the metrics are for. + - dataset - with the name of the dataset which the metrics are for. -### Example Output: +### Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter zfs --test * Plugin: zfs, Collection 1 > zfs_pool,health=ONLINE,pool=zroot allocated=1578590208i,capacity=2i,dedupratio=1,fragmentation=1i,free=64456531968i,size=66035122176i 1464473103625653908 @@ -287,8 +287,9 @@ A short description for some of the metrics. `arcstats_evict_l2_ineligible` We evicted something which cannot be stored in the l2. Reasons could be: - - We have multiple pools, we evicted something from a pool without an l2 device. - - The zfs property secondary cache. + +- We have multiple pools, we evicted something from a pool without an l2 device. +- The zfs property secondary cache. `arcstats_c` Arc target size, this is the size the system thinks the arc should have. @@ -313,6 +314,7 @@ A short description for some of the metrics. `zfetchstats_stride_hits` Counts the number of cache hits, to items which are in the cache because of the prefetcher (prefetched stride reads) #### Vdev Cache Stats (FreeBSD only) + note: the vdev cache is deprecated in some ZFS implementations `vdev_cache_stats_hits` Hits to the vdev (device level) cache. @@ -320,6 +322,7 @@ note: the vdev cache is deprecated in some ZFS implementations `vdev_cache_stats_misses` Misses to the vdev (device level) cache. #### ABD Stats (Linux Only) + ABD is a linear/scatter dual typed buffer for ARC `abdstats_linear_cnt` number of linear ABDs which are currently allocated @@ -343,6 +346,7 @@ ABD is a linear/scatter dual typed buffer for ARC `fm_erpt-dropped` counts when an error report cannot be created (eg available memory is too low) #### ZIL (Linux Only) + note: ZIL measurements are system-wide, neither per-pool nor per-dataset `zil_commit_count` counts when ZFS transactions are committed to a ZIL From dbb322067a7a529be54b5c3857b2f9154f6ff487 Mon Sep 17 00:00:00 2001 From: Josh Powers Date: Wed, 17 Nov 2021 12:31:39 -0700 Subject: [PATCH 756/761] update build version --- build_version.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_version.txt b/build_version.txt index f5b00dc262bed..0bd54efd31633 100644 --- a/build_version.txt +++ b/build_version.txt @@ -1 +1 @@ -1.20.3 +1.20.4 From 2fd588f09e2b4ee6da31158b16deb2cf09ad70ef Mon Sep 17 00:00:00 2001 From: Josh Powers Date: Wed, 17 Nov 2021 12:57:57 -0700 Subject: [PATCH 757/761] Update changelog --- CHANGELOG.md | 35 +++++++++++++++++++++++++++++++++++ etc/telegraf.conf | 8 +++++--- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d03253afbcff7..ed54e1ff44f17 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,38 @@ +## v1.20.4 [2021-11-17] + +#### Release Notes + + - [#10073](https://github.com/influxdata/telegraf/pull/10073) Update go version from 1.17.2 to 1.17.3 + - [#10100](https://github.com/influxdata/telegraf/pull/10100) Update deprecated plugin READMEs to better indicate deprecation + +Thank you to @zak-pawel for lots of linter fixes! + + - [#9986](https://github.com/influxdata/telegraf/pull/9986) Linter fixes for plugins/inputs/[h-j]* + - [#9999](https://github.com/influxdata/telegraf/pull/9999) Linter fixes for plugins/inputs/[k-l]* + - [#10006](https://github.com/influxdata/telegraf/pull/10006) Linter fixes for plugins/inputs/m* + - [#10011](https://github.com/influxdata/telegraf/pull/10011) Linter fixes for plugins/inputs/[n-o]* + +#### Bugfixes + + - [#10089](https://github.com/influxdata/telegraf/pull/10089) Update BurntSushi/toml from 0.3.1 to 0.4.1 + - [#10075](https://github.com/influxdata/telegraf/pull/10075) `inputs.mongodb` Update readme with correct connection URI + - [#10076](https://github.com/influxdata/telegraf/pull/10076) Update gosnmp module from 1.32 to 1.33 + - [#9966](https://github.com/influxdata/telegraf/pull/9966) `inputs.mysql` Fix type conversion follow-up + - [#10068](https://github.com/influxdata/telegraf/pull/10068) `inputs.proxmox` Changed VM ID from string to int + - [#10047](https://github.com/influxdata/telegraf/pull/10047) `inputs.modbus` Do not build modbus on openbsd + - [#10019](https://github.com/influxdata/telegraf/pull/10019) `inputs.cisco_telemetry_mdt` Move to new protobuf library + - [#10001](https://github.com/influxdata/telegraf/pull/10001) `outputs.loki` Add metric name with label "__name" + - [#9980](https://github.com/influxdata/telegraf/pull/9980) `inputs.nvidia_smi` Set the default path correctly + - [#10010](https://github.com/influxdata/telegraf/pull/10010) Update go.opentelemetry.io/otel from v0.23.0 to v0.24.0 + - [#10044](https://github.com/influxdata/telegraf/pull/10044) `inputs.sqlserver` Add elastic pool in supported versions in sqlserver + - [#10029](https://github.com/influxdata/telegraf/pull/10029) `inputs.influxdb` Update influxdb input schema docs + - [#10026](https://github.com/influxdata/telegraf/pull/10026) `inputs.intel_rdt` Correct timezone handling + +#### Features +#### New Input Plugins +#### New Output Plugins +#### New External Plugins + ## v1.20.3 [2021-10-27] #### Release Notes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index f6ea72b183638..c6e35887ec907 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -5016,7 +5016,7 @@ # ## For example: # ## mongodb://user:auth_key@10.10.3.30:27017, # ## mongodb://10.10.3.33:18832, -# servers = ["mongodb://127.0.0.1:27017"] +# servers = ["mongodb://127.0.0.1:27017?connect=direct"] # # ## When true, collect cluster status # ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which @@ -5455,7 +5455,9 @@ # # Pulls statistics from nvidia GPUs attached to the host # [[inputs.nvidia_smi]] -# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath +# ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" +# ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), +# ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned # # bin_path = "/usr/bin/nvidia-smi" # # ## Optional: timeout for GPU polling @@ -7834,7 +7836,7 @@ # # Read metrics from MQTT topic(s) # [[inputs.mqtt_consumer]] # ## Broker URLs for the MQTT server or cluster. To connect to multiple -# ## clusters or standalone servers, use a seperate plugin instance. +# ## clusters or standalone servers, use a separate plugin instance. # ## example: servers = ["tcp://localhost:1883"] # ## servers = ["ssl://localhost:1883"] # ## servers = ["ws://localhost:1883"] From 34ad5aa137f0d845d79826839379cb543c3879c5 Mon Sep 17 00:00:00 2001 From: Josh Powers Date: Wed, 17 Nov 2021 12:58:37 -0700 Subject: [PATCH 758/761] Telegraf v1.20.4 From b530a81eac96d3ed3c05620643ee8a0674f68a65 Mon Sep 17 00:00:00 2001 From: Sam Oen Date: Tue, 14 Dec 2021 18:49:45 +0800 Subject: [PATCH 759/761] Elasticsearch headers, exclude plugins --- .github/workflows/test.yml | 25 ++ plugins/inputs/all/all.go | 382 +++++++++--------- plugins/inputs/elasticsearch/README.md | 16 +- plugins/inputs/elasticsearch/elasticsearch.go | 38 +- plugins/outputs/all/all.go | 88 ++-- 5 files changed, 295 insertions(+), 254 deletions(-) create mode 100644 .github/workflows/test.yml diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000000000..8d7b97187bd37 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,25 @@ +name: test + +on: [pull_request] + +jobs: + test: + runs-on: ubuntu-20.04 + steps: + - name: checkout source + uses: actions/checkout@v2 + + - name: build binary + run: | + sudo apt-get update + sudo apt-get install -y wget build-essential git + sudo apt-get upgrade -y + sudo wget https://golang.org/dl/go1.17.5.linux-amd64.tar.gz + sudo tar -C /usr/local -xzf go1.17.5.linux-amd64.tar.gz + export PATH="/usr/local/go/bin:$PATH" + go mod tidy + LDFLAGS='-s -w' make telegraf + if [ ! -f telegraf ]; then + echo "Failed to build binary" + exit 1 + fi diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 60a52903ef079..940dfac4b258e 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -2,207 +2,207 @@ package all import ( //Blank imports for plugins to register themselves - _ "github.com/influxdata/telegraf/plugins/inputs/activemq" - _ "github.com/influxdata/telegraf/plugins/inputs/aerospike" - _ "github.com/influxdata/telegraf/plugins/inputs/aliyuncms" - _ "github.com/influxdata/telegraf/plugins/inputs/amd_rocm_smi" - _ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/apache" - _ "github.com/influxdata/telegraf/plugins/inputs/apcupsd" - _ "github.com/influxdata/telegraf/plugins/inputs/aurora" - _ "github.com/influxdata/telegraf/plugins/inputs/azure_storage_queue" - _ "github.com/influxdata/telegraf/plugins/inputs/bcache" - _ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd" - _ "github.com/influxdata/telegraf/plugins/inputs/beat" - _ "github.com/influxdata/telegraf/plugins/inputs/bind" - _ "github.com/influxdata/telegraf/plugins/inputs/bond" - _ "github.com/influxdata/telegraf/plugins/inputs/burrow" - _ "github.com/influxdata/telegraf/plugins/inputs/cassandra" - _ "github.com/influxdata/telegraf/plugins/inputs/ceph" - _ "github.com/influxdata/telegraf/plugins/inputs/cgroup" - _ "github.com/influxdata/telegraf/plugins/inputs/chrony" - _ "github.com/influxdata/telegraf/plugins/inputs/cisco_telemetry_mdt" - _ "github.com/influxdata/telegraf/plugins/inputs/clickhouse" - _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub" - _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub_push" - _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch" - _ "github.com/influxdata/telegraf/plugins/inputs/conntrack" - _ "github.com/influxdata/telegraf/plugins/inputs/consul" - _ "github.com/influxdata/telegraf/plugins/inputs/couchbase" - _ "github.com/influxdata/telegraf/plugins/inputs/couchdb" + //_ "github.com/influxdata/telegraf/plugins/inputs/activemq" + //_ "github.com/influxdata/telegraf/plugins/inputs/aerospike" + //_ "github.com/influxdata/telegraf/plugins/inputs/aliyuncms" + //_ "github.com/influxdata/telegraf/plugins/inputs/amd_rocm_smi" + //_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/apache" + //_ "github.com/influxdata/telegraf/plugins/inputs/apcupsd" + //_ "github.com/influxdata/telegraf/plugins/inputs/aurora" + //_ "github.com/influxdata/telegraf/plugins/inputs/azure_storage_queue" + //_ "github.com/influxdata/telegraf/plugins/inputs/bcache" + //_ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd" + //_ "github.com/influxdata/telegraf/plugins/inputs/beat" + //_ "github.com/influxdata/telegraf/plugins/inputs/bind" + //_ "github.com/influxdata/telegraf/plugins/inputs/bond" + //_ "github.com/influxdata/telegraf/plugins/inputs/burrow" + //_ "github.com/influxdata/telegraf/plugins/inputs/cassandra" + //_ "github.com/influxdata/telegraf/plugins/inputs/ceph" + //_ "github.com/influxdata/telegraf/plugins/inputs/cgroup" + //_ "github.com/influxdata/telegraf/plugins/inputs/chrony" + //_ "github.com/influxdata/telegraf/plugins/inputs/cisco_telemetry_mdt" + //_ "github.com/influxdata/telegraf/plugins/inputs/clickhouse" + //_ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub" + //_ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub_push" + //_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch" + //_ "github.com/influxdata/telegraf/plugins/inputs/conntrack" + //_ "github.com/influxdata/telegraf/plugins/inputs/consul" + //_ "github.com/influxdata/telegraf/plugins/inputs/couchbase" + //_ "github.com/influxdata/telegraf/plugins/inputs/couchdb" _ "github.com/influxdata/telegraf/plugins/inputs/cpu" - _ "github.com/influxdata/telegraf/plugins/inputs/csgo" - _ "github.com/influxdata/telegraf/plugins/inputs/dcos" - _ "github.com/influxdata/telegraf/plugins/inputs/directory_monitor" + //_ "github.com/influxdata/telegraf/plugins/inputs/csgo" + //_ "github.com/influxdata/telegraf/plugins/inputs/dcos" + //_ "github.com/influxdata/telegraf/plugins/inputs/directory_monitor" _ "github.com/influxdata/telegraf/plugins/inputs/disk" _ "github.com/influxdata/telegraf/plugins/inputs/diskio" - _ "github.com/influxdata/telegraf/plugins/inputs/disque" - _ "github.com/influxdata/telegraf/plugins/inputs/dmcache" - _ "github.com/influxdata/telegraf/plugins/inputs/dns_query" - _ "github.com/influxdata/telegraf/plugins/inputs/docker" - _ "github.com/influxdata/telegraf/plugins/inputs/docker_log" - _ "github.com/influxdata/telegraf/plugins/inputs/dovecot" - _ "github.com/influxdata/telegraf/plugins/inputs/dpdk" - _ "github.com/influxdata/telegraf/plugins/inputs/ecs" + //_ "github.com/influxdata/telegraf/plugins/inputs/disque" + //_ "github.com/influxdata/telegraf/plugins/inputs/dmcache" + //_ "github.com/influxdata/telegraf/plugins/inputs/dns_query" + //_ "github.com/influxdata/telegraf/plugins/inputs/docker" + //_ "github.com/influxdata/telegraf/plugins/inputs/docker_log" + //_ "github.com/influxdata/telegraf/plugins/inputs/dovecot" + //_ "github.com/influxdata/telegraf/plugins/inputs/dpdk" + //_ "github.com/influxdata/telegraf/plugins/inputs/ecs" _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" - _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch_query" - _ "github.com/influxdata/telegraf/plugins/inputs/ethtool" - _ "github.com/influxdata/telegraf/plugins/inputs/eventhub_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/exec" - _ "github.com/influxdata/telegraf/plugins/inputs/execd" - _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" - _ "github.com/influxdata/telegraf/plugins/inputs/fibaro" - _ "github.com/influxdata/telegraf/plugins/inputs/file" - _ "github.com/influxdata/telegraf/plugins/inputs/filecount" - _ "github.com/influxdata/telegraf/plugins/inputs/filestat" - _ "github.com/influxdata/telegraf/plugins/inputs/fireboard" - _ "github.com/influxdata/telegraf/plugins/inputs/fluentd" - _ "github.com/influxdata/telegraf/plugins/inputs/github" - _ "github.com/influxdata/telegraf/plugins/inputs/gnmi" - _ "github.com/influxdata/telegraf/plugins/inputs/graylog" + //_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch_query" + //_ "github.com/influxdata/telegraf/plugins/inputs/ethtool" + //_ "github.com/influxdata/telegraf/plugins/inputs/eventhub_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/exec" + //_ "github.com/influxdata/telegraf/plugins/inputs/execd" + //_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" + //_ "github.com/influxdata/telegraf/plugins/inputs/fibaro" + //_ "github.com/influxdata/telegraf/plugins/inputs/file" + //_ "github.com/influxdata/telegraf/plugins/inputs/filecount" + //_ "github.com/influxdata/telegraf/plugins/inputs/filestat" + //_ "github.com/influxdata/telegraf/plugins/inputs/fireboard" + //_ "github.com/influxdata/telegraf/plugins/inputs/fluentd" + //_ "github.com/influxdata/telegraf/plugins/inputs/github" + //_ "github.com/influxdata/telegraf/plugins/inputs/gnmi" + //_ "github.com/influxdata/telegraf/plugins/inputs/graylog" _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" - _ "github.com/influxdata/telegraf/plugins/inputs/hddtemp" - _ "github.com/influxdata/telegraf/plugins/inputs/http" - _ "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" - _ "github.com/influxdata/telegraf/plugins/inputs/http_response" - _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" - _ "github.com/influxdata/telegraf/plugins/inputs/icinga2" - _ "github.com/influxdata/telegraf/plugins/inputs/infiniband" - _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" - _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener" - _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_v2_listener" - _ "github.com/influxdata/telegraf/plugins/inputs/intel_powerstat" - _ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt" - _ "github.com/influxdata/telegraf/plugins/inputs/internal" - _ "github.com/influxdata/telegraf/plugins/inputs/internet_speed" - _ "github.com/influxdata/telegraf/plugins/inputs/interrupts" - _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" - _ "github.com/influxdata/telegraf/plugins/inputs/ipset" - _ "github.com/influxdata/telegraf/plugins/inputs/iptables" - _ "github.com/influxdata/telegraf/plugins/inputs/ipvs" - _ "github.com/influxdata/telegraf/plugins/inputs/jenkins" - _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" - _ "github.com/influxdata/telegraf/plugins/inputs/jolokia2" - _ "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry" - _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy" - _ "github.com/influxdata/telegraf/plugins/inputs/kapacitor" + //_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp" + //_ "github.com/influxdata/telegraf/plugins/inputs/http" + //_ "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" + //_ "github.com/influxdata/telegraf/plugins/inputs/http_response" + //_ "github.com/influxdata/telegraf/plugins/inputs/httpjson" + //_ "github.com/influxdata/telegraf/plugins/inputs/icinga2" + //_ "github.com/influxdata/telegraf/plugins/inputs/infiniband" + //_ "github.com/influxdata/telegraf/plugins/inputs/influxdb" + //_ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/influxdb_v2_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/intel_powerstat" + //_ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt" + //_ "github.com/influxdata/telegraf/plugins/inputs/internal" + //_ "github.com/influxdata/telegraf/plugins/inputs/internet_speed" + //_ "github.com/influxdata/telegraf/plugins/inputs/interrupts" + //_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" + //_ "github.com/influxdata/telegraf/plugins/inputs/ipset" + //_ "github.com/influxdata/telegraf/plugins/inputs/iptables" + //_ "github.com/influxdata/telegraf/plugins/inputs/ipvs" + //_ "github.com/influxdata/telegraf/plugins/inputs/jenkins" + //_ "github.com/influxdata/telegraf/plugins/inputs/jolokia" + //_ "github.com/influxdata/telegraf/plugins/inputs/jolokia2" + //_ "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry" + //_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy" + //_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor" _ "github.com/influxdata/telegraf/plugins/inputs/kernel" - _ "github.com/influxdata/telegraf/plugins/inputs/kernel_vmstat" - _ "github.com/influxdata/telegraf/plugins/inputs/kibana" - _ "github.com/influxdata/telegraf/plugins/inputs/kinesis_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/knx_listener" - _ "github.com/influxdata/telegraf/plugins/inputs/kube_inventory" - _ "github.com/influxdata/telegraf/plugins/inputs/kubernetes" - _ "github.com/influxdata/telegraf/plugins/inputs/lanz" - _ "github.com/influxdata/telegraf/plugins/inputs/leofs" - _ "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" - _ "github.com/influxdata/telegraf/plugins/inputs/logparser" - _ "github.com/influxdata/telegraf/plugins/inputs/logstash" - _ "github.com/influxdata/telegraf/plugins/inputs/lustre2" - _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" - _ "github.com/influxdata/telegraf/plugins/inputs/marklogic" - _ "github.com/influxdata/telegraf/plugins/inputs/mcrouter" - _ "github.com/influxdata/telegraf/plugins/inputs/mdstat" + //_ "github.com/influxdata/telegraf/plugins/inputs/kernel_vmstat" + //_ "github.com/influxdata/telegraf/plugins/inputs/kibana" + //_ "github.com/influxdata/telegraf/plugins/inputs/kinesis_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/knx_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/kube_inventory" + //_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes" + //_ "github.com/influxdata/telegraf/plugins/inputs/lanz" + //_ "github.com/influxdata/telegraf/plugins/inputs/leofs" + //_ "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" + //_ "github.com/influxdata/telegraf/plugins/inputs/logparser" + //_ "github.com/influxdata/telegraf/plugins/inputs/logstash" + //_ "github.com/influxdata/telegraf/plugins/inputs/lustre2" + //_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" + //_ "github.com/influxdata/telegraf/plugins/inputs/marklogic" + //_ "github.com/influxdata/telegraf/plugins/inputs/mcrouter" + //_ "github.com/influxdata/telegraf/plugins/inputs/mdstat" _ "github.com/influxdata/telegraf/plugins/inputs/mem" - _ "github.com/influxdata/telegraf/plugins/inputs/memcached" - _ "github.com/influxdata/telegraf/plugins/inputs/mesos" - _ "github.com/influxdata/telegraf/plugins/inputs/minecraft" - _ "github.com/influxdata/telegraf/plugins/inputs/modbus" - _ "github.com/influxdata/telegraf/plugins/inputs/mongodb" - _ "github.com/influxdata/telegraf/plugins/inputs/monit" - _ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/multifile" - _ "github.com/influxdata/telegraf/plugins/inputs/mysql" - _ "github.com/influxdata/telegraf/plugins/inputs/nats" - _ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/neptune_apex" + //_ "github.com/influxdata/telegraf/plugins/inputs/memcached" + //_ "github.com/influxdata/telegraf/plugins/inputs/mesos" + //_ "github.com/influxdata/telegraf/plugins/inputs/minecraft" + //_ "github.com/influxdata/telegraf/plugins/inputs/modbus" + //_ "github.com/influxdata/telegraf/plugins/inputs/mongodb" + //_ "github.com/influxdata/telegraf/plugins/inputs/monit" + //_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/multifile" + //_ "github.com/influxdata/telegraf/plugins/inputs/mysql" + //_ "github.com/influxdata/telegraf/plugins/inputs/nats" + //_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/neptune_apex" _ "github.com/influxdata/telegraf/plugins/inputs/net" - _ "github.com/influxdata/telegraf/plugins/inputs/net_response" - _ "github.com/influxdata/telegraf/plugins/inputs/nfsclient" - _ "github.com/influxdata/telegraf/plugins/inputs/nginx" - _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus" - _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus_api" - _ "github.com/influxdata/telegraf/plugins/inputs/nginx_sts" - _ "github.com/influxdata/telegraf/plugins/inputs/nginx_upstream_check" - _ "github.com/influxdata/telegraf/plugins/inputs/nginx_vts" - _ "github.com/influxdata/telegraf/plugins/inputs/nsd" - _ "github.com/influxdata/telegraf/plugins/inputs/nsq" - _ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer" - _ "github.com/influxdata/telegraf/plugins/inputs/nstat" - _ "github.com/influxdata/telegraf/plugins/inputs/ntpq" - _ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi" - _ "github.com/influxdata/telegraf/plugins/inputs/opcua" - _ "github.com/influxdata/telegraf/plugins/inputs/openldap" - _ "github.com/influxdata/telegraf/plugins/inputs/openntpd" - _ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd" - _ "github.com/influxdata/telegraf/plugins/inputs/opentelemetry" - _ "github.com/influxdata/telegraf/plugins/inputs/openweathermap" - _ "github.com/influxdata/telegraf/plugins/inputs/passenger" - _ "github.com/influxdata/telegraf/plugins/inputs/pf" - _ "github.com/influxdata/telegraf/plugins/inputs/pgbouncer" - _ "github.com/influxdata/telegraf/plugins/inputs/phpfpm" - _ "github.com/influxdata/telegraf/plugins/inputs/ping" - _ "github.com/influxdata/telegraf/plugins/inputs/postfix" - _ "github.com/influxdata/telegraf/plugins/inputs/postgresql" - _ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible" - _ "github.com/influxdata/telegraf/plugins/inputs/powerdns" - _ "github.com/influxdata/telegraf/plugins/inputs/powerdns_recursor" - _ "github.com/influxdata/telegraf/plugins/inputs/processes" - _ "github.com/influxdata/telegraf/plugins/inputs/procstat" - _ "github.com/influxdata/telegraf/plugins/inputs/prometheus" - _ "github.com/influxdata/telegraf/plugins/inputs/proxmox" - _ "github.com/influxdata/telegraf/plugins/inputs/puppetagent" - _ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq" - _ "github.com/influxdata/telegraf/plugins/inputs/raindrops" - _ "github.com/influxdata/telegraf/plugins/inputs/ras" - _ "github.com/influxdata/telegraf/plugins/inputs/ravendb" - _ "github.com/influxdata/telegraf/plugins/inputs/redfish" - _ "github.com/influxdata/telegraf/plugins/inputs/redis" - _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" - _ "github.com/influxdata/telegraf/plugins/inputs/riak" - _ "github.com/influxdata/telegraf/plugins/inputs/riemann_listener" - _ "github.com/influxdata/telegraf/plugins/inputs/salesforce" - _ "github.com/influxdata/telegraf/plugins/inputs/sensors" - _ "github.com/influxdata/telegraf/plugins/inputs/sflow" - _ "github.com/influxdata/telegraf/plugins/inputs/smart" - _ "github.com/influxdata/telegraf/plugins/inputs/snmp" - _ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy" - _ "github.com/influxdata/telegraf/plugins/inputs/snmp_trap" - _ "github.com/influxdata/telegraf/plugins/inputs/socket_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/net_response" + //_ "github.com/influxdata/telegraf/plugins/inputs/nfsclient" + //_ "github.com/influxdata/telegraf/plugins/inputs/nginx" + //_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus" + //_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus_api" + //_ "github.com/influxdata/telegraf/plugins/inputs/nginx_sts" + //_ "github.com/influxdata/telegraf/plugins/inputs/nginx_upstream_check" + //_ "github.com/influxdata/telegraf/plugins/inputs/nginx_vts" + //_ "github.com/influxdata/telegraf/plugins/inputs/nsd" + //_ "github.com/influxdata/telegraf/plugins/inputs/nsq" + //_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer" + //_ "github.com/influxdata/telegraf/plugins/inputs/nstat" + //_ "github.com/influxdata/telegraf/plugins/inputs/ntpq" + //_ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi" + //_ "github.com/influxdata/telegraf/plugins/inputs/opcua" + //_ "github.com/influxdata/telegraf/plugins/inputs/openldap" + //_ "github.com/influxdata/telegraf/plugins/inputs/openntpd" + //_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd" + //_ "github.com/influxdata/telegraf/plugins/inputs/opentelemetry" + //_ "github.com/influxdata/telegraf/plugins/inputs/openweathermap" + //_ "github.com/influxdata/telegraf/plugins/inputs/passenger" + //_ "github.com/influxdata/telegraf/plugins/inputs/pf" + //_ "github.com/influxdata/telegraf/plugins/inputs/pgbouncer" + //_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm" + //_ "github.com/influxdata/telegraf/plugins/inputs/ping" + //_ "github.com/influxdata/telegraf/plugins/inputs/postfix" + //_ "github.com/influxdata/telegraf/plugins/inputs/postgresql" + //_ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible" + //_ "github.com/influxdata/telegraf/plugins/inputs/powerdns" + //_ "github.com/influxdata/telegraf/plugins/inputs/powerdns_recursor" + //_ "github.com/influxdata/telegraf/plugins/inputs/processes" + //_ "github.com/influxdata/telegraf/plugins/inputs/procstat" + //_ "github.com/influxdata/telegraf/plugins/inputs/prometheus" + //_ "github.com/influxdata/telegraf/plugins/inputs/proxmox" + //_ "github.com/influxdata/telegraf/plugins/inputs/puppetagent" + //_ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq" + //_ "github.com/influxdata/telegraf/plugins/inputs/raindrops" + //_ "github.com/influxdata/telegraf/plugins/inputs/ras" + //_ "github.com/influxdata/telegraf/plugins/inputs/ravendb" + //_ "github.com/influxdata/telegraf/plugins/inputs/redfish" + //_ "github.com/influxdata/telegraf/plugins/inputs/redis" + //_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" + //_ "github.com/influxdata/telegraf/plugins/inputs/riak" + //_ "github.com/influxdata/telegraf/plugins/inputs/riemann_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/salesforce" + //_ "github.com/influxdata/telegraf/plugins/inputs/sensors" + //_ "github.com/influxdata/telegraf/plugins/inputs/sflow" + //_ "github.com/influxdata/telegraf/plugins/inputs/smart" + //_ "github.com/influxdata/telegraf/plugins/inputs/snmp" + //_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy" + //_ "github.com/influxdata/telegraf/plugins/inputs/snmp_trap" + //_ "github.com/influxdata/telegraf/plugins/inputs/socket_listener" _ "github.com/influxdata/telegraf/plugins/inputs/solr" - _ "github.com/influxdata/telegraf/plugins/inputs/sql" - _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" - _ "github.com/influxdata/telegraf/plugins/inputs/stackdriver" - _ "github.com/influxdata/telegraf/plugins/inputs/statsd" - _ "github.com/influxdata/telegraf/plugins/inputs/suricata" - _ "github.com/influxdata/telegraf/plugins/inputs/swap" - _ "github.com/influxdata/telegraf/plugins/inputs/synproxy" + //_ "github.com/influxdata/telegraf/plugins/inputs/sql" + //_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" + //_ "github.com/influxdata/telegraf/plugins/inputs/stackdriver" + //_ "github.com/influxdata/telegraf/plugins/inputs/statsd" + //_ "github.com/influxdata/telegraf/plugins/inputs/suricata" + //_ "github.com/influxdata/telegraf/plugins/inputs/swap" + //_ "github.com/influxdata/telegraf/plugins/inputs/synproxy" _ "github.com/influxdata/telegraf/plugins/inputs/syslog" - _ "github.com/influxdata/telegraf/plugins/inputs/sysstat" + //_ "github.com/influxdata/telegraf/plugins/inputs/sysstat" _ "github.com/influxdata/telegraf/plugins/inputs/system" - _ "github.com/influxdata/telegraf/plugins/inputs/systemd_units" + //_ "github.com/influxdata/telegraf/plugins/inputs/systemd_units" _ "github.com/influxdata/telegraf/plugins/inputs/tail" - _ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener" - _ "github.com/influxdata/telegraf/plugins/inputs/teamspeak" - _ "github.com/influxdata/telegraf/plugins/inputs/temp" - _ "github.com/influxdata/telegraf/plugins/inputs/tengine" - _ "github.com/influxdata/telegraf/plugins/inputs/tomcat" - _ "github.com/influxdata/telegraf/plugins/inputs/trig" - _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy" - _ "github.com/influxdata/telegraf/plugins/inputs/udp_listener" - _ "github.com/influxdata/telegraf/plugins/inputs/unbound" - _ "github.com/influxdata/telegraf/plugins/inputs/uwsgi" - _ "github.com/influxdata/telegraf/plugins/inputs/varnish" - _ "github.com/influxdata/telegraf/plugins/inputs/vsphere" - _ "github.com/influxdata/telegraf/plugins/inputs/webhooks" - _ "github.com/influxdata/telegraf/plugins/inputs/win_eventlog" - _ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters" - _ "github.com/influxdata/telegraf/plugins/inputs/win_services" - _ "github.com/influxdata/telegraf/plugins/inputs/wireguard" - _ "github.com/influxdata/telegraf/plugins/inputs/wireless" - _ "github.com/influxdata/telegraf/plugins/inputs/x509_cert" - _ "github.com/influxdata/telegraf/plugins/inputs/zfs" - _ "github.com/influxdata/telegraf/plugins/inputs/zipkin" - _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper" + //_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak" + //_ "github.com/influxdata/telegraf/plugins/inputs/temp" + //_ "github.com/influxdata/telegraf/plugins/inputs/tengine" + //_ "github.com/influxdata/telegraf/plugins/inputs/tomcat" + //_ "github.com/influxdata/telegraf/plugins/inputs/trig" + //_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy" + //_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener" + //_ "github.com/influxdata/telegraf/plugins/inputs/unbound" + //_ "github.com/influxdata/telegraf/plugins/inputs/uwsgi" + //_ "github.com/influxdata/telegraf/plugins/inputs/varnish" + //_ "github.com/influxdata/telegraf/plugins/inputs/vsphere" + //_ "github.com/influxdata/telegraf/plugins/inputs/webhooks" + //_ "github.com/influxdata/telegraf/plugins/inputs/win_eventlog" + //_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters" + //_ "github.com/influxdata/telegraf/plugins/inputs/win_services" + //_ "github.com/influxdata/telegraf/plugins/inputs/wireguard" + //_ "github.com/influxdata/telegraf/plugins/inputs/wireless" + //_ "github.com/influxdata/telegraf/plugins/inputs/x509_cert" + //_ "github.com/influxdata/telegraf/plugins/inputs/zfs" + //_ "github.com/influxdata/telegraf/plugins/inputs/zipkin" + //_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper" ) diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index 0afb0e325dbdd..d3be315117019 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -12,6 +12,7 @@ In addition, the following optional queries are only made by the master node: [Shard Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html) Specific Elasticsearch endpoints that are queried: + - Node: either /_nodes/stats or /_nodes/_local/stats depending on 'local' configuration setting - Cluster Heath: /_cluster/health?level=indices - Cluster Stats: /_cluster/stats @@ -20,7 +21,7 @@ Specific Elasticsearch endpoints that are queried: Note that specific statistics information can change between Elasticsearch versions. In general, this plugin attempts to stay as version-generic as possible by tagging high-level categories only and using a generic json parser to make unique field names of whatever statistics names are provided at the mid-low level. -### Configuration +## Configuration ```toml [[inputs.elasticsearch]] @@ -29,6 +30,9 @@ Note that specific statistics information can change between Elasticsearch versi ## servers = ["http://user:pass@localhost:9200"] servers = ["http://localhost:9200"] + ## HTTP headers to send with each request + http_headers = { "X-Custom-Header" = "Custom" } + ## Timeout for HTTP requests to the elastic search server(s) http_timeout = "5s" @@ -169,7 +173,7 @@ Emitted when `cluster_stats = true`: - shards_total (float) - store_size_in_bytes (float) -+ elasticsearch_clusterstats_nodes +- elasticsearch_clusterstats_nodes - tags: - cluster_name - node_name @@ -230,7 +234,7 @@ Emitted when the appropriate `node_stats` options are set. - tx_count (float) - tx_size_in_bytes (float) -+ elasticsearch_breakers +- elasticsearch_breakers - tags: - cluster_name - node_attribute_ml.enabled @@ -291,7 +295,7 @@ Emitted when the appropriate `node_stats` options are set. - total_free_in_bytes (float) - total_total_in_bytes (float) -+ elasticsearch_http +- elasticsearch_http - tags: - cluster_name - node_attribute_ml.enabled @@ -402,7 +406,7 @@ Emitted when the appropriate `node_stats` options are set. - warmer_total (float) - warmer_total_time_in_millis (float) -+ elasticsearch_jvm +- elasticsearch_jvm - tags: - cluster_name - node_attribute_ml.enabled @@ -480,7 +484,7 @@ Emitted when the appropriate `node_stats` options are set. - swap_used_in_bytes (float) - timestamp (float) -+ elasticsearch_process +- elasticsearch_process - tags: - cluster_name - node_attribute_ml.enabled diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 24142ba38c32e..33b9f93cd55be 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -92,6 +92,9 @@ const sampleConfig = ` # servers = ["http://user:pass@localhost:9200"] servers = ["http://localhost:9200"] + ## HTTP headers to send with each request + http_headers = { "X-Custom-Header" = "Custom" } + ## Timeout for HTTP requests to the elastic search server(s) http_timeout = "5s" @@ -147,19 +150,20 @@ const sampleConfig = ` // Elasticsearch is a plugin to read stats from one or many Elasticsearch // servers. type Elasticsearch struct { - Local bool `toml:"local"` - Servers []string `toml:"servers"` - HTTPTimeout config.Duration `toml:"http_timeout"` - ClusterHealth bool `toml:"cluster_health"` - ClusterHealthLevel string `toml:"cluster_health_level"` - ClusterStats bool `toml:"cluster_stats"` - ClusterStatsOnlyFromMaster bool `toml:"cluster_stats_only_from_master"` - IndicesInclude []string `toml:"indices_include"` - IndicesLevel string `toml:"indices_level"` - NodeStats []string `toml:"node_stats"` - Username string `toml:"username"` - Password string `toml:"password"` - NumMostRecentIndices int `toml:"num_most_recent_indices"` + Local bool `toml:"local"` + Servers []string `toml:"servers"` + HTTPHeaders map[string]string `toml:"http_headers"` + HTTPTimeout config.Duration `toml:"http_timeout"` + ClusterHealth bool `toml:"cluster_health"` + ClusterHealthLevel string `toml:"cluster_health_level"` + ClusterStats bool `toml:"cluster_stats"` + ClusterStatsOnlyFromMaster bool `toml:"cluster_stats_only_from_master"` + IndicesInclude []string `toml:"indices_include"` + IndicesLevel string `toml:"indices_level"` + NodeStats []string `toml:"node_stats"` + Username string `toml:"username"` + Password string `toml:"password"` + NumMostRecentIndices int `toml:"num_most_recent_indices"` tls.ClientConfig @@ -691,6 +695,10 @@ func (e *Elasticsearch) getCatMaster(url string) (string, error) { req.SetBasicAuth(e.Username, e.Password) } + for key, value := range e.HTTPHeaders { + req.Header.Add(key, value) + } + r, err := e.client.Do(req) if err != nil { return "", err @@ -723,6 +731,10 @@ func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error { req.SetBasicAuth(e.Username, e.Password) } + for key, value := range e.HTTPHeaders { + req.Header.Add(key, value) + } + r, err := e.client.Do(req) if err != nil { return err diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 7248b4ddcddb0..16503960c98c9 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -2,53 +2,53 @@ package all import ( //Blank imports for plugins to register themselves - _ "github.com/influxdata/telegraf/plugins/outputs/amon" - _ "github.com/influxdata/telegraf/plugins/outputs/amqp" - _ "github.com/influxdata/telegraf/plugins/outputs/application_insights" - _ "github.com/influxdata/telegraf/plugins/outputs/azure_data_explorer" - _ "github.com/influxdata/telegraf/plugins/outputs/azure_monitor" - _ "github.com/influxdata/telegraf/plugins/outputs/cloud_pubsub" - _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch" - _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch_logs" - _ "github.com/influxdata/telegraf/plugins/outputs/cratedb" + //_ "github.com/influxdata/telegraf/plugins/outputs/amon" + //_ "github.com/influxdata/telegraf/plugins/outputs/amqp" + //_ "github.com/influxdata/telegraf/plugins/outputs/application_insights" + //_ "github.com/influxdata/telegraf/plugins/outputs/azure_data_explorer" + //_ "github.com/influxdata/telegraf/plugins/outputs/azure_monitor" + //_ "github.com/influxdata/telegraf/plugins/outputs/cloud_pubsub" + //_ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch" + //_ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch_logs" + //_ "github.com/influxdata/telegraf/plugins/outputs/cratedb" _ "github.com/influxdata/telegraf/plugins/outputs/datadog" - _ "github.com/influxdata/telegraf/plugins/outputs/discard" - _ "github.com/influxdata/telegraf/plugins/outputs/dynatrace" - _ "github.com/influxdata/telegraf/plugins/outputs/elasticsearch" - _ "github.com/influxdata/telegraf/plugins/outputs/exec" - _ "github.com/influxdata/telegraf/plugins/outputs/execd" + //_ "github.com/influxdata/telegraf/plugins/outputs/discard" + //_ "github.com/influxdata/telegraf/plugins/outputs/dynatrace" + //_ "github.com/influxdata/telegraf/plugins/outputs/elasticsearch" + //_ "github.com/influxdata/telegraf/plugins/outputs/exec" + //_ "github.com/influxdata/telegraf/plugins/outputs/execd" _ "github.com/influxdata/telegraf/plugins/outputs/file" - _ "github.com/influxdata/telegraf/plugins/outputs/graphite" - _ "github.com/influxdata/telegraf/plugins/outputs/graylog" - _ "github.com/influxdata/telegraf/plugins/outputs/health" + //_ "github.com/influxdata/telegraf/plugins/outputs/graphite" + //_ "github.com/influxdata/telegraf/plugins/outputs/graylog" + //_ "github.com/influxdata/telegraf/plugins/outputs/health" _ "github.com/influxdata/telegraf/plugins/outputs/http" _ "github.com/influxdata/telegraf/plugins/outputs/influxdb" - _ "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" - _ "github.com/influxdata/telegraf/plugins/outputs/instrumental" - _ "github.com/influxdata/telegraf/plugins/outputs/kafka" - _ "github.com/influxdata/telegraf/plugins/outputs/kinesis" - _ "github.com/influxdata/telegraf/plugins/outputs/librato" - _ "github.com/influxdata/telegraf/plugins/outputs/logzio" - _ "github.com/influxdata/telegraf/plugins/outputs/loki" - _ "github.com/influxdata/telegraf/plugins/outputs/mqtt" - _ "github.com/influxdata/telegraf/plugins/outputs/nats" + //_ "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" + //_ "github.com/influxdata/telegraf/plugins/outputs/instrumental" + //_ "github.com/influxdata/telegraf/plugins/outputs/kafka" + //_ "github.com/influxdata/telegraf/plugins/outputs/kinesis" + //_ "github.com/influxdata/telegraf/plugins/outputs/librato" + //_ "github.com/influxdata/telegraf/plugins/outputs/logzio" + //_ "github.com/influxdata/telegraf/plugins/outputs/loki" + //_ "github.com/influxdata/telegraf/plugins/outputs/mqtt" + //_ "github.com/influxdata/telegraf/plugins/outputs/nats" _ "github.com/influxdata/telegraf/plugins/outputs/newrelic" - _ "github.com/influxdata/telegraf/plugins/outputs/nsq" - _ "github.com/influxdata/telegraf/plugins/outputs/opentelemetry" - _ "github.com/influxdata/telegraf/plugins/outputs/opentsdb" - _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" - _ "github.com/influxdata/telegraf/plugins/outputs/riemann" - _ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy" - _ "github.com/influxdata/telegraf/plugins/outputs/sensu" - _ "github.com/influxdata/telegraf/plugins/outputs/signalfx" - _ "github.com/influxdata/telegraf/plugins/outputs/socket_writer" - _ "github.com/influxdata/telegraf/plugins/outputs/sql" - _ "github.com/influxdata/telegraf/plugins/outputs/stackdriver" - _ "github.com/influxdata/telegraf/plugins/outputs/sumologic" - _ "github.com/influxdata/telegraf/plugins/outputs/syslog" - _ "github.com/influxdata/telegraf/plugins/outputs/timestream" - _ "github.com/influxdata/telegraf/plugins/outputs/warp10" - _ "github.com/influxdata/telegraf/plugins/outputs/wavefront" - _ "github.com/influxdata/telegraf/plugins/outputs/websocket" - _ "github.com/influxdata/telegraf/plugins/outputs/yandex_cloud_monitoring" + //_ "github.com/influxdata/telegraf/plugins/outputs/nsq" + //_ "github.com/influxdata/telegraf/plugins/outputs/opentelemetry" + //_ "github.com/influxdata/telegraf/plugins/outputs/opentsdb" + //_ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" + //_ "github.com/influxdata/telegraf/plugins/outputs/riemann" + //_ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy" + //_ "github.com/influxdata/telegraf/plugins/outputs/sensu" + //_ "github.com/influxdata/telegraf/plugins/outputs/signalfx" + //_ "github.com/influxdata/telegraf/plugins/outputs/socket_writer" + //_ "github.com/influxdata/telegraf/plugins/outputs/sql" + //_ "github.com/influxdata/telegraf/plugins/outputs/stackdriver" + //_ "github.com/influxdata/telegraf/plugins/outputs/sumologic" + //_ "github.com/influxdata/telegraf/plugins/outputs/syslog" + //_ "github.com/influxdata/telegraf/plugins/outputs/timestream" + //_ "github.com/influxdata/telegraf/plugins/outputs/warp10" + //_ "github.com/influxdata/telegraf/plugins/outputs/wavefront" + //_ "github.com/influxdata/telegraf/plugins/outputs/websocket" + //_ "github.com/influxdata/telegraf/plugins/outputs/yandex_cloud_monitoring" ) From c32f71c299903ba28a0cfa27b75c5621c9509ada Mon Sep 17 00:00:00 2001 From: Dan Simpson Date: Tue, 29 Mar 2022 09:07:51 -0700 Subject: [PATCH 760/761] Enable socket listener plugin --- plugins/inputs/all/all.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 940dfac4b258e..49509bf4eb974 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -169,7 +169,7 @@ import ( //_ "github.com/influxdata/telegraf/plugins/inputs/snmp" //_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy" //_ "github.com/influxdata/telegraf/plugins/inputs/snmp_trap" - //_ "github.com/influxdata/telegraf/plugins/inputs/socket_listener" + _ "github.com/influxdata/telegraf/plugins/inputs/socket_listener" _ "github.com/influxdata/telegraf/plugins/inputs/solr" //_ "github.com/influxdata/telegraf/plugins/inputs/sql" //_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" From eabdd5e2935b3301e4181eaa67fa04a163d02533 Mon Sep 17 00:00:00 2001 From: Sam Oen Date: Wed, 3 May 2023 13:41:03 +0800 Subject: [PATCH 761/761] Enable http input plugins --- plugins/inputs/all/all.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 49509bf4eb974..4b8d6dc3e1362 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -62,10 +62,10 @@ import ( //_ "github.com/influxdata/telegraf/plugins/inputs/graylog" _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" //_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp" - //_ "github.com/influxdata/telegraf/plugins/inputs/http" - //_ "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" - //_ "github.com/influxdata/telegraf/plugins/inputs/http_response" - //_ "github.com/influxdata/telegraf/plugins/inputs/httpjson" + _ "github.com/influxdata/telegraf/plugins/inputs/http" + _ "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" + _ "github.com/influxdata/telegraf/plugins/inputs/http_response" + _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" //_ "github.com/influxdata/telegraf/plugins/inputs/icinga2" //_ "github.com/influxdata/telegraf/plugins/inputs/infiniband" //_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"